diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7f92a3c6aa..913320e6b5 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,2 +1,2 @@
 # This should match the owning team set up in https://github.com/orgs/opensearch-project/teams
-* @opensearch-project/sql
\ No newline at end of file
+* @pjfitzgibbons @ps48 @kavithacm @derek-ho @joshuali925 @dai-chen @YANG-DB @rupal-bq @mengweieric @vamsi-amazon @swiddis @penghuo @seankao-az @MaxKsyunz @Yury-Fridlyand @anirudha @forestmvey @acarbonetto @GumpacG @ykmr1224
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index e47d8d88c0..6472a968d8 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -7,6 +7,7 @@ on:
 
 jobs:
   backport:
+    if: github.event.pull_request.merged == true
     runs-on: ubuntu-latest
     permissions:
       contents: write
@@ -22,7 +23,8 @@ jobs:
           installation_id: 22958780
 
       - name: Backport
-        uses: VachaShah/backport@v1.1.4
+        uses: VachaShah/backport@v2.2.0
         with:
           github_token: ${{ steps.github_app_token.outputs.token }}
-          branch_name: backport/backport-${{ github.event.number }}
+          head_template: backport/backport-<%= number %>-to-<%= base %>
+          failure_labels: backport-failed
diff --git a/.github/workflows/bi-connectors.yml b/.github/workflows/bi-connectors.yml
deleted file mode 100644
index 0251debb8d..0000000000
--- a/.github/workflows/bi-connectors.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Build connectors for BI tools
-
-on:
-  pull_request:
-  push:
-    paths:
-      - 'bi-connectors/PowerBIConnector/**'
-      - 'bi-connectors/TableauConnector/**'
-      - '.github/workflows/bi-connectors.yml'
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v2
-    - name: Pack Tableau JDBC connector
-      id: pack-tableau-jdbc
-      run: |
-        zip -r opensearch_sql_jdbc.taco . -x *.taco
-      working-directory: bi-connectors/TableauConnector/src
-    - name: Prepare Power BI ODBC connector for 'OpenSearch Project'
-      run: |
-        cp OpenSearchProject.pq OpenSearchProject.m
-      working-directory: bi-connectors/PowerBIConnector/src
-    - name: Pack Power BI ODBC connector
-      id: pack-powerbi-odbc-os-proj
-      run: |
-        zip OpenSearchProject.mez *.png *.m *.resx *.pqm
-      working-directory: bi-connectors/PowerBIConnector/src
-    - name: Prepare Power BI ODBC connector for 'Amazon OpenSearch Service'
-      id: prep-powerbi-odbc-amz-os-svc
-      run: |
-        mv OpenSearchProject.m AmazonOpenSearchService.m
-        sed -i 's/OpenSearch Project<\/value>/Amazon OpenSearch Service<\/value>/g' resources.resx
-        sed -i 's/OpenSearch Project/Amazon OpenSearch Service/g' AmazonOpenSearchService.m
-        sed -i 's/OpenSearchProject/AmazonOpenSearchService/g' AmazonOpenSearchService.m
-        sed -i 's/opensearchproject/amazonopensearchservice/g' AmazonOpenSearchService.m
-      working-directory: bi-connectors/PowerBIConnector/src
-    - name: Pack Power BI ODBC connector for 'Amazon OpenSearch Service'
-      id: pack-powerbi-odbc-amz-os-svc
-      run: |
-        zip AmazonOpenSearchService.mez *.png *.m *.resx *.pqm
-      working-directory: bi-connectors/PowerBIConnector/src
-    - name: Upload Tableau JDBC connector
-      if: steps.pack-tableau-jdbc.outcome == 'success'
-      uses: actions/upload-artifact@v2
-      with:
-        name: TableauConnectors
-        path: bi-connectors/TableauConnector/src/opensearch_sql_jdbc.taco
-    - name: Upload Power BI ODBC connectors
-      if: steps.pack-powerbi-odbc-os-proj.outcome == 'success' || (steps.prep-powerbi-odbc-amz-os-svc.outcome == 'success' && steps.pack-powerbi-odbc-amz-os-svc.outcome == 'success')
-      uses: actions/upload-artifact@v2
-      with:
-        name: PBIConnectors
-        path: 'bi-connectors/PowerBIConnector/src/*.mez'
diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml
deleted file mode 100644
index cf30ea89dc..0000000000
--- a/.github/workflows/dco.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Developer Certificate of Origin Check
-
-on: [pull_request]
-
-jobs:
-  check:
-    runs-on: ubuntu-latest
-
-    steps:
-    - name: Get PR Commits
-      id: 'get-pr-commits'
-      uses: tim-actions/get-pr-commits@v1.1.0
-      with:
-        token: ${{ secrets.GITHUB_TOKEN }}
-    - name: DCO Check
-      uses: tim-actions/dco@v1.1.0
-      with:
-        commits: ${{ steps.get-pr-commits.outputs.commits }}
diff --git a/.github/workflows/integ-tests-with-security.yml b/.github/workflows/integ-tests-with-security.yml
new file mode 100644
index 0000000000..4d19673ad9
--- /dev/null
+++ b/.github/workflows/integ-tests-with-security.yml
@@ -0,0 +1,89 @@
+name: Security Plugin IT
+
+on:
+  pull_request:
+  push:
+    branches-ignore:
+      - 'dependabot/**'
+    paths:
+      - 'integ-test/**'
+      - '.github/workflows/integ-tests-with-security.yml'
+
+jobs:
+  Get-CI-Image-Tag:
+    uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main
+    with:
+      product: opensearch
+
+  security-it-linux:
+    needs: Get-CI-Image-Tag
+    strategy:
+      fail-fast: false
+      matrix:
+        java: [ 11, 17, 21 ]
+    env:
+      ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
+    runs-on: ubuntu-latest
+    container:
+      # using the same image which is used by opensearch-build team to build the OpenSearch Distribution
+      # this image tag is subject to change as more dependencies and updates will arrive over time
+      image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }}
+      # need to switch to root so that github actions can install runner binary on container without permission issues.
+      options: --user root
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up JDK ${{ matrix.java }}
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'temurin'
+          java-version: ${{ matrix.java }}
+
+      - name: Build with Gradle
+        run: |
+          chown -R 1000:1000 `pwd`
+          su `id -un 1000` -c "./gradlew integTestWithSecurity"
+
+      - name: Upload test reports
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v2
+        continue-on-error: true
+        with:
+          name: test-reports-ubuntu-latest-${{ matrix.java }}
+          path: |
+            integ-test/build/reports/**
+            integ-test/build/testclusters/*/logs/*
+            integ-test/build/testclusters/*/config/*
+
+  security-it-windows-macos:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ windows-latest, macos-13 ]
+        java: [ 11, 17, 21 ]
+
+    runs-on: ${{ matrix.os }}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up JDK ${{ matrix.java }}
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'temurin'
+          java-version: ${{ matrix.java }}
+
+      - name: Build with Gradle
+        run: ./gradlew integTestWithSecurity
+
+      - name: Upload test reports
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v2
+        continue-on-error: true
+        with:
+          name: test-reports-${{ matrix.os }}-${{ matrix.java }}
+          path: |
+            integ-test/build/reports/**
+            integ-test/build/testclusters/*/logs/*
+            integ-test/build/testclusters/*/config/*
diff --git a/.github/workflows/link-checker.yml b/.github/workflows/link-checker.yml
index ef6d858d84..cbc6dbec17 100644
--- a/.github/workflows/link-checker.yml
+++ b/.github/workflows/link-checker.yml
@@ -18,7 +18,7 @@ jobs:
         id: lychee
         uses: lycheeverse/lychee-action@master
         with:
-          args: --accept=200,403,429,999 "./**/*.html" "./**/*.md" "./**/*.txt" --exclude "http://localhost*" "https://localhost" "https://odfe-node1:9200/" "https://community.tableau.com/docs/DOC-17978" ".*family.zzz" "https://pypi.python.org/pypi/opensearchsql/" "opensearch*" ".*@amazon.com" ".*email.com" "git@github.com" "http://timestamp.verisign.com/scripts/timstamp.dll" ".*/PowerBIConnector/bin/Release"
+          args: --accept=200,403,429,999 "./**/*.html" "./**/*.md" "./**/*.txt" --exclude "https://aws.oss.sonatype.*" "http://localhost*" "https://localhost" "https://odfe-node1:9200/" "https://community.tableau.com/docs/DOC-17978" ".*family.zzz" "https://pypi.python.org/pypi/opensearchsql/" "opensearch*" ".*@amazon.com" ".*email.com" "git@github.com" "http://timestamp.verisign.com/scripts/timstamp.dll" ".*/PowerBIConnector/bin/Release"
         env:
           GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
       - name: Fail if there were link errors
diff --git a/.github/workflows/maven-publish.yml b/.github/workflows/maven-publish.yml
new file mode 100644
index 0000000000..8adf7ae52c
--- /dev/null
+++ b/.github/workflows/maven-publish.yml
@@ -0,0 +1,38 @@
+name: Publish snapshots to maven
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+      - 1.*
+      - 2.*
+
+jobs:
+  build-and-publish-snapshots:
+    strategy:
+      fail-fast: false
+    if: github.repository == 'opensearch-project/sql'
+    runs-on: ubuntu-latest
+
+    permissions:
+      id-token: write
+      contents: write
+
+    steps:
+      - uses: actions/setup-java@v3
+        with:
+          distribution: temurin # Temurin is a distribution of adoptium
+          java-version: 11
+      - uses: actions/checkout@v3
+      - uses: aws-actions/configure-aws-credentials@v1.7.0
+        with:
+          role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }}
+          aws-region: us-east-1
+      - name: publish snapshots to maven
+        run: |
+          export SONATYPE_USERNAME=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-username --query SecretString --output text)
+          export SONATYPE_PASSWORD=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-password --query SecretString --output text)
+          echo "::add-mask::$SONATYPE_USERNAME"
+          echo "::add-mask::$SONATYPE_PASSWORD"
+          ./gradlew publishPluginZipPublicationToSnapshotsRepository
diff --git a/.github/workflows/sql-cli-test-and-build-workflow.yml b/.github/workflows/sql-cli-test-and-build-workflow.yml
deleted file mode 100644
index 9a8f5052a1..0000000000
--- a/.github/workflows/sql-cli-test-and-build-workflow.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-name: SQL CLI Test and Build
-
-on:
-  pull_request:
-  push:
-    branches-ignore:
-      - 'dependabot/**'
-    paths:
-      - 'sql-cli/**'
-      - '.github/workflows/sql-cli-test-and-build-workflow.yml'
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    defaults:
-      run:
-        working-directory: sql-cli
-    strategy:
-      matrix:
-        python-version: [3.8]
-        opensearch-version: [ latest ]
-
-    steps:
-      - name: Checkout SQL CLI
-        uses: actions/checkout@v3
-
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Install Python Dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install -r requirements-dev.txt
-          pip install setuptools wheel
-
-      # tests are designed to run against http://localhost:9200, so we have to disable/remove security plugin
-      - name: Download and run OpenSearch
-        run: |
-          docker run -p 9200:9200 -e "discovery.type=single-node" -e "DISABLE_SECURITY_PLUGIN=true" --name test -d opensearchproject/opensearch:${{ matrix.opensearch-version }}
-
-      - name: Wait for cluster to start
-        uses: nick-fields/retry@v2
-        with:
-          timeout_seconds: 1
-          max_attempts: 30
-          command: curl -q localhost:9200
-
-      - name: Run Tox Testing
-        run: tox
-
-      - name: Build Artifact
-        run: python setup.py sdist bdist_wheel
-
-      - name: Create Artifact Path
-        run: |
-          mkdir -p opensearchsql-builds
-          cp -r ./dist/*.tar.gz ./dist/*.whl opensearchsql-builds/
-
-      - name: Upload Artifact
-        uses: actions/upload-artifact@v3
-        with:
-          name: opensearchsql
-          path: sql-cli/opensearchsql-builds
-
-      - name: Clean up container
-        if: always()
-        run: |
-          docker container stop test
-          docker container rm test
diff --git a/.github/workflows/sql-jdbc-test-and-build-workflow.yml b/.github/workflows/sql-jdbc-test-and-build-workflow.yml
deleted file mode 100644
index 2d71c61413..0000000000
--- a/.github/workflows/sql-jdbc-test-and-build-workflow.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: SQL JDBC Java CI
-
-on:
-  pull_request:
-  push:
-    branches-ignore:
-      - 'dependabot/**'
-    paths:
-      - 'sql-jdbc/**'
-      - '.github/workflows/sql-jdbc-test-and-build-workflow.yml'
-
-jobs:
-  build:
-    strategy:
-      matrix:
-        java:
-          - 11
-          - 17
-    runs-on: ubuntu-latest
-    defaults:
-      run:
-        working-directory: sql-jdbc
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up JDK ${{ matrix.java }}
-        uses: actions/setup-java@v3
-        with:
-          distribution: 'temurin'
-          java-version: ${{ matrix.java }}
-
-      - name: Build with Gradle
-        run: ./gradlew build test shadowJar
-
-      - name: Create Artifact Path
-        run: |
-          mkdir -p sql-jdbc-builds
-          cp ./build/libs/*.jar sql-jdbc-builds
-
-      - name: Upload Artifacts
-        uses: actions/upload-artifact@v2
-        with:
-          name: sql-jdbc
-          path: sql-jdbc/sql-jdbc-builds
diff --git a/.github/workflows/sql-odbc-main.yml b/.github/workflows/sql-odbc-main.yml
deleted file mode 100644
index 5ebfcf4dc3..0000000000
--- a/.github/workflows/sql-odbc-main.yml
+++ /dev/null
@@ -1,179 +0,0 @@
-name: OpenSearch ODBC Driver
-
-on:
-  pull_request:
-  push:
-    branches-ignore:
-      - 'dependabot/**'
-    paths:
-      - 'sql-odbc/**'
-      - '.github/workflows/sql-odbc-main.yml'
-
-env:
-  CI_OUTPUT_PATH: "sql-odbc/ci-output"
-  ODBC_LIB_PATH: "./build/odbc/lib"
-  ODBC_BIN_PATH: "./build/odbc/bin"
-  ODBC_BUILD_PATH: "./build/odbc/cmake"
-  VCPKG_X64_INSTALL_PATH: ".\\src\\vcpkg_installed\\x64-windows"
-  VCPKG_X86_INSTALL_PATH: ".\\src\\vcpkg_installed\\x86-windows"
-
-# Tests are disabled (commented out) in all jobs because they are fail and/or outdated
-# Keeping them for the brighten future when we can re-activate them
-jobs:
-  build-mac:
-    runs-on: macos-12
-    defaults:
-      run:
-        working-directory: sql-odbc
-    steps:
-      - uses: actions/checkout@v3
-      - name: run-cppcheck
-        run: |
-          brew install cppcheck
-          sh run_cppcheck.sh
-      - name: upload-cppcheck-results
-        if: failure()
-        uses: actions/upload-artifact@v3
-        with:
-          name: cppcheck-results
-          path: sql-odbc/cppcheck-results.log
-      - name: get-dependencies
-        run: |
-          brew unlink unixodbc
-          brew install curl
-          brew install cmake
-          brew install libiodbc
-      - name: configure-and-build-driver
-        run: |
-          ./build_mac_release64.sh
-      #- name: test
-      #  run: |
-      #    bash ./run_test_runner.sh
-      - name: build-installer
-        if: success()
-        run: |
-          cd cmake-build64
-          cmake ../src
-          make
-          cpack .
-          cd ..
-      - name: create-output
-        if: success()
-        run: |
-          mkdir build-output
-          mkdir test-output
-          mkdir installer
-          cp ./build/odbc/lib/*.dylib build-output/
-          cp ./build/odbc/lib/*.a build-output/
-          cp ./cmake-build64/*.pkg installer/
-          # cp $(ls -d ./build/odbc/bin/* | grep -v "\.") build-output
-          # cp ./bin64/*.html test-output
-          # cp ./bin64/*.log test-output
-      - name: upload-build
-        if: success()
-        uses: actions/upload-artifact@v3
-        with:
-          name: mac64-build
-          path: sql-odbc/build-output
-      - name: upload-installer
-        if: success()
-        uses: actions/upload-artifact@v3
-        with:
-          name: mac64-installer
-          path: sql-odbc/installer
-      #- name: upload-test-results
-      #  if: always()
-      #  uses: actions/upload-artifact@v3
-      #  with:
-      #    name: mac-test-results
-      #    path: test-output
-  build-windows32:
-    runs-on: windows-2019
-    defaults:
-      run:
-        working-directory: sql-odbc
-    steps:
-      - uses: actions/checkout@v3
-      - name: Get specific version CMake, v3.18.3
-        uses: lukka/get-cmake@v3.18.3
-      - name: add-msbuild-to-path
-        uses: microsoft/setup-msbuild@v1.0.2
-      - name: configure-and-build-driver
-        run: |
-          .\build_win_release32.ps1
-      - name: build-installer
-        if: success()
-        run: |
-          .\scripts\build_installer.ps1 Release Win32 .\src $Env:ODBC_BUILD_PATH $Env:VCPKG_X86_INSTALL_PATH
-      #- name: test
-      #  run: |
-      #    cp .\\libraries\\VisualLeakDetector\\bin32\\*.* .\\bin32\\Release
-      #    cp .\\libraries\\VisualLeakDetector\\lib32\\*.lib .\\lib32\\Release
-      #    .\run_test_runner.bat
-      - name: prepare-output
-        if: always()
-        run: |
-          .\scripts\prepare_ci_output.ps1 $Env:ODBC_BIN_PATH $Env:ODBC_LIB_PATH $Env:ODBC_BUILD_PATH
-      - name: upload-build
-        if: always()
-        uses: actions/upload-artifact@v3
-        with:
-          name: windows32-build
-          path: sql-odbc/ci-output/build
-      - name: upload-installer
-        if: always()
-        uses: actions/upload-artifact@v3
-        with:
-          name: windows32-installer
-          path: sql-odbc/ci-output/installer
-      #- name: upload-test-results
-      #  if: always()
-      #  uses: actions/upload-artifact@v3
-      #  with:
-      #    name: windows-test-results
-      #    path: $CI_OUTPUT_PATH/test
-  build-windows64:
-    runs-on: windows-2019
-    defaults:
-      run:
-        working-directory: sql-odbc
-    steps:
-      - uses: actions/checkout@v3
-      - name: Get specific version CMake, v3.18.3
-        uses: lukka/get-cmake@v3.18.3
-      - name: add-msbuild-to-path
-        uses: microsoft/setup-msbuild@v1.0.2
-      - name: configure-and-build-driver
-        run: |
-          .\build_win_release64.ps1
-      - name: build-installer
-        if: success()
-        run: |
-          .\scripts\build_installer.ps1 Release x64 .\src $Env:ODBC_BUILD_PATH $Env:VCPKG_X64_INSTALL_PATH
-      #- name: test
-      #  run: |
-      #    cp .\\libraries\\VisualLeakDetector\\bin64\\*.* .\\bin64\\Release
-      #    cp .\\libraries\\VisualLeakDetector\\lib64\\*.lib .\\lib64\\Release
-      #    .\run_test_runner.bat
-      - name: prepare-output
-        if: always()
-        run: |
-          .\scripts\prepare_ci_output.ps1 $Env:ODBC_BIN_PATH $Env:ODBC_LIB_PATH $Env:ODBC_BUILD_PATH
-      - name: upload-build
-        if: always()
-        uses: actions/upload-artifact@v3
-        with:
-          name: windows64-build
-          path: sql-odbc/ci-output/build
-      - name: upload-installer
-        if: always()
-        uses: actions/upload-artifact@v3
-        with:
-          name: windows64-installer
-          path: sql-odbc/ci-output/installer
-      #- name: upload-test-results
-      #  if: always()
-      #  uses: actions/upload-artifact@v3
-      #  with:
-      #    name: windows-test-results
-      #    path: sql-odbc/ci-output/test-output
diff --git a/.github/workflows/sql-test-and-build-workflow.yml b/.github/workflows/sql-test-and-build-workflow.yml
index 25e0387cf3..0b4b2caf5c 100644
--- a/.github/workflows/sql-test-and-build-workflow.yml
+++ b/.github/workflows/sql-test-and-build-workflow.yml
@@ -11,26 +11,37 @@ on:
       - '!sql-jdbc/**'
       - '**gradle*'
       - '**lombok*'
-      - '**checkstyle*'
+      - '**spotless*'
       - 'integ-test/**'
      - '**/*.jar'
       - '**/*.pom'
       - '.github/workflows/sql-test-and-build-workflow.yml'
 
 jobs:
-  build:
+  Get-CI-Image-Tag:
+    uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main
+    with:
+      product: opensearch
+
+  build-linux:
+    needs: Get-CI-Image-Tag
     strategy:
       # Run all jobs
       fail-fast: false
       matrix:
-        entry:
-          - { os: ubuntu-latest, java: 11 }
-          - { os: windows-latest, java: 11, os_build_args: -x doctest -x integTest -x jacocoTestReport -x compileJdbc -PbuildPlatform=windows }
-          - { os: macos-latest, java: 11, os_build_args: -x doctest -x integTest -x jacocoTestReport -x compileJdbc }
-          - { os: ubuntu-latest, java: 17 }
-          - { os: windows-latest, java: 17, os_build_args: -x doctest -x integTest -x jacocoTestReport -x compileJdbc -PbuildPlatform=windows }
-          - { os: macos-latest, java: 17, os_build_args: -x doctest -x integTest -x jacocoTestReport -x compileJdbc }
-    runs-on: ${{ matrix.entry.os }}
+        java:
+          - 11
+          - 17
+          - 21
+    env:
+      ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
+    runs-on: ubuntu-latest
+    container:
+      # using the same image which is used by opensearch-build team to build the OpenSearch Distribution
+      # this image tag is subject to change as more dependencies and updates will arrive over time
+      image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }}
+      # need to switch to root so that github actions can install runner binary on container without permission issues.
+      options: --user root
 
     steps:
       - uses: actions/checkout@v3
@@ -39,14 +50,17 @@
         uses: actions/setup-java@v3
         with:
           distribution: 'temurin'
-          java-version: ${{ matrix.entry.java }}
+          java-version: ${{ matrix.java }}
 
       - name: Build with Gradle
-        run: ./gradlew --continue build ${{ matrix.entry.os_build_args }}
+        run: |
+          chown -R 1000:1000 `pwd`
+          su `id -un 1000` -c "./gradlew --continue build"
 
       - name: Run backward compatibility tests
-        if: ${{ matrix.entry.os == 'ubuntu-latest' }}
-        run: ./scripts/bwctest.sh
+        run: |
+          chown -R 1000:1000 `pwd`
+          su `id -un 1000` -c "./scripts/bwctest.sh"
 
       - name: Create Artifact Path
         run: |
@@ -55,7 +69,7 @@
 
       # This step uses the codecov-action Github action: https://github.com/codecov/codecov-action
       - name: Upload SQL Coverage Report
-        if: ${{ always() && matrix.entry.os == 'ubuntu-latest' }}
+        if: always()
         uses: codecov/codecov-action@v3
         with:
           flags: sql-engine
@@ -64,12 +78,13 @@
       - name: Upload Artifacts
         uses: actions/upload-artifact@v2
         with:
-          name: opensearch-sql-${{ matrix.entry.os }}
+          name: opensearch-sql-ubuntu-latest
           path: opensearch-sql-builds
 
       - name: Upload test reports
-        if: ${{ always() && matrix.entry.os == 'ubuntu-latest' }}
+        if: always()
         uses: actions/upload-artifact@v2
+        continue-on-error: true
         with:
           name: test-reports
           path: |
@@ -82,3 +97,40 @@
             protocol/build/reports/**
             legacy/build/reports/**
             plugin/build/reports/**
+
+  build-windows-macos:
+    strategy:
+      # Run all jobs
+      fail-fast: false
+      matrix:
+        entry:
+          - { os: windows-latest, java: 11, os_build_args: -x doctest -PbuildPlatform=windows }
+          - { os: macos-13, java: 11}
+          - { os: windows-latest, java: 17, os_build_args: -x doctest -PbuildPlatform=windows }
+          - { os: macos-13, java: 17 }
+          - { os: windows-latest, java: 21, os_build_args: -x doctest -PbuildPlatform=windows }
+          - { os: macos-13, java: 21 }
+    runs-on: ${{ matrix.entry.os }}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up JDK ${{ matrix.entry.java }}
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'temurin'
+          java-version: ${{ matrix.entry.java }}
+
+      - name: Build with Gradle
+        run: ./gradlew --continue build ${{ matrix.entry.os_build_args }}
+
+      - name: Create Artifact Path
+        run: |
+          mkdir -p opensearch-sql-builds
+          cp -r ./plugin/build/distributions/*.zip opensearch-sql-builds/
+
+      - name: Upload Artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: opensearch-sql-${{ matrix.entry.os }}
+          path: opensearch-sql-builds
diff --git a/.github/workflows/sql-test-workflow.yml b/.github/workflows/sql-test-workflow.yml
deleted file mode 100644
index b5a0c4c852..0000000000
--- a/.github/workflows/sql-test-workflow.yml
+++ /dev/null
@@ -1,95 +0,0 @@
-name: SQL Plugin Tests
-
-on:
-  workflow_dispatch:
-    inputs:
-      name:
-        required: false
-        type: string
-
-run-name:
-  ${{ inputs.name == '' && format('{0} @ {1}', github.ref_name, github.sha) || inputs.name }}
-
-jobs:
-  build:
-    strategy:
-      matrix:
-        java:
-          - 11
-          - 17
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up JDK ${{ matrix.java }}
-        uses: actions/setup-java@v3
-        with:
-          distribution: 'temurin'
-          java-version: ${{ matrix.java }}
-
-      - name: Run tests
-        id: tests
-        run: |
-          # checkstyle
-          ./gradlew :opensearch:checkstyleMain || echo "* Checkstyle failed for opensearch/src" > report.log
-          ./gradlew :opensearch:checkstyleTest || echo "* Checkstyle failed for opensearch/test" >> report.log
-          ./gradlew :sql:checkstyleMain || echo "* Checkstyle failed for sql/src" >> report.log
-          ./gradlew :sql:checkstyleTest || echo "* Checkstyle failed for sql/test" >> report.log
-          ./gradlew :ppl:checkstyleMain || echo "* Checkstyle failed for ppl/src" >> report.log
-          ./gradlew :ppl:checkstyleTest || echo "* Checkstyle failed for ppl/test" >> report.log
-          ./gradlew :core:checkstyleMain || echo "* Checkstyle failed for core/src" >> report.log
-          ./gradlew :core:checkstyleTest || echo "* Checkstyle failed for core/test" >> report.log
-          ./gradlew :common:checkstyleMain || echo "* Checkstyle failed for common/src" >> report.log
-          ./gradlew :common:checkstyleTest || echo "* Checkstyle failed for common/test" >> report.log
-          ./gradlew :legacy:checkstyleMain || echo "* Checkstyle failed for legacy/src" >> report.log
-          ./gradlew :legacy:checkstyleTest || echo "* Checkstyle failed for legacy/test" >> report.log
-          ./gradlew :protocol:checkstyleMain || echo "* Checkstyle failed for protocol/src" >> report.log
-          ./gradlew :protocol:checkstyleTest || echo "* Checkstyle failed for protocol/test" >> report.log
-          ./gradlew :opensearch-sql-plugin:checkstyleMain || echo "* Checkstyle failed for plugin/src" >> report.log
-          ./gradlew :opensearch-sql-plugin:checkstyleTest || echo "* Checkstyle failed for plugin/test" >> report.log
-          # Add checkstyle for `integ-test` when fixed
-          # Unit tests
-          ./gradlew :opensearch:test || echo "* Unit tests failed for opensearch" >> report.log
-          ./gradlew :ppl:test || echo "* Unit tests failed for sql" >> report.log
-          ./gradlew :sql:test || echo "* Unit tests failed for ppl" >> report.log
-          ./gradlew :core:test || echo "* Unit tests failed for core" >> report.log
-          ./gradlew :protocol:test || echo "* Unit tests failed for protocol" >> report.log
-          ./gradlew :opensearch-sql-plugin:test || echo "* Unit tests failed for plugin" >> report.log
-          ./gradlew :legacy:test || echo "* Unit tests failed for legacy" >> report.log
-          # jacoco
-          ./gradlew :opensearch:jacocoTestCoverageVerification || echo "* Jacoco failed for opensearch" >> report.log
-          ./gradlew :ppl:jacocoTestCoverageVerification || echo "* Jacoco failed for sql" >> report.log
-          ./gradlew :sql:jacocoTestCoverageVerification || echo "* Jacoco failed for ppl" >> report.log
-          ./gradlew :core:jacocoTestCoverageVerification || echo "* Jacoco failed for core" >> report.log
-          ./gradlew :protocol:jacocoTestCoverageVerification || echo "* Jacoco failed for protocol" >> report.log
-          ./gradlew :opensearch-sql-plugin:jacocoTestCoverageVerification || echo "* Jacoco failed for plugin" >> report.log
-          # Misc tests
-          ./gradlew :integ-test:integTest || echo "* Integration test failed" >> report.log
-          ./gradlew :doctest:doctest || echo "* Doctest failed" >> report.log
-          ./scripts/bwctest.sh || echo "* Backward compatibility test failed" >> report.log
-
-      - name: Verify test results
-        run: |
-          if [[ -e report.log ]]
-          then
-            echo "## FAILED TESTS :facepalm::warning::bangbang:" >> $GITHUB_STEP_SUMMARY
-            cat report.log >> $GITHUB_STEP_SUMMARY
-            exit 1
-          fi
-
-      - name: Upload test reports
-        if: always()
-        uses: actions/upload-artifact@v2
-        with:
-          name: test-reports
-          path: |
-            sql/build/reports/**
-            ppl/build/reports/**
-            core/build/reports/**
-            common/build/reports/**
-            opensearch/build/reports/**
-            integ-test/build/reports/**
-            protocol/build/reports/**
-            legacy/build/reports/**
-            plugin/build/reports/**
diff --git a/.github/workflows/sql-workbench-test-and-build-workflow.yml b/.github/workflows/sql-workbench-test-and-build-workflow.yml
deleted file mode 100644
index c837f9669b..0000000000
--- a/.github/workflows/sql-workbench-test-and-build-workflow.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-name: SQL Workbench Test and Build
-
-on:
-  pull_request:
-  push:
-    branches-ignore:
-      - 'dependabot/**'
-    paths:
-      - 'workbench/**'
-      - '.github/workflows/sql-workbench-test-and-build-workflow.yml'
-
-env:
-  PLUGIN_NAME: query-workbench-dashboards
-  OPENSEARCH_VERSION: 'main'
-  OPENSEARCH_PLUGIN_VERSION: 2.5.0.0
-
-jobs:
-  build:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - name: Enable longer filenames
-        if: ${{ matrix.os == 'windows-latest' }}
-        run: git config --system core.longpaths true
-
-      - name: Checkout Plugin
-        uses: actions/checkout@v3
-
-      - name: Checkout OpenSearch Dashboards
-        uses: actions/checkout@v1 # can't update to v3 because `setup-node` fails
-        with:
-          repository: opensearch-project/Opensearch-Dashboards
-          ref: ${{ env.OPENSEARCH_VERSION }}
-          path: OpenSearch-Dashboards
-
-      - name: Setup Node
-        uses: actions/setup-node@v3
-        with:
-          node-version-file: "../OpenSearch-Dashboards/.nvmrc"
-          registry-url: 'https://registry.npmjs.org'
-
-      - name: Move Workbench to Plugins Dir
-        run: |
-          mv workbench ../OpenSearch-Dashboards/plugins
-
-      - name: OpenSearch Dashboards Plugin Bootstrap
-        uses: nick-fields/retry@v2
-        with:
-          timeout_minutes: 60
-          max_attempts: 3
-          command: cd ../OpenSearch-Dashboards/plugins/workbench; yarn osd bootstrap
-
-      - name: Test
-        run: |
-          cd ../OpenSearch-Dashboards/plugins/workbench
-          yarn test:jest --coverage
-
-      - name: Upload coverage
-        if: ${{ always() && matrix.os == 'ubuntu-latest' }}
-        uses: codecov/codecov-action@v3
-        with:
-          flags: query-workbench
-          directory: ../OpenSearch-Dashboards/plugins/workbench
-          token: ${{ secrets.CODECOV_TOKEN }}
-
-      - name: Build Artifact
-        run: |
-          cd ../OpenSearch-Dashboards/plugins/workbench
-          yarn build
-          mv ./build/*.zip ./build/${{ env.PLUGIN_NAME }}-${{ env.OPENSEARCH_PLUGIN_VERSION }}.zip
-
-      - name: Upload Artifact
-        if: always()
-        uses: actions/upload-artifact@v1 # can't update to v3 because upload fails
-        with:
-          name: workbench-${{ matrix.os }}
-          path: ../OpenSearch-Dashboards/plugins/workbench/build
-
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 67e5bb07e9..b9775dea04 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 *.class
+*.http
 .settings/
 # Mobile Tools for Java (J2ME)
 .mtj.tmp/
@@ -33,7 +34,6 @@ gen/
 # git mergetool artifact
 *.orig
 gen
-*.tokens
 
 # Python
 */.venv
@@ -46,3 +46,8 @@ gen
 /.prom.pid.lock
 
 .java-version
+.worktrees
+http-client.env.json
+/doctest/sql-cli/
+/doctest/opensearch-job-scheduler/
+.factorypath
diff --git a/DEVELOPER_GUIDE.rst b/DEVELOPER_GUIDE.rst
index bf9e603930..c4fd6cc2d6 100644
--- a/DEVELOPER_GUIDE.rst
+++ b/DEVELOPER_GUIDE.rst
@@ -113,8 +113,8 @@ Note that missing license header will be detected by Gradle license plugin and f
 Making Code Changes
 ===================
 
-Project Strucure
-----------------
+Project Structure
+-----------------
 
 The plugin codebase is in standard layout of Gradle project::
 
@@ -127,7 +127,7 @@ The plugin codebase is in standard layout of Gradle project::
 ├── THIRD-PARTY
 ├── build.gradle
 ├── config
-│   └── checkstyle
+│   └── spotless
 ├── docs
 │   ├── attributions.md
 │   ├── category.json
@@ -141,13 +141,13 @@ The plugin codebase is in standard layout of Gradle project::
 ├── core
 ├── doctest
 ├── opensearch
-├── filesystem
 ├── prometheus
 ├── integ-test
 ├── legacy
 ├── plugin
 ├── protocol
 ├── ppl
+├── spark
 ├── sql
 ├── sql-cli
 ├── sql-jdbc
@@ -162,7 +162,7 @@ Here are sub-folders (Gradle modules) for plugin source code:
 - ``core``: core query engine.
 - ``opensearch``: OpenSearch storage engine.
 - ``prometheus``: Prometheus storage engine.
-- ``filesystem``: File System storage engine (in development).
+- ``spark``: Spark storage engine.
 - ``protocol``: request/response protocol formatter.
 - ``common``: common util code.
 - ``integ-test``: integration and comparison test.
@@ -170,7 +170,7 @@ Here are sub-folders (Gradle modules) for plugin source code:
 
 - ``build.gradle``: Gradle build script.
-- ``config``: only Checkstyle configuration files for now.
+- ``config``: only Spotless configuration files for now.
 - ``docs``: documentation for developers and reference manual for users.
 - ``doc-test``: code that run .rst docs in ``docs`` folder by Python doctest library.
@@ -185,14 +185,31 @@ Note that other related project code has already merged into this single reposit
 Code Convention
 ---------------
 
-We’re integrated Checkstyle plugin into Gradle build: https://github.com/opensearch-project/sql/blob/main/config/checkstyle/google_checks.xml. So any violation will fail the build. You need to identify the offending code from Gradle error message and fix them and rerun the Gradle build. Here are the highlight of some Checkstyle rules:
+Java files in the OpenSearch codebase are formatted with the Eclipse JDT formatter, using the `Spotless Gradle <https://github.com/diffplug/spotless/tree/main/plugin-gradle>`_ plugin. This plugin is configured in the project `./gradle.properties`.
+
+The formatting check can be run explicitly with::
+
+  ./gradlew spotlessJavaCheck
+
+The code can be formatted with::
+
+  ./gradlew spotlessApply
+
+These tasks can also be run for specific modules, e.g.::
+
+  ./gradlew :sql:spotlessJavaCheck
 
-* 2 spaces indentation.
-* No line starts with tab character in source file.
-* Line width <= 100 characters.
-* Wildcard imports: You can enforce single import by configuring your IDE. Instructions for Intellij IDEA: https://www.jetbrains.com/help/idea/creating-and-optimizing-imports.html#disable-wildcard-imports.
-* Operator needs to wrap at next line.
+For more information on Spotless for the OpenSearch project, please see the `OpenSearch Java language formatting guidelines <https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#java-language-formatting-guidelines>`_.
+Java files are formatted using `Spotless <https://github.com/diffplug/spotless>`_ conforming to `Google Java Format <https://github.com/google/google-java-format>`_.
+   * - New line at end of file
+   * - No unused import statements
+   * - Fix import order to be alphabetical with static imports first (one block for static and one for non-static imports)
+   * - Max line length is 100 characters (does not apply to import statements)
+   * - Line spacing is 2 spaces
+   * - Javadocs should be properly formatted in accordance to `Javadoc guidelines <https://www.oracle.com/technical-resources/articles/java/javadoc-tool.html>`_
+   * - Javadoc format can be maintained by wrapping javadoc with `<pre></pre>` HTML tags
+   * - Strings can be formatted on multiple lines with a `+` and the correct indentation for the string.
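As an illustration of these rules, here is a small, hypothetical class (the name and method are invented for this sketch) that `spotlessApply` would leave untouched: static imports first, alphabetical import order, 2-space indentation, and a javadoc example wrapped in `<pre>` tags:

```java
import static java.util.Objects.requireNonNull;

import java.util.List;

/**
 * Joins identifier parts into a dotted path.
 *
 * <pre>
 * PathJoiner.join(List.of("catalog", "db", "table")); // "catalog.db.table"
 * </pre>
 */
public final class PathJoiner {
  private PathJoiner() {}

  /** Returns the parts joined with '.' in their original order. */
  public static String join(List<String> parts) {
    return String.join(".", requireNonNull(parts));
  }
}
```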
 
 Building and Running Tests
 ==========================
@@ -213,15 +230,19 @@ Most of the time you just need to run ./gradlew build which will make sure you p
    * - ./gradlew generateGrammarSource
      - (Re-)Generate ANTLR parser from grammar file.
    * - ./gradlew compileJava
-     - Compile all Java source files. 
-   * - ./gradlew checkstyle
-     - Run all checks according to Checkstyle configuration.
+     - Compile all Java source files.
    * - ./gradlew test
      - Run all unit tests.
    * - ./gradlew :integ-test:integTest
      - Run all integration test (this takes time).
    * - ./gradlew build
      - Build plugin by run all tasks above (this takes time).
+   * - ./gradlew pitest
+     - Run PiTest mutation testing (see more info in `#1204 <https://github.com/opensearch-project/sql/issues/1204>`_)
+   * - ./gradlew spotlessCheck
+     - Runs Spotless to check for code style.
+   * - ./gradlew spotlessApply
+     - Automatically apply spotless code style changes.
 
 For integration test, you can use ``-Dtests.class`` “UT full path” to run a task individually. For example ``./gradlew :integ-test:integTest -Dtests.class="*QueryIT"``.
 
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index ba4ce45209..0ee07757c6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,14 +1,39 @@
-# OpenSearch SQL Maintainers
-
-## Maintainers
-
-| Maintainer | GitHub ID | Affiliation |
-| --------------- | --------- | ----------- |
-| Anirudha (Ani) Jadhav | [anirudha](https://github.com/anirudha) | Amazon |
-| Peng Huo | [penghuo](https://github.com/penghuo) | Amazon |
-| Chen Dai | [dai-chen](https://github.com/dai-chen) | Amazon |
-| Chloe Zhang | [chloe-zh](https://github.com/chloe-zh) | Amazon |
-| Nick Knize | [nknize](https://github.com/nknize) | Amazon |
-| Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon |
-| Max Ksyunz |  [MaxKsyunz](https://github.com/MaxKsyunz) | BitQuill |
-| Yury Fridlyand | [Yury-Fridlyand](https://github.com/Yury-Fridlyand) | BitQuill |
\ No newline at end of file
+## Overview
+
+This document contains a list of maintainers in this repo. See [opensearch-project/.github/RESPONSIBILITIES.md](https://github.com/opensearch-project/.github/blob/main/RESPONSIBILITIES.md#maintainer-responsibilities) for an explanation of what the role of maintainer means, what maintainers do in this and other repos, and how they should be doing it. If you're interested in contributing and becoming a maintainer, see [CONTRIBUTING](CONTRIBUTING.md).
+
+## Current Maintainers
+
+| Maintainer        | GitHub ID                                           | Affiliation |
+| ----------------- | -------------------------------------------------   | ----------- |
+| Eric Wei          | [mengweieric](https://github.com/mengweieric)       | Amazon      |
+| Joshua Li         | [joshuali925](https://github.com/joshuali925)       | Amazon      |
+| Shenoy Pratik     | [ps48](https://github.com/ps48)                     | Amazon      |
+| Kavitha Mohan     | [kavithacm](https://github.com/kavithacm)           | Amazon      |
+| Rupal Mahajan     | [rupal-bq](https://github.com/rupal-bq)             | Amazon      |
+| Derek Ho          | [derek-ho](https://github.com/derek-ho)             | Amazon      |
+| Lior Perry        | [YANG-DB](https://github.com/YANG-DB)               | Amazon      |
+| Peter Fitzgibbons | [pjfitzgibbons](https://github.com/pjfitzgibbons)   | Amazon      |
+| Simeon Widdis     | [swiddis](https://github.com/swiddis)               | Amazon      |
+| Chen Dai          | [dai-chen](https://github.com/dai-chen)             | Amazon      |
+| Vamsi Manohar     | [vamsi-amazon](https://github.com/vamsi-amazon)     | Amazon      |
+| Peng Huo          | [penghuo](https://github.com/penghuo)               | Amazon      |
+| Sean Kao          | [seankao-az](https://github.com/seankao-az)         | Amazon      |
+| Anirudha Jadhav   | [anirudha](https://github.com/anirudha)             | Amazon      |
+| Tomoyuki Morita   | [ykmr1224](https://github.com/ykmr1224)             | Amazon      |
+| Max Ksyunz        | [MaxKsyunz](https://github.com/MaxKsyunz)           | Improving   |
+| Yury Fridlyand    | [Yury-Fridlyand](https://github.com/Yury-Fridlyand) | Improving   |
+| Andrew Carbonetto | [acarbonetto](https://github.com/acarbonetto)       | Improving   |
+| Forest Vey        | [forestmvey](https://github.com/forestmvey)         | Improving   |
+| Guian Gumpac      | [GumpacG](https://github.com/GumpacG)               | Improving   |
+
+## Emeritus Maintainers
+
+| Maintainer        | GitHub ID                                               | Affiliation |
+| ----------------- | ------------------------------------------------------- | ----------- |
+| Charlotte Henkle  | [CEHENKLE](https://github.com/CEHENKLE)                 | Amazon      |
+| Nick Knize        | [nknize](https://github.com/nknize)                     | Amazon      |
+| David Cui         | [davidcui1225](https://github.com/davidcui1225)         | Amazon      |
+| Eugene Lee        | [eugenesk24](https://github.com/eugenesk24)             | Amazon      |
+| Zhongnan Su       | [zhongnansu](https://github.com/zhongnansu)             | Amazon      |
+| Chloe Zhang       | [chloe-zh](https://github.com/chloe-zh)                 | Amazon      |
diff --git a/README.md b/README.md
index 0c220838b5..95ad52a147 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,30 @@
 
 
 - [OpenSearch SQL](#opensearch-sql)
-- [Code Summary](#code-summary)
-- [Highlights](#highlights)
-- [Documentation](#documentation)
-- [OpenSearch Forum](#forum)
-- [Contributing](#contributing)
-- [Attribution](#attribution)
-- [Code of Conduct](#code-of-conduct)
-- [Security](#security)
-- [License](#license)
-- [Copyright](#copyright)
+  - [Code Summary](#code-summary)
+    - [SQL Engine](#sql-engine)
+    - [Repository Checks](#repository-checks)
+    - [Issues](#issues)
+  - [Highlights](#highlights)
+  - [Documentation](#documentation)
+  - [Forum](#forum)
+  - [Contributing](#contributing)
+  - [Attribution](#attribution)
+  - [Code of Conduct](#code-of-conduct)
+  - [Security](#security)
+  - [License](#license)
+  - [Copyright](#copyright)
 
 # OpenSearch SQL
 
 OpenSearch enables you to extract insights out of OpenSearch using the familiar SQL or Piped Processing Language (PPL) query syntax. Use aggregations, group by, and where clauses to investigate your data. Read your data as JSON documents or CSV tables so you have the flexibility to use the format that works best for you.
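To ground that description, here is a minimal, illustrative Java client for this workflow. It assumes a local cluster on `localhost:9200` with the SQL plugin installed and a sample `accounts` index; the `_plugins/_sql` endpoint and request body follow the plugin's user documentation:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SqlQueryDemo {
  public static void main(String[] args) throws Exception {
    // POST a SQL statement to the plugin's SQL endpoint; PPL uses _plugins/_ppl.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:9200/_plugins/_sql"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(
            "{\"query\": \"SELECT firstname, age FROM accounts WHERE age > 30\"}"))
        .build();
    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    // JSON rows by default; a format query parameter (e.g. ?format=csv) selects other formats.
    System.out.println(response.body());
  }
}
```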
 
-The following projects have been merged into this repository as separate folders as of July 9, 2020. Please refer to links below for details. This document will focus on the SQL plugin for OpenSearch.
+The following projects are related to the SQL plugin but are stored in different repositories. Please refer to the links below for details. This document focuses on the SQL plugin for OpenSearch.
 
-- [SQL CLI](https://github.com/opensearch-project/sql/tree/main/sql-cli)
-- [SQL JDBC](https://github.com/opensearch-project/sql/tree/main/sql-jdbc)
-- [SQL ODBC](https://github.com/opensearch-project/sql/tree/main/sql-odbc)
-- [Query Workbench](https://github.com/opensearch-project/sql/tree/main/workbench)
+- [SQL CLI](https://github.com/opensearch-project/sql-cli)
+- [SQL JDBC](https://github.com/opensearch-project/sql-jdbc)
+- [SQL ODBC](https://github.com/opensearch-project/sql-odbc)
+- [Query Workbench](https://github.com/opensearch-project/dashboards-query-workbench)
 
 ## Code Summary
 
@@ -34,32 +37,6 @@ The following projects have been merged into this repository as separate folders
 | Distribution build tests     | [![OpenSearch IT tests][opensearch-it-badge]][opensearch-it-link] [![OpenSearch IT code][opensearch-it-code-badge]][opensearch-it-code-link] |
 | Backward compatibility tests | [![BWC tests][bwc-tests-badge]][bwc-tests-link]                                                                                              |
 
-### ODBC Driver
-
-|       |                                                 |
-| ----- | ----------------------------------------------- |
-| Build | [![ODBC CI][odbc-build-badge]][odbc-build-link] |
-
-### SQL CLI
-
-|       |                                                 |
-| ----- | ----------------------------------------------- |
-| Test and build | [![SQL CLI CI][sql-cli-build-badge]][sql-cli-build-link] |
-
-### JDBC Driver
-
-|       |                                                 |
-| ----- | ----------------------------------------------- |
-| Build | [![JDBC CI][jdbc-build-badge]][jdbc-build-link] |
-
-### Query WorkBench
-
-|                          |                                                                                                                    |
-| ------------------------ | ------------------------------------------------------------------------------------------------------------------ |
-| Test and build           | [![Observability Dashboards CI][workbench-build-badge]][workbench-build-link]                                      |
-| Code coverage            | [![codecov][workbench-codecov-badge]][sql-codecov-link]                                                                |
-| Distribution build tests | [![cypress tests][cypress-test-badge]][cypress-test-link] [![cypress code][cypress-code-badge]][cypress-code-link] |
-
 ### Repository Checks
 
 |              |                                                                 |
@@ -82,14 +59,6 @@ The following projects have been merged into this repository as separate folders
 [dco-badge-link]: https://github.com/opensearch-project/sql/actions/workflows/dco.yml
 [link-check-badge]: https://github.com/opensearch-project/sql/actions/workflows/link-checker.yml/badge.svg
 [link-check-link]: https://github.com/opensearch-project/sql/actions/workflows/link-checker.yml
-[odbc-build-badge]: https://github.com/opensearch-project/sql/actions/workflows/sql-odbc-main.yml/badge.svg
-[odbc-build-link]: https://github.com/opensearch-project/sql/actions/workflows/sql-odbc-main.yml
-[sql-cli-build-badge]: https://github.com/opensearch-project/sql/actions/workflows/sql-cli-test-and-build-workflow.yml/badge.svg
-[sql-cli-build-link]: https://github.com/opensearch-project/sql/actions/workflows/sql-cli-test-and-build-workflow.yml
-[jdbc-build-badge]: https://github.com/opensearch-project/sql/actions/workflows/sql-jdbc-test-and-build-workflow.yml/badge.svg
-[jdbc-build-link]: https://github.com/opensearch-project/sql/actions/workflows/sql-jdbc-test-and-build-workflow.yml
-[sql-ci-badge]: https://github.com/opensearch-project/sql/actions/workflows/sql-test-and-build-workflow.yml/badge.svg
-[sql-ci-link]: https://github.com/opensearch-project/sql/actions/workflows/sql-test-and-build-workflow.yml
 [bwc-tests-badge]: https://img.shields.io/badge/BWC%20tests-in%20progress-yellow
 [bwc-tests-link]: https://github.com/opensearch-project/sql/issues/193
 [good-first-badge]: https://img.shields.io/github/issues/opensearch-project/sql/good%20first%20issue.svg
@@ -104,14 +73,7 @@ The following projects have been merged into this repository as separate folders
 [untriaged-link]: https://github.com/opensearch-project/sql/issues?q=is%3Aopen+is%3Aissue+label%3Auntriaged+
 [nolabel-badge]: https://img.shields.io/github/issues-search/opensearch-project/sql?color=yellow&label=no%20label%20issues&query=is%3Aopen%20is%3Aissue%20no%3Alabel
 [nolabel-link]: https://github.com/opensearch-project/sql/issues?q=is%3Aopen+is%3Aissue+no%3Alabel+
-[workbench-build-badge]: https://github.com/opensearch-project/sql/actions/workflows/sql-workbench-test-and-build-workflow.yml/badge.svg
-[workbench-build-link]: https://github.com/opensearch-project/sql/actions/workflows/sql-workbench-test-and-build-workflow.yml
-[cypress-test-badge]: https://img.shields.io/badge/Cypress%20tests-in%20progress-yellow
-[cypress-test-link]: https://github.com/opensearch-project/opensearch-build/issues/1124
-[cypress-code-badge]: https://img.shields.io/badge/Cypress%20code-blue
-[cypress-code-link]: https://github.com/opensearch-project/sql/tree/main/workbench/.cypress/integration
 [sql-codecov-badge]: https://codecov.io/gh/opensearch-project/sql/branch/main/graphs/badge.svg?flag=sql-engine
-[workbench-codecov-badge]: https://codecov.io/gh/opensearch-project/sql/branch/main/graphs/badge.svg?flag=query-workbench
 [sql-codecov-link]: https://codecov.io/gh/opensearch-project/sql
 [opensearch-it-badge]: https://img.shields.io/badge/SQL%20IT%20tests-in%20progress-yellow
 [opensearch-it-link]: https://github.com/opensearch-project/opensearch-build/issues/1124
diff --git a/async-query-core/.gitignore b/async-query-core/.gitignore
new file mode 100644
index 0000000000..689cc5c548
--- /dev/null
+++ b/async-query-core/.gitignore
@@ -0,0 +1,42 @@
+.gradle
+build/
+!gradle/wrapper/gradle-wrapper.jar
+!src/main/**/build/
+!src/test/**/build/
+
+### IntelliJ IDEA ###
+.idea/modules.xml
+.idea/jarRepositories.xml
+.idea/compiler.xml
+.idea/libraries/
+*.iws
+*.iml
+*.ipr
+out/
+!src/main/**/out/
+!src/test/**/out/
+
+### Eclipse ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+bin/
+!src/main/**/bin/
+!src/test/**/bin/
+
+### NetBeans ###
+/nbproject/private/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/
+
+### VS Code ###
+.vscode/
+
+### Mac OS ###
+.DS_Store
\ No newline at end of file
diff --git a/async-query-core/README.md b/async-query-core/README.md
new file mode 100644
index 0000000000..815088bce6
--- /dev/null
+++ b/async-query-core/README.md
@@ -0,0 +1,34 @@
+# async-query-core library
+
+This directory contains the async-query-core library, which implements the core logic of async-query and provides extension points so that different implementations of data storage and related concerns can be plugged in.
+The `async-query` module provides the OpenSearch index-based implementations of these extension points.
+
+## Type of queries
+The following types of queries are supported; the type is identified automatically by analyzing the query.
+- BatchQuery: Executes a single query in Spark
+- InteractiveQuery: Establishes a session and executes queries within a single Spark session
+- IndexDMLQuery: Handles DROP/ALTER/VACUUM operations for Flint indices
+- RefreshQuery: One-time query request to refresh (update) a Flint index
+- StreamingQuery: Continuously updates a Flint index in a single Spark session
+
+## Extension points
+The following is the list of extension points where the consumer of the library needs to provide its own implementation; a minimal sketch of one such implementation follows the list.
+
+- Data store interface
+  - [AsyncQueryJobMetadataStorageService](src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryJobMetadataStorageService.java)
+  - [SessionStorageService](src/main/java/org/opensearch/sql/spark/execution/statestore/SessionStorageService.java)
+  - [StatementStorageService](src/main/java/org/opensearch/sql/spark/execution/statestore/StatementStorageService.java)
+  - [FlintIndexMetadataService](src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataService.java)
+  - [FlintIndexStateModelService](src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModelService.java)
+  - [IndexDMLResultStorageService](src/main/java/org/opensearch/sql/spark/flint/IndexDMLResultStorageService.java)
+- Other
+  - [LeaseManager](src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java)
+  - [JobExecutionResponseReader](src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java)
+  - [QueryIdProvider](src/main/java/org/opensearch/sql/spark/dispatcher/QueryIdProvider.java)
+  - [SessionIdProvider](src/main/java/org/opensearch/sql/spark/execution/session/SessionIdProvider.java)
+  - [SessionConfigSupplier](src/main/java/org/opensearch/sql/spark/execution/session/SessionConfigSupplier.java)
+  - [EMRServerlessClientFactory](src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactory.java)
+  - [SparkExecutionEngineConfigSupplier](src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplier.java)
+  - [DataSourceSparkParameterComposer](src/main/java/org/opensearch/sql/spark/parameter/DataSourceSparkParameterComposer.java)
+  - [GeneralSparkParameterComposer](src/main/java/org/opensearch/sql/spark/parameter/GeneralSparkParameterComposer.java)
+  - [SparkSubmitParameterModifier](src/main/java/org/opensearch/sql/spark/config/SparkSubmitParameterModifier.java) To be deprecated in favor of GeneralSparkParameterComposer
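To make the extension contract concrete, here is a minimal, hypothetical sketch of supplying one of these implementations. The interface body below is a stand-in written for this sketch; only the interface name comes from this README, and the authoritative signature is in the linked `QueryIdProvider` source file:

```java
// Stand-in for the library's QueryIdProvider contract; see
// src/main/java/org/opensearch/sql/spark/dispatcher/QueryIdProvider.java
// for the real signature.
interface QueryIdProvider {
  String getQueryId(String datasourceName);
}

// A consumer-supplied implementation backed by random UUIDs, so query results
// can be correlated in whatever data store the embedding plugin chooses.
final class UuidQueryIdProvider implements QueryIdProvider {
  @Override
  public String getQueryId(String datasourceName) {
    return datasourceName + "::" + java.util.UUID.randomUUID();
  }
}
```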
diff --git a/async-query-core/build.gradle b/async-query-core/build.gradle
new file mode 100644
index 0000000000..1de6cb3105
--- /dev/null
+++ b/async-query-core/build.gradle
@@ -0,0 +1,152 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+plugins {
+    id 'java-library'
+    id "io.freefair.lombok"
+    id 'jacoco'
+    id 'antlr'
+    id 'com.diffplug.spotless' version '6.22.0'
+    id 'com.github.johnrengelman.shadow'
+}
+
+repositories {
+    mavenCentral()
+}
+
+tasks.register('downloadG4Files', Exec) {
+    description = 'Download remote .g4 files from GitHub'
+
+    executable 'curl'
+
+    args '-o', 'src/main/antlr/FlintSparkSqlExtensions.g4', 'https://raw.githubusercontent.com/opensearch-project/opensearch-spark/main/flint-spark-integration/src/main/antlr4/FlintSparkSqlExtensions.g4'
+    args '-o', 'src/main/antlr/SparkSqlBase.g4', 'https://raw.githubusercontent.com/opensearch-project/opensearch-spark/main/flint-spark-integration/src/main/antlr4/SparkSqlBase.g4'
+    args '-o', 'src/main/antlr/SqlBaseParser.g4', 'https://raw.githubusercontent.com/apache/spark/master/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4'
+    args '-o', 'src/main/antlr/SqlBaseLexer.g4', 'https://raw.githubusercontent.com/apache/spark/master/sql/api/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4'
+}
+
+generateGrammarSource {
+    arguments += ['-visitor', '-package', 'org.opensearch.sql.spark.antlr.parser']
+    source = sourceSets.main.antlr
+    outputDirectory = file("build/generated-src/antlr/main/org/opensearch/sql/asyncquery/antlr/parser")
+}
+configurations {
+    compile {
+        extendsFrom = extendsFrom.findAll { it != configurations.antlr }
+    }
+}
+
+// skip download in case of offline build
+if (!gradle.startParameter.offline) {
+    // Make sure the downloadG4File task runs before the generateGrammarSource task
+    generateGrammarSource.dependsOn downloadG4Files
+}
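For orientation, a sketch of driving the generated parser once `generateGrammarSource` has run. The `FlintSparkSqlExtensionsLexer`/`FlintSparkSqlExtensionsParser` class names follow ANTLR's standard `<grammar>Lexer`/`<grammar>Parser` naming in the package configured above; the sample statement follows the FlintSparkSqlExtensions grammar added later in this change:

```java
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.opensearch.sql.spark.antlr.parser.FlintSparkSqlExtensionsLexer;
import org.opensearch.sql.spark.antlr.parser.FlintSparkSqlExtensionsParser;

public class FlintSqlParseDemo {
  public static void main(String[] args) {
    String sql = "CREATE SKIPPING INDEX ON catalog.db.tbl (col VALUE_SET)";
    FlintSparkSqlExtensionsLexer lexer =
        new FlintSparkSqlExtensionsLexer(CharStreams.fromString(sql));
    FlintSparkSqlExtensionsParser parser =
        new FlintSparkSqlExtensionsParser(new CommonTokenStream(lexer));
    // singleStatement is the grammar's entry rule: statement SEMICOLON* EOF.
    System.out.println(parser.singleStatement().toStringTree(parser));
  }
}
```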
+
+dependencies {
+    antlr "org.antlr:antlr4:4.7.1"
+
+    implementation project(':core')
+    implementation project(':spark') // TODO: dependency to spark should be eliminated
+    implementation project(':datasources') // TODO: dependency to datasources should be eliminated
+    implementation 'org.json:json:20231013'
+    implementation 'com.google.code.gson:gson:2.8.9'
+
+    testImplementation(platform("org.junit:junit-bom:5.9.3"))
+
+    testCompileOnly('org.junit.jupiter:junit-jupiter')
+    testImplementation 'org.mockito:mockito-core:5.7.0'
+    testImplementation 'org.mockito:mockito-junit-jupiter:5.7.0'
+
+    testCompileOnly('junit:junit:4.13.1') {
+        exclude group: 'org.hamcrest', module: 'hamcrest-core'
+    }
+    testRuntimeOnly("org.junit.vintage:junit-vintage-engine") {
+        exclude group: 'org.hamcrest', module: 'hamcrest-core'
+    }
+    testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") {
+        exclude group: 'org.hamcrest', module: 'hamcrest-core'
+    }
+    testRuntimeOnly("org.junit.platform:junit-platform-launcher") {
+        because 'allows tests to run from IDEs that bundle older version of launcher'
+    }
+}
+
+spotless {
+    java {
+        target fileTree('.') {
+            include '**/*.java'
+            exclude '**/build/**', '**/build-*/**'
+        }
+        importOrder()
+        removeUnusedImports()
+        trimTrailingWhitespace()
+        endWithNewline()
+        googleJavaFormat('1.17.0').reflowLongStrings().groupArtifact('com.google.googlejavaformat:google-java-format')
+    }
+}
+
+test {
+    useJUnitPlatform()
+    testLogging {
+        events "skipped", "failed"
+        exceptionFormat "full"
+    }
+}
+
+jacocoTestReport {
+    reports {
+        html.required = true
+        xml.required = true
+    }
+    afterEvaluate {
+        classDirectories.setFrom(files(classDirectories.files.collect {
+            fileTree(dir: it, exclude: ['**/antlr/parser/**'])
+        }))
+    }
+}
+test.finalizedBy(project.tasks.jacocoTestReport)
+jacocoTestCoverageVerification {
+    violationRules {
+        rule {
+            element = 'CLASS'
+            // TODO: Add unit tests in async-query-core and remove exclusions
+            excludes = [
+                    'org.opensearch.sql.spark.asyncquery.model.*',
+                    'org.opensearch.sql.spark.data.constants.*',
+                    'org.opensearch.sql.spark.dispatcher.model.*',
+                    'org.opensearch.sql.spark.dispatcher.*',
+                    'org.opensearch.sql.spark.execution.session.*',
+                    'org.opensearch.sql.spark.execution.statement.*',
+                    'org.opensearch.sql.spark.flint.*',
+                    'org.opensearch.sql.spark.flint.operation.*',
+                    'org.opensearch.sql.spark.rest.*',
+                    'org.opensearch.sql.spark.utils.SQLQueryUtils.*'
+            ]
+            limit {
+                counter = 'LINE'
+                minimum = 1.0
+            }
+            limit {
+                counter = 'BRANCH'
+                minimum = 1.0
+            }
+        }
+    }
+    afterEvaluate {
+        classDirectories.setFrom(files(classDirectories.files.collect {
+            fileTree(dir: it, exclude: ['**/antlr/parser/**'])
+        }))
+    }
+}
+check.dependsOn jacocoTestCoverageVerification
+
+shadowJar {
+    archiveBaseName.set('async-query-core')
+    archiveVersion.set('1.0.0')  // Set the desired version
+    archiveClassifier.set('all')
+
+    from sourceSets.main.output
+    configurations = [project.configurations.runtimeClasspath]
+}
\ No newline at end of file
diff --git a/core/src/test/java/org/opensearch/sql/expression/operator/arthmetic/UnaryFunctionTest.java b/async-query-core/src/main/antlr/.gitkeep
similarity index 100%
rename from core/src/test/java/org/opensearch/sql/expression/operator/arthmetic/UnaryFunctionTest.java
rename to async-query-core/src/main/antlr/.gitkeep
diff --git a/async-query-core/src/main/antlr/FlintSparkSqlExtensions.g4 b/async-query-core/src/main/antlr/FlintSparkSqlExtensions.g4
new file mode 100644
index 0000000000..46e814e9f5
--- /dev/null
+++ b/async-query-core/src/main/antlr/FlintSparkSqlExtensions.g4
@@ -0,0 +1,208 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+grammar FlintSparkSqlExtensions;
+
+import SparkSqlBase;
+
+
+// Flint SQL Syntax Extension
+
+singleStatement
+    : statement SEMICOLON* EOF
+    ;
+
+statement
+    : skippingIndexStatement
+    | coveringIndexStatement
+    | materializedViewStatement
+    | indexManagementStatement
+    | indexJobManagementStatement
+    ;
+
+skippingIndexStatement
+    : createSkippingIndexStatement
+    | refreshSkippingIndexStatement
+    | describeSkippingIndexStatement
+    | alterSkippingIndexStatement
+    | dropSkippingIndexStatement
+    | vacuumSkippingIndexStatement
+    | analyzeSkippingIndexStatement
+    ;
+
+createSkippingIndexStatement
+    : CREATE SKIPPING INDEX (IF NOT EXISTS)?
+        ON tableName
+        LEFT_PAREN indexColTypeList RIGHT_PAREN
+        whereClause?
+        (WITH LEFT_PAREN propertyList RIGHT_PAREN)?
+    ;
+
+refreshSkippingIndexStatement
+    : REFRESH SKIPPING INDEX ON tableName
+    ;
+
+describeSkippingIndexStatement
+    : (DESC | DESCRIBE) SKIPPING INDEX ON tableName
+    ;
+
+alterSkippingIndexStatement
+    : ALTER SKIPPING INDEX
+        ON tableName
+        WITH LEFT_PAREN propertyList RIGHT_PAREN
+    ;
+
+dropSkippingIndexStatement
+    : DROP SKIPPING INDEX ON tableName
+    ;
+
+vacuumSkippingIndexStatement
+    : VACUUM SKIPPING INDEX ON tableName
+    ;
+
+coveringIndexStatement
+    : createCoveringIndexStatement
+    | refreshCoveringIndexStatement
+    | showCoveringIndexStatement
+    | describeCoveringIndexStatement
+    | alterCoveringIndexStatement
+    | dropCoveringIndexStatement
+    | vacuumCoveringIndexStatement
+    ;
+
+createCoveringIndexStatement
+    : CREATE INDEX (IF NOT EXISTS)? indexName
+        ON tableName
+        LEFT_PAREN indexColumns=multipartIdentifierPropertyList RIGHT_PAREN
+        whereClause?
+        (WITH LEFT_PAREN propertyList RIGHT_PAREN)?
+    ;
+
+refreshCoveringIndexStatement
+    : REFRESH INDEX indexName ON tableName
+    ;
+
+showCoveringIndexStatement
+    : SHOW (INDEX | INDEXES) ON tableName
+    ;
+
+describeCoveringIndexStatement
+    : (DESC | DESCRIBE) INDEX indexName ON tableName
+    ;
+
+alterCoveringIndexStatement
+    : ALTER INDEX indexName
+        ON tableName
+        WITH LEFT_PAREN propertyList RIGHT_PAREN
+    ;
+
+dropCoveringIndexStatement
+    : DROP INDEX indexName ON tableName
+    ;
+
+vacuumCoveringIndexStatement
+    : VACUUM INDEX indexName ON tableName
+    ;
+
+analyzeSkippingIndexStatement
+    : ANALYZE SKIPPING INDEX ON tableName
+    ;
+
+materializedViewStatement
+    : createMaterializedViewStatement
+    | refreshMaterializedViewStatement
+    | showMaterializedViewStatement
+    | describeMaterializedViewStatement
+    | alterMaterializedViewStatement
+    | dropMaterializedViewStatement
+    | vacuumMaterializedViewStatement
+    ;
+
+createMaterializedViewStatement
+    : CREATE MATERIALIZED VIEW (IF NOT EXISTS)? mvName=multipartIdentifier
+        AS query=materializedViewQuery
+        (WITH LEFT_PAREN propertyList RIGHT_PAREN)?
+    ;
+
+refreshMaterializedViewStatement
+    : REFRESH MATERIALIZED VIEW mvName=multipartIdentifier
+    ;
+
+showMaterializedViewStatement
+    : SHOW MATERIALIZED (VIEW | VIEWS) IN catalogDb=multipartIdentifier
+    ;
+
+describeMaterializedViewStatement
+    : (DESC | DESCRIBE) MATERIALIZED VIEW mvName=multipartIdentifier
+    ;
+
+alterMaterializedViewStatement
+    : ALTER MATERIALIZED VIEW mvName=multipartIdentifier
+        WITH LEFT_PAREN propertyList RIGHT_PAREN
+    ;
+
+dropMaterializedViewStatement
+    : DROP MATERIALIZED VIEW mvName=multipartIdentifier
+    ;
+
+vacuumMaterializedViewStatement
+    : VACUUM MATERIALIZED VIEW mvName=multipartIdentifier
+    ;
+
+indexManagementStatement
+    : showFlintIndexStatement
+    ;
+
+showFlintIndexStatement
+    : SHOW FLINT (INDEX | INDEXES)
+        IN catalogDb=multipartIdentifier        #showFlintIndex
+    | SHOW FLINT (INDEX | INDEXES) EXTENDED
+        IN catalogDb=multipartIdentifier        #showFlintIndexExtended
+    ;
+
+indexJobManagementStatement
+    : recoverIndexJobStatement
+    ;
+
+recoverIndexJobStatement
+    : RECOVER INDEX JOB identifier
+    ;
+
+/*
+ * Match all remaining tokens in a non-greedy way
+ * so the WITH clause won't be captured by this rule.
+ */
+materializedViewQuery
+    : .+?
+    ;
+
+whereClause
+    : WHERE filterCondition
+    ;
+
+filterCondition
+    : .+?
+    ;
+
+indexColTypeList
+    : indexColType (COMMA indexColType)*
+    ;
+
+indexColType
+    : multipartIdentifier skipType=(PARTITION | VALUE_SET | MIN_MAX | BLOOM_FILTER)
+        (LEFT_PAREN skipParams RIGHT_PAREN)?
+    ;
+
+skipParams
+    : propertyValue (COMMA propertyValue)*
+    ;
+
+indexName
+    : identifier
+    ;
+
+tableName
+    : multipartIdentifier
+    ;
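For orientation, a minimal sketch of driving this grammar from Java. It assumes ANTLR has generated FlintSparkSqlExtensionsLexer and FlintSparkSqlExtensionsParser (the default names for this grammar), and it upper-cases identifiers because the imported SparkSqlBase LETTER fragment only matches A-Z:

    import org.antlr.v4.runtime.CharStreams;
    import org.antlr.v4.runtime.CommonTokenStream;

    public class FlintGrammarDemo {
        public static void main(String[] args) {
            String sql = "CREATE SKIPPING INDEX ON ALB_LOGS (ELB_STATUS_CODE VALUE_SET);";
            // Tokenize, then parse from the grammar's entry rule; syntax errors
            // are reported through ANTLR's default console error listener.
            FlintSparkSqlExtensionsLexer lexer =
                new FlintSparkSqlExtensionsLexer(CharStreams.fromString(sql));
            FlintSparkSqlExtensionsParser parser =
                new FlintSparkSqlExtensionsParser(new CommonTokenStream(lexer));
            System.out.println(parser.singleStatement().toStringTree(parser));
        }
    }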
diff --git a/async-query-core/src/main/antlr/SparkSqlBase.g4 b/async-query-core/src/main/antlr/SparkSqlBase.g4
new file mode 100644
index 0000000000..c53c61adfd
--- /dev/null
+++ b/async-query-core/src/main/antlr/SparkSqlBase.g4
@@ -0,0 +1,246 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/*
+ * This file contains code from the Apache Spark project (original license below).
+ * It contains modifications, which are licensed as above:
+ */
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+grammar SparkSqlBase;
+
+// Copy from Spark 3.3.1 SqlBaseParser.g4 and SqlBaseLexer.g4
+
+@members {
+  /**
+   * When true, the parser should throw a ParseException for an unclosed bracketed comment.
+   */
+  public boolean has_unclosed_bracketed_comment = false;
+
+  /**
+   * Verify whether the current token is a valid decimal token (which contains a dot).
+   * Returns true if the character that follows the token is not a digit, letter or underscore.
+   *
+   * For example:
+   * For char stream "2.3", "2." is not a valid decimal token, because it is followed by the digit '3'.
+   * For char stream "2.3_", "2.3" is not a valid decimal token, because it is followed by '_'.
+   * For char stream "2.3W", "2.3" is not a valid decimal token, because it is followed by 'W'.
+   * For char stream "12.0D 34.E2+0.12 ", "12.0D" is a valid decimal token because it is followed
+   * by a space. "34.E2" is a valid decimal token because it is followed by the symbol '+',
+   * which is not a digit, letter or underscore.
+   */
+  public boolean isValidDecimal() {
+    int nextChar = _input.LA(1);
+    if (nextChar >= 'A' && nextChar <= 'Z' || nextChar >= '0' && nextChar <= '9' ||
+      nextChar == '_') {
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  /**
+   * This method will be called when we see '/*' and try to match it as a bracketed comment.
+   * If the next character is '+', it should be parsed as a hint later, and we cannot match
+   * it as a bracketed comment.
+   *
+   * Returns true if the next character is '+'.
+   */
+  public boolean isHint() {
+    int nextChar = _input.LA(1);
+    if (nextChar == '+') {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * This method will be called when the character stream ends while the lexer is still
+   * trying to match a bracketed comment, i.e. the comment was never closed.
+   * If it is called, the end of the entire character stream has been reached, so we set
+   * the flag and fail later.
+   */
+  public void markUnclosedComment() {
+    has_unclosed_bracketed_comment = true;
+  }
+}
+
+
+multipartIdentifierPropertyList
+    : multipartIdentifierProperty (COMMA multipartIdentifierProperty)*
+    ;
+
+multipartIdentifierProperty
+    : multipartIdentifier (options=propertyList)?
+    ;
+
+propertyList
+    : property (COMMA property)*
+    ;
+
+property
+    : key=propertyKey (EQ? value=propertyValue)?
+    ;
+
+propertyKey
+    : identifier (DOT identifier)*
+    | STRING
+    ;
+
+propertyValue
+    : INTEGER_VALUE
+    | DECIMAL_VALUE
+    | booleanValue
+    | STRING
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+
+multipartIdentifier
+    : parts+=identifier (DOT parts+=identifier)*
+    ;
+
+identifier
+    : IDENTIFIER              #unquotedIdentifier
+    | quotedIdentifier        #quotedIdentifierAlternative
+    | nonReserved             #unquotedIdentifier
+    ;
+
+quotedIdentifier
+    : BACKQUOTED_IDENTIFIER
+    ;
+
+nonReserved
+    : DROP | SKIPPING | INDEX
+    ;
+
+
+// Flint lexical tokens
+
+BLOOM_FILTER: 'BLOOM_FILTER';
+MIN_MAX: 'MIN_MAX';
+SKIPPING: 'SKIPPING';
+VALUE_SET: 'VALUE_SET';
+
+
+// Spark lexical tokens
+
+SEMICOLON: ';';
+
+LEFT_PAREN: '(';
+RIGHT_PAREN: ')';
+COMMA: ',';
+DOT: '.';
+
+
+AS: 'AS';
+ALTER: 'ALTER';
+ANALYZE: 'ANALYZE';
+CREATE: 'CREATE';
+DESC: 'DESC';
+DESCRIBE: 'DESCRIBE';
+DROP: 'DROP';
+EXISTS: 'EXISTS';
+EXTENDED: 'EXTENDED';
+FALSE: 'FALSE';
+FLINT: 'FLINT';
+IF: 'IF';
+IN: 'IN';
+INDEX: 'INDEX';
+INDEXES: 'INDEXES';
+JOB: 'JOB';
+MATERIALIZED: 'MATERIALIZED';
+NOT: 'NOT';
+ON: 'ON';
+PARTITION: 'PARTITION';
+RECOVER: 'RECOVER';
+REFRESH: 'REFRESH';
+SHOW: 'SHOW';
+TRUE: 'TRUE';
+VACUUM: 'VACUUM';
+VIEW: 'VIEW';
+VIEWS: 'VIEWS';
+WHERE: 'WHERE';
+WITH: 'WITH';
+
+
+EQ  : '=' | '==';
+MINUS: '-';
+
+
+STRING
+    : '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
+    | '"' ( ~('"'|'\\') | ('\\' .) )* '"'
+    | 'R\'' (~'\'')* '\''
+    | 'R"'(~'"')* '"'
+    ;
+
+INTEGER_VALUE
+    : DIGIT+
+    ;
+
+DECIMAL_VALUE
+    : DECIMAL_DIGITS {isValidDecimal()}?
+    ;
+
+IDENTIFIER
+    : (LETTER | DIGIT | '_')+
+    ;
+
+BACKQUOTED_IDENTIFIER
+    : '`' ( ~'`' | '``' )* '`'
+    ;
+
+fragment DECIMAL_DIGITS
+    : DIGIT+ '.' DIGIT*
+    | '.' DIGIT+
+    ;
+
+fragment DIGIT
+    : [0-9]
+    ;
+
+fragment LETTER
+    : [A-Z]
+    ;
+
+SIMPLE_COMMENT
+    : '--' ('\\\n' | ~[\r\n])* '\r'? '\n'? -> channel(HIDDEN)
+    ;
+
+BRACKETED_COMMENT
+    : '/*' {!isHint()}? ( BRACKETED_COMMENT | . )*? ('*/' | {markUnclosedComment();} EOF) -> channel(HIDDEN)
+    ;
+
+WS
+    : [ \r\n\t]+ -> channel(HIDDEN)
+    ;
+
+// Catch-all for anything we can't recognize.
+// We use this to be able to ignore and recover all the text
+// when splitting statements with DelimiterLexer
+UNRECOGNIZED
+    : .
+    ;
\ No newline at end of file
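The isValidDecimal() predicate above is easier to see with the follower check factored out. An illustrative standalone sketch (the real predicate reads the lookahead character from the lexer's _input stream; this class is hypothetical):

    public class DecimalFollowerCheck {
        // Mirrors the grammar's semantic predicate: a candidate decimal token is
        // valid only if the next character is not a digit, an A-Z letter, or an
        // underscore (the lexer is fed upper-cased input).
        static boolean isValidDecimalFollower(char next) {
            return !((next >= 'A' && next <= 'Z')
                || (next >= '0' && next <= '9')
                || next == '_');
        }

        public static void main(String[] args) {
            System.out.println(isValidDecimalFollower('3')); // false: in "2.3", "2." is rejected
            System.out.println(isValidDecimalFollower('_')); // false: in "2.3_", "2.3" is rejected
            System.out.println(isValidDecimalFollower('+')); // true:  "34.E2" in "34.E2+0.12"
            System.out.println(isValidDecimalFollower(' ')); // true:  "12.0D" followed by a space
        }
    }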
diff --git a/async-query-core/src/main/antlr/SqlBaseLexer.g4 b/async-query-core/src/main/antlr/SqlBaseLexer.g4
new file mode 100644
index 0000000000..bde298c23e
--- /dev/null
+++ b/async-query-core/src/main/antlr/SqlBaseLexer.g4
@@ -0,0 +1,615 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * This file is an adaptation of Presto's presto-parser/src/main/antlr4/com/facebook/presto/sql/parser/SqlBase.g4 grammar.
+ */
+
+lexer grammar SqlBaseLexer;
+
+@members {
+  /**
+   * When true, the parser should throw a ParseException for an unclosed bracketed comment.
+   */
+  public boolean has_unclosed_bracketed_comment = false;
+
+  /**
+   * Verify whether the current token is a valid decimal token (which contains a dot).
+   * Returns true if the character that follows the token is not a digit, letter or underscore.
+   *
+   * For example:
+   * For char stream "2.3", "2." is not a valid decimal token, because it is followed by the digit '3'.
+   * For char stream "2.3_", "2.3" is not a valid decimal token, because it is followed by '_'.
+   * For char stream "2.3W", "2.3" is not a valid decimal token, because it is followed by 'W'.
+   * For char stream "12.0D 34.E2+0.12 ", "12.0D" is a valid decimal token because it is followed
+   * by a space. "34.E2" is a valid decimal token because it is followed by the symbol '+',
+   * which is not a digit, letter or underscore.
+   */
+  public boolean isValidDecimal() {
+    int nextChar = _input.LA(1);
+    if (nextChar >= 'A' && nextChar <= 'Z' || nextChar >= '0' && nextChar <= '9' ||
+      nextChar == '_') {
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  /**
+   * This method will be called when we see '/*' and try to match it as a bracketed comment.
+   * If the next character is '+', it should be parsed as a hint later, and we cannot match
+   * it as a bracketed comment.
+   *
+   * Returns true if the next character is '+'.
+   */
+  public boolean isHint() {
+    int nextChar = _input.LA(1);
+    if (nextChar == '+') {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * This method will be called when the character stream ends while the lexer is still
+   * trying to match a bracketed comment, i.e. the comment was never closed.
+   * If it is called, the end of the entire character stream has been reached, so we set
+   * the flag and fail later.
+   */
+  public void markUnclosedComment() {
+    has_unclosed_bracketed_comment = true;
+  }
+
+  /**
+   * When greater than zero, it's in the middle of parsing ARRAY/MAP/STRUCT type.
+   */
+  public int complex_type_level_counter = 0;
+
+  /**
+   * Increase the counter by one when the lexer hits the keyword 'ARRAY', 'MAP' or 'STRUCT'.
+   */
+  public void incComplexTypeLevelCounter() {
+    complex_type_level_counter++;
+  }
+
+  /**
+   * Decrease the counter by one when the lexer hits the close tag '>' and the counter is
+   * greater than zero, which means we are in the middle of parsing a complex type.
+   * Otherwise, it's a dangling GT token and we do nothing.
+   */
+  public void decComplexTypeLevelCounter() {
+    if (complex_type_level_counter > 0) complex_type_level_counter--;
+  }
+
+  /**
+   * If the counter is zero, '>>' is a shift right operator; otherwise it is the closing
+   * tags of a complex type definition, such as MAP<STRING, ARRAY<INT>>.
+   */
+  public boolean isShiftRightOperator() {
+    return complex_type_level_counter == 0;
+  }
+}
+
+SEMICOLON: ';';
+
+LEFT_PAREN: '(';
+RIGHT_PAREN: ')';
+COMMA: ',';
+DOT: '.';
+LEFT_BRACKET: '[';
+RIGHT_BRACKET: ']';
+BANG: '!';
+
+// NOTE: If you add a new token in the list below, you should update the list of keywords
+// and reserved tag in `docs/sql-ref-ansi-compliance.md#sql-keywords`, and
+// modify `ParserUtils.toExprAlias()` which assumes all keywords are between `ADD` and `ZONE`.
+
+//============================
+// Start of the keywords list
+//============================
+//--SPARK-KEYWORD-LIST-START
+ADD: 'ADD';
+AFTER: 'AFTER';
+ALL: 'ALL';
+ALTER: 'ALTER';
+ALWAYS: 'ALWAYS';
+ANALYZE: 'ANALYZE';
+AND: 'AND';
+ANTI: 'ANTI';
+ANY: 'ANY';
+ANY_VALUE: 'ANY_VALUE';
+ARCHIVE: 'ARCHIVE';
+ARRAY: 'ARRAY' {incComplexTypeLevelCounter();};
+AS: 'AS';
+ASC: 'ASC';
+AT: 'AT';
+AUTHORIZATION: 'AUTHORIZATION';
+BEGIN: 'BEGIN';
+BETWEEN: 'BETWEEN';
+BIGINT: 'BIGINT';
+BINARY: 'BINARY';
+BINDING: 'BINDING';
+BOOLEAN: 'BOOLEAN';
+BOTH: 'BOTH';
+BUCKET: 'BUCKET';
+BUCKETS: 'BUCKETS';
+BY: 'BY';
+BYTE: 'BYTE';
+CACHE: 'CACHE';
+CALLED: 'CALLED';
+CASCADE: 'CASCADE';
+CASE: 'CASE';
+CAST: 'CAST';
+CATALOG: 'CATALOG';
+CATALOGS: 'CATALOGS';
+CHANGE: 'CHANGE';
+CHAR: 'CHAR';
+CHARACTER: 'CHARACTER';
+CHECK: 'CHECK';
+CLEAR: 'CLEAR';
+CLUSTER: 'CLUSTER';
+CLUSTERED: 'CLUSTERED';
+CODEGEN: 'CODEGEN';
+COLLATE: 'COLLATE';
+COLLATION: 'COLLATION';
+COLLECTION: 'COLLECTION';
+COLUMN: 'COLUMN';
+COLUMNS: 'COLUMNS';
+COMMENT: 'COMMENT';
+COMMIT: 'COMMIT';
+COMPACT: 'COMPACT';
+COMPACTIONS: 'COMPACTIONS';
+COMPENSATION: 'COMPENSATION';
+COMPUTE: 'COMPUTE';
+CONCATENATE: 'CONCATENATE';
+CONSTRAINT: 'CONSTRAINT';
+CONTAINS: 'CONTAINS';
+COST: 'COST';
+CREATE: 'CREATE';
+CROSS: 'CROSS';
+CUBE: 'CUBE';
+CURRENT: 'CURRENT';
+CURRENT_DATE: 'CURRENT_DATE';
+CURRENT_TIME: 'CURRENT_TIME';
+CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP';
+CURRENT_USER: 'CURRENT_USER';
+DAY: 'DAY';
+DAYS: 'DAYS';
+DAYOFYEAR: 'DAYOFYEAR';
+DATA: 'DATA';
+DATE: 'DATE';
+DATABASE: 'DATABASE';
+DATABASES: 'DATABASES';
+DATEADD: 'DATEADD';
+DATE_ADD: 'DATE_ADD';
+DATEDIFF: 'DATEDIFF';
+DATE_DIFF: 'DATE_DIFF';
+DBPROPERTIES: 'DBPROPERTIES';
+DEC: 'DEC';
+DECIMAL: 'DECIMAL';
+DECLARE: 'DECLARE';
+DEFAULT: 'DEFAULT';
+DEFINED: 'DEFINED';
+DEFINER: 'DEFINER';
+DELETE: 'DELETE';
+DELIMITED: 'DELIMITED';
+DESC: 'DESC';
+DESCRIBE: 'DESCRIBE';
+DETERMINISTIC: 'DETERMINISTIC';
+DFS: 'DFS';
+DIRECTORIES: 'DIRECTORIES';
+DIRECTORY: 'DIRECTORY';
+DISTINCT: 'DISTINCT';
+DISTRIBUTE: 'DISTRIBUTE';
+DIV: 'DIV';
+DOUBLE: 'DOUBLE';
+DROP: 'DROP';
+ELSE: 'ELSE';
+END: 'END';
+ESCAPE: 'ESCAPE';
+ESCAPED: 'ESCAPED';
+EVOLUTION: 'EVOLUTION';
+EXCEPT: 'EXCEPT';
+EXCHANGE: 'EXCHANGE';
+EXCLUDE: 'EXCLUDE';
+EXISTS: 'EXISTS';
+EXPLAIN: 'EXPLAIN';
+EXPORT: 'EXPORT';
+EXTENDED: 'EXTENDED';
+EXTERNAL: 'EXTERNAL';
+EXTRACT: 'EXTRACT';
+FALSE: 'FALSE';
+FETCH: 'FETCH';
+FIELDS: 'FIELDS';
+FILTER: 'FILTER';
+FILEFORMAT: 'FILEFORMAT';
+FIRST: 'FIRST';
+FLOAT: 'FLOAT';
+FOLLOWING: 'FOLLOWING';
+FOR: 'FOR';
+FOREIGN: 'FOREIGN';
+FORMAT: 'FORMAT';
+FORMATTED: 'FORMATTED';
+FROM: 'FROM';
+FULL: 'FULL';
+FUNCTION: 'FUNCTION';
+FUNCTIONS: 'FUNCTIONS';
+GENERATED: 'GENERATED';
+GLOBAL: 'GLOBAL';
+GRANT: 'GRANT';
+GROUP: 'GROUP';
+GROUPING: 'GROUPING';
+HAVING: 'HAVING';
+BINARY_HEX: 'X';
+HOUR: 'HOUR';
+HOURS: 'HOURS';
+IDENTIFIER_KW: 'IDENTIFIER';
+IF: 'IF';
+IGNORE: 'IGNORE';
+IMMEDIATE: 'IMMEDIATE';
+IMPORT: 'IMPORT';
+IN: 'IN';
+INCLUDE: 'INCLUDE';
+INDEX: 'INDEX';
+INDEXES: 'INDEXES';
+INNER: 'INNER';
+INPATH: 'INPATH';
+INPUT: 'INPUT';
+INPUTFORMAT: 'INPUTFORMAT';
+INSERT: 'INSERT';
+INTERSECT: 'INTERSECT';
+INTERVAL: 'INTERVAL';
+INT: 'INT';
+INTEGER: 'INTEGER';
+INTO: 'INTO';
+INVOKER: 'INVOKER';
+IS: 'IS';
+ITEMS: 'ITEMS';
+JOIN: 'JOIN';
+KEYS: 'KEYS';
+LANGUAGE: 'LANGUAGE';
+LAST: 'LAST';
+LATERAL: 'LATERAL';
+LAZY: 'LAZY';
+LEADING: 'LEADING';
+LEFT: 'LEFT';
+LIKE: 'LIKE';
+ILIKE: 'ILIKE';
+LIMIT: 'LIMIT';
+LINES: 'LINES';
+LIST: 'LIST';
+LOAD: 'LOAD';
+LOCAL: 'LOCAL';
+LOCATION: 'LOCATION';
+LOCK: 'LOCK';
+LOCKS: 'LOCKS';
+LOGICAL: 'LOGICAL';
+LONG: 'LONG';
+MACRO: 'MACRO';
+MAP: 'MAP' {incComplexTypeLevelCounter();};
+MATCHED: 'MATCHED';
+MERGE: 'MERGE';
+MICROSECOND: 'MICROSECOND';
+MICROSECONDS: 'MICROSECONDS';
+MILLISECOND: 'MILLISECOND';
+MILLISECONDS: 'MILLISECONDS';
+MINUTE: 'MINUTE';
+MINUTES: 'MINUTES';
+MODIFIES: 'MODIFIES';
+MONTH: 'MONTH';
+MONTHS: 'MONTHS';
+MSCK: 'MSCK';
+NAME: 'NAME';
+NAMESPACE: 'NAMESPACE';
+NAMESPACES: 'NAMESPACES';
+NANOSECOND: 'NANOSECOND';
+NANOSECONDS: 'NANOSECONDS';
+NATURAL: 'NATURAL';
+NO: 'NO';
+NONE: 'NONE';
+NOT: 'NOT';
+NULL: 'NULL';
+NULLS: 'NULLS';
+NUMERIC: 'NUMERIC';
+OF: 'OF';
+OFFSET: 'OFFSET';
+ON: 'ON';
+ONLY: 'ONLY';
+OPTION: 'OPTION';
+OPTIONS: 'OPTIONS';
+OR: 'OR';
+ORDER: 'ORDER';
+OUT: 'OUT';
+OUTER: 'OUTER';
+OUTPUTFORMAT: 'OUTPUTFORMAT';
+OVER: 'OVER';
+OVERLAPS: 'OVERLAPS';
+OVERLAY: 'OVERLAY';
+OVERWRITE: 'OVERWRITE';
+PARTITION: 'PARTITION';
+PARTITIONED: 'PARTITIONED';
+PARTITIONS: 'PARTITIONS';
+PERCENTLIT: 'PERCENT';
+PIVOT: 'PIVOT';
+PLACING: 'PLACING';
+POSITION: 'POSITION';
+PRECEDING: 'PRECEDING';
+PRIMARY: 'PRIMARY';
+PRINCIPALS: 'PRINCIPALS';
+PROPERTIES: 'PROPERTIES';
+PURGE: 'PURGE';
+QUARTER: 'QUARTER';
+QUERY: 'QUERY';
+RANGE: 'RANGE';
+READS: 'READS';
+REAL: 'REAL';
+RECORDREADER: 'RECORDREADER';
+RECORDWRITER: 'RECORDWRITER';
+RECOVER: 'RECOVER';
+REDUCE: 'REDUCE';
+REFERENCES: 'REFERENCES';
+REFRESH: 'REFRESH';
+RENAME: 'RENAME';
+REPAIR: 'REPAIR';
+REPEATABLE: 'REPEATABLE';
+REPLACE: 'REPLACE';
+RESET: 'RESET';
+RESPECT: 'RESPECT';
+RESTRICT: 'RESTRICT';
+RETURN: 'RETURN';
+RETURNS: 'RETURNS';
+REVOKE: 'REVOKE';
+RIGHT: 'RIGHT';
+RLIKE: 'RLIKE' | 'REGEXP';
+ROLE: 'ROLE';
+ROLES: 'ROLES';
+ROLLBACK: 'ROLLBACK';
+ROLLUP: 'ROLLUP';
+ROW: 'ROW';
+ROWS: 'ROWS';
+SECOND: 'SECOND';
+SECONDS: 'SECONDS';
+SCHEMA: 'SCHEMA';
+SCHEMAS: 'SCHEMAS';
+SECURITY: 'SECURITY';
+SELECT: 'SELECT';
+SEMI: 'SEMI';
+SEPARATED: 'SEPARATED';
+SERDE: 'SERDE';
+SERDEPROPERTIES: 'SERDEPROPERTIES';
+SESSION_USER: 'SESSION_USER';
+SET: 'SET';
+SETMINUS: 'MINUS';
+SETS: 'SETS';
+SHORT: 'SHORT';
+SHOW: 'SHOW';
+SINGLE: 'SINGLE';
+SKEWED: 'SKEWED';
+SMALLINT: 'SMALLINT';
+SOME: 'SOME';
+SORT: 'SORT';
+SORTED: 'SORTED';
+SOURCE: 'SOURCE';
+SPECIFIC: 'SPECIFIC';
+SQL: 'SQL';
+START: 'START';
+STATISTICS: 'STATISTICS';
+STORED: 'STORED';
+STRATIFY: 'STRATIFY';
+STRING: 'STRING';
+STRUCT: 'STRUCT' {incComplexTypeLevelCounter();};
+SUBSTR: 'SUBSTR';
+SUBSTRING: 'SUBSTRING';
+SYNC: 'SYNC';
+SYSTEM_TIME: 'SYSTEM_TIME';
+SYSTEM_VERSION: 'SYSTEM_VERSION';
+TABLE: 'TABLE';
+TABLES: 'TABLES';
+TABLESAMPLE: 'TABLESAMPLE';
+TARGET: 'TARGET';
+TBLPROPERTIES: 'TBLPROPERTIES';
+TEMPORARY: 'TEMPORARY' | 'TEMP';
+TERMINATED: 'TERMINATED';
+THEN: 'THEN';
+TIME: 'TIME';
+TIMEDIFF: 'TIMEDIFF';
+TIMESTAMP: 'TIMESTAMP';
+TIMESTAMP_LTZ: 'TIMESTAMP_LTZ';
+TIMESTAMP_NTZ: 'TIMESTAMP_NTZ';
+TIMESTAMPADD: 'TIMESTAMPADD';
+TIMESTAMPDIFF: 'TIMESTAMPDIFF';
+TINYINT: 'TINYINT';
+TO: 'TO';
+EXECUTE: 'EXECUTE';
+TOUCH: 'TOUCH';
+TRAILING: 'TRAILING';
+TRANSACTION: 'TRANSACTION';
+TRANSACTIONS: 'TRANSACTIONS';
+TRANSFORM: 'TRANSFORM';
+TRIM: 'TRIM';
+TRUE: 'TRUE';
+TRUNCATE: 'TRUNCATE';
+TRY_CAST: 'TRY_CAST';
+TYPE: 'TYPE';
+UNARCHIVE: 'UNARCHIVE';
+UNBOUNDED: 'UNBOUNDED';
+UNCACHE: 'UNCACHE';
+UNION: 'UNION';
+UNIQUE: 'UNIQUE';
+UNKNOWN: 'UNKNOWN';
+UNLOCK: 'UNLOCK';
+UNPIVOT: 'UNPIVOT';
+UNSET: 'UNSET';
+UPDATE: 'UPDATE';
+USE: 'USE';
+USER: 'USER';
+USING: 'USING';
+VALUES: 'VALUES';
+VARCHAR: 'VARCHAR';
+VAR: 'VAR';
+VARIABLE: 'VARIABLE';
+VARIANT: 'VARIANT';
+VERSION: 'VERSION';
+VIEW: 'VIEW';
+VIEWS: 'VIEWS';
+VOID: 'VOID';
+WEEK: 'WEEK';
+WEEKS: 'WEEKS';
+WHEN: 'WHEN';
+WHERE: 'WHERE';
+WINDOW: 'WINDOW';
+WITH: 'WITH';
+WITHIN: 'WITHIN';
+YEAR: 'YEAR';
+YEARS: 'YEARS';
+ZONE: 'ZONE';
+//--SPARK-KEYWORD-LIST-END
+//============================
+// End of the keywords list
+//============================
+
+EQ  : '=' | '==';
+NSEQ: '<=>';
+NEQ : '<>';
+NEQJ: '!=';
+LT  : '<';
+LTE : '<=' | '!>';
+GT  : '>' {decComplexTypeLevelCounter();};
+GTE : '>=' | '!<';
+SHIFT_LEFT: '<<';
+SHIFT_RIGHT: '>>' {isShiftRightOperator()}?;
+SHIFT_RIGHT_UNSIGNED: '>>>' {isShiftRightOperator()}?;
+
+PLUS: '+';
+MINUS: '-';
+ASTERISK: '*';
+SLASH: '/';
+PERCENT: '%';
+TILDE: '~';
+AMPERSAND: '&';
+PIPE: '|';
+CONCAT_PIPE: '||';
+HAT: '^';
+COLON: ':';
+DOUBLE_COLON: '::';
+ARROW: '->';
+FAT_ARROW : '=>';
+HENT_START: '/*+';
+HENT_END: '*/';
+QUESTION: '?';
+
+STRING_LITERAL
+    : '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
+    | 'R\'' (~'\'')* '\''
+    | 'R"'(~'"')* '"'
+    ;
+
+DOUBLEQUOTED_STRING
+    :'"' ( ~('"'|'\\') | ('\\' .) )* '"'
+    ;
+
+// NOTE: If you move a numeric literal, you should modify `ParserUtils.toExprAlias()`
+// which assumes all numeric literals are between `BIGINT_LITERAL` and `BIGDECIMAL_LITERAL`.
+
+BIGINT_LITERAL
+    : DIGIT+ 'L'
+    ;
+
+SMALLINT_LITERAL
+    : DIGIT+ 'S'
+    ;
+
+TINYINT_LITERAL
+    : DIGIT+ 'Y'
+    ;
+
+INTEGER_VALUE
+    : DIGIT+
+    ;
+
+EXPONENT_VALUE
+    : DIGIT+ EXPONENT
+    | DECIMAL_DIGITS EXPONENT {isValidDecimal()}?
+    ;
+
+DECIMAL_VALUE
+    : DECIMAL_DIGITS {isValidDecimal()}?
+    ;
+
+FLOAT_LITERAL
+    : DIGIT+ EXPONENT? 'F'
+    | DECIMAL_DIGITS EXPONENT? 'F' {isValidDecimal()}?
+    ;
+
+DOUBLE_LITERAL
+    : DIGIT+ EXPONENT? 'D'
+    | DECIMAL_DIGITS EXPONENT? 'D' {isValidDecimal()}?
+    ;
+
+BIGDECIMAL_LITERAL
+    : DIGIT+ EXPONENT? 'BD'
+    | DECIMAL_DIGITS EXPONENT? 'BD' {isValidDecimal()}?
+    ;
+
+// Generalize the identifier to give a sensible INVALID_IDENTIFIER error message:
+// * Unicode letters rather than a-z and A-Z only
+// * URI paths for table references using paths
+// We then narrow down to ANSI rules in exitUnquotedIdentifier() in the parser.
+IDENTIFIER
+    : (UNICODE_LETTER | DIGIT | '_')+
+    | UNICODE_LETTER+ '://' (UNICODE_LETTER | DIGIT | '_' | '/' | '-' | '.' | '?' | '=' | '&' | '#' | '%')+
+    ;
+
+BACKQUOTED_IDENTIFIER
+    : '`' ( ~'`' | '``' )* '`'
+    ;
+
+fragment DECIMAL_DIGITS
+    : DIGIT+ '.' DIGIT*
+    | '.' DIGIT+
+    ;
+
+fragment EXPONENT
+    : 'E' [+-]? DIGIT+
+    ;
+
+fragment DIGIT
+    : [0-9]
+    ;
+
+fragment LETTER
+    : [A-Z]
+    ;
+
+fragment UNICODE_LETTER
+    : [\p{L}]
+    ;
+
+SIMPLE_COMMENT
+    : '--' ('\\\n' | ~[\r\n])* '\r'? '\n'? -> channel(HIDDEN)
+    ;
+
+BRACKETED_COMMENT
+    : '/*' {!isHint()}? ( BRACKETED_COMMENT | . )*? ('*/' | {markUnclosedComment();} EOF) -> channel(HIDDEN)
+    ;
+
+WS
+    : [ \t\n\f\r\u000B\u00A0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u202F\u205F\u3000]+ -> channel(HIDDEN)
+    ;
+
+// Catch-all for anything we can't recognize.
+// We use this to be able to ignore and recover all the text
+// when splitting statements with DelimiterLexer
+UNRECOGNIZED
+    : .
+    ;
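The complex_type_level_counter bookkeeping above is what lets the lexer tell a '>>' shift operator apart from two closing type tags. An illustrative standalone sketch of that state machine (the real counter lives inside the generated lexer; this class is hypothetical):

    public class ShiftRightDisambiguation {
        private int complexTypeLevel = 0;

        void onTypeKeyword() { complexTypeLevel++; }   // ARRAY / MAP / STRUCT
        void onGt() { if (complexTypeLevel > 0) complexTypeLevel--; }
        boolean isShiftRightOperator() { return complexTypeLevel == 0; }

        public static void main(String[] args) {
            ShiftRightDisambiguation s = new ShiftRightDisambiguation();
            // Lexing "ARRAY<ARRAY<INT>>": the two type keywords open two levels,
            s.onTypeKeyword();
            s.onTypeKeyword();
            // so at the trailing '>>' the SHIFT_RIGHT predicate fails and the
            // lexer emits two GT tokens instead, each of which closes one level.
            System.out.println(s.isShiftRightOperator()); // false
            s.onGt();
            s.onGt();
            System.out.println(s.isShiftRightOperator()); // true: '>>' would now be a shift
        }
    }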
diff --git a/async-query-core/src/main/antlr/SqlBaseParser.g4 b/async-query-core/src/main/antlr/SqlBaseParser.g4
new file mode 100644
index 0000000000..c7aa56cf92
--- /dev/null
+++ b/async-query-core/src/main/antlr/SqlBaseParser.g4
@@ -0,0 +1,2104 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * This file is an adaptation of Presto's presto-parser/src/main/antlr4/com/facebook/presto/sql/parser/SqlBase.g4 grammar.
+ */
+
+parser grammar SqlBaseParser;
+
+options { tokenVocab = SqlBaseLexer; }
+
+@members {
+  /**
+   * When false, INTERSECT is given higher precedence than the other set
+   * operations (UNION, EXCEPT and MINUS), as per the SQL standard.
+   */
+  public boolean legacy_setops_precedence_enabled = false;
+
+  /**
+   * When false, a literal with an exponent is converted into
+   * a double type rather than a decimal type.
+   */
+  public boolean legacy_exponent_literal_as_decimal_enabled = false;
+
+  /**
+   * When true, the behavior of keywords follows ANSI SQL standard.
+   */
+  public boolean SQL_standard_keyword_behavior = false;
+
+  /**
+   * When true, double quoted literals are identifiers rather than STRINGs.
+   */
+  public boolean double_quoted_identifiers = false;
+}
+
+compoundOrSingleStatement
+    : singleStatement
+    | singleCompoundStatement
+    ;
+
+singleCompoundStatement
+    : beginEndCompoundBlock SEMICOLON? EOF
+    ;
+
+beginEndCompoundBlock
+    : beginLabel? BEGIN compoundBody END endLabel?
+    ;
+
+compoundBody
+    : (compoundStatements+=compoundStatement SEMICOLON)*
+    ;
+
+compoundStatement
+    : statement
+    | setStatementWithOptionalVarKeyword
+    | beginEndCompoundBlock
+    ;
+
+setStatementWithOptionalVarKeyword
+    : SET variable? assignmentList                              #setVariableWithOptionalKeyword
+    | SET variable? LEFT_PAREN multipartIdentifierList RIGHT_PAREN EQ
+        LEFT_PAREN query RIGHT_PAREN                            #setVariableWithOptionalKeyword
+    ;
+
+singleStatement
+    : (statement|setResetStatement) SEMICOLON* EOF
+    ;
+
+beginLabel
+    : multipartIdentifier COLON
+    ;
+
+endLabel
+    : multipartIdentifier
+    ;
+
+singleExpression
+    : namedExpression EOF
+    ;
+
+singleTableIdentifier
+    : tableIdentifier EOF
+    ;
+
+singleMultipartIdentifier
+    : multipartIdentifier EOF
+    ;
+
+singleFunctionIdentifier
+    : functionIdentifier EOF
+    ;
+
+singleDataType
+    : dataType EOF
+    ;
+
+singleTableSchema
+    : colTypeList EOF
+    ;
+
+statement
+    : query                                                            #statementDefault
+    | executeImmediate                                                 #visitExecuteImmediate
+    | ctes? dmlStatementNoWith                                         #dmlStatement
+    | USE identifierReference                                          #use
+    | USE namespace identifierReference                                #useNamespace
+    | SET CATALOG (errorCapturingIdentifier | stringLit)                  #setCatalog
+    | CREATE namespace (IF errorCapturingNot EXISTS)? identifierReference
+        (commentSpec |
+         locationSpec |
+         (WITH (DBPROPERTIES | PROPERTIES) propertyList))*             #createNamespace
+    | ALTER namespace identifierReference
+        SET (DBPROPERTIES | PROPERTIES) propertyList                   #setNamespaceProperties
+    | ALTER namespace identifierReference
+        UNSET (DBPROPERTIES | PROPERTIES) propertyList                 #unsetNamespaceProperties
+    | ALTER namespace identifierReference
+        SET locationSpec                                               #setNamespaceLocation
+    | DROP namespace (IF EXISTS)? identifierReference
+        (RESTRICT | CASCADE)?                                          #dropNamespace
+    | SHOW namespaces ((FROM | IN) multipartIdentifier)?
+        (LIKE? pattern=stringLit)?                                        #showNamespaces
+    | createTableHeader (LEFT_PAREN colDefinitionList RIGHT_PAREN)? tableProvider?
+        createTableClauses
+        (AS? query)?                                                   #createTable
+    | CREATE TABLE (IF errorCapturingNot EXISTS)? target=tableIdentifier
+        LIKE source=tableIdentifier
+        (tableProvider |
+        rowFormat |
+        createFileFormat |
+        locationSpec |
+        (TBLPROPERTIES tableProps=propertyList))*                      #createTableLike
+    | replaceTableHeader (LEFT_PAREN colDefinitionList RIGHT_PAREN)? tableProvider?
+        createTableClauses
+        (AS? query)?                                                   #replaceTable
+    | ANALYZE TABLE identifierReference partitionSpec? COMPUTE STATISTICS
+        (identifier | FOR COLUMNS identifierSeq | FOR ALL COLUMNS)?    #analyze
+    | ANALYZE TABLES ((FROM | IN) identifierReference)? COMPUTE STATISTICS
+        (identifier)?                                                  #analyzeTables
+    | ALTER TABLE identifierReference
+        ADD (COLUMN | COLUMNS)
+        columns=qualifiedColTypeWithPositionList                       #addTableColumns
+    | ALTER TABLE identifierReference
+        ADD (COLUMN | COLUMNS)
+        LEFT_PAREN columns=qualifiedColTypeWithPositionList RIGHT_PAREN #addTableColumns
+    | ALTER TABLE table=identifierReference
+        RENAME COLUMN
+        from=multipartIdentifier TO to=errorCapturingIdentifier        #renameTableColumn
+    | ALTER TABLE identifierReference
+        DROP (COLUMN | COLUMNS) (IF EXISTS)?
+        LEFT_PAREN columns=multipartIdentifierList RIGHT_PAREN         #dropTableColumns
+    | ALTER TABLE identifierReference
+        DROP (COLUMN | COLUMNS) (IF EXISTS)?
+        columns=multipartIdentifierList                                #dropTableColumns
+    | ALTER (TABLE | VIEW) from=identifierReference
+        RENAME TO to=multipartIdentifier                               #renameTable
+    | ALTER (TABLE | VIEW) identifierReference
+        SET TBLPROPERTIES propertyList                                 #setTableProperties
+    | ALTER (TABLE | VIEW) identifierReference
+        UNSET TBLPROPERTIES (IF EXISTS)? propertyList                  #unsetTableProperties
+    | ALTER TABLE table=identifierReference
+        (ALTER | CHANGE) COLUMN? column=multipartIdentifier
+        alterColumnAction?                                             #alterTableAlterColumn
+    | ALTER TABLE table=identifierReference partitionSpec?
+        CHANGE COLUMN?
+        colName=multipartIdentifier colType colPosition?               #hiveChangeColumn
+    | ALTER TABLE table=identifierReference partitionSpec?
+        REPLACE COLUMNS
+        LEFT_PAREN columns=qualifiedColTypeWithPositionList
+        RIGHT_PAREN                                                    #hiveReplaceColumns
+    | ALTER TABLE identifierReference (partitionSpec)?
+        SET SERDE stringLit (WITH SERDEPROPERTIES propertyList)?       #setTableSerDe
+    | ALTER TABLE identifierReference (partitionSpec)?
+        SET SERDEPROPERTIES propertyList                               #setTableSerDe
+    | ALTER (TABLE | VIEW) identifierReference ADD (IF errorCapturingNot EXISTS)?
+        partitionSpecLocation+                                         #addTablePartition
+    | ALTER TABLE identifierReference
+        from=partitionSpec RENAME TO to=partitionSpec                  #renameTablePartition
+    | ALTER (TABLE | VIEW) identifierReference
+        DROP (IF EXISTS)? partitionSpec (COMMA partitionSpec)* PURGE?  #dropTablePartitions
+    | ALTER TABLE identifierReference
+        (partitionSpec)? SET locationSpec                              #setTableLocation
+    | ALTER TABLE identifierReference RECOVER PARTITIONS                 #recoverPartitions
+    | ALTER TABLE identifierReference
+        (clusterBySpec | CLUSTER BY NONE)                              #alterClusterBy
+    | DROP TABLE (IF EXISTS)? identifierReference PURGE?               #dropTable
+    | DROP VIEW (IF EXISTS)? identifierReference                       #dropView
+    | CREATE (OR REPLACE)? (GLOBAL? TEMPORARY)?
+        VIEW (IF errorCapturingNot EXISTS)? identifierReference
+        identifierCommentList?
+        (commentSpec |
+         schemaBinding |
+         (PARTITIONED ON identifierList) |
+         (TBLPROPERTIES propertyList))*
+        AS query                                                       #createView
+    | CREATE (OR REPLACE)? GLOBAL? TEMPORARY VIEW
+        tableIdentifier (LEFT_PAREN colTypeList RIGHT_PAREN)? tableProvider
+        (OPTIONS propertyList)?                                        #createTempViewUsing
+    | ALTER VIEW identifierReference AS? query                         #alterViewQuery
+    | ALTER VIEW identifierReference schemaBinding                     #alterViewSchemaBinding
+    | CREATE (OR REPLACE)? TEMPORARY? FUNCTION (IF errorCapturingNot EXISTS)?
+        identifierReference AS className=stringLit
+        (USING resource (COMMA resource)*)?                            #createFunction
+    | CREATE (OR REPLACE)? TEMPORARY? FUNCTION (IF errorCapturingNot EXISTS)?
+        identifierReference LEFT_PAREN parameters=colDefinitionList? RIGHT_PAREN
+        (RETURNS (dataType | TABLE LEFT_PAREN returnParams=colTypeList RIGHT_PAREN))?
+        routineCharacteristics
+        RETURN (query | expression)                                    #createUserDefinedFunction
+    | DROP TEMPORARY? FUNCTION (IF EXISTS)? identifierReference        #dropFunction
+    | DECLARE (OR REPLACE)? variable?
+        identifierReference dataType? variableDefaultExpression?       #createVariable
+    | DROP TEMPORARY variable (IF EXISTS)? identifierReference         #dropVariable
+    | EXPLAIN (LOGICAL | FORMATTED | EXTENDED | CODEGEN | COST)?
+        (statement|setResetStatement)                                  #explain
+    | SHOW TABLES ((FROM | IN) identifierReference)?
+        (LIKE? pattern=stringLit)?                                        #showTables
+    | SHOW TABLE EXTENDED ((FROM | IN) ns=identifierReference)?
+        LIKE pattern=stringLit partitionSpec?                             #showTableExtended
+    | SHOW TBLPROPERTIES table=identifierReference
+        (LEFT_PAREN key=propertyKey RIGHT_PAREN)?                      #showTblProperties
+    | SHOW COLUMNS (FROM | IN) table=identifierReference
+        ((FROM | IN) ns=multipartIdentifier)?                          #showColumns
+    | SHOW VIEWS ((FROM | IN) identifierReference)?
+        (LIKE? pattern=stringLit)?                                        #showViews
+    | SHOW PARTITIONS identifierReference partitionSpec?               #showPartitions
+    | SHOW identifier? FUNCTIONS ((FROM | IN) ns=identifierReference)?
+        (LIKE? (legacy=multipartIdentifier | pattern=stringLit))?      #showFunctions
+    | SHOW CREATE TABLE identifierReference (AS SERDE)?                #showCreateTable
+    | SHOW CURRENT namespace                                           #showCurrentNamespace
+    | SHOW CATALOGS (LIKE? pattern=stringLit)?                            #showCatalogs
+    | (DESC | DESCRIBE) FUNCTION EXTENDED? describeFuncName            #describeFunction
+    | (DESC | DESCRIBE) namespace EXTENDED?
+        identifierReference                                            #describeNamespace
+    | (DESC | DESCRIBE) TABLE? option=(EXTENDED | FORMATTED)?
+        identifierReference partitionSpec? describeColName?            #describeRelation
+    | (DESC | DESCRIBE) QUERY? query                                   #describeQuery
+    | COMMENT ON namespace identifierReference IS
+        comment                                                        #commentNamespace
+    | COMMENT ON TABLE identifierReference IS comment                  #commentTable
+    | REFRESH TABLE identifierReference                                #refreshTable
+    | REFRESH FUNCTION identifierReference                             #refreshFunction
+    | REFRESH (stringLit | .*?)                                        #refreshResource
+    | CACHE LAZY? TABLE identifierReference
+        (OPTIONS options=propertyList)? (AS? query)?                   #cacheTable
+    | UNCACHE TABLE (IF EXISTS)? identifierReference                   #uncacheTable
+    | CLEAR CACHE                                                      #clearCache
+    | LOAD DATA LOCAL? INPATH path=stringLit OVERWRITE? INTO TABLE
+        identifierReference partitionSpec?                             #loadData
+    | TRUNCATE TABLE identifierReference partitionSpec?                #truncateTable
+    | (MSCK)? REPAIR TABLE identifierReference
+        (option=(ADD|DROP|SYNC) PARTITIONS)?                           #repairTable
+    | op=(ADD | LIST) identifier .*?                                   #manageResource
+    | CREATE INDEX (IF errorCapturingNot EXISTS)? identifier ON TABLE?
+        identifierReference (USING indexType=identifier)?
+        LEFT_PAREN columns=multipartIdentifierPropertyList RIGHT_PAREN
+        (OPTIONS options=propertyList)?                                #createIndex
+    | DROP INDEX (IF EXISTS)? identifier ON TABLE? identifierReference #dropIndex
+    | unsupportedHiveNativeCommands .*?                                #failNativeCommand
+    ;
+
+setResetStatement
+    : SET COLLATION collationName=identifier                           #setCollation
+    | SET ROLE .*?                                                     #failSetRole
+    | SET TIME ZONE interval                                           #setTimeZone
+    | SET TIME ZONE timezone                                           #setTimeZone
+    | SET TIME ZONE .*?                                                #setTimeZone
+    | SET variable assignmentList                                      #setVariable
+    | SET variable LEFT_PAREN multipartIdentifierList RIGHT_PAREN EQ
+        LEFT_PAREN query RIGHT_PAREN                                   #setVariable
+    | SET configKey EQ configValue                                     #setQuotedConfiguration
+    | SET configKey (EQ .*?)?                                          #setConfiguration
+    | SET .*? EQ configValue                                           #setQuotedConfiguration
+    | SET .*?                                                          #setConfiguration
+    | RESET configKey                                                  #resetQuotedConfiguration
+    | RESET .*?                                                        #resetConfiguration
+    ;
+
+executeImmediate
+    : EXECUTE IMMEDIATE queryParam=executeImmediateQueryParam (INTO targetVariable=multipartIdentifierList)? executeImmediateUsing?
+    ;
+
+executeImmediateUsing
+    : USING LEFT_PAREN params=namedExpressionSeq RIGHT_PAREN
+    | USING params=namedExpressionSeq
+    ;
+
+executeImmediateQueryParam
+    : stringLit
+    | multipartIdentifier
+    ;
+
+executeImmediateArgument
+    : (constant|multipartIdentifier) (AS name=errorCapturingIdentifier)?
+    ;
+
+executeImmediateArgumentSeq
+    : executeImmediateArgument (COMMA executeImmediateArgument)*
+    ;
+
+timezone
+    : stringLit
+    | LOCAL
+    ;
+
+configKey
+    : quotedIdentifier
+    ;
+
+configValue
+    : backQuotedIdentifier
+    ;
+
+unsupportedHiveNativeCommands
+    : kw1=CREATE kw2=ROLE
+    | kw1=DROP kw2=ROLE
+    | kw1=GRANT kw2=ROLE?
+    | kw1=REVOKE kw2=ROLE?
+    | kw1=SHOW kw2=GRANT
+    | kw1=SHOW kw2=ROLE kw3=GRANT?
+    | kw1=SHOW kw2=PRINCIPALS
+    | kw1=SHOW kw2=ROLES
+    | kw1=SHOW kw2=CURRENT kw3=ROLES
+    | kw1=EXPORT kw2=TABLE
+    | kw1=IMPORT kw2=TABLE
+    | kw1=SHOW kw2=COMPACTIONS
+    | kw1=SHOW kw2=CREATE kw3=TABLE
+    | kw1=SHOW kw2=TRANSACTIONS
+    | kw1=SHOW kw2=INDEXES
+    | kw1=SHOW kw2=LOCKS
+    | kw1=CREATE kw2=INDEX
+    | kw1=DROP kw2=INDEX
+    | kw1=ALTER kw2=INDEX
+    | kw1=LOCK kw2=TABLE
+    | kw1=LOCK kw2=DATABASE
+    | kw1=UNLOCK kw2=TABLE
+    | kw1=UNLOCK kw2=DATABASE
+    | kw1=CREATE kw2=TEMPORARY kw3=MACRO
+    | kw1=DROP kw2=TEMPORARY kw3=MACRO
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=NOT kw4=CLUSTERED
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=CLUSTERED kw4=BY
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=NOT kw4=SORTED
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=SKEWED kw4=BY
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=NOT kw4=SKEWED
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=NOT kw4=STORED kw5=AS kw6=DIRECTORIES
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=SET kw4=SKEWED kw5=LOCATION
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=EXCHANGE kw4=PARTITION
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=ARCHIVE kw4=PARTITION
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=UNARCHIVE kw4=PARTITION
+    | kw1=ALTER kw2=TABLE tableIdentifier kw3=TOUCH
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=COMPACT
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=CONCATENATE
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=SET kw4=FILEFORMAT
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=REPLACE kw4=COLUMNS
+    | kw1=START kw2=TRANSACTION
+    | kw1=COMMIT
+    | kw1=ROLLBACK
+    | kw1=DFS
+    ;
+
+createTableHeader
+    : CREATE TEMPORARY? EXTERNAL? TABLE (IF errorCapturingNot EXISTS)? identifierReference
+    ;
+
+replaceTableHeader
+    : (CREATE OR)? REPLACE TABLE identifierReference
+    ;
+
+clusterBySpec
+    : CLUSTER BY LEFT_PAREN multipartIdentifierList RIGHT_PAREN
+    ;
+
+bucketSpec
+    : CLUSTERED BY identifierList
+      (SORTED BY orderedIdentifierList)?
+      INTO INTEGER_VALUE BUCKETS
+    ;
+
+skewSpec
+    : SKEWED BY identifierList
+      ON (constantList | nestedConstantList)
+      (STORED AS DIRECTORIES)?
+    ;
+
+locationSpec
+    : LOCATION stringLit
+    ;
+
+schemaBinding
+    : WITH SCHEMA (BINDING | COMPENSATION | EVOLUTION | TYPE EVOLUTION)
+    ;
+
+commentSpec
+    : COMMENT stringLit
+    ;
+
+query
+    : ctes? queryTerm queryOrganization
+    ;
+
+insertInto
+    : INSERT OVERWRITE TABLE? identifierReference (partitionSpec (IF errorCapturingNot EXISTS)?)?  ((BY NAME) | identifierList)? #insertOverwriteTable
+    | INSERT INTO TABLE? identifierReference partitionSpec? (IF errorCapturingNot EXISTS)? ((BY NAME) | identifierList)?   #insertIntoTable
+    | INSERT INTO TABLE? identifierReference REPLACE whereClause                                             #insertIntoReplaceWhere
+    | INSERT OVERWRITE LOCAL? DIRECTORY path=stringLit rowFormat? createFileFormat?                     #insertOverwriteHiveDir
+    | INSERT OVERWRITE LOCAL? DIRECTORY (path=stringLit)? tableProvider (OPTIONS options=propertyList)? #insertOverwriteDir
+    ;
+
+partitionSpecLocation
+    : partitionSpec locationSpec?
+    ;
+
+partitionSpec
+    : PARTITION LEFT_PAREN partitionVal (COMMA partitionVal)* RIGHT_PAREN
+    ;
+
+partitionVal
+    : identifier (EQ constant)?
+    | identifier EQ DEFAULT
+    ;
+
+namespace
+    : NAMESPACE
+    | DATABASE
+    | SCHEMA
+    ;
+
+namespaces
+    : NAMESPACES
+    | DATABASES
+    | SCHEMAS
+    ;
+
+variable
+    : VARIABLE
+    | VAR
+    ;
+
+describeFuncName
+    : identifierReference
+    | stringLit
+    | comparisonOperator
+    | arithmeticOperator
+    | predicateOperator
+    | shiftOperator
+    | BANG
+    ;
+
+describeColName
+    : nameParts+=errorCapturingIdentifier (DOT nameParts+=errorCapturingIdentifier)*
+    ;
+
+ctes
+    : WITH namedQuery (COMMA namedQuery)*
+    ;
+
+namedQuery
+    : name=errorCapturingIdentifier (columnAliases=identifierList)? AS? LEFT_PAREN query RIGHT_PAREN
+    ;
+
+tableProvider
+    : USING multipartIdentifier
+    ;
+
+createTableClauses
+    :((OPTIONS options=expressionPropertyList) |
+     (PARTITIONED BY partitioning=partitionFieldList) |
+     skewSpec |
+     clusterBySpec |
+     bucketSpec |
+     rowFormat |
+     createFileFormat |
+     locationSpec |
+     commentSpec |
+     (TBLPROPERTIES tableProps=propertyList))*
+    ;
+
+propertyList
+    : LEFT_PAREN property (COMMA property)* RIGHT_PAREN
+    ;
+
+property
+    : key=propertyKey (EQ? value=propertyValue)?
+    ;
+
+propertyKey
+    : errorCapturingIdentifier (DOT errorCapturingIdentifier)*
+    | stringLit
+    ;
+
+propertyValue
+    : INTEGER_VALUE
+    | DECIMAL_VALUE
+    | booleanValue
+    | stringLit
+    ;
+
+expressionPropertyList
+    : LEFT_PAREN expressionProperty (COMMA expressionProperty)* RIGHT_PAREN
+    ;
+
+expressionProperty
+    : key=propertyKey (EQ? value=expression)?
+    ;
+
+constantList
+    : LEFT_PAREN constant (COMMA constant)* RIGHT_PAREN
+    ;
+
+nestedConstantList
+    : LEFT_PAREN constantList (COMMA constantList)* RIGHT_PAREN
+    ;
+
+createFileFormat
+    : STORED AS fileFormat
+    | STORED BY storageHandler
+    ;
+
+fileFormat
+    : INPUTFORMAT inFmt=stringLit OUTPUTFORMAT outFmt=stringLit    #tableFileFormat
+    | identifier                                             #genericFileFormat
+    ;
+
+storageHandler
+    : stringLit (WITH SERDEPROPERTIES propertyList)?
+    ;
+
+resource
+    : identifier stringLit
+    ;
+
+dmlStatementNoWith
+    : insertInto query                                                             #singleInsertQuery
+    | fromClause multiInsertQueryBody+                                             #multiInsertQuery
+    | DELETE FROM identifierReference tableAlias whereClause?                      #deleteFromTable
+    | UPDATE identifierReference tableAlias setClause whereClause?                 #updateTable
+    | MERGE (WITH SCHEMA EVOLUTION)? INTO target=identifierReference targetAlias=tableAlias
+        USING (source=identifierReference |
+          LEFT_PAREN sourceQuery=query RIGHT_PAREN) sourceAlias=tableAlias
+        ON mergeCondition=booleanExpression
+        matchedClause*
+        notMatchedClause*
+        notMatchedBySourceClause*                                                  #mergeIntoTable
+    ;
+
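+// Note: identifierReference also accepts Spark's IDENTIFIER(...) clause, e.g.
+// IDENTIFIER('tbl' || '_suffix'), which resolves the string expression to a
+// name at analysis time.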
+identifierReference
+    : IDENTIFIER_KW LEFT_PAREN expression RIGHT_PAREN
+    | multipartIdentifier
+    ;
+
+queryOrganization
+    : (ORDER BY order+=sortItem (COMMA order+=sortItem)*)?
+      (CLUSTER BY clusterBy+=expression (COMMA clusterBy+=expression)*)?
+      (DISTRIBUTE BY distributeBy+=expression (COMMA distributeBy+=expression)*)?
+      (SORT BY sort+=sortItem (COMMA sort+=sortItem)*)?
+      windowClause?
+      (LIMIT (ALL | limit=expression))?
+      (OFFSET offset=expression)?
+    ;
+
+multiInsertQueryBody
+    : insertInto fromStatementBody
+    ;
+
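+// With legacy_setops_precedence_enabled == false (the default), the ordered
+// alternatives below give INTERSECT higher precedence than UNION, EXCEPT and
+// MINUS, so "a UNION b INTERSECT c" parses as "a UNION (b INTERSECT c)".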
+queryTerm
+    : queryPrimary                                                                       #queryTermDefault
+    | left=queryTerm {legacy_setops_precedence_enabled}?
+        operator=(INTERSECT | UNION | EXCEPT | SETMINUS) setQuantifier? right=queryTerm  #setOperation
+    | left=queryTerm {!legacy_setops_precedence_enabled}?
+        operator=INTERSECT setQuantifier? right=queryTerm                                #setOperation
+    | left=queryTerm {!legacy_setops_precedence_enabled}?
+        operator=(UNION | EXCEPT | SETMINUS) setQuantifier? right=queryTerm              #setOperation
+    ;
+
+queryPrimary
+    : querySpecification                                                    #queryPrimaryDefault
+    | fromStatement                                                         #fromStmt
+    | TABLE identifierReference                                             #table
+    | inlineTable                                                           #inlineTableDefault1
+    | LEFT_PAREN query RIGHT_PAREN                                          #subquery
+    ;
+
+sortItem
+    : expression ordering=(ASC | DESC)? (NULLS nullOrder=(LAST | FIRST))?
+    ;
+
+fromStatement
+    : fromClause fromStatementBody+
+    ;
+
+fromStatementBody
+    : transformClause
+      whereClause?
+      queryOrganization
+    | selectClause
+      lateralView*
+      whereClause?
+      aggregationClause?
+      havingClause?
+      windowClause?
+      queryOrganization
+    ;
+
+querySpecification
+    : transformClause
+      fromClause?
+      lateralView*
+      whereClause?
+      aggregationClause?
+      havingClause?
+      windowClause?                                                         #transformQuerySpecification
+    | selectClause
+      fromClause?
+      lateralView*
+      whereClause?
+      aggregationClause?
+      havingClause?
+      windowClause?                                                         #regularQuerySpecification
+    ;
+
+transformClause
+    : (SELECT kind=TRANSFORM LEFT_PAREN setQuantifier? expressionSeq RIGHT_PAREN
+            | kind=MAP setQuantifier? expressionSeq
+            | kind=REDUCE setQuantifier? expressionSeq)
+      inRowFormat=rowFormat?
+      (RECORDWRITER recordWriter=stringLit)?
+      USING script=stringLit
+      (AS (identifierSeq | colTypeList | (LEFT_PAREN (identifierSeq | colTypeList) RIGHT_PAREN)))?
+      outRowFormat=rowFormat?
+      (RECORDREADER recordReader=stringLit)?
+    ;
+
+selectClause
+    : SELECT (hints+=hint)* setQuantifier? namedExpressionSeq
+    ;
+
+setClause
+    : SET assignmentList
+    ;
+
+matchedClause
+    : WHEN MATCHED (AND matchedCond=booleanExpression)? THEN matchedAction
+    ;
+
+notMatchedClause
+    : WHEN errorCapturingNot MATCHED (BY TARGET)? (AND notMatchedCond=booleanExpression)? THEN notMatchedAction
+    ;
+
+notMatchedBySourceClause
+    : WHEN errorCapturingNot MATCHED BY SOURCE (AND notMatchedBySourceCond=booleanExpression)? THEN notMatchedBySourceAction
+    ;
+
+matchedAction
+    : DELETE
+    | UPDATE SET ASTERISK
+    | UPDATE SET assignmentList
+    ;
+
+notMatchedAction
+    : INSERT ASTERISK
+    | INSERT LEFT_PAREN columns=multipartIdentifierList RIGHT_PAREN
+        VALUES LEFT_PAREN expression (COMMA expression)* RIGHT_PAREN
+    ;
+
+notMatchedBySourceAction
+    : DELETE
+    | UPDATE SET assignmentList
+    ;
+
+exceptClause
+    : EXCEPT LEFT_PAREN exceptCols=multipartIdentifierList RIGHT_PAREN
+    ;
+
+assignmentList
+    : assignment (COMMA assignment)*
+    ;
+
+assignment
+    : key=multipartIdentifier EQ value=expression
+    ;
+
+whereClause
+    : WHERE booleanExpression
+    ;
+
+havingClause
+    : HAVING booleanExpression
+    ;
+
+hint
+    : HENT_START hintStatements+=hintStatement (COMMA? hintStatements+=hintStatement)* HENT_END
+    ;
+
+hintStatement
+    : hintName=identifier
+    | hintName=identifier LEFT_PAREN parameters+=primaryExpression (COMMA parameters+=primaryExpression)* RIGHT_PAREN
+    ;
+
+fromClause
+    : FROM relation (COMMA relation)* lateralView* pivotClause? unpivotClause?
+    ;
+
+temporalClause
+    : FOR? (SYSTEM_VERSION | VERSION) AS OF version
+    | FOR? (SYSTEM_TIME | TIMESTAMP) AS OF timestamp=valueExpression
+    ;
+
+aggregationClause
+    : GROUP BY groupingExpressionsWithGroupingAnalytics+=groupByClause
+        (COMMA groupingExpressionsWithGroupingAnalytics+=groupByClause)*
+    | GROUP BY groupingExpressions+=expression (COMMA groupingExpressions+=expression)* (
+      WITH kind=ROLLUP
+    | WITH kind=CUBE
+    | kind=GROUPING SETS LEFT_PAREN groupingSet (COMMA groupingSet)* RIGHT_PAREN)?
+    ;
+
+groupByClause
+    : groupingAnalytics
+    | expression
+    ;
+
+groupingAnalytics
+    : (ROLLUP | CUBE) LEFT_PAREN groupingSet (COMMA groupingSet)* RIGHT_PAREN
+    | GROUPING SETS LEFT_PAREN groupingElement (COMMA groupingElement)* RIGHT_PAREN
+    ;
+
+groupingElement
+    : groupingAnalytics
+    | groupingSet
+    ;
+
+groupingSet
+    : LEFT_PAREN (expression (COMMA expression)*)? RIGHT_PAREN
+    | expression
+    ;
+
+pivotClause
+    : PIVOT LEFT_PAREN aggregates=namedExpressionSeq FOR pivotColumn IN LEFT_PAREN pivotValues+=pivotValue (COMMA pivotValues+=pivotValue)* RIGHT_PAREN RIGHT_PAREN
+    ;
+
+pivotColumn
+    : identifiers+=errorCapturingIdentifier
+    | LEFT_PAREN identifiers+=errorCapturingIdentifier (COMMA identifiers+=errorCapturingIdentifier)* RIGHT_PAREN
+    ;
+
+pivotValue
+    : expression (AS? errorCapturingIdentifier)?
+    ;
+
+unpivotClause
+    : UNPIVOT nullOperator=unpivotNullClause? LEFT_PAREN
+        operator=unpivotOperator
+      RIGHT_PAREN (AS? errorCapturingIdentifier)?
+    ;
+
+unpivotNullClause
+    : (INCLUDE | EXCLUDE) NULLS
+    ;
+
+unpivotOperator
+    : (unpivotSingleValueColumnClause | unpivotMultiValueColumnClause)
+    ;
+
+unpivotSingleValueColumnClause
+    : unpivotValueColumn FOR unpivotNameColumn IN LEFT_PAREN unpivotColumns+=unpivotColumnAndAlias (COMMA unpivotColumns+=unpivotColumnAndAlias)* RIGHT_PAREN
+    ;
+
+unpivotMultiValueColumnClause
+    : LEFT_PAREN unpivotValueColumns+=unpivotValueColumn (COMMA unpivotValueColumns+=unpivotValueColumn)* RIGHT_PAREN
+      FOR unpivotNameColumn
+      IN LEFT_PAREN unpivotColumnSets+=unpivotColumnSet (COMMA unpivotColumnSets+=unpivotColumnSet)* RIGHT_PAREN
+    ;
+
+unpivotColumnSet
+    : LEFT_PAREN unpivotColumns+=unpivotColumn (COMMA unpivotColumns+=unpivotColumn)* RIGHT_PAREN unpivotAlias?
+    ;
+
+unpivotValueColumn
+    : identifier
+    ;
+
+unpivotNameColumn
+    : identifier
+    ;
+
+unpivotColumnAndAlias
+    : unpivotColumn unpivotAlias?
+    ;
+
+unpivotColumn
+    : multipartIdentifier
+    ;
+
+unpivotAlias
+    : AS? errorCapturingIdentifier
+    ;
+
+lateralView
+    : LATERAL VIEW (OUTER)? qualifiedName LEFT_PAREN (expression (COMMA expression)*)? RIGHT_PAREN tblName=identifier (AS? colName+=identifier (COMMA colName+=identifier)*)?
+    ;
+
+setQuantifier
+    : DISTINCT
+    | ALL
+    ;
+
+relation
+    : LATERAL? relationPrimary relationExtension*
+    ;
+
+relationExtension
+    : joinRelation
+    | pivotClause
+    | unpivotClause
+    ;
+
+joinRelation
+    : (joinType) JOIN LATERAL? right=relationPrimary joinCriteria?
+    | NATURAL joinType JOIN LATERAL? right=relationPrimary
+    ;
+
+joinType
+    : INNER?
+    | CROSS
+    | LEFT OUTER?
+    | LEFT? SEMI
+    | RIGHT OUTER?
+    | FULL OUTER?
+    | LEFT? ANTI
+    ;
+
+joinCriteria
+    : ON booleanExpression
+    | USING identifierList
+    ;
+
+sample
+    : TABLESAMPLE LEFT_PAREN sampleMethod? RIGHT_PAREN (REPEATABLE LEFT_PAREN seed=INTEGER_VALUE RIGHT_PAREN)?
+    ;
+
+sampleMethod
+    : negativeSign=MINUS? percentage=(INTEGER_VALUE | DECIMAL_VALUE) PERCENTLIT   #sampleByPercentile
+    | expression ROWS                                                             #sampleByRows
+    | sampleType=BUCKET numerator=INTEGER_VALUE OUT OF denominator=INTEGER_VALUE
+        (ON (identifier | qualifiedName LEFT_PAREN RIGHT_PAREN))?                 #sampleByBucket
+    | bytes=expression                                                            #sampleByBytes
+    ;
+
+identifierList
+    : LEFT_PAREN identifierSeq RIGHT_PAREN
+    ;
+
+identifierSeq
+    : ident+=errorCapturingIdentifier (COMMA ident+=errorCapturingIdentifier)*
+    ;
+
+orderedIdentifierList
+    : LEFT_PAREN orderedIdentifier (COMMA orderedIdentifier)* RIGHT_PAREN
+    ;
+
+orderedIdentifier
+    : ident=errorCapturingIdentifier ordering=(ASC | DESC)?
+    ;
+
+identifierCommentList
+    : LEFT_PAREN identifierComment (COMMA identifierComment)* RIGHT_PAREN
+    ;
+
+identifierComment
+    : identifier commentSpec?
+    ;
+
+relationPrimary
+    : identifierReference temporalClause?
+      optionsClause? sample? tableAlias                     #tableName
+    | LEFT_PAREN query RIGHT_PAREN sample? tableAlias       #aliasedQuery
+    | LEFT_PAREN relation RIGHT_PAREN sample? tableAlias    #aliasedRelation
+    | inlineTable                                           #inlineTableDefault2
+    | functionTable                                         #tableValuedFunction
+    ;
+
+optionsClause
+    : WITH options=propertyList
+    ;
+
+inlineTable
+    : VALUES expression (COMMA expression)* tableAlias
+    ;
+
+functionTableSubqueryArgument
+    : TABLE identifierReference tableArgumentPartitioning?
+    | TABLE LEFT_PAREN identifierReference RIGHT_PAREN tableArgumentPartitioning?
+    | TABLE LEFT_PAREN query RIGHT_PAREN tableArgumentPartitioning?
+    ;
+
+tableArgumentPartitioning
+    : ((WITH SINGLE PARTITION)
+        | ((PARTITION | DISTRIBUTE) BY
+            (((LEFT_PAREN partition+=expression (COMMA partition+=expression)* RIGHT_PAREN))
+            | (expression (COMMA invalidMultiPartitionExpression=expression)+)
+            | partition+=expression)))
+      ((ORDER | SORT) BY
+        (((LEFT_PAREN sortItem (COMMA sortItem)* RIGHT_PAREN)
+        | (sortItem (COMMA invalidMultiSortItem=sortItem)+)
+        | sortItem)))?
+    ;
+
+functionTableNamedArgumentExpression
+    : key=identifier FAT_ARROW table=functionTableSubqueryArgument
+    ;
+
+functionTableReferenceArgument
+    : functionTableSubqueryArgument
+    | functionTableNamedArgumentExpression
+    ;
+
+functionTableArgument
+    : functionTableReferenceArgument
+    | functionArgument
+    ;
+
+functionTable
+    : funcName=functionName LEFT_PAREN
+      (functionTableArgument (COMMA functionTableArgument)*)?
+      RIGHT_PAREN tableAlias
+    ;
+
+tableAlias
+    : (AS? strictIdentifier identifierList?)?
+    ;
+
+rowFormat
+    : ROW FORMAT SERDE name=stringLit (WITH SERDEPROPERTIES props=propertyList)?       #rowFormatSerde
+    | ROW FORMAT DELIMITED
+      (FIELDS TERMINATED BY fieldsTerminatedBy=stringLit (ESCAPED BY escapedBy=stringLit)?)?
+      (COLLECTION ITEMS TERMINATED BY collectionItemsTerminatedBy=stringLit)?
+      (MAP KEYS TERMINATED BY keysTerminatedBy=stringLit)?
+      (LINES TERMINATED BY linesSeparatedBy=stringLit)?
+      (NULL DEFINED AS nullDefinedAs=stringLit)?                                       #rowFormatDelimited
+    ;
+
+multipartIdentifierList
+    : multipartIdentifier (COMMA multipartIdentifier)*
+    ;
+
+multipartIdentifier
+    : parts+=errorCapturingIdentifier (DOT parts+=errorCapturingIdentifier)*
+    ;
+
+multipartIdentifierPropertyList
+    : multipartIdentifierProperty (COMMA multipartIdentifierProperty)*
+    ;
+
+multipartIdentifierProperty
+    : multipartIdentifier (OPTIONS options=propertyList)?
+    ;
+
+tableIdentifier
+    : (db=errorCapturingIdentifier DOT)? table=errorCapturingIdentifier
+    ;
+
+functionIdentifier
+    : (db=errorCapturingIdentifier DOT)? function=errorCapturingIdentifier
+    ;
+
+namedExpression
+    : expression (AS? (name=errorCapturingIdentifier | identifierList))?
+    ;
+
+namedExpressionSeq
+    : namedExpression (COMMA namedExpression)*
+    ;
+
+partitionFieldList
+    : LEFT_PAREN fields+=partitionField (COMMA fields+=partitionField)* RIGHT_PAREN
+    ;
+
+partitionField
+    : transform  #partitionTransform
+    | colType    #partitionColumn
+    ;
+
+transform
+    : qualifiedName                                                                             #identityTransform
+    | transformName=identifier
+      LEFT_PAREN argument+=transformArgument (COMMA argument+=transformArgument)* RIGHT_PAREN   #applyTransform
+    ;
+
+transformArgument
+    : qualifiedName
+    | constant
+    ;
+
+expression
+    : booleanExpression
+    ;
+
+namedArgumentExpression
+    : key=identifier FAT_ARROW value=expression
+    ;
+
+functionArgument
+    : expression
+    | namedArgumentExpression
+    ;
+
+expressionSeq
+    : expression (COMMA expression)*
+    ;
+
+booleanExpression
+    : (NOT | BANG) booleanExpression                               #logicalNot
+    | EXISTS LEFT_PAREN query RIGHT_PAREN                          #exists
+    | valueExpression predicate?                                   #predicated
+    | left=booleanExpression operator=AND right=booleanExpression  #logicalBinary
+    | left=booleanExpression operator=OR right=booleanExpression   #logicalBinary
+    ;
+
+predicate
+    : errorCapturingNot? kind=BETWEEN lower=valueExpression AND upper=valueExpression
+    | errorCapturingNot? kind=IN LEFT_PAREN expression (COMMA expression)* RIGHT_PAREN
+    | errorCapturingNot? kind=IN LEFT_PAREN query RIGHT_PAREN
+    | errorCapturingNot? kind=RLIKE pattern=valueExpression
+    | errorCapturingNot? kind=(LIKE | ILIKE) quantifier=(ANY | SOME | ALL) (LEFT_PAREN RIGHT_PAREN | LEFT_PAREN expression (COMMA expression)* RIGHT_PAREN)
+    | errorCapturingNot? kind=(LIKE | ILIKE) pattern=valueExpression (ESCAPE escapeChar=stringLit)?
+    | IS errorCapturingNot? kind=NULL
+    | IS errorCapturingNot? kind=(TRUE | FALSE | UNKNOWN)
+    | IS errorCapturingNot? kind=DISTINCT FROM right=valueExpression
+    ;
+
+errorCapturingNot
+    : NOT
+    | BANG
+    ;
+
+valueExpression
+    : primaryExpression                                                                      #valueExpressionDefault
+    | operator=(MINUS | PLUS | TILDE) valueExpression                                        #arithmeticUnary
+    | left=valueExpression operator=(ASTERISK | SLASH | PERCENT | DIV) right=valueExpression #arithmeticBinary
+    | left=valueExpression operator=(PLUS | MINUS | CONCAT_PIPE) right=valueExpression       #arithmeticBinary
+    | left=valueExpression shiftOperator right=valueExpression                               #shiftExpression
+    | left=valueExpression operator=AMPERSAND right=valueExpression                          #arithmeticBinary
+    | left=valueExpression operator=HAT right=valueExpression                                #arithmeticBinary
+    | left=valueExpression operator=PIPE right=valueExpression                               #arithmeticBinary
+    | left=valueExpression comparisonOperator right=valueExpression                          #comparison
+    ;
+
+shiftOperator
+    : SHIFT_LEFT
+    | SHIFT_RIGHT
+    | SHIFT_RIGHT_UNSIGNED
+    ;
+
+datetimeUnit
+    : YEAR | QUARTER | MONTH
+    | WEEK | DAY | DAYOFYEAR
+    | HOUR | MINUTE | SECOND | MILLISECOND | MICROSECOND
+    ;
+
+primaryExpression
+    : name=(CURRENT_DATE | CURRENT_TIMESTAMP | CURRENT_USER | USER | SESSION_USER)             #currentLike
+    | name=(TIMESTAMPADD | DATEADD | DATE_ADD) LEFT_PAREN (unit=datetimeUnit | invalidUnit=stringLit) COMMA unitsAmount=valueExpression COMMA timestamp=valueExpression RIGHT_PAREN             #timestampadd
+    | name=(TIMESTAMPDIFF | DATEDIFF | DATE_DIFF | TIMEDIFF) LEFT_PAREN (unit=datetimeUnit | invalidUnit=stringLit) COMMA startTimestamp=valueExpression COMMA endTimestamp=valueExpression RIGHT_PAREN    #timestampdiff
+    | CASE whenClause+ (ELSE elseExpression=expression)? END                                   #searchedCase
+    | CASE value=expression whenClause+ (ELSE elseExpression=expression)? END                  #simpleCase
+    | name=(CAST | TRY_CAST) LEFT_PAREN expression AS dataType RIGHT_PAREN                     #cast
+    | primaryExpression collateClause                                                          #collate
+    | primaryExpression DOUBLE_COLON dataType                                                  #castByColon
+    | STRUCT LEFT_PAREN (argument+=namedExpression (COMMA argument+=namedExpression)*)? RIGHT_PAREN #struct
+    | FIRST LEFT_PAREN expression (IGNORE NULLS)? RIGHT_PAREN                                  #first
+    | ANY_VALUE LEFT_PAREN expression (IGNORE NULLS)? RIGHT_PAREN                              #any_value
+    | LAST LEFT_PAREN expression (IGNORE NULLS)? RIGHT_PAREN                                   #last
+    | POSITION LEFT_PAREN substr=valueExpression IN str=valueExpression RIGHT_PAREN            #position
+    | constant                                                                                 #constantDefault
+    | ASTERISK exceptClause?                                                                   #star
+    | qualifiedName DOT ASTERISK exceptClause?                                                 #star
+    | LEFT_PAREN namedExpression (COMMA namedExpression)+ RIGHT_PAREN                          #rowConstructor
+    | LEFT_PAREN query RIGHT_PAREN                                                             #subqueryExpression
+    | functionName LEFT_PAREN (setQuantifier? argument+=functionArgument
+       (COMMA argument+=functionArgument)*)? RIGHT_PAREN
+       (WITHIN GROUP LEFT_PAREN ORDER BY sortItem (COMMA sortItem)* RIGHT_PAREN)?
+       (FILTER LEFT_PAREN WHERE where=booleanExpression RIGHT_PAREN)?
+       (nullsOption=(IGNORE | RESPECT) NULLS)? ( OVER windowSpec)?                             #functionCall
+    | identifier ARROW expression                                                              #lambda
+    | LEFT_PAREN identifier (COMMA identifier)+ RIGHT_PAREN ARROW expression                   #lambda
+    | value=primaryExpression LEFT_BRACKET index=valueExpression RIGHT_BRACKET                 #subscript
+    | identifier                                                                               #columnReference
+    | base=primaryExpression DOT fieldName=identifier                                          #dereference
+    | LEFT_PAREN expression RIGHT_PAREN                                                        #parenthesizedExpression
+    | EXTRACT LEFT_PAREN field=identifier FROM source=valueExpression RIGHT_PAREN              #extract
+    | (SUBSTR | SUBSTRING) LEFT_PAREN str=valueExpression (FROM | COMMA) pos=valueExpression
+      ((FOR | COMMA) len=valueExpression)? RIGHT_PAREN                                         #substring
+    | TRIM LEFT_PAREN trimOption=(BOTH | LEADING | TRAILING)? (trimStr=valueExpression)?
+       FROM srcStr=valueExpression RIGHT_PAREN                                                 #trim
+    | OVERLAY LEFT_PAREN input=valueExpression PLACING replace=valueExpression
+      FROM position=valueExpression (FOR length=valueExpression)? RIGHT_PAREN                  #overlay
+    ;
+
+literalType
+    : DATE
+    | TIMESTAMP | TIMESTAMP_LTZ | TIMESTAMP_NTZ
+    | INTERVAL
+    | BINARY_HEX
+    | unsupportedType=identifier
+    ;
+
+constant
+    : NULL                                                                                     #nullLiteral
+    | QUESTION                                                                                 #posParameterLiteral
+    | COLON identifier                                                                         #namedParameterLiteral
+    | interval                                                                                 #intervalLiteral
+    | literalType stringLit                                                                    #typeConstructor
+    | number                                                                                   #numericLiteral
+    | booleanValue                                                                             #booleanLiteral
+    | stringLit+                                                                               #stringLiteral
+    ;
+
+comparisonOperator
+    : EQ | NEQ | NEQJ | LT | LTE | GT | GTE | NSEQ
+    ;
+
+arithmeticOperator
+    : PLUS | MINUS | ASTERISK | SLASH | PERCENT | DIV | TILDE | AMPERSAND | PIPE | CONCAT_PIPE | HAT
+    ;
+
+predicateOperator
+    : OR | AND | IN | NOT
+    ;
+
+booleanValue
+    : TRUE | FALSE
+    ;
+
+interval
+    : INTERVAL (errorCapturingMultiUnitsInterval | errorCapturingUnitToUnitInterval)
+    ;
+
+errorCapturingMultiUnitsInterval
+    : body=multiUnitsInterval unitToUnitInterval?
+    ;
+
+multiUnitsInterval
+    : (intervalValue unit+=unitInMultiUnits)+
+    ;
+
+errorCapturingUnitToUnitInterval
+    : body=unitToUnitInterval (error1=multiUnitsInterval | error2=unitToUnitInterval)?
+    ;
+
+unitToUnitInterval
+    : value=intervalValue from=unitInUnitToUnit TO to=unitInUnitToUnit
+    ;
+
+intervalValue
+    : (PLUS | MINUS)?
+      (INTEGER_VALUE | DECIMAL_VALUE | stringLit)
+    ;
+
+unitInMultiUnits
+    : NANOSECOND | NANOSECONDS | MICROSECOND | MICROSECONDS | MILLISECOND | MILLISECONDS
+    | SECOND | SECONDS | MINUTE | MINUTES | HOUR | HOURS | DAY | DAYS | WEEK | WEEKS
+    | MONTH | MONTHS | YEAR | YEARS
+    ;
+
+unitInUnitToUnit
+    : SECOND | MINUTE | HOUR | DAY | MONTH | YEAR
+    ;
+
+colPosition
+    : position=FIRST | position=AFTER afterCol=errorCapturingIdentifier
+    ;
+
+collateClause
+    : COLLATE collationName=identifier
+    ;
+
+type
+    : BOOLEAN
+    | TINYINT | BYTE
+    | SMALLINT | SHORT
+    | INT | INTEGER
+    | BIGINT | LONG
+    | FLOAT | REAL
+    | DOUBLE
+    | DATE
+    | TIMESTAMP | TIMESTAMP_NTZ | TIMESTAMP_LTZ
+    | STRING collateClause?
+    | CHARACTER | CHAR
+    | VARCHAR
+    | BINARY
+    | DECIMAL | DEC | NUMERIC
+    | VOID
+    | INTERVAL
+    | VARIANT
+    | ARRAY | STRUCT | MAP
+    | unsupportedType=identifier
+    ;
+
+dataType
+    : complex=ARRAY LT dataType GT                              #complexDataType
+    | complex=MAP LT dataType COMMA dataType GT                 #complexDataType
+    | complex=STRUCT (LT complexColTypeList? GT | NEQ)          #complexDataType
+    | INTERVAL from=(YEAR | MONTH) (TO to=MONTH)?               #yearMonthIntervalDataType
+    | INTERVAL from=(DAY | HOUR | MINUTE | SECOND)
+      (TO to=(HOUR | MINUTE | SECOND))?                         #dayTimeIntervalDataType
+    | type (LEFT_PAREN INTEGER_VALUE
+      (COMMA INTEGER_VALUE)* RIGHT_PAREN)?                      #primitiveDataType
+    ;
+
+qualifiedColTypeWithPositionList
+    : qualifiedColTypeWithPosition (COMMA qualifiedColTypeWithPosition)*
+    ;
+
+qualifiedColTypeWithPosition
+    : name=multipartIdentifier dataType colDefinitionDescriptorWithPosition*
+    ;
+
+colDefinitionDescriptorWithPosition
+    : errorCapturingNot NULL
+    | defaultExpression
+    | commentSpec
+    | colPosition
+    ;
+
+defaultExpression
+    : DEFAULT expression
+    ;
+
+variableDefaultExpression
+    : (DEFAULT | EQ) expression
+    ;
+
+colTypeList
+    : colType (COMMA colType)*
+    ;
+
+colType
+    : colName=errorCapturingIdentifier dataType (errorCapturingNot NULL)? commentSpec?
+    ;
+
+colDefinitionList
+    : colDefinition (COMMA colDefinition)*
+    ;
+
+colDefinition
+    : colName=errorCapturingIdentifier dataType colDefinitionOption*
+    ;
+
+colDefinitionOption
+    : errorCapturingNot NULL
+    | defaultExpression
+    | generationExpression
+    | commentSpec
+    ;
+
+generationExpression
+    : GENERATED ALWAYS AS LEFT_PAREN expression RIGHT_PAREN
+    ;
+
+complexColTypeList
+    : complexColType (COMMA complexColType)*
+    ;
+
+complexColType
+    : errorCapturingIdentifier COLON? dataType (errorCapturingNot NULL)? commentSpec?
+    ;
+
+routineCharacteristics
+    : (routineLanguage
+    | specificName
+    | deterministic
+    | sqlDataAccess
+    | nullCall
+    | commentSpec
+    | rightsClause)*
+    ;
+
+routineLanguage
+    : LANGUAGE (SQL | IDENTIFIER)
+    ;
+
+specificName
+    : SPECIFIC specific=errorCapturingIdentifier
+    ;
+
+deterministic
+    : DETERMINISTIC
+    | errorCapturingNot DETERMINISTIC
+    ;
+
+sqlDataAccess
+    : access=NO SQL
+    | access=CONTAINS SQL
+    | access=READS SQL DATA
+    | access=MODIFIES SQL DATA
+    ;
+
+nullCall
+    : RETURNS NULL ON NULL INPUT
+    | CALLED ON NULL INPUT
+    ;
+
+rightsClause
+    : SQL SECURITY INVOKER
+    | SQL SECURITY DEFINER
+    ;
+
+whenClause
+    : WHEN condition=expression THEN result=expression
+    ;
+
+windowClause
+    : WINDOW namedWindow (COMMA namedWindow)*
+    ;
+
+namedWindow
+    : name=errorCapturingIdentifier AS windowSpec
+    ;
+
+windowSpec
+    : name=errorCapturingIdentifier                         #windowRef
+    | LEFT_PAREN name=errorCapturingIdentifier RIGHT_PAREN  #windowRef
+    | LEFT_PAREN
+      ( CLUSTER BY partition+=expression (COMMA partition+=expression)*
+      | ((PARTITION | DISTRIBUTE) BY partition+=expression (COMMA partition+=expression)*)?
+        ((ORDER | SORT) BY sortItem (COMMA sortItem)*)?)
+      windowFrame?
+      RIGHT_PAREN                                           #windowDef
+    ;
+
+windowFrame
+    : frameType=RANGE start=frameBound
+    | frameType=ROWS start=frameBound
+    | frameType=RANGE BETWEEN start=frameBound AND end=frameBound
+    | frameType=ROWS BETWEEN start=frameBound AND end=frameBound
+    ;
+
+frameBound
+    : UNBOUNDED boundType=(PRECEDING | FOLLOWING)
+    | boundType=CURRENT ROW
+    | expression boundType=(PRECEDING | FOLLOWING)
+    ;
+
+qualifiedNameList
+    : qualifiedName (COMMA qualifiedName)*
+    ;
+
+functionName
+    : IDENTIFIER_KW LEFT_PAREN expression RIGHT_PAREN
+    | identFunc=IDENTIFIER_KW   // IDENTIFIER itself is also a valid function name.
+    | qualifiedName
+    | FILTER
+    | LEFT
+    | RIGHT
+    ;
+
+qualifiedName
+    : identifier (DOT identifier)*
+    ;
+
+// this rule is used for explicitly capturing wrong identifiers such as test-table, which should actually be `test-table`
+// replace identifier with errorCapturingIdentifier where the immediate follow symbol is not an expression, otherwise
+// valid expressions such as "a-b" can be recognized as an identifier
+errorCapturingIdentifier
+    : identifier errorCapturingIdentifierExtra
+    ;
+
+// extra left-factoring grammar
+errorCapturingIdentifierExtra
+    : (MINUS identifier)+    #errorIdent
+    |                        #realIdent
+    ;
+
+identifier
+    : strictIdentifier
+    | {!SQL_standard_keyword_behavior}? strictNonReserved
+    ;
+
+strictIdentifier
+    : IDENTIFIER              #unquotedIdentifier
+    | quotedIdentifier        #quotedIdentifierAlternative
+    | {SQL_standard_keyword_behavior}? ansiNonReserved #unquotedIdentifier
+    | {!SQL_standard_keyword_behavior}? nonReserved    #unquotedIdentifier
+    ;
+
+quotedIdentifier
+    : BACKQUOTED_IDENTIFIER
+    | {double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    ;
+
+backQuotedIdentifier
+    : BACKQUOTED_IDENTIFIER
+    ;
+
+number
+    : {!legacy_exponent_literal_as_decimal_enabled}? MINUS? EXPONENT_VALUE #exponentLiteral
+    | {!legacy_exponent_literal_as_decimal_enabled}? MINUS? DECIMAL_VALUE  #decimalLiteral
+    | {legacy_exponent_literal_as_decimal_enabled}? MINUS? (EXPONENT_VALUE | DECIMAL_VALUE) #legacyDecimalLiteral
+    | MINUS? INTEGER_VALUE            #integerLiteral
+    | MINUS? BIGINT_LITERAL           #bigIntLiteral
+    | MINUS? SMALLINT_LITERAL         #smallIntLiteral
+    | MINUS? TINYINT_LITERAL          #tinyIntLiteral
+    | MINUS? DOUBLE_LITERAL           #doubleLiteral
+    | MINUS? FLOAT_LITERAL            #floatLiteral
+    | MINUS? BIGDECIMAL_LITERAL       #bigDecimalLiteral
+    ;
+
+alterColumnAction
+    : TYPE dataType
+    | commentSpec
+    | colPosition
+    | setOrDrop=(SET | DROP) errorCapturingNot NULL
+    | SET defaultExpression
+    | dropDefault=DROP DEFAULT
+    ;
+
+stringLit
+    : STRING_LITERAL
+    | {!double_quoted_identifiers}? DOUBLEQUOTED_STRING
+    ;
+
+comment
+    : stringLit
+    | NULL
+    ;
+
+version
+    : INTEGER_VALUE
+    | stringLit
+    ;
+
+// When `SQL_standard_keyword_behavior=true`, there are 2 kinds of keywords in Spark SQL.
+// - Reserved keywords:
+//     Keywords that are reserved and can't be used as identifiers for table, view, column,
+//     function, alias, etc.
+// - Non-reserved keywords:
+//     Keywords that have a special meaning only in particular contexts and can be used as
+//     identifiers in other contexts. For example, `EXPLAIN SELECT ...` is a command, but EXPLAIN
+//     can be used as identifiers in other places.
+// You can find the full keywords list by searching "Start of the keywords list" in this file.
+// The non-reserved keywords are listed below. Keywords not in this list are reserved keywords.
+ansiNonReserved
+//--ANSI-NON-RESERVED-START
+    : ADD
+    | AFTER
+    | ALTER
+    | ALWAYS
+    | ANALYZE
+    | ANTI
+    | ANY_VALUE
+    | ARCHIVE
+    | ARRAY
+    | ASC
+    | AT
+    | BEGIN
+    | BETWEEN
+    | BIGINT
+    | BINARY
+    | BINARY_HEX
+    | BINDING
+    | BOOLEAN
+    | BUCKET
+    | BUCKETS
+    | BY
+    | BYTE
+    | CACHE
+    | CALLED
+    | CASCADE
+    | CATALOG
+    | CATALOGS
+    | CHANGE
+    | CHAR
+    | CHARACTER
+    | CLEAR
+    | CLUSTER
+    | CLUSTERED
+    | CODEGEN
+    | COLLECTION
+    | COLUMNS
+    | COMMENT
+    | COMMIT
+    | COMPACT
+    | COMPACTIONS
+    | COMPENSATION
+    | COMPUTE
+    | CONCATENATE
+    | CONTAINS
+    | COST
+    | CUBE
+    | CURRENT
+    | DATA
+    | DATABASE
+    | DATABASES
+    | DATE
+    | DATEADD
+    | DATE_ADD
+    | DATEDIFF
+    | DATE_DIFF
+    | DAY
+    | DAYS
+    | DAYOFYEAR
+    | DBPROPERTIES
+    | DEC
+    | DECIMAL
+    | DECLARE
+    | DEFAULT
+    | DEFINED
+    | DEFINER
+    | DELETE
+    | DELIMITED
+    | DESC
+    | DESCRIBE
+    | DETERMINISTIC
+    | DFS
+    | DIRECTORIES
+    | DIRECTORY
+    | DISTRIBUTE
+    | DIV
+    | DOUBLE
+    | DROP
+    | ESCAPED
+    | EVOLUTION
+    | EXCHANGE
+    | EXCLUDE
+    | EXISTS
+    | EXPLAIN
+    | EXPORT
+    | EXTENDED
+    | EXTERNAL
+    | EXTRACT
+    | FIELDS
+    | FILEFORMAT
+    | FIRST
+    | FLOAT
+    | FOLLOWING
+    | FORMAT
+    | FORMATTED
+    | FUNCTION
+    | FUNCTIONS
+    | GENERATED
+    | GLOBAL
+    | GROUPING
+    | HOUR
+    | HOURS
+    | IDENTIFIER_KW
+    | IF
+    | IGNORE
+    | IMMEDIATE
+    | IMPORT
+    | INCLUDE
+    | INDEX
+    | INDEXES
+    | INPATH
+    | INPUT
+    | INPUTFORMAT
+    | INSERT
+    | INT
+    | INTEGER
+    | INTERVAL
+    | INVOKER
+    | ITEMS
+    | KEYS
+    | LANGUAGE
+    | LAST
+    | LAZY
+    | LIKE
+    | ILIKE
+    | LIMIT
+    | LINES
+    | LIST
+    | LOAD
+    | LOCAL
+    | LOCATION
+    | LOCK
+    | LOCKS
+    | LOGICAL
+    | LONG
+    | MACRO
+    | MAP
+    | MATCHED
+    | MERGE
+    | MICROSECOND
+    | MICROSECONDS
+    | MILLISECOND
+    | MILLISECONDS
+    | MINUTE
+    | MINUTES
+    | MODIFIES
+    | MONTH
+    | MONTHS
+    | MSCK
+    | NAME
+    | NAMESPACE
+    | NAMESPACES
+    | NANOSECOND
+    | NANOSECONDS
+    | NO
+    | NONE
+    | NULLS
+    | NUMERIC
+    | OF
+    | OPTION
+    | OPTIONS
+    | OUT
+    | OUTPUTFORMAT
+    | OVER
+    | OVERLAY
+    | OVERWRITE
+    | PARTITION
+    | PARTITIONED
+    | PARTITIONS
+    | PERCENTLIT
+    | PIVOT
+    | PLACING
+    | POSITION
+    | PRECEDING
+    | PRINCIPALS
+    | PROPERTIES
+    | PURGE
+    | QUARTER
+    | QUERY
+    | RANGE
+    | READS
+    | REAL
+    | RECORDREADER
+    | RECORDWRITER
+    | RECOVER
+    | REDUCE
+    | REFRESH
+    | RENAME
+    | REPAIR
+    | REPEATABLE
+    | REPLACE
+    | RESET
+    | RESPECT
+    | RESTRICT
+    | RETURN
+    | RETURNS
+    | REVOKE
+    | RLIKE
+    | ROLE
+    | ROLES
+    | ROLLBACK
+    | ROLLUP
+    | ROW
+    | ROWS
+    | SCHEMA
+    | SCHEMAS
+    | SECOND
+    | SECONDS
+    | SECURITY
+    | SEMI
+    | SEPARATED
+    | SERDE
+    | SERDEPROPERTIES
+    | SET
+    | SETMINUS
+    | SETS
+    | SHORT
+    | SHOW
+    | SINGLE
+    | SKEWED
+    | SMALLINT
+    | SORT
+    | SORTED
+    | SOURCE
+    | SPECIFIC
+    | START
+    | STATISTICS
+    | STORED
+    | STRATIFY
+    | STRING
+    | STRUCT
+    | SUBSTR
+    | SUBSTRING
+    | SYNC
+    | SYSTEM_TIME
+    | SYSTEM_VERSION
+    | TABLES
+    | TABLESAMPLE
+    | TARGET
+    | TBLPROPERTIES
+    | TEMPORARY
+    | TERMINATED
+    | TIMEDIFF
+    | TIMESTAMP
+    | TIMESTAMP_LTZ
+    | TIMESTAMP_NTZ
+    | TIMESTAMPADD
+    | TIMESTAMPDIFF
+    | TINYINT
+    | TOUCH
+    | TRANSACTION
+    | TRANSACTIONS
+    | TRANSFORM
+    | TRIM
+    | TRUE
+    | TRUNCATE
+    | TRY_CAST
+    | TYPE
+    | UNARCHIVE
+    | UNBOUNDED
+    | UNCACHE
+    | UNLOCK
+    | UNPIVOT
+    | UNSET
+    | UPDATE
+    | USE
+    | VALUES
+    | VARCHAR
+    | VAR
+    | VARIABLE
+    | VARIANT
+    | VERSION
+    | VIEW
+    | VIEWS
+    | VOID
+    | WEEK
+    | WEEKS
+    | WINDOW
+    | YEAR
+    | YEARS
+    | ZONE
+//--ANSI-NON-RESERVED-END
+    ;
+
+// When `SQL_standard_keyword_behavior=false`, there are 2 kinds of keywords in Spark SQL.
+// - Non-reserved keywords:
+//     Same definition as the one when `SQL_standard_keyword_behavior=true`.
+// - Strict-non-reserved keywords:
+//     A strict version of non-reserved keywords, which can not be used as table alias.
+// You can find the full keywords list by searching "Start of the keywords list" in this file.
+// The strict-non-reserved keywords are listed in `strictNonReserved`.
+// The non-reserved keywords are listed in `nonReserved`.
+// These 2 together contain all the keywords.
+strictNonReserved
+    : ANTI
+    | CROSS
+    | EXCEPT
+    | FULL
+    | INNER
+    | INTERSECT
+    | JOIN
+    | LATERAL
+    | LEFT
+    | NATURAL
+    | ON
+    | RIGHT
+    | SEMI
+    | SETMINUS
+    | UNION
+    | USING
+    ;
+
+nonReserved
+//--DEFAULT-NON-RESERVED-START
+    : ADD
+    | AFTER
+    | ALL
+    | ALTER
+    | ALWAYS
+    | ANALYZE
+    | AND
+    | ANY
+    | ANY_VALUE
+    | ARCHIVE
+    | ARRAY
+    | AS
+    | ASC
+    | AT
+    | AUTHORIZATION
+    | BEGIN
+    | BETWEEN
+    | BIGINT
+    | BINARY
+    | BINARY_HEX
+    | BINDING
+    | BOOLEAN
+    | BOTH
+    | BUCKET
+    | BUCKETS
+    | BY
+    | BYTE
+    | CACHE
+    | CALLED
+    | CASCADE
+    | CASE
+    | CAST
+    | CATALOG
+    | CATALOGS
+    | CHANGE
+    | CHAR
+    | CHARACTER
+    | CHECK
+    | CLEAR
+    | CLUSTER
+    | CLUSTERED
+    | CODEGEN
+    | COLLATE
+    | COLLATION
+    | COLLECTION
+    | COLUMN
+    | COLUMNS
+    | COMMENT
+    | COMMIT
+    | COMPACT
+    | COMPACTIONS
+    | COMPENSATION
+    | COMPUTE
+    | CONCATENATE
+    | CONSTRAINT
+    | CONTAINS
+    | COST
+    | CREATE
+    | CUBE
+    | CURRENT
+    | CURRENT_DATE
+    | CURRENT_TIME
+    | CURRENT_TIMESTAMP
+    | CURRENT_USER
+    | DATA
+    | DATABASE
+    | DATABASES
+    | DATE
+    | DATEADD
+    | DATE_ADD
+    | DATEDIFF
+    | DATE_DIFF
+    | DAY
+    | DAYS
+    | DAYOFYEAR
+    | DBPROPERTIES
+    | DEC
+    | DECIMAL
+    | DECLARE
+    | DEFAULT
+    | DEFINED
+    | DEFINER
+    | DELETE
+    | DELIMITED
+    | DESC
+    | DESCRIBE
+    | DETERMINISTIC
+    | DFS
+    | DIRECTORIES
+    | DIRECTORY
+    | DISTINCT
+    | DISTRIBUTE
+    | DIV
+    | DOUBLE
+    | DROP
+    | ELSE
+    | END
+    | ESCAPE
+    | ESCAPED
+    | EVOLUTION
+    | EXCHANGE
+    | EXCLUDE
+    | EXECUTE
+    | EXISTS
+    | EXPLAIN
+    | EXPORT
+    | EXTENDED
+    | EXTERNAL
+    | EXTRACT
+    | FALSE
+    | FETCH
+    | FILTER
+    | FIELDS
+    | FILEFORMAT
+    | FIRST
+    | FLOAT
+    | FOLLOWING
+    | FOR
+    | FOREIGN
+    | FORMAT
+    | FORMATTED
+    | FROM
+    | FUNCTION
+    | FUNCTIONS
+    | GENERATED
+    | GLOBAL
+    | GRANT
+    | GROUP
+    | GROUPING
+    | HAVING
+    | HOUR
+    | HOURS
+    | IDENTIFIER_KW
+    | IF
+    | IGNORE
+    | IMMEDIATE
+    | IMPORT
+    | IN
+    | INCLUDE
+    | INDEX
+    | INDEXES
+    | INPATH
+    | INPUT
+    | INPUTFORMAT
+    | INSERT
+    | INT
+    | INTEGER
+    | INTERVAL
+    | INTO
+    | INVOKER
+    | IS
+    | ITEMS
+    | KEYS
+    | LANGUAGE
+    | LAST
+    | LAZY
+    | LEADING
+    | LIKE
+    | ILIKE
+    | LIMIT
+    | LINES
+    | LIST
+    | LOAD
+    | LOCAL
+    | LOCATION
+    | LOCK
+    | LOCKS
+    | LOGICAL
+    | LONG
+    | MACRO
+    | MAP
+    | MATCHED
+    | MERGE
+    | MICROSECOND
+    | MICROSECONDS
+    | MILLISECOND
+    | MILLISECONDS
+    | MINUTE
+    | MINUTES
+    | MODIFIES
+    | MONTH
+    | MONTHS
+    | MSCK
+    | NAME
+    | NAMESPACE
+    | NAMESPACES
+    | NANOSECOND
+    | NANOSECONDS
+    | NO
+    | NONE
+    | NOT
+    | NULL
+    | NULLS
+    | NUMERIC
+    | OF
+    | OFFSET
+    | ONLY
+    | OPTION
+    | OPTIONS
+    | OR
+    | ORDER
+    | OUT
+    | OUTER
+    | OUTPUTFORMAT
+    | OVER
+    | OVERLAPS
+    | OVERLAY
+    | OVERWRITE
+    | PARTITION
+    | PARTITIONED
+    | PARTITIONS
+    | PERCENTLIT
+    | PIVOT
+    | PLACING
+    | POSITION
+    | PRECEDING
+    | PRIMARY
+    | PRINCIPALS
+    | PROPERTIES
+    | PURGE
+    | QUARTER
+    | QUERY
+    | RANGE
+    | READS
+    | REAL
+    | RECORDREADER
+    | RECORDWRITER
+    | RECOVER
+    | REDUCE
+    | REFERENCES
+    | REFRESH
+    | RENAME
+    | REPAIR
+    | REPEATABLE
+    | REPLACE
+    | RESET
+    | RESPECT
+    | RESTRICT
+    | RETURN
+    | RETURNS
+    | REVOKE
+    | RLIKE
+    | ROLE
+    | ROLES
+    | ROLLBACK
+    | ROLLUP
+    | ROW
+    | ROWS
+    | SCHEMA
+    | SCHEMAS
+    | SECOND
+    | SECONDS
+    | SECURITY
+    | SELECT
+    | SEPARATED
+    | SERDE
+    | SERDEPROPERTIES
+    | SESSION_USER
+    | SET
+    | SETS
+    | SHORT
+    | SHOW
+    | SINGLE
+    | SKEWED
+    | SMALLINT
+    | SOME
+    | SORT
+    | SORTED
+    | SOURCE
+    | SPECIFIC
+    | SQL
+    | START
+    | STATISTICS
+    | STORED
+    | STRATIFY
+    | STRING
+    | STRUCT
+    | SUBSTR
+    | SUBSTRING
+    | SYNC
+    | SYSTEM_TIME
+    | SYSTEM_VERSION
+    | TABLE
+    | TABLES
+    | TABLESAMPLE
+    | TARGET
+    | TBLPROPERTIES
+    | TEMPORARY
+    | TERMINATED
+    | THEN
+    | TIME
+    | TIMEDIFF
+    | TIMESTAMP
+    | TIMESTAMP_LTZ
+    | TIMESTAMP_NTZ
+    | TIMESTAMPADD
+    | TIMESTAMPDIFF
+    | TINYINT
+    | TO
+    | TOUCH
+    | TRAILING
+    | TRANSACTION
+    | TRANSACTIONS
+    | TRANSFORM
+    | TRIM
+    | TRUE
+    | TRUNCATE
+    | TRY_CAST
+    | TYPE
+    | UNARCHIVE
+    | UNBOUNDED
+    | UNCACHE
+    | UNIQUE
+    | UNKNOWN
+    | UNLOCK
+    | UNPIVOT
+    | UNSET
+    | UPDATE
+    | USE
+    | USER
+    | VALUES
+    | VARCHAR
+    | VAR
+    | VARIABLE
+    | VARIANT
+    | VERSION
+    | VIEW
+    | VIEWS
+    | VOID
+    | WEEK
+    | WEEKS
+    | WHEN
+    | WHERE
+    | WINDOW
+    | WITH
+    | WITHIN
+    | YEAR
+    | YEARS
+    | ZONE
+//--DEFAULT-NON-RESERVED-END
+    ;
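The rules above are the complete parser-rule surface of the vendored Spark SQL grammar. As a quick sanity check, they can be exercised through ANTLR's generated classes; the sketch below assumes the conventional `SqlBaseLexer`/`SqlBaseParser` generated class names, a case-insensitive lexer (as in upstream Spark), and `singleStatement` as the entry rule; substitute whatever names this build actually generates:

```java
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;

public class GrammarSmokeTest {
  public static void main(String[] args) {
    // EXPLAIN and YEAR are ansi-non-reserved above, so both may double as column names.
    String sql = "SELECT explain, year FROM logs WHERE id BETWEEN 1 AND 10";
    SqlBaseLexer lexer = new SqlBaseLexer(CharStreams.fromString(sql));      // assumed generated class
    SqlBaseParser parser = new SqlBaseParser(new CommonTokenStream(lexer));  // assumed generated class
    // Print the parse tree in LISP form for a quick visual check.
    System.out.println(parser.singleStatement().toStringTree(parser));
  }
}
```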
diff --git a/async-query-core/src/main/java/org/opensearch/sql/asyncquery/Dummy.java b/async-query-core/src/main/java/org/opensearch/sql/asyncquery/Dummy.java
new file mode 100644
index 0000000000..b7ab572f2a
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/asyncquery/Dummy.java
@@ -0,0 +1,13 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.asyncquery;
+
+// This is a dummy class for scaffolding and should be deleted later
+public class Dummy {
+  public String hello() {
+    return "Hello!";
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorService.java
new file mode 100644
index 0000000000..d38c8554ae
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorService.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.asyncquery;
+
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse;
+
+/**
+ * AsyncQueryExecutorService exposes functionality to create an async query, fetch its results, and cancel it.
+ */
+public interface AsyncQueryExecutorService {
+
+  /**
+   * Creates async query job based on the request and returns queryId in the response.
+   *
+   * @param createAsyncQueryRequest createAsyncQueryRequest.
+   * @return {@link CreateAsyncQueryResponse}
+   */
+  CreateAsyncQueryResponse createAsyncQuery(
+      CreateAsyncQueryRequest createAsyncQueryRequest,
+      AsyncQueryRequestContext asyncQueryRequestContext);
+
+  /**
+   * Returns async query response for a given queryId.
+   *
+   * @param queryId queryId.
+   * @return {@link AsyncQueryExecutionResponse}
+   */
+  AsyncQueryExecutionResponse getAsyncQueryResults(String queryId);
+
+  /**
+   * Cancels running async query and returns the cancelled queryId.
+   *
+   * @param queryId queryId.
+   * @return {@link String} cancelledQueryId.
+   */
+  String cancelQuery(String queryId);
+}
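The interface implies a create, poll, cancel lifecycle. A minimal caller sketch (assuming Lombok-style getters on the request/response models, which is how they are declared elsewhere in this change):

```java
import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService;
import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse;
import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext;
import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse;

public class AsyncQueryLifecycleSketch {
  private final AsyncQueryExecutorService service;

  public AsyncQueryLifecycleSketch(AsyncQueryExecutorService service) {
    this.service = service;
  }

  public AsyncQueryExecutionResponse runToCompletion(CreateAsyncQueryRequest request)
      throws InterruptedException {
    CreateAsyncQueryResponse created =
        service.createAsyncQuery(request, new NullAsyncQueryRequestContext());
    AsyncQueryExecutionResponse response = service.getAsyncQueryResults(created.getQueryId());
    // SUCCESS and FAILED are the terminal states surfaced by the implementation below;
    // a production caller would also handle CANCELLED and apply a deadline.
    while (!"SUCCESS".equals(response.getStatus()) && !"FAILED".equals(response.getStatus())) {
      Thread.sleep(1000L);
      response = service.getAsyncQueryResults(created.getQueryId());
    }
    return response;
  }
}
```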
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java
new file mode 100644
index 0000000000..6d3d5b6765
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.asyncquery;
+
+import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
+
+import com.amazonaws.services.emrserverless.model.JobRunState;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import lombok.AllArgsConstructor;
+import org.json.JSONObject;
+import org.opensearch.sql.data.model.ExprValue;
+import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfig;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier;
+import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.functions.response.DefaultSparkSqlFunctionResponseHandle;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse;
+
+/** Default implementation of {@link AsyncQueryExecutorService}. */
+@AllArgsConstructor
+public class AsyncQueryExecutorServiceImpl implements AsyncQueryExecutorService {
+  private AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService;
+  private SparkQueryDispatcher sparkQueryDispatcher;
+  private SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier;
+
+  @Override
+  public CreateAsyncQueryResponse createAsyncQuery(
+      CreateAsyncQueryRequest createAsyncQueryRequest,
+      AsyncQueryRequestContext asyncQueryRequestContext) {
+    SparkExecutionEngineConfig sparkExecutionEngineConfig =
+        sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(asyncQueryRequestContext);
+    DispatchQueryResponse dispatchQueryResponse =
+        sparkQueryDispatcher.dispatch(
+            DispatchQueryRequest.builder()
+                .accountId(sparkExecutionEngineConfig.getAccountId())
+                .applicationId(sparkExecutionEngineConfig.getApplicationId())
+                .query(createAsyncQueryRequest.getQuery())
+                .datasource(createAsyncQueryRequest.getDatasource())
+                .langType(createAsyncQueryRequest.getLang())
+                .executionRoleARN(sparkExecutionEngineConfig.getExecutionRoleARN())
+                .clusterName(sparkExecutionEngineConfig.getClusterName())
+                .sparkSubmitParameterModifier(
+                    sparkExecutionEngineConfig.getSparkSubmitParameterModifier())
+                .sessionId(createAsyncQueryRequest.getSessionId())
+                .build(),
+            asyncQueryRequestContext);
+    asyncQueryJobMetadataStorageService.storeJobMetadata(
+        AsyncQueryJobMetadata.builder()
+            .queryId(dispatchQueryResponse.getQueryId())
+            .accountId(sparkExecutionEngineConfig.getAccountId())
+            .applicationId(sparkExecutionEngineConfig.getApplicationId())
+            .jobId(dispatchQueryResponse.getJobId())
+            .resultIndex(dispatchQueryResponse.getResultIndex())
+            .sessionId(dispatchQueryResponse.getSessionId())
+            .datasourceName(dispatchQueryResponse.getDatasourceName())
+            .jobType(dispatchQueryResponse.getJobType())
+            .indexName(dispatchQueryResponse.getIndexName())
+            .build(),
+        asyncQueryRequestContext);
+    return new CreateAsyncQueryResponse(
+        dispatchQueryResponse.getQueryId(), dispatchQueryResponse.getSessionId());
+  }
+
+  @Override
+  public AsyncQueryExecutionResponse getAsyncQueryResults(String queryId) {
+    Optional<AsyncQueryJobMetadata> jobMetadata =
+        asyncQueryJobMetadataStorageService.getJobMetadata(queryId);
+    if (jobMetadata.isPresent()) {
+      String sessionId = jobMetadata.get().getSessionId();
+      JSONObject jsonObject = sparkQueryDispatcher.getQueryResponse(jobMetadata.get());
+      if (JobRunState.SUCCESS.toString().equals(jsonObject.getString(STATUS_FIELD))) {
+        DefaultSparkSqlFunctionResponseHandle sparkSqlFunctionResponseHandle =
+            new DefaultSparkSqlFunctionResponseHandle(jsonObject);
+        List<ExprValue> result = new ArrayList<>();
+        while (sparkSqlFunctionResponseHandle.hasNext()) {
+          result.add(sparkSqlFunctionResponseHandle.next());
+        }
+        return new AsyncQueryExecutionResponse(
+            JobRunState.SUCCESS.toString(),
+            sparkSqlFunctionResponseHandle.schema(),
+            result,
+            null,
+            sessionId);
+      } else {
+        return new AsyncQueryExecutionResponse(
+            jsonObject.optString(STATUS_FIELD, JobRunState.FAILED.toString()),
+            null,
+            null,
+            jsonObject.optString(ERROR_FIELD, ""),
+            sessionId);
+      }
+    }
+    throw new AsyncQueryNotFoundException(String.format("QueryId: %s not found", queryId));
+  }
+
+  @Override
+  public String cancelQuery(String queryId) {
+    Optional<AsyncQueryJobMetadata> asyncQueryJobMetadata =
+        asyncQueryJobMetadataStorageService.getJobMetadata(queryId);
+    if (asyncQueryJobMetadata.isPresent()) {
+      return sparkQueryDispatcher.cancelJob(asyncQueryJobMetadata.get());
+    }
+    throw new AsyncQueryNotFoundException(String.format("QueryId: %s not found", queryId));
+  }
+}
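Since the implementation is a plain @AllArgsConstructor class, wiring it up is constructor injection of its three collaborators:

```java
import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService;
import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl;
import org.opensearch.sql.spark.asyncquery.AsyncQueryJobMetadataStorageService;
import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier;
import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher;

public final class AsyncQueryWiringSketch {
  private AsyncQueryWiringSketch() {}

  // @AllArgsConstructor generates exactly this three-argument constructor.
  public static AsyncQueryExecutorService build(
      AsyncQueryJobMetadataStorageService storageService,
      SparkQueryDispatcher dispatcher,
      SparkExecutionEngineConfigSupplier configSupplier) {
    return new AsyncQueryExecutorServiceImpl(storageService, dispatcher, configSupplier);
  }
}
```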
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryJobMetadataStorageService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryJobMetadataStorageService.java
new file mode 100644
index 0000000000..b4e94c984d
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryJobMetadataStorageService.java
@@ -0,0 +1,21 @@
+/*
+ *
+ *  * Copyright OpenSearch Contributors
+ *  * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package org.opensearch.sql.spark.asyncquery;
+
+import java.util.Optional;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+
+public interface AsyncQueryJobMetadataStorageService {
+
+  void storeJobMetadata(
+      AsyncQueryJobMetadata asyncQueryJobMetadata,
+      AsyncQueryRequestContext asyncQueryRequestContext);
+
+  Optional<AsyncQueryJobMetadata> getJobMetadata(String jobId);
+}
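A throwaway in-memory sketch of this contract; real implementations persist to an OpenSearch index, and this one only illustrates the keying by queryId:

```java
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import org.opensearch.sql.spark.asyncquery.AsyncQueryJobMetadataStorageService;
import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;

public class InMemoryJobMetadataStorageService implements AsyncQueryJobMetadataStorageService {
  private final Map<String, AsyncQueryJobMetadata> store = new ConcurrentHashMap<>();

  @Override
  public void storeJobMetadata(
      AsyncQueryJobMetadata metadata, AsyncQueryRequestContext context) {
    // The context is unused here; a real store might read tenant attributes from it.
    store.put(metadata.getQueryId(), metadata);
  }

  @Override
  public Optional<AsyncQueryJobMetadata> getJobMetadata(String queryId) {
    return Optional.ofNullable(store.get(queryId));
  }
}
```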
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/exceptions/AsyncQueryNotFoundException.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/exceptions/AsyncQueryNotFoundException.java
new file mode 100644
index 0000000000..80a0c34b70
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/exceptions/AsyncQueryNotFoundException.java
@@ -0,0 +1,15 @@
+/*
+ *
+ *  * Copyright OpenSearch Contributors
+ *  * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package org.opensearch.sql.spark.asyncquery.exceptions;
+
+/** AsyncQueryNotFoundException. */
+public class AsyncQueryNotFoundException extends RuntimeException {
+  public AsyncQueryNotFoundException(String message) {
+    super(message);
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java
new file mode 100644
index 0000000000..e5d9cffd5f
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java
@@ -0,0 +1,23 @@
+/*
+ *
+ *  * Copyright OpenSearch Contributors
+ *  * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package org.opensearch.sql.spark.asyncquery.model;
+
+import java.util.List;
+import lombok.Data;
+import org.opensearch.sql.data.model.ExprValue;
+import org.opensearch.sql.executor.ExecutionEngine;
+
+/** AsyncQueryExecutionResponse to store the response from Spark job execution. */
+@Data
+public class AsyncQueryExecutionResponse {
+  private final String status;
+  private final ExecutionEngine.Schema schema;
+  private final List<ExprValue> results;
+  private final String error;
+  private final String sessionId;
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java
new file mode 100644
index 0000000000..1cfab4832d
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.asyncquery.model;
+
+import com.google.common.collect.ImmutableMap;
+import lombok.Builder.Default;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.experimental.SuperBuilder;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.execution.statestore.StateModel;
+import org.opensearch.sql.utils.SerializeUtils;
+
+/** This class models all the metadata required for a job. */
+@Data
+@SuperBuilder
+@EqualsAndHashCode(callSuper = false)
+public class AsyncQueryJobMetadata extends StateModel {
+  private final String queryId;
+  // optional: accountId for EMRS cluster
+  private final String accountId;
+  private final String applicationId;
+  private final String jobId;
+  private final String resultIndex;
+  // optional sessionId.
+  private final String sessionId;
+  // since 2.13
+  // jobType could be null before OpenSearch 2.12. SparkQueryDispatcher uses jobType to choose
+  // the cancel query handler. If jobType is null, it will invoke BatchQueryHandler.cancel().
+  @Default private final JobType jobType = JobType.INTERACTIVE;
+  // null if JobType is null
+  private final String datasourceName;
+  // null if JobType is INTERACTIVE or null
+  private final String indexName;
+
+  @Override
+  public String toString() {
+    return SerializeUtils.buildGson().toJson(this);
+  }
+
+  /** Copy builder; updates seqNo and primaryTerm. */
+  public static AsyncQueryJobMetadata copy(
+      AsyncQueryJobMetadata copy, ImmutableMap<String, Object> metadata) {
+    return builder()
+        .queryId(copy.queryId)
+        .accountId(copy.accountId)
+        .applicationId(copy.getApplicationId())
+        .jobId(copy.getJobId())
+        .resultIndex(copy.getResultIndex())
+        .sessionId(copy.getSessionId())
+        .datasourceName(copy.datasourceName)
+        .jobType(copy.jobType)
+        .indexName(copy.indexName)
+        .metadata(metadata)
+        .build();
+  }
+
+  @Override
+  public String getId() {
+    return queryId;
+  }
+}
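A builder-usage sketch (assuming the inherited StateModel builder fields all carry defaults); note how @Builder.Default fills in INTERACTIVE when jobType is omitted:

```java
import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;

public class JobMetadataBuilderSketch {
  public static void main(String[] args) {
    AsyncQueryJobMetadata metadata =
        AsyncQueryJobMetadata.builder()
            .queryId("q-123")
            .applicationId("emr-s-app")
            .jobId("job-456")
            .datasourceName("my_glue")
            .build();
    System.out.println(metadata.getJobType()); // INTERACTIVE, via @Builder.Default
  }
}
```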
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryRequestContext.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryRequestContext.java
new file mode 100644
index 0000000000..d5a478d592
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryRequestContext.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.asyncquery.model;
+
+import org.opensearch.sql.datasource.RequestContext;
+
+/** Context interface to provide additional request-related information. */
+public interface AsyncQueryRequestContext extends RequestContext {}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/NullAsyncQueryRequestContext.java b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/NullAsyncQueryRequestContext.java
new file mode 100644
index 0000000000..918d1d5929
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/asyncquery/model/NullAsyncQueryRequestContext.java
@@ -0,0 +1,14 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.asyncquery.model;
+
+/** An implementation of AsyncQueryRequestContext for cases where no context is required. */
+public class NullAsyncQueryRequestContext implements AsyncQueryRequestContext {
+  @Override
+  public Object getAttribute(String name) {
+    return null;
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClient.java b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClient.java
new file mode 100644
index 0000000000..98c115fde9
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClient.java
@@ -0,0 +1,46 @@
+/*
+ *
+ *  * Copyright OpenSearch Contributors
+ *  * SPDX-License-Identifier: Apache-2.0
+ *
+ */
+
+package org.opensearch.sql.spark.client;
+
+import com.amazonaws.services.emrserverless.model.CancelJobRunResult;
+import com.amazonaws.services.emrserverless.model.GetJobRunResult;
+
+/**
+ * Client interface for Spark job submissions. Can have multiple implementations based on the
+ * underlying Spark infrastructure. Currently, there is one for EMR Serverless: {@link
+ * EmrServerlessClientImpl}.
+ */
+public interface EMRServerlessClient {
+
+  /**
+   * Start a new job run.
+   *
+   * @param startJobRequest startJobRequest
+   * @return jobId.
+   */
+  String startJobRun(StartJobRequest startJobRequest);
+
+  /**
+   * Get status of an EMR Serverless job run.
+   *
+   * @param applicationId serverless applicationId
+   * @param jobId jobId.
+   * @return {@link GetJobRunResult}
+   */
+  GetJobRunResult getJobRunResult(String applicationId, String jobId);
+
+  /**
+   * Cancel an EMR Serverless job run.
+   *
+   * @param applicationId applicationId.
+   * @param jobId jobId.
+   * @return {@link CancelJobRunResult}
+   */
+  CancelJobRunResult cancelJobRun(
+      String applicationId, String jobId, boolean allowExceptionPropagation);
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactory.java b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactory.java
new file mode 100644
index 0000000000..c5305ba445
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactory.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.client;
+
+/** Factory interface for creating instances of {@link EMRServerlessClient}. */
+public interface EMRServerlessClientFactory {
+
+  /**
+   * Gets an instance of {@link EMRServerlessClient}.
+   *
+   * @param accountId Account ID of the requester. It will be used to decide the cluster.
+   * @return An {@link EMRServerlessClient} instance.
+   */
+  EMRServerlessClient getClient(String accountId);
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImpl.java b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImpl.java
new file mode 100644
index 0000000000..72973b3bbb
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImpl.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.client;
+
+import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_ENGINE_CONFIG;
+
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
+import com.amazonaws.services.emrserverless.AWSEMRServerless;
+import com.amazonaws.services.emrserverless.AWSEMRServerlessClientBuilder;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import lombok.RequiredArgsConstructor;
+import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfig;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier;
+import org.opensearch.sql.spark.metrics.MetricsService;
+
+@RequiredArgsConstructor
+public class EMRServerlessClientFactoryImpl implements EMRServerlessClientFactory {
+
+  private final SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier;
+  private final MetricsService metricsService;
+  private EMRServerlessClient emrServerlessClient;
+  private String region;
+
+  @Override
+  public EMRServerlessClient getClient(String accountId) {
+    SparkExecutionEngineConfig sparkExecutionEngineConfig =
+        this.sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(
+            new NullAsyncQueryRequestContext());
+    validateSparkExecutionEngineConfig(sparkExecutionEngineConfig);
+    if (isNewClientCreationRequired(sparkExecutionEngineConfig.getRegion())) {
+      region = sparkExecutionEngineConfig.getRegion();
+      this.emrServerlessClient = createEMRServerlessClient(this.region);
+    }
+    return this.emrServerlessClient;
+  }
+
+  private boolean isNewClientCreationRequired(String region) {
+    return !region.equals(this.region);
+  }
+
+  private void validateSparkExecutionEngineConfig(
+      SparkExecutionEngineConfig sparkExecutionEngineConfig) {
+    if (sparkExecutionEngineConfig == null || sparkExecutionEngineConfig.getRegion() == null) {
+      throw new IllegalArgumentException(
+          String.format(
+              "Async Query APIs are disabled. Please configure %s in cluster settings to enable"
+                  + " them.",
+              SPARK_EXECUTION_ENGINE_CONFIG.getKeyValue()));
+    }
+  }
+
+  private EMRServerlessClient createEMRServerlessClient(String awsRegion) {
+    // TODO: It does not handle accountId for now. (it creates client for same account)
+    return AccessController.doPrivileged(
+        (PrivilegedAction<EMRServerlessClient>)
+            () -> {
+              AWSEMRServerless awsemrServerless =
+                  AWSEMRServerlessClientBuilder.standard()
+                      .withRegion(awsRegion)
+                      .withCredentials(new DefaultAWSCredentialsProviderChain())
+                      .build();
+              return new EmrServerlessClientImpl(awsemrServerless, metricsService);
+            });
+  }
+}
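The factory caches one client per configured region. A behavioral sketch of that contract, with the collaborators passed in rather than constructed, since their concrete setup lives elsewhere (accountId is currently unused per the TODO above):

```java
import org.opensearch.sql.spark.client.EMRServerlessClient;
import org.opensearch.sql.spark.client.EMRServerlessClientFactory;
import org.opensearch.sql.spark.client.EMRServerlessClientFactoryImpl;
import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier;
import org.opensearch.sql.spark.metrics.MetricsService;

public final class FactoryCachingSketch {
  private FactoryCachingSketch() {}

  // As long as the configured region is unchanged, repeated lookups return the same
  // cached client; a region change in the supplied config triggers a rebuild.
  public static boolean returnsCachedClient(
      SparkExecutionEngineConfigSupplier configSupplier, MetricsService metricsService) {
    EMRServerlessClientFactory factory =
        new EMRServerlessClientFactoryImpl(configSupplier, metricsService);
    EMRServerlessClient first = factory.getClient("account-a");
    EMRServerlessClient second = factory.getClient("account-a");
    return first == second;
  }
}
```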
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java
new file mode 100644
index 0000000000..c785067398
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.client;
+
+import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_SQL_APPLICATION_JAR;
+import static org.opensearch.sql.spark.metrics.EmrMetrics.EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT;
+import static org.opensearch.sql.spark.metrics.EmrMetrics.EMR_GET_JOB_RESULT_FAILURE_COUNT;
+import static org.opensearch.sql.spark.metrics.EmrMetrics.EMR_START_JOB_REQUEST_FAILURE_COUNT;
+
+import com.amazonaws.services.emrserverless.AWSEMRServerless;
+import com.amazonaws.services.emrserverless.model.CancelJobRunRequest;
+import com.amazonaws.services.emrserverless.model.CancelJobRunResult;
+import com.amazonaws.services.emrserverless.model.GetJobRunRequest;
+import com.amazonaws.services.emrserverless.model.GetJobRunResult;
+import com.amazonaws.services.emrserverless.model.JobDriver;
+import com.amazonaws.services.emrserverless.model.SparkSubmit;
+import com.amazonaws.services.emrserverless.model.StartJobRunRequest;
+import com.amazonaws.services.emrserverless.model.StartJobRunResult;
+import com.amazonaws.services.emrserverless.model.ValidationException;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import lombok.RequiredArgsConstructor;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.sql.spark.metrics.MetricsService;
+
+@RequiredArgsConstructor
+public class EmrServerlessClientImpl implements EMRServerlessClient {
+
+  private final AWSEMRServerless emrServerless;
+  private final MetricsService metricsService;
+  private static final Logger logger = LogManager.getLogger(EmrServerlessClientImpl.class);
+
+  private static final int MAX_JOB_NAME_LENGTH = 255;
+
+  public static final String GENERIC_INTERNAL_SERVER_ERROR_MESSAGE = "Internal Server Error.";
+
+  @Override
+  public String startJobRun(StartJobRequest startJobRequest) {
+    String resultIndex =
+        startJobRequest.getResultIndex() == null
+            ? DEFAULT_RESULT_INDEX
+            : startJobRequest.getResultIndex();
+    StartJobRunRequest request =
+        new StartJobRunRequest()
+            .withName(StringUtils.truncate(startJobRequest.getJobName(), MAX_JOB_NAME_LENGTH))
+            .withApplicationId(startJobRequest.getApplicationId())
+            .withExecutionRoleArn(startJobRequest.getExecutionRoleArn())
+            .withTags(startJobRequest.getTags())
+            .withExecutionTimeoutMinutes(startJobRequest.executionTimeout())
+            .withJobDriver(
+                new JobDriver()
+                    .withSparkSubmit(
+                        new SparkSubmit()
+                            .withEntryPoint(SPARK_SQL_APPLICATION_JAR)
+                            .withEntryPointArguments(resultIndex)
+                            .withSparkSubmitParameters(startJobRequest.getSparkSubmitParams())));
+
+    StartJobRunResult startJobRunResult =
+        AccessController.doPrivileged(
+            (PrivilegedAction<StartJobRunResult>)
+                () -> {
+                  try {
+                    return emrServerless.startJobRun(request);
+                  } catch (Throwable t) {
+                    logger.error("Error while making start job request to emr:", t);
+                    metricsService.incrementNumericalMetric(EMR_START_JOB_REQUEST_FAILURE_COUNT);
+                    if (t instanceof ValidationException) {
+                      throw new IllegalArgumentException(
+                          "The input fails to satisfy the constraints specified by AWS EMR"
+                              + " Serverless.");
+                    }
+                    throw new RuntimeException(GENERIC_INTERNAL_SERVER_ERROR_MESSAGE);
+                  }
+                });
+    logger.info("Job Run ID: " + startJobRunResult.getJobRunId());
+    return startJobRunResult.getJobRunId();
+  }
+
+  @Override
+  public GetJobRunResult getJobRunResult(String applicationId, String jobId) {
+    GetJobRunRequest request =
+        new GetJobRunRequest().withApplicationId(applicationId).withJobRunId(jobId);
+    GetJobRunResult getJobRunResult =
+        AccessController.doPrivileged(
+            (PrivilegedAction<GetJobRunResult>)
+                () -> {
+                  try {
+                    return emrServerless.getJobRun(request);
+                  } catch (Throwable t) {
+                    logger.error("Error while making get job run request to emr:", t);
+                    metricsService.incrementNumericalMetric(EMR_GET_JOB_RESULT_FAILURE_COUNT);
+                    throw new RuntimeException(GENERIC_INTERNAL_SERVER_ERROR_MESSAGE);
+                  }
+                });
+    logger.info("Job Run state: " + getJobRunResult.getJobRun().getState());
+    return getJobRunResult;
+  }
+
+  @Override
+  public CancelJobRunResult cancelJobRun(
+      String applicationId, String jobId, boolean allowExceptionPropagation) {
+    CancelJobRunRequest cancelJobRunRequest =
+        new CancelJobRunRequest().withJobRunId(jobId).withApplicationId(applicationId);
+    CancelJobRunResult cancelJobRunResult =
+        AccessController.doPrivileged(
+            (PrivilegedAction<CancelJobRunResult>)
+                () -> {
+                  try {
+                    return emrServerless.cancelJobRun(cancelJobRunRequest);
+                  } catch (Throwable t) {
+                    if (allowExceptionPropagation) {
+                      throw t;
+                    } else {
+                      logger.error("Error while making cancel job request to emr:", t);
+                      metricsService.incrementNumericalMetric(EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT);
+                      throw new RuntimeException(GENERIC_INTERNAL_SERVER_ERROR_MESSAGE);
+                    }
+                  }
+                });
+    logger.info(String.format("Job : %s cancelled", cancelJobRunResult.getJobRunId()));
+    return cancelJobRunResult;
+  }
+}
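A minimal caller sketch for the client above. Illustrative only: the application ID, role ARN, job name, and spark-submit string are placeholder values, and the client instance is assumed to be wired up elsewhere (e.g. via an EMRServerlessClientFactory).

import java.util.Map;
import org.opensearch.sql.spark.client.EMRServerlessClient;
import org.opensearch.sql.spark.client.StartJobRequest;

class EmrServerlessClientUsageSketch {
  static String runBatchJob(EMRServerlessClient client) {
    StartJobRequest request =
        new StartJobRequest(
            "my-cluster:batch", // jobName
            null, // accountId (optional)
            "00example-app-id", // placeholder EMR Serverless application id
            "arn:aws:iam::123456789012:role/example-role", // placeholder execution role
            "--class org.apache.spark.sql.FlintJob", // spark-submit params (truncated)
            Map.of("type", "batch"), // tags
            false, // not a structured streaming job
            null); // result index: fall back to the default
    String jobId = client.startJobRun(request);
    client.getJobRunResult("00example-app-id", jobId); // poll current job state
    client.cancelJobRun("00example-app-id", jobId, false); // false: wrap EMR errors
    return jobId;
  }
}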
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java
new file mode 100644
index 0000000000..173b40d453
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.client;
+
+import java.util.Map;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+/**
+ * This POJO carries all the fields required for EMR Serverless job submission. Used as the model
+ * in the {@link EMRServerlessClient} interface.
+ */
+@Data
+@EqualsAndHashCode
+public class StartJobRequest {
+
+  public static final Long DEFAULT_JOB_TIMEOUT = 120L;
+
+  private final String jobName;
+  // optional
+  private final String accountId;
+  private final String applicationId;
+  private final String executionRoleArn;
+  private final String sparkSubmitParams;
+  private final Map<String, String> tags;
+
+  /** True if this is a Spark Structured Streaming job. */
+  private final boolean isStructuredStreaming;
+
+  private final String resultIndex;
+
+  public Long executionTimeout() {
+    return isStructuredStreaming ? 0L : DEFAULT_JOB_TIMEOUT;
+  }
+}
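The timeout rule above, spelled out (constructor argument order follows the field declarations; all values are placeholders): batch jobs get the 120-minute default, while structured streaming jobs pass 0 so that long-running jobs are never timed out.

StartJobRequest batch =
    new StartJobRequest("job", null, "app", "role", "params", Map.of(), false, null);
StartJobRequest streaming =
    new StartJobRequest("job", null, "app", "role", "params", Map.of(), true, null);
assert batch.executionTimeout() == 120L;
assert streaming.executionTimeout() == 0L;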
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfig.java b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfig.java
new file mode 100644
index 0000000000..51407111b6
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfig.java
@@ -0,0 +1,20 @@
+package org.opensearch.sql.spark.config;
+
+import lombok.Builder;
+import lombok.Data;
+
+/**
+ * POJO for the Spark execution engine config. Acts as the interface between {@link
+ * org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService} and {@link
+ * SparkExecutionEngineConfigSupplier}
+ */
+@Data
+@Builder
+public class SparkExecutionEngineConfig {
+  private String accountId;
+  private String applicationId;
+  private String region;
+  private String executionRoleARN;
+  private SparkSubmitParameterModifier sparkSubmitParameterModifier;
+  private String clusterName;
+}
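A sketch of constructing the config through the Lombok-generated builder (all values are placeholders):

SparkExecutionEngineConfig config =
    SparkExecutionEngineConfig.builder()
        .applicationId("00example-app-id")
        .region("us-east-1")
        .executionRoleARN("arn:aws:iam::123456789012:role/example-role")
        .clusterName("my-cluster")
        .build();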
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplier.java b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplier.java
new file mode 100644
index 0000000000..725df6bb0c
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplier.java
@@ -0,0 +1,15 @@
+package org.opensearch.sql.spark.config;
+
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+
+/** Interface for extracting and providing SparkExecutionEngineConfig */
+public interface SparkExecutionEngineConfigSupplier {
+
+  /**
+   * Get SparkExecutionEngineConfig
+   *
+   * @return {@link SparkExecutionEngineConfig}.
+   */
+  SparkExecutionEngineConfig getSparkExecutionEngineConfig(
+      AsyncQueryRequestContext asyncQueryRequestContext);
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkSubmitParameterModifier.java b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkSubmitParameterModifier.java
new file mode 100644
index 0000000000..a50491078c
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/config/SparkSubmitParameterModifier.java
@@ -0,0 +1,12 @@
+package org.opensearch.sql.spark.config;
+
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilder;
+
+/**
+ * Interface for an extension point that allows modification of the Spark submit parameters. The
+ * modifyParameters method is called after the default Spark submit parameters are built. To be
+ * deprecated in favor of {@link org.opensearch.sql.spark.parameter.GeneralSparkParameterComposer}
+ */
+public interface SparkSubmitParameterModifier {
+  void modifyParameters(SparkSubmitParametersBuilder parametersBuilder);
+}
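Because this is a single-method interface, a modifier can be supplied as a lambda. A sketch, assuming the builder exposes a config(key, value) setter (the key below is only an illustration):

SparkSubmitParameterModifier addExecutorMemory =
    parametersBuilder -> parametersBuilder.config("spark.executor.memory", "4g");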
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java b/async-query-core/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java
new file mode 100644
index 0000000000..5b25bc175a
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.data.constants;
+
+public class SparkConstants {
+
+  public static final String JOB_ID_FIELD = "jobRunId";
+
+  public static final String STATUS_FIELD = "status";
+
+  public static final String DATA_FIELD = "data";
+
+  public static final String ERROR_FIELD = "error";
+
+  // EMR Serverless will download this JAR into the local Maven/Ivy cache
+  public static final String SPARK_SQL_APPLICATION_JAR =
+      "file:///home/hadoop/.ivy2/jars/org.opensearch_opensearch-spark-sql-application_2.12-0.3.0-SNAPSHOT.jar";
+  public static final String SPARK_REQUEST_BUFFER_INDEX_NAME = ".query_execution_request";
+  public static final String FLINT_DEFAULT_CLUSTER_NAME = "opensearch-cluster";
+  public static final String FLINT_DEFAULT_HOST = "localhost";
+  public static final String FLINT_DEFAULT_PORT = "9200";
+  public static final String FLINT_DEFAULT_SCHEME = "http";
+  public static final String FLINT_DEFAULT_AUTH = "noauth";
+  public static final String DEFAULT_CLASS_NAME = "org.apache.spark.sql.FlintJob";
+  public static final String S3_AWS_CREDENTIALS_PROVIDER_KEY =
+      "spark.hadoop.fs.s3.customAWSCredentialsProvider";
+  public static final String DRIVER_ENV_ASSUME_ROLE_ARN_KEY =
+      "spark.emr-serverless.driverEnv.ASSUME_ROLE_CREDENTIALS_ROLE_ARN";
+  public static final String EXECUTOR_ENV_ASSUME_ROLE_ARN_KEY =
+      "spark.executorEnv.ASSUME_ROLE_CREDENTIALS_ROLE_ARN";
+  public static final String HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY =
+      "spark.hadoop.aws.catalog.credentials.provider.factory.class";
+  public static final String HIVE_METASTORE_GLUE_ARN_KEY = "spark.hive.metastore.glue.role.arn";
+  public static final String SPARK_JARS_KEY = "spark.jars";
+  public static final String SPARK_JAR_PACKAGES_KEY = "spark.jars.packages";
+  public static final String SPARK_JAR_REPOSITORIES_KEY = "spark.jars.repositories";
+  public static final String SPARK_DRIVER_ENV_JAVA_HOME_KEY =
+      "spark.emr-serverless.driverEnv.JAVA_HOME";
+  public static final String SPARK_EXECUTOR_ENV_JAVA_HOME_KEY = "spark.executorEnv.JAVA_HOME";
+  // Used for logging/metrics in Spark (driver)
+  public static final String SPARK_DRIVER_ENV_FLINT_CLUSTER_NAME_KEY =
+      "spark.emr-serverless.driverEnv.FLINT_CLUSTER_NAME";
+  // Used for logging/metrics in Spark (executor)
+  public static final String SPARK_EXECUTOR_ENV_FLINT_CLUSTER_NAME_KEY =
+      "spark.executorEnv.FLINT_CLUSTER_NAME";
+  public static final String FLINT_INDEX_STORE_HOST_KEY = "spark.datasource.flint.host";
+  public static final String FLINT_INDEX_STORE_PORT_KEY = "spark.datasource.flint.port";
+  public static final String FLINT_INDEX_STORE_SCHEME_KEY = "spark.datasource.flint.scheme";
+  public static final String FLINT_INDEX_STORE_AUTH_KEY = "spark.datasource.flint.auth";
+  public static final String FLINT_INDEX_STORE_AUTH_USERNAME =
+      "spark.datasource.flint.auth.username";
+  public static final String FLINT_INDEX_STORE_AUTH_PASSWORD =
+      "spark.datasource.flint.auth.password";
+  public static final String FLINT_INDEX_STORE_AWSREGION_KEY = "spark.datasource.flint.region";
+  public static final String FLINT_CREDENTIALS_PROVIDER_KEY =
+      "spark.datasource.flint.customAWSCredentialsProvider";
+  public static final String FLINT_DATA_SOURCE_KEY = "spark.flint.datasource.name";
+  public static final String SPARK_SQL_EXTENSIONS_KEY = "spark.sql.extensions";
+  public static final String HIVE_METASTORE_CLASS_KEY =
+      "spark.hadoop.hive.metastore.client.factory.class";
+  public static final String DEFAULT_S3_AWS_CREDENTIALS_PROVIDER_VALUE =
+      "com.amazonaws.emr.AssumeRoleAWSCredentialsProvider";
+  public static final String DEFAULT_GLUE_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY =
+      "com.amazonaws.glue.catalog.metastore.STSAssumeRoleSessionCredentialsProviderFactory";
+  public static final String SPARK_STANDALONE_PACKAGE =
+      "org.opensearch:opensearch-spark-standalone_2.12:0.3.0-SNAPSHOT";
+  public static final String SPARK_LAUNCHER_PACKAGE =
+      "org.opensearch:opensearch-spark-sql-application_2.12:0.3.0-SNAPSHOT";
+  public static final String PPL_STANDALONE_PACKAGE =
+      "org.opensearch:opensearch-spark-ppl_2.12:0.3.0-SNAPSHOT";
+  public static final String AWS_SNAPSHOT_REPOSITORY =
+      "https://aws.oss.sonatype.org/content/repositories/snapshots";
+  public static final String GLUE_HIVE_CATALOG_FACTORY_CLASS =
+      "com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory";
+  public static final String FLINT_DELEGATE_CATALOG =
+      "org.opensearch.sql.FlintDelegatingSessionCatalog";
+  public static final String FLINT_SQL_EXTENSION =
+      "org.opensearch.flint.spark.FlintSparkExtensions";
+  public static final String FLINT_PPL_EXTENSION =
+      "org.opensearch.flint.spark.FlintPPLSparkExtensions";
+
+  public static final String EMR_ASSUME_ROLE_CREDENTIALS_PROVIDER =
+      "com.amazonaws.emr.AssumeRoleAWSCredentialsProvider";
+  public static final String JAVA_HOME_LOCATION = "/usr/lib/jvm/java-17-amazon-corretto.x86_64/";
+  public static final String FLINT_JOB_QUERY = "spark.flint.job.query";
+  public static final String FLINT_JOB_REQUEST_INDEX = "spark.flint.job.requestIndex";
+  public static final String FLINT_JOB_SESSION_ID = "spark.flint.job.sessionId";
+
+  public static final String FLINT_SESSION_CLASS_NAME = "org.apache.spark.sql.FlintREPL";
+
+  public static final String SPARK_CATALOG = "spark.sql.catalog.spark_catalog";
+  public static final String ICEBERG_SESSION_CATALOG =
+      "org.apache.iceberg.spark.SparkSessionCatalog";
+  public static final String ICEBERG_SPARK_EXTENSION =
+      "org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions";
+  public static final String ICEBERG_SPARK_RUNTIME_PACKAGE =
+      "/usr/share/aws/iceberg/lib/iceberg-spark3-runtime.jar";
+  public static final String SPARK_CATALOG_CATALOG_IMPL =
+      "spark.sql.catalog.spark_catalog.catalog-impl";
+  public static final String ICEBERG_GLUE_CATALOG = "org.apache.iceberg.aws.glue.GlueCatalog";
+
+  public static final String EMR_LAKEFORMATION_OPTION =
+      "spark.emr-serverless.lakeformation.enabled";
+  public static final String FLINT_ACCELERATE_USING_COVERING_INDEX =
+      "spark.flint.optimizer.covering.enabled";
+}
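For orientation: the *_KEY constants above end up as --conf entries in the generated spark-submit parameter string. A sketch of that composition (the real assembly lives in SparkSubmitParametersBuilder, which is not shown here):

String conf =
    " --conf " + FLINT_INDEX_STORE_HOST_KEY + "=" + FLINT_DEFAULT_HOST
        + " --conf " + FLINT_INDEX_STORE_PORT_KEY + "=" + FLINT_DEFAULT_PORT;
// yields " --conf spark.datasource.flint.host=localhost --conf spark.datasource.flint.port=9200"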
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java
new file mode 100644
index 0000000000..d61ac17aa3
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
+
+import com.amazonaws.services.emrserverless.model.JobRunState;
+import org.json.JSONObject;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.execution.statement.StatementState;
+
+/** Process async query request. */
+public abstract class AsyncQueryHandler {
+
+  public JSONObject getQueryResponse(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    JSONObject result = getResponseFromResultIndex(asyncQueryJobMetadata);
+    if (result.has(DATA_FIELD)) {
+      JSONObject items = result.getJSONObject(DATA_FIELD);
+
+      // If items have STATUS_FIELD, use it; otherwise, mark failed
+      String status = items.optString(STATUS_FIELD, JobRunState.FAILED.toString());
+      result.put(STATUS_FIELD, status);
+
+      // If items have ERROR_FIELD, use it; otherwise, set empty string
+      String error = items.optString(ERROR_FIELD, "");
+      result.put(ERROR_FIELD, error);
+      return result;
+    } else {
+      JSONObject statement = getResponseFromExecutor(asyncQueryJobMetadata);
+
+      // Consider statement still running if state is success but query result unavailable
+      if (isSuccessState(statement)) {
+        statement.put(STATUS_FIELD, StatementState.RUNNING.getState());
+      }
+      return statement;
+    }
+  }
+
+  private boolean isSuccessState(JSONObject statement) {
+    return StatementState.SUCCESS.getState().equalsIgnoreCase(statement.optString(STATUS_FIELD));
+  }
+
+  protected abstract JSONObject getResponseFromResultIndex(
+      AsyncQueryJobMetadata asyncQueryJobMetadata);
+
+  protected abstract JSONObject getResponseFromExecutor(
+      AsyncQueryJobMetadata asyncQueryJobMetadata);
+
+  public abstract String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata);
+
+  public abstract DispatchQueryResponse submit(
+      DispatchQueryRequest request, DispatchQueryContext context);
+}
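A sketch of the template method above: when the result index has no DATA_FIELD yet, getQueryResponse falls through to the executor-side status, and a SUCCESS state is deliberately demoted to RUNNING until the result document lands. The stub subclass is illustrative only (it assumes SUCCESS serializes as "success"):

AsyncQueryHandler stub =
    new AsyncQueryHandler() {
      @Override
      protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata m) {
        return new JSONObject(); // no DATA_FIELD -> fall through to the executor
      }

      @Override
      protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata m) {
        return new JSONObject().put(STATUS_FIELD, "success"); // will be reported as running
      }

      @Override
      public String cancelJob(AsyncQueryJobMetadata m) {
        return m.getQueryId();
      }

      @Override
      public DispatchQueryResponse submit(DispatchQueryRequest r, DispatchQueryContext c) {
        throw new UnsupportedOperationException("stub");
      }
    };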
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java
new file mode 100644
index 0000000000..2654f83aad
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
+import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY;
+import static org.opensearch.sql.spark.metrics.EmrMetrics.EMR_BATCH_QUERY_JOBS_CREATION_COUNT;
+
+import com.amazonaws.services.emrserverless.model.GetJobRunResult;
+import java.util.Map;
+import lombok.RequiredArgsConstructor;
+import org.json.JSONObject;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.client.EMRServerlessClient;
+import org.opensearch.sql.spark.client.StartJobRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.metrics.MetricsService;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+/**
+ * The handler for batch query. With batch query, queries are executed as a single batch. The
+ * queries are sent along with the job execution request ({@link StartJobRequest}) to Spark.
+ */
+@RequiredArgsConstructor
+public class BatchQueryHandler extends AsyncQueryHandler {
+  protected final EMRServerlessClient emrServerlessClient;
+  protected final JobExecutionResponseReader jobExecutionResponseReader;
+  protected final LeaseManager leaseManager;
+  protected final MetricsService metricsService;
+  protected final SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider;
+
+  @Override
+  protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    // either an empty JSON when the result is not available, or data with a status
+    // Fetch from Result Index
+    return jobExecutionResponseReader.getResultWithJobId(
+        asyncQueryJobMetadata.getJobId(), asyncQueryJobMetadata.getResultIndex());
+  }
+
+  @Override
+  protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    JSONObject result = new JSONObject();
+    // make call to EMR Serverless when related result index documents are not available
+    GetJobRunResult getJobRunResult =
+        emrServerlessClient.getJobRunResult(
+            asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId());
+    String jobState = getJobRunResult.getJobRun().getState();
+    result.put(STATUS_FIELD, jobState);
+    result.put(ERROR_FIELD, "");
+    return result;
+  }
+
+  @Override
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    emrServerlessClient.cancelJobRun(
+        asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId(), false);
+    return asyncQueryJobMetadata.getQueryId();
+  }
+
+  @Override
+  public DispatchQueryResponse submit(
+      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
+    String clusterName = dispatchQueryRequest.getClusterName();
+    Map<String, String> tags = context.getTags();
+    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
+
+    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
+    StartJobRequest startJobRequest =
+        new StartJobRequest(
+            clusterName + ":" + JobType.BATCH.getText(),
+            dispatchQueryRequest.getAccountId(),
+            dispatchQueryRequest.getApplicationId(),
+            dispatchQueryRequest.getExecutionRoleARN(),
+            sparkSubmitParametersBuilderProvider
+                .getSparkSubmitParametersBuilder()
+                .clusterName(clusterName)
+                .query(dispatchQueryRequest.getQuery())
+                .dataSource(
+                    context.getDataSourceMetadata(),
+                    dispatchQueryRequest,
+                    context.getAsyncQueryRequestContext())
+                .acceptModifier(dispatchQueryRequest.getSparkSubmitParameterModifier())
+                .acceptComposers(dispatchQueryRequest, context.getAsyncQueryRequestContext())
+                .toString(),
+            tags,
+            false,
+            dataSourceMetadata.getResultIndex());
+    String jobId = emrServerlessClient.startJobRun(startJobRequest);
+    metricsService.incrementNumericalMetric(EMR_BATCH_QUERY_JOBS_CREATION_COUNT);
+    return DispatchQueryResponse.builder()
+        .queryId(context.getQueryId())
+        .jobId(jobId)
+        .resultIndex(dataSourceMetadata.getResultIndex())
+        .datasourceName(dataSourceMetadata.getName())
+        .jobType(JobType.INTERACTIVE)
+        .build();
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/DatasourceEmbeddedQueryIdProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/DatasourceEmbeddedQueryIdProvider.java
new file mode 100644
index 0000000000..c170040718
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/DatasourceEmbeddedQueryIdProvider.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.utils.IDUtils;
+
+/** Generates a query ID by embedding the datasource name and a random UUID. */
+public class DatasourceEmbeddedQueryIdProvider implements QueryIdProvider {
+
+  @Override
+  public String getQueryId(DispatchQueryRequest dispatchQueryRequest) {
+    return IDUtils.encode(dispatchQueryRequest.getDatasource());
+  }
+}
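IDUtils itself is not part of this change's visible files; a minimal sketch of the "random prefix plus datasource name, Base64-encoded" idea it implements (assumed shape, not the actual implementation):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.UUID;

final class QueryIdSketch {
  static String encode(String datasourceName) {
    String raw = UUID.randomUUID() + datasourceName; // embed the datasource name
    return Base64.getEncoder().encodeToString(raw.getBytes(StandardCharsets.UTF_8));
  }
}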
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java
new file mode 100644
index 0000000000..e8413f469c
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
+
+import com.amazonaws.services.emrserverless.model.JobRunState;
+import java.util.Map;
+import lombok.RequiredArgsConstructor;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONObject;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult;
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.execution.statement.StatementState;
+import org.opensearch.sql.spark.flint.FlintIndexMetadata;
+import org.opensearch.sql.spark.flint.FlintIndexMetadataService;
+import org.opensearch.sql.spark.flint.IndexDMLResultStorageService;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOp;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+/**
+ * The handler for index DML (Data Manipulation Language) queries. Handles DROP/ALTER/VACUUM
+ * operations for Flint indices. It stops the streaming query job as needed (e.g. when a Flint
+ * index is automatically updated by a streaming query, that streaming query is stopped before the
+ * index is dropped).
+ */
+@RequiredArgsConstructor
+public class IndexDMLHandler extends AsyncQueryHandler {
+  private static final Logger LOG = LogManager.getLogger();
+
+  // To be deprecated in 3.0. Still used for backward compatibility.
+  public static final String DROP_INDEX_JOB_ID = "dropIndexJobId";
+  public static final String DML_QUERY_JOB_ID = "DMLQueryJobId";
+
+  private final JobExecutionResponseReader jobExecutionResponseReader;
+  private final FlintIndexMetadataService flintIndexMetadataService;
+  private final IndexDMLResultStorageService indexDMLResultStorageService;
+  private final FlintIndexOpFactory flintIndexOpFactory;
+
+  public static boolean isIndexDMLQuery(String jobId) {
+    return DROP_INDEX_JOB_ID.equalsIgnoreCase(jobId) || DML_QUERY_JOB_ID.equalsIgnoreCase(jobId);
+  }
+
+  @Override
+  public DispatchQueryResponse submit(
+      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
+    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
+    long startTime = System.currentTimeMillis();
+    try {
+      IndexQueryDetails indexDetails = context.getIndexQueryDetails();
+      FlintIndexMetadata indexMetadata = getFlintIndexMetadata(indexDetails);
+
+      getIndexOp(dispatchQueryRequest, indexDetails).apply(indexMetadata);
+
+      String asyncQueryId =
+          storeIndexDMLResult(
+              context.getQueryId(),
+              dispatchQueryRequest,
+              dataSourceMetadata,
+              JobRunState.SUCCESS.toString(),
+              StringUtils.EMPTY,
+              getElapsedTimeSince(startTime),
+              context.getAsyncQueryRequestContext());
+      return DispatchQueryResponse.builder()
+          .queryId(asyncQueryId)
+          .jobId(DML_QUERY_JOB_ID)
+          .resultIndex(dataSourceMetadata.getResultIndex())
+          .datasourceName(dataSourceMetadata.getName())
+          .jobType(JobType.INTERACTIVE)
+          .build();
+    } catch (Exception e) {
+      LOG.error(e.getMessage());
+      String asyncQueryId =
+          storeIndexDMLResult(
+              context.getQueryId(),
+              dispatchQueryRequest,
+              dataSourceMetadata,
+              JobRunState.FAILED.toString(),
+              e.getMessage(),
+              getElapsedTimeSince(startTime),
+              context.getAsyncQueryRequestContext());
+      return DispatchQueryResponse.builder()
+          .queryId(asyncQueryId)
+          .jobId(DML_QUERY_JOB_ID)
+          .resultIndex(dataSourceMetadata.getResultIndex())
+          .datasourceName(dataSourceMetadata.getName())
+          .jobType(JobType.INTERACTIVE)
+          .build();
+    }
+  }
+
+  private String storeIndexDMLResult(
+      String queryId,
+      DispatchQueryRequest dispatchQueryRequest,
+      DataSourceMetadata dataSourceMetadata,
+      String status,
+      String error,
+      long queryRunTime,
+      AsyncQueryRequestContext asyncQueryRequestContext) {
+    IndexDMLResult indexDMLResult =
+        IndexDMLResult.builder()
+            .queryId(queryId)
+            .status(status)
+            .error(error)
+            .datasourceName(dispatchQueryRequest.getDatasource())
+            .queryRunTime(queryRunTime)
+            .updateTime(System.currentTimeMillis())
+            .build();
+    indexDMLResultStorageService.createIndexDMLResult(indexDMLResult, asyncQueryRequestContext);
+    return queryId;
+  }
+
+  private long getElapsedTimeSince(long startTime) {
+    return System.currentTimeMillis() - startTime;
+  }
+
+  private FlintIndexOp getIndexOp(
+      DispatchQueryRequest dispatchQueryRequest, IndexQueryDetails indexQueryDetails) {
+    switch (indexQueryDetails.getIndexQueryActionType()) {
+      case DROP:
+        return flintIndexOpFactory.getDrop(dispatchQueryRequest.getDatasource());
+      case ALTER:
+        return flintIndexOpFactory.getAlter(
+            indexQueryDetails.getFlintIndexOptions(), dispatchQueryRequest.getDatasource());
+      case VACUUM:
+        return flintIndexOpFactory.getVacuum(dispatchQueryRequest.getDatasource());
+      default:
+        throw new IllegalStateException(
+            String.format(
+                "IndexQueryActionType: %s is not supported in IndexDMLHandler.",
+                indexQueryDetails.getIndexQueryActionType()));
+    }
+  }
+
+  private FlintIndexMetadata getFlintIndexMetadata(IndexQueryDetails indexDetails) {
+    Map<String, FlintIndexMetadata> indexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(indexDetails.openSearchIndexName());
+    if (!indexMetadataMap.containsKey(indexDetails.openSearchIndexName())) {
+      throw new IllegalStateException(
+          String.format(
+              "Couldn't fetch flint index: %s details", indexDetails.openSearchIndexName()));
+    }
+    return indexMetadataMap.get(indexDetails.openSearchIndexName());
+  }
+
+  @Override
+  protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    String queryId = asyncQueryJobMetadata.getQueryId();
+    return jobExecutionResponseReader.getResultWithQueryId(
+        queryId, asyncQueryJobMetadata.getResultIndex());
+  }
+
+  @Override
+  protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    // Consider statement still running if result doc created in submit() is not available yet
+    JSONObject result = new JSONObject();
+    result.put(STATUS_FIELD, StatementState.RUNNING.getState());
+    result.put(ERROR_FIELD, "");
+    return result;
+  }
+
+  @Override
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    throw new IllegalArgumentException("can't cancel index DML query");
+  }
+}
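The two magic job ids above let the dispatcher recognize DML "jobs" that never ran on EMR, e.g.:

IndexDMLHandler.isIndexDMLQuery("DMLQueryJobId"); // true
IndexDMLHandler.isIndexDMLQuery("dropIndexJobId"); // true (legacy id)
IndexDMLHandler.isIndexDMLQuery("00abc0example"); // false (placeholder for a real EMR job run id)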
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java
new file mode 100644
index 0000000000..ec43bccf11
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_SESSION_CLASS_NAME;
+import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
+import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY;
+
+import java.util.Map;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.json.JSONObject;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.execution.session.CreateSessionRequest;
+import org.opensearch.sql.spark.execution.session.Session;
+import org.opensearch.sql.spark.execution.session.SessionManager;
+import org.opensearch.sql.spark.execution.statement.QueryRequest;
+import org.opensearch.sql.spark.execution.statement.Statement;
+import org.opensearch.sql.spark.execution.statement.StatementId;
+import org.opensearch.sql.spark.execution.statement.StatementState;
+import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.leasemanager.model.LeaseRequest;
+import org.opensearch.sql.spark.metrics.EmrMetrics;
+import org.opensearch.sql.spark.metrics.MetricsService;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+/**
+ * The handler for interactive query. With interactive query, a session is established first and
+ * then reused for the following queries (statements). A session is an abstraction of a Spark job;
+ * once the job is started, it continuously polls for statements and executes the query specified
+ * in each one.
+ */
+@RequiredArgsConstructor
+public class InteractiveQueryHandler extends AsyncQueryHandler {
+  private final SessionManager sessionManager;
+  private final JobExecutionResponseReader jobExecutionResponseReader;
+  private final LeaseManager leaseManager;
+  private final MetricsService metricsService;
+  protected final SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider;
+
+  @Override
+  protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    String queryId = asyncQueryJobMetadata.getQueryId();
+    return jobExecutionResponseReader.getResultWithQueryId(
+        queryId, asyncQueryJobMetadata.getResultIndex());
+  }
+
+  @Override
+  protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    JSONObject result = new JSONObject();
+    String queryId = asyncQueryJobMetadata.getQueryId();
+    Statement statement =
+        getStatementByQueryId(
+            asyncQueryJobMetadata.getSessionId(),
+            queryId,
+            asyncQueryJobMetadata.getDatasourceName());
+    StatementState statementState = statement.getStatementState();
+    result.put(STATUS_FIELD, statementState.getState());
+    result.put(ERROR_FIELD, Optional.ofNullable(statement.getStatementModel().getError()).orElse(""));
+    return result;
+  }
+
+  @Override
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    String queryId = asyncQueryJobMetadata.getQueryId();
+    getStatementByQueryId(
+            asyncQueryJobMetadata.getSessionId(),
+            queryId,
+            asyncQueryJobMetadata.getDatasourceName())
+        .cancel();
+    return queryId;
+  }
+
+  @Override
+  public DispatchQueryResponse submit(
+      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
+    Session session = null;
+    String clusterName = dispatchQueryRequest.getClusterName();
+    Map<String, String> tags = context.getTags();
+    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
+
+    // todo, manage lease lifecycle
+    leaseManager.borrow(
+        new LeaseRequest(JobType.INTERACTIVE, dispatchQueryRequest.getDatasource()));
+
+    if (dispatchQueryRequest.getSessionId() != null) {
+      // get session from request
+      Optional<Session> existingSession =
+          sessionManager.getSession(
+              dispatchQueryRequest.getSessionId(), dispatchQueryRequest.getDatasource());
+      if (existingSession.isPresent()) {
+        session = existingSession.get();
+      }
+    }
+    if (session == null
+        || !session.isOperationalForDataSource(dispatchQueryRequest.getDatasource())) {
+      // create a session if one does not exist, or if the existing session is dead/failed
+      tags.put(JOB_TYPE_TAG_KEY, JobType.INTERACTIVE.getText());
+      session =
+          sessionManager.createSession(
+              new CreateSessionRequest(
+                  clusterName,
+                  dispatchQueryRequest.getAccountId(),
+                  dispatchQueryRequest.getApplicationId(),
+                  dispatchQueryRequest.getExecutionRoleARN(),
+                  sparkSubmitParametersBuilderProvider
+                      .getSparkSubmitParametersBuilder()
+                      .className(FLINT_SESSION_CLASS_NAME)
+                      .clusterName(clusterName)
+                      .dataSource(
+                          dataSourceMetadata,
+                          dispatchQueryRequest,
+                          context.getAsyncQueryRequestContext())
+                      .acceptModifier(dispatchQueryRequest.getSparkSubmitParameterModifier())
+                      .acceptComposers(dispatchQueryRequest, context.getAsyncQueryRequestContext()),
+                  tags,
+                  dataSourceMetadata.getResultIndex(),
+                  dataSourceMetadata.getName()),
+              context.getAsyncQueryRequestContext());
+      metricsService.incrementNumericalMetric(EmrMetrics.EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT);
+    }
+    session.submit(
+        new QueryRequest(
+            context.getQueryId(),
+            dispatchQueryRequest.getLangType(),
+            dispatchQueryRequest.getQuery()),
+        context.getAsyncQueryRequestContext());
+    return DispatchQueryResponse.builder()
+        .queryId(context.getQueryId())
+        .jobId(session.getSessionModel().getJobId())
+        .resultIndex(dataSourceMetadata.getResultIndex())
+        .sessionId(session.getSessionId())
+        .datasourceName(dataSourceMetadata.getName())
+        .jobType(JobType.INTERACTIVE)
+        .build();
+  }
+
+  private Statement getStatementByQueryId(String sessionId, String queryId, String datasourceName) {
+    Optional<Session> session = sessionManager.getSession(sessionId, datasourceName);
+    if (session.isPresent()) {
+      // todo, statementId == jobId if statement running in session.
+      StatementId statementId = new StatementId(queryId);
+      Optional<Statement> statement = session.get().get(statementId);
+      if (statement.isPresent()) {
+        return statement.get();
+      } else {
+        throw new IllegalArgumentException("no statement found. " + statementId);
+      }
+    } else {
+      throw new IllegalArgumentException("no session found. " + sessionId);
+    }
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryHandlerFactory.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryHandlerFactory.java
new file mode 100644
index 0000000000..d6e70a9d86
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryHandlerFactory.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import lombok.RequiredArgsConstructor;
+import org.opensearch.sql.spark.client.EMRServerlessClientFactory;
+import org.opensearch.sql.spark.execution.session.SessionManager;
+import org.opensearch.sql.spark.flint.FlintIndexMetadataService;
+import org.opensearch.sql.spark.flint.IndexDMLResultStorageService;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory;
+import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.metrics.MetricsService;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+@RequiredArgsConstructor
+public class QueryHandlerFactory {
+
+  private final JobExecutionResponseReader jobExecutionResponseReader;
+  private final FlintIndexMetadataService flintIndexMetadataService;
+  private final SessionManager sessionManager;
+  private final LeaseManager leaseManager;
+  private final IndexDMLResultStorageService indexDMLResultStorageService;
+  private final FlintIndexOpFactory flintIndexOpFactory;
+  private final EMRServerlessClientFactory emrServerlessClientFactory;
+  private final MetricsService metricsService;
+  protected final SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider;
+
+  public RefreshQueryHandler getRefreshQueryHandler(String accountId) {
+    return new RefreshQueryHandler(
+        emrServerlessClientFactory.getClient(accountId),
+        jobExecutionResponseReader,
+        flintIndexMetadataService,
+        leaseManager,
+        flintIndexOpFactory,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+  }
+
+  public StreamingQueryHandler getStreamingQueryHandler(String accountId) {
+    return new StreamingQueryHandler(
+        emrServerlessClientFactory.getClient(accountId),
+        jobExecutionResponseReader,
+        leaseManager,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+  }
+
+  public BatchQueryHandler getBatchQueryHandler(String accountId) {
+    return new BatchQueryHandler(
+        emrServerlessClientFactory.getClient(accountId),
+        jobExecutionResponseReader,
+        leaseManager,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+  }
+
+  public InteractiveQueryHandler getInteractiveQueryHandler() {
+    return new InteractiveQueryHandler(
+        sessionManager,
+        jobExecutionResponseReader,
+        leaseManager,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+  }
+
+  public IndexDMLHandler getIndexDMLHandler() {
+    return new IndexDMLHandler(
+        jobExecutionResponseReader,
+        flintIndexMetadataService,
+        indexDMLResultStorageService,
+        flintIndexOpFactory);
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryIdProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryIdProvider.java
new file mode 100644
index 0000000000..2167eb6b7a
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/QueryIdProvider.java
@@ -0,0 +1,13 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+
+/** Interface for an extension point to specify the query ID. Called when a new query is executed. */
+public interface QueryIdProvider {
+  String getQueryId(DispatchQueryRequest dispatchQueryRequest);
+}
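A trivial alternative provider, as a sketch of the extension point (illustrative only; the shipped implementation is the DatasourceEmbeddedQueryIdProvider above):

public class UuidQueryIdProvider implements QueryIdProvider {
  @Override
  public String getQueryId(DispatchQueryRequest dispatchQueryRequest) {
    return java.util.UUID.randomUUID().toString(); // ignores the request entirely
  }
}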
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/RefreshQueryHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/RefreshQueryHandler.java
new file mode 100644
index 0000000000..99984ecc46
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/RefreshQueryHandler.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import java.util.Map;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.client.EMRServerlessClient;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.flint.FlintIndexMetadata;
+import org.opensearch.sql.spark.flint.FlintIndexMetadataService;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOp;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory;
+import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.leasemanager.model.LeaseRequest;
+import org.opensearch.sql.spark.metrics.MetricsService;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+/**
+ * The handler for refresh query. A refresh query is a one-time query request to refresh (update) a
+ * Flint index; a new job is submitted to Spark for each request.
+ */
+public class RefreshQueryHandler extends BatchQueryHandler {
+
+  private final FlintIndexMetadataService flintIndexMetadataService;
+  private final FlintIndexOpFactory flintIndexOpFactory;
+
+  public RefreshQueryHandler(
+      EMRServerlessClient emrServerlessClient,
+      JobExecutionResponseReader jobExecutionResponseReader,
+      FlintIndexMetadataService flintIndexMetadataService,
+      LeaseManager leaseManager,
+      FlintIndexOpFactory flintIndexOpFactory,
+      MetricsService metricsService,
+      SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider) {
+    super(
+        emrServerlessClient,
+        jobExecutionResponseReader,
+        leaseManager,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+    this.flintIndexMetadataService = flintIndexMetadataService;
+    this.flintIndexOpFactory = flintIndexOpFactory;
+  }
+
+  @Override
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    String datasourceName = asyncQueryJobMetadata.getDatasourceName();
+    Map<String, FlintIndexMetadata> indexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(asyncQueryJobMetadata.getIndexName());
+    if (!indexMetadataMap.containsKey(asyncQueryJobMetadata.getIndexName())) {
+      throw new IllegalStateException(
+          String.format(
+              "Couldn't fetch flint index: %s details", asyncQueryJobMetadata.getIndexName()));
+    }
+    FlintIndexMetadata indexMetadata = indexMetadataMap.get(asyncQueryJobMetadata.getIndexName());
+    FlintIndexOp jobCancelOp = flintIndexOpFactory.getCancel(datasourceName);
+    jobCancelOp.apply(indexMetadata);
+    return asyncQueryJobMetadata.getQueryId();
+  }
+
+  @Override
+  public DispatchQueryResponse submit(
+      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
+    leaseManager.borrow(new LeaseRequest(JobType.BATCH, dispatchQueryRequest.getDatasource()));
+
+    DispatchQueryResponse resp = super.submit(dispatchQueryRequest, context);
+    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
+    return DispatchQueryResponse.builder()
+        .queryId(resp.getQueryId())
+        .jobId(resp.getJobId())
+        .resultIndex(resp.getResultIndex())
+        .sessionId(resp.getSessionId())
+        .datasourceName(dataSourceMetadata.getName())
+        .jobType(JobType.BATCH)
+        .indexName(context.getIndexQueryDetails().openSearchIndexName())
+        .build();
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java
new file mode 100644
index 0000000000..945d59b0bc
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import java.util.HashMap;
+import java.util.Map;
+import lombok.AllArgsConstructor;
+import org.jetbrains.annotations.NotNull;
+import org.json.JSONObject;
+import org.opensearch.sql.datasource.DataSourceService;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType;
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.execution.session.SessionManager;
+import org.opensearch.sql.spark.rest.model.LangType;
+import org.opensearch.sql.spark.utils.SQLQueryUtils;
+
+/** This class takes care of understanding the query and dispatching the job to EMR Serverless. */
+@AllArgsConstructor
+public class SparkQueryDispatcher {
+
+  public static final String INDEX_TAG_KEY = "index";
+  public static final String DATASOURCE_TAG_KEY = "datasource";
+  public static final String CLUSTER_NAME_TAG_KEY = "domain_ident";
+  public static final String JOB_TYPE_TAG_KEY = "type";
+
+  private final DataSourceService dataSourceService;
+  private final SessionManager sessionManager;
+  private final QueryHandlerFactory queryHandlerFactory;
+  private final QueryIdProvider queryIdProvider;
+
+  public DispatchQueryResponse dispatch(
+      DispatchQueryRequest dispatchQueryRequest,
+      AsyncQueryRequestContext asyncQueryRequestContext) {
+    DataSourceMetadata dataSourceMetadata =
+        this.dataSourceService.verifyDataSourceAccessAndGetRawMetadata(
+            dispatchQueryRequest.getDatasource(), asyncQueryRequestContext);
+
+    if (LangType.SQL.equals(dispatchQueryRequest.getLangType())
+        && SQLQueryUtils.isFlintExtensionQuery(dispatchQueryRequest.getQuery())) {
+      IndexQueryDetails indexQueryDetails = getIndexQueryDetails(dispatchQueryRequest);
+      DispatchQueryContext context =
+          getDefaultDispatchContextBuilder(dispatchQueryRequest, dataSourceMetadata)
+              .indexQueryDetails(indexQueryDetails)
+              .asyncQueryRequestContext(asyncQueryRequestContext)
+              .build();
+
+      return getQueryHandlerForFlintExtensionQuery(dispatchQueryRequest, indexQueryDetails)
+          .submit(dispatchQueryRequest, context);
+    } else {
+      DispatchQueryContext context =
+          getDefaultDispatchContextBuilder(dispatchQueryRequest, dataSourceMetadata)
+              .asyncQueryRequestContext(asyncQueryRequestContext)
+              .build();
+      return getDefaultAsyncQueryHandler(dispatchQueryRequest.getAccountId())
+          .submit(dispatchQueryRequest, context);
+    }
+  }
+
+  private DispatchQueryContext.DispatchQueryContextBuilder getDefaultDispatchContextBuilder(
+      DispatchQueryRequest dispatchQueryRequest, DataSourceMetadata dataSourceMetadata) {
+    return DispatchQueryContext.builder()
+        .dataSourceMetadata(dataSourceMetadata)
+        .tags(getDefaultTagsForJobSubmission(dispatchQueryRequest))
+        .queryId(queryIdProvider.getQueryId(dispatchQueryRequest));
+  }
+
+  private AsyncQueryHandler getQueryHandlerForFlintExtensionQuery(
+      DispatchQueryRequest dispatchQueryRequest, IndexQueryDetails indexQueryDetails) {
+    if (isEligibleForIndexDMLHandling(indexQueryDetails)) {
+      return queryHandlerFactory.getIndexDMLHandler();
+    } else if (isEligibleForStreamingQuery(indexQueryDetails)) {
+      return queryHandlerFactory.getStreamingQueryHandler(dispatchQueryRequest.getAccountId());
+    } else if (IndexQueryActionType.CREATE.equals(indexQueryDetails.getIndexQueryActionType())) {
+      // Create should be handled by the batch handler. This is to avoid a DROP index query
+      // incorrectly cancelling an interactive job.
+      return queryHandlerFactory.getBatchQueryHandler(dispatchQueryRequest.getAccountId());
+    } else if (IndexQueryActionType.REFRESH.equals(indexQueryDetails.getIndexQueryActionType())) {
+      // Manual refresh should be handled by batch handler
+      return queryHandlerFactory.getRefreshQueryHandler(dispatchQueryRequest.getAccountId());
+    } else {
+      return getDefaultAsyncQueryHandler(dispatchQueryRequest.getAccountId());
+    }
+  }
+
+  @NotNull
+  private AsyncQueryHandler getDefaultAsyncQueryHandler(String accountId) {
+    return sessionManager.isEnabled()
+        ? queryHandlerFactory.getInteractiveQueryHandler()
+        : queryHandlerFactory.getBatchQueryHandler(accountId);
+  }
+
+  @NotNull
+  private static IndexQueryDetails getIndexQueryDetails(DispatchQueryRequest dispatchQueryRequest) {
+    IndexQueryDetails indexQueryDetails =
+        SQLQueryUtils.extractIndexDetails(dispatchQueryRequest.getQuery());
+    fillDatasourceName(dispatchQueryRequest, indexQueryDetails);
+    return indexQueryDetails;
+  }
+
+  private boolean isEligibleForStreamingQuery(IndexQueryDetails indexQueryDetails) {
+    boolean isCreateAutoRefreshIndex =
+        IndexQueryActionType.CREATE.equals(indexQueryDetails.getIndexQueryActionType())
+            && indexQueryDetails.getFlintIndexOptions().autoRefresh();
+    boolean isAlterQuery =
+        IndexQueryActionType.ALTER.equals(indexQueryDetails.getIndexQueryActionType());
+    return isCreateAutoRefreshIndex || isAlterQuery;
+  }
+
+  private boolean isEligibleForIndexDMLHandling(IndexQueryDetails indexQueryDetails) {
+    return IndexQueryActionType.DROP.equals(indexQueryDetails.getIndexQueryActionType())
+        || IndexQueryActionType.VACUUM.equals(indexQueryDetails.getIndexQueryActionType())
+        || (IndexQueryActionType.ALTER.equals(indexQueryDetails.getIndexQueryActionType())
+            && (indexQueryDetails
+                    .getFlintIndexOptions()
+                    .getProvidedOptions()
+                    .containsKey("auto_refresh")
+                && !indexQueryDetails.getFlintIndexOptions().autoRefresh()));
+  }
+
+  public JSONObject getQueryResponse(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    return getAsyncQueryHandlerForExistingQuery(asyncQueryJobMetadata)
+        .getQueryResponse(asyncQueryJobMetadata);
+  }
+
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    return getAsyncQueryHandlerForExistingQuery(asyncQueryJobMetadata)
+        .cancelJob(asyncQueryJobMetadata);
+  }
+
+  private AsyncQueryHandler getAsyncQueryHandlerForExistingQuery(
+      AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    if (asyncQueryJobMetadata.getSessionId() != null) {
+      return queryHandlerFactory.getInteractiveQueryHandler();
+    } else if (IndexDMLHandler.isIndexDMLQuery(asyncQueryJobMetadata.getJobId())) {
+      return queryHandlerFactory.getIndexDMLHandler();
+    } else if (asyncQueryJobMetadata.getJobType() == JobType.BATCH) {
+      return queryHandlerFactory.getRefreshQueryHandler(asyncQueryJobMetadata.getAccountId());
+    } else if (asyncQueryJobMetadata.getJobType() == JobType.STREAMING) {
+      return queryHandlerFactory.getStreamingQueryHandler(asyncQueryJobMetadata.getAccountId());
+    } else {
+      return queryHandlerFactory.getBatchQueryHandler(asyncQueryJobMetadata.getAccountId());
+    }
+  }
+
+  // TODO: Revisit this logic.
+  // Currently, if a datasource is not provided in the query, Spark assumes the datasource to be
+  // the catalog. This is required to handle the drop index case properly when the datasource name
+  // is not provided.
+  private static void fillDatasourceName(
+      DispatchQueryRequest dispatchQueryRequest, IndexQueryDetails indexQueryDetails) {
+    if (indexQueryDetails.getFullyQualifiedTableName() != null
+        && indexQueryDetails.getFullyQualifiedTableName().getDatasourceName() == null) {
+      indexQueryDetails
+          .getFullyQualifiedTableName()
+          .setDatasourceName(dispatchQueryRequest.getDatasource());
+    }
+  }
+
+  private static Map<String, String> getDefaultTagsForJobSubmission(
+      DispatchQueryRequest dispatchQueryRequest) {
+    Map<String, String> tags = new HashMap<>();
+    tags.put(CLUSTER_NAME_TAG_KEY, dispatchQueryRequest.getClusterName());
+    tags.put(DATASOURCE_TAG_KEY, dispatchQueryRequest.getDatasource());
+    return tags;
+  }
+}
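The routing implemented above, summarized (derived from the handler-selection methods; DML eligibility is checked first):

// DROP / VACUUM / ALTER with auto_refresh=false -> IndexDMLHandler
// CREATE with auto_refresh=true, any other ALTER -> StreamingQueryHandler
// CREATE without auto refresh -> BatchQueryHandler
// REFRESH -> RefreshQueryHandler
// non-Flint queries -> InteractiveQueryHandler if sessions are enabled, else BatchQueryHandler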
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java
new file mode 100644
index 0000000000..2fbf2466da
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher;
+
+import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.INDEX_TAG_KEY;
+import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY;
+import static org.opensearch.sql.spark.metrics.EmrMetrics.EMR_STREAMING_QUERY_JOBS_CREATION_COUNT;
+
+import java.util.Map;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
+import org.opensearch.sql.spark.client.EMRServerlessClient;
+import org.opensearch.sql.spark.client.StartJobRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.leasemanager.model.LeaseRequest;
+import org.opensearch.sql.spark.metrics.MetricsService;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider;
+import org.opensearch.sql.spark.response.JobExecutionResponseReader;
+
+/**
+ * The handler for streaming query. A streaming query is a job that continuously updates a Flint
+ * index. Once started, the job can be stopped by an index DML query.
+ */
+public class StreamingQueryHandler extends BatchQueryHandler {
+
+  public StreamingQueryHandler(
+      EMRServerlessClient emrServerlessClient,
+      JobExecutionResponseReader jobExecutionResponseReader,
+      LeaseManager leaseManager,
+      MetricsService metricsService,
+      SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider) {
+    super(
+        emrServerlessClient,
+        jobExecutionResponseReader,
+        leaseManager,
+        metricsService,
+        sparkSubmitParametersBuilderProvider);
+  }
+
+  @Override
+  public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) {
+    throw new IllegalArgumentException(
+        "can't cancel index DML query, using ALTER auto_refresh=off statement to stop job, using"
+            + " VACUUM statement to stop job and delete data");
+  }
+
+  @Override
+  public DispatchQueryResponse submit(
+      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
+
+    leaseManager.borrow(new LeaseRequest(JobType.STREAMING, dispatchQueryRequest.getDatasource()));
+
+    String clusterName = dispatchQueryRequest.getClusterName();
+    IndexQueryDetails indexQueryDetails = context.getIndexQueryDetails();
+    Map<String, String> tags = context.getTags();
+    tags.put(INDEX_TAG_KEY, indexQueryDetails.openSearchIndexName());
+    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
+    tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText());
+    String jobName =
+        clusterName
+            + ":"
+            + JobType.STREAMING.getText()
+            + ":"
+            + indexQueryDetails.openSearchIndexName();
+    StartJobRequest startJobRequest =
+        new StartJobRequest(
+            jobName,
+            dispatchQueryRequest.getAccountId(),
+            dispatchQueryRequest.getApplicationId(),
+            dispatchQueryRequest.getExecutionRoleARN(),
+            sparkSubmitParametersBuilderProvider
+                .getSparkSubmitParametersBuilder()
+                .clusterName(clusterName)
+                .query(dispatchQueryRequest.getQuery())
+                .structuredStreaming(true)
+                .dataSource(
+                    dataSourceMetadata, dispatchQueryRequest, context.getAsyncQueryRequestContext())
+                .acceptModifier(dispatchQueryRequest.getSparkSubmitParameterModifier())
+                .acceptComposers(dispatchQueryRequest, context.getAsyncQueryRequestContext())
+                .toString(),
+            tags,
+            indexQueryDetails.getFlintIndexOptions().autoRefresh(),
+            dataSourceMetadata.getResultIndex());
+    String jobId = emrServerlessClient.startJobRun(startJobRequest);
+    metricsService.incrementNumericalMetric(EMR_STREAMING_QUERY_JOBS_CREATION_COUNT);
+    return DispatchQueryResponse.builder()
+        .queryId(context.getQueryId())
+        .jobId(jobId)
+        .resultIndex(dataSourceMetadata.getResultIndex())
+        .datasourceName(dataSourceMetadata.getName())
+        .jobType(JobType.STREAMING)
+        .indexName(indexQueryDetails.openSearchIndexName())
+        .build();
+  }
+}
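
To make the naming scheme above concrete, here is a minimal sketch (all values hypothetical) of how `submit` composes the EMR-S job name for a streaming query:

```java
import org.opensearch.sql.spark.dispatcher.model.JobType;

class StreamingJobNameSketch {
  public static void main(String[] args) {
    // Hypothetical values; in submit() above they come from the dispatch request
    // and the parsed index details.
    String clusterName = "my-cluster";
    String flintIndexName = "flint_my_glue_default_http_logs_skipping_index";
    String jobName = clusterName + ":" + JobType.STREAMING.getText() + ":" + flintIndexName;
    System.out.println(jobName);
    // my-cluster:streaming:flint_my_glue_default_http_logs_skipping_index
  }
}
```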
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java
new file mode 100644
index 0000000000..aabe43f641
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import java.util.Map;
+import lombok.Builder;
+import lombok.Getter;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+
+@Getter
+@Builder
+public class DispatchQueryContext {
+  private final String queryId;
+  private final DataSourceMetadata dataSourceMetadata;
+  private final Map tags;
+  private final IndexQueryDetails indexQueryDetails;
+  private final AsyncQueryRequestContext asyncQueryRequestContext;
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java
new file mode 100644
index 0000000000..066349873a
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import org.opensearch.sql.spark.config.SparkSubmitParameterModifier;
+import org.opensearch.sql.spark.rest.model.LangType;
+
+@AllArgsConstructor
+@Data
+@Builder
+public class DispatchQueryRequest {
+  private final String accountId;
+  private final String applicationId;
+  private final String query;
+  private final String datasource;
+  private final LangType langType;
+  private final String executionRoleARN;
+  private final String clusterName;
+
+  /* Extension point to modify or add Spark submit parameters. */
+  private final SparkSubmitParameterModifier sparkSubmitParameterModifier;
+
+  /** Optional sessionId. */
+  private String sessionId;
+}
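
Since the class is a Lombok `@Builder`, a dispatch request can be assembled as in this minimal sketch (all values are hypothetical placeholders):

```java
import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
import org.opensearch.sql.spark.rest.model.LangType;

class DispatchQueryRequestSketch {
  public static void main(String[] args) {
    // All values are hypothetical placeholders.
    DispatchQueryRequest request =
        DispatchQueryRequest.builder()
            .applicationId("application-id")
            .query("SELECT * FROM my_glue.default.http_logs LIMIT 10")
            .datasource("my_glue")
            .langType(LangType.SQL)
            .executionRoleARN("arn:aws:iam::123456789012:role/emr-job-role")
            .clusterName("my-cluster")
            .build();
    System.out.println(request.getDatasource()); // my_glue
  }
}
```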
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java
new file mode 100644
index 0000000000..b97d9fd7b0
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java
@@ -0,0 +1,16 @@
+package org.opensearch.sql.spark.dispatcher.model;
+
+import lombok.Builder;
+import lombok.Getter;
+
+@Getter
+@Builder
+public class DispatchQueryResponse {
+  private final String queryId;
+  private final String jobId;
+  private final String resultIndex;
+  private final String sessionId;
+  private final String datasourceName;
+  private final JobType jobType;
+  private final String indexName;
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FlintIndexOptions.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FlintIndexOptions.java
new file mode 100644
index 0000000000..79af1c91ab
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FlintIndexOptions.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Model to store Flint index options. Only the currently required fields are included; this can be
+ * extended in the future.
+ */
+public class FlintIndexOptions {
+
+  public static final String AUTO_REFRESH = "auto_refresh";
+  public static final String INCREMENTAL_REFRESH = "incremental_refresh";
+  public static final String CHECKPOINT_LOCATION = "checkpoint_location";
+  public static final String WATERMARK_DELAY = "watermark_delay";
+  private final Map<String, String> options = new HashMap<>();
+
+  public void setOption(String key, String value) {
+    options.put(key, value);
+  }
+
+  public Optional<String> getOption(String key) {
+    return Optional.ofNullable(options.get(key));
+  }
+
+  public boolean autoRefresh() {
+    return Boolean.parseBoolean(getOption(AUTO_REFRESH).orElse("false"));
+  }
+
+  public Map<String, String> getProvidedOptions() {
+    return new HashMap<>(options);
+  }
+}
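
A minimal sketch of how these options behave; unset options come back empty and `auto_refresh` defaults to false:

```java
import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions;

class FlintIndexOptionsSketch {
  public static void main(String[] args) {
    FlintIndexOptions options = new FlintIndexOptions();
    options.setOption(FlintIndexOptions.AUTO_REFRESH, "true");
    options.setOption(FlintIndexOptions.CHECKPOINT_LOCATION, "s3://my-bucket/checkpoint");

    System.out.println(options.autoRefresh()); // true
    // Unset options come back as an empty Optional rather than null.
    System.out.println(options.getOption(FlintIndexOptions.WATERMARK_DELAY).isPresent()); // false
  }
}
```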
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java
new file mode 100644
index 0000000000..fc1513241f
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import static org.apache.commons.lang3.StringUtils.strip;
+import static org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails.STRIP_CHARS;
+
+import java.util.Arrays;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/** Fully Qualified Table Name in the query provided. */
+@Data
+@NoArgsConstructor
+public class FullyQualifiedTableName {
+  private String datasourceName;
+  private String schemaName;
+  private String tableName;
+  private String fullyQualifiedName;
+
+  /**
+   * This constructor splits the fully qualified name into its pieces. If the name has three or
+   * more parts, the first part is assigned to the datasource name, the second to the schema name,
+   * and the remainder to the table name. If there are only two parts, the first is assigned to the
+   * schema name and the second to the table name. If there is only one part, it is assigned to the
+   * table name.
+   *
+   * @param fullyQualifiedName fullyQualifiedName.
+   */
+  public FullyQualifiedTableName(String fullyQualifiedName) {
+    this.fullyQualifiedName = fullyQualifiedName;
+    String[] parts = fullyQualifiedName.split("\\.");
+    if (parts.length >= 3) {
+      datasourceName = parts[0];
+      schemaName = parts[1];
+      tableName = String.join(".", Arrays.copyOfRange(parts, 2, parts.length));
+    } else if (parts.length == 2) {
+      schemaName = parts[0];
+      tableName = parts[1];
+    } else if (parts.length == 1) {
+      tableName = parts[0];
+    }
+  }
+
+  /**
+   * Convert qualified name to Flint name concat by underscore.
+   *
+   * @return Flint name
+   */
+  public String toFlintName() {
+    StringBuilder builder = new StringBuilder();
+    if (datasourceName != null) {
+      builder.append(strip(datasourceName, STRIP_CHARS)).append("_");
+    }
+    if (schemaName != null) {
+      builder.append(strip(schemaName, STRIP_CHARS)).append("_");
+    }
+    if (tableName != null) {
+      builder.append(strip(tableName, STRIP_CHARS));
+    }
+    return builder.toString();
+  }
+}
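
A short sketch of the splitting and Flint-name rules described above (names hypothetical):

```java
import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName;

class FullyQualifiedTableNameSketch {
  public static void main(String[] args) {
    // Three parts: datasource.schema.table
    FullyQualifiedTableName full = new FullyQualifiedTableName("my_glue.default.http_logs");
    System.out.println(full.getDatasourceName()); // my_glue
    System.out.println(full.getSchemaName()); // default
    System.out.println(full.getTableName()); // http_logs
    System.out.println(full.toFlintName()); // my_glue_default_http_logs

    // Two parts: schema.table; the datasource name stays null.
    FullyQualifiedTableName partial = new FullyQualifiedTableName("default.http_logs");
    System.out.println(partial.getDatasourceName()); // null
  }
}
```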
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java
new file mode 100644
index 0000000000..a276076f4b
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import com.google.common.collect.ImmutableMap;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.experimental.SuperBuilder;
+import org.opensearch.sql.spark.execution.statestore.StateModel;
+
+/** Index DML result created by the plugin. */
+@Data
+@SuperBuilder
+@EqualsAndHashCode(callSuper = false)
+public class IndexDMLResult extends StateModel {
+  private final String queryId;
+  private final String status;
+  private final String error;
+  private final String datasourceName;
+  private final Long queryRunTime;
+  private final Long updateTime;
+
+  public static IndexDMLResult copy(IndexDMLResult copy, ImmutableMap<String, Object> metadata) {
+    return builder()
+        .queryId(copy.queryId)
+        .status(copy.status)
+        .error(copy.error)
+        .datasourceName(copy.datasourceName)
+        .queryRunTime(copy.queryRunTime)
+        .updateTime(copy.updateTime)
+        .metadata(metadata)
+        .build();
+  }
+
+  @Override
+  public String getId() {
+    return queryId;
+  }
+}
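
The `copy` method above follows the copy-with-metadata pattern used by the state store: every domain field is carried over unchanged, and only the storage metadata (sequence number and primary term, used for optimistic concurrency) is replaced. A minimal sketch, using the `XContentSerializerUtil` helper introduced later in this patch:

```java
import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult;
import org.opensearch.sql.spark.execution.xcontent.XContentSerializerUtil;

class IndexDMLResultCopySketch {
  // Produce an identical result object that carries the latest storage metadata.
  static IndexDMLResult withStorageMetadata(IndexDMLResult result, long seqNo, long primaryTerm) {
    return IndexDMLResult.copy(result, XContentSerializerUtil.buildMetadata(seqNo, primaryTerm));
  }
}
```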
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java
new file mode 100644
index 0000000000..96e7d159af
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+/** Enum for the index action type in the given query. */
+public enum IndexQueryActionType {
+  CREATE,
+  REFRESH,
+  DESCRIBE,
+  SHOW,
+  DROP,
+  VACUUM,
+  ALTER
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java
new file mode 100644
index 0000000000..5596d1b425
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+import static org.apache.commons.lang3.StringUtils.strip;
+
+import java.util.Set;
+import lombok.EqualsAndHashCode;
+import lombok.Getter;
+import org.apache.commons.lang3.StringUtils;
+import org.opensearch.sql.spark.flint.FlintIndexType;
+
+/** Index details in an async query. */
+@Getter
+@EqualsAndHashCode
+public class IndexQueryDetails {
+
+  public static final String STRIP_CHARS = "`";
+
+  private static final Set<Character> INVALID_INDEX_NAME_CHARS =
+      Set.of(' ', ',', ':', '"', '+', '/', '\\', '|', '?', '#', '>', '<');
+
+  private String indexName;
+  private FullyQualifiedTableName fullyQualifiedTableName;
+  // by default, auto_refresh = false;
+  private IndexQueryActionType indexQueryActionType;
+  private FlintIndexOptions flintIndexOptions;
+  // materialized view special case where
+  // table name and mv name are combined.
+  private String mvName;
+  private FlintIndexType indexType;
+
+  private IndexQueryDetails() {}
+
+  public static IndexQueryDetailsBuilder builder() {
+    return new IndexQueryDetailsBuilder();
+  }
+
+  // Builder class
+  public static class IndexQueryDetailsBuilder {
+    private final IndexQueryDetails indexQueryDetails;
+
+    public IndexQueryDetailsBuilder() {
+      indexQueryDetails = new IndexQueryDetails();
+    }
+
+    public IndexQueryDetailsBuilder indexName(String indexName) {
+      indexQueryDetails.indexName = indexName;
+      return this;
+    }
+
+    public IndexQueryDetailsBuilder fullyQualifiedTableName(FullyQualifiedTableName tableName) {
+      indexQueryDetails.fullyQualifiedTableName = tableName;
+      return this;
+    }
+
+    public IndexQueryDetailsBuilder indexQueryActionType(
+        IndexQueryActionType indexQueryActionType) {
+      indexQueryDetails.indexQueryActionType = indexQueryActionType;
+      return this;
+    }
+
+    public IndexQueryDetailsBuilder indexOptions(FlintIndexOptions flintIndexOptions) {
+      indexQueryDetails.flintIndexOptions = flintIndexOptions;
+      return this;
+    }
+
+    public IndexQueryDetailsBuilder mvName(String mvName) {
+      indexQueryDetails.mvName = mvName;
+      return this;
+    }
+
+    public IndexQueryDetailsBuilder indexType(FlintIndexType indexType) {
+      indexQueryDetails.indexType = indexType;
+      return this;
+    }
+
+    public IndexQueryDetails build() {
+      if (indexQueryDetails.flintIndexOptions == null) {
+        indexQueryDetails.flintIndexOptions = new FlintIndexOptions();
+      }
+      return indexQueryDetails;
+    }
+  }
+
+  public String openSearchIndexName() {
+    FullyQualifiedTableName fullyQualifiedTableName = getFullyQualifiedTableName();
+    String indexName = StringUtils.EMPTY;
+    switch (getIndexType()) {
+      case COVERING:
+        indexName =
+            "flint_"
+                + fullyQualifiedTableName.toFlintName()
+                + "_"
+                + strip(getIndexName(), STRIP_CHARS)
+                + "_"
+                + getIndexType().getSuffix();
+        break;
+      case SKIPPING:
+        indexName =
+            "flint_" + fullyQualifiedTableName.toFlintName() + "_" + getIndexType().getSuffix();
+        break;
+      case MATERIALIZED_VIEW:
+        indexName = "flint_" + new FullyQualifiedTableName(mvName).toFlintName();
+        break;
+    }
+    return percentEncode(indexName).toLowerCase();
+  }
+
+  /*
+   * Percent-encode invalid OpenSearch index name characters.
+   */
+  private String percentEncode(String indexName) {
+    StringBuilder builder = new StringBuilder(indexName.length());
+    for (char ch : indexName.toCharArray()) {
+      if (INVALID_INDEX_NAME_CHARS.contains(ch)) {
+        builder.append(String.format("%%%02X", (int) ch));
+      } else {
+        builder.append(ch);
+      }
+    }
+    return builder.toString();
+  }
+}
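
Putting the pieces together, a minimal sketch of the index-name derivation for a covering index (names hypothetical):

```java
import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName;
import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
import org.opensearch.sql.spark.flint.FlintIndexType;

class OpenSearchIndexNameSketch {
  public static void main(String[] args) {
    IndexQueryDetails details =
        IndexQueryDetails.builder()
            .indexName("status_idx")
            .fullyQualifiedTableName(new FullyQualifiedTableName("my_glue.default.http_logs"))
            .indexType(FlintIndexType.COVERING)
            .build();
    // "flint_" + flattened table name + "_" + covering index name + "_" + type suffix,
    // lower-cased, with invalid characters percent-encoded.
    System.out.println(details.openSearchIndexName());
    // flint_my_glue_default_http_logs_status_idx_index
  }
}
```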
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java
new file mode 100644
index 0000000000..01f5f422e9
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.dispatcher.model;
+
+public enum JobType {
+  INTERACTIVE("interactive"),
+  STREAMING("streaming"),
+  BATCH("batch");
+
+  private final String text;
+
+  JobType(String text) {
+    this.text = text;
+  }
+
+  public String getText() {
+    return this.text;
+  }
+
+  /**
+   * Get JobType from text.
+   *
+   * @param text text.
+   * @return JobType {@link JobType}.
+   */
+  public static JobType fromString(String text) {
+    for (JobType jobType : JobType.values()) {
+      if (jobType.text.equalsIgnoreCase(text)) {
+        return jobType;
+      }
+    }
+    throw new IllegalArgumentException("No JobType with text " + text + " found");
+  }
+}
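
A quick sketch of the lookup behavior above; matching is case-insensitive and unknown values fail fast:

```java
import org.opensearch.sql.spark.dispatcher.model.JobType;

class JobTypeSketch {
  public static void main(String[] args) {
    System.out.println(JobType.fromString("streaming")); // STREAMING
    System.out.println(JobType.fromString("BATCH")); // BATCH (matching is case-insensitive)
    // JobType.fromString("unknown") throws IllegalArgumentException.
  }
}
```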
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java
new file mode 100644
index 0000000000..6398dd224f
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+import java.util.Map;
+import lombok.Data;
+import org.opensearch.sql.spark.client.StartJobRequest;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilder;
+
+@Data
+public class CreateSessionRequest {
+  private final String clusterName;
+  private final String accountId;
+  private final String applicationId;
+  private final String executionRoleArn;
+  private final SparkSubmitParametersBuilder sparkSubmitParametersBuilder;
+  private final Map<String, String> tags;
+  private final String resultIndex;
+  private final String datasourceName;
+
+  public StartJobRequest getStartJobRequest(String sessionId) {
+    return new InteractiveSessionStartJobRequest(
+        clusterName + ":" + JobType.INTERACTIVE.getText() + ":" + sessionId,
+        accountId,
+        applicationId,
+        executionRoleArn,
+        sparkSubmitParametersBuilder.toString(),
+        tags,
+        resultIndex);
+  }
+
+  static class InteractiveSessionStartJobRequest extends StartJobRequest {
+    public InteractiveSessionStartJobRequest(
+        String jobName,
+        String accountId,
+        String applicationId,
+        String executionRoleArn,
+        String sparkSubmitParams,
+        Map<String, String> tags,
+        String resultIndex) {
+      super(
+          jobName,
+          accountId,
+          applicationId,
+          executionRoleArn,
+          sparkSubmitParams,
+          tags,
+          false,
+          resultIndex);
+    }
+
+    /** Interactive queries keep running, so there is no execution timeout. */
+    @Override
+    public Long executionTimeout() {
+      return 0L;
+    }
+  }
+}
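
As a sketch of the request produced above (assuming the Lombok-generated `getJobName` accessor on `StartJobRequest`, and a hypothetical session id), an interactive session job is named `<clusterName>:interactive:<sessionId>` and never times out:

```java
import org.opensearch.sql.spark.client.StartJobRequest;
import org.opensearch.sql.spark.execution.session.CreateSessionRequest;

class SessionStartJobRequestSketch {
  // Sketch only: the session id here is a hypothetical placeholder.
  static void describe(CreateSessionRequest createSessionRequest) {
    StartJobRequest request = createSessionRequest.getStartJobRequest("session-id-123");
    // Job name has the form "<clusterName>:interactive:<sessionId>".
    System.out.println(request.getJobName());
    // Interactive session jobs never time out.
    System.out.println(request.executionTimeout()); // 0
  }
}
```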
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/DatasourceEmbeddedSessionIdProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/DatasourceEmbeddedSessionIdProvider.java
new file mode 100644
index 0000000000..360563d657
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/DatasourceEmbeddedSessionIdProvider.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+import org.opensearch.sql.spark.utils.IDUtils;
+
+public class DatasourceEmbeddedSessionIdProvider implements SessionIdProvider {
+
+  @Override
+  public String getSessionId(CreateSessionRequest createSessionRequest) {
+    return IDUtils.encode(createSessionRequest.getDatasourceName());
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java
new file mode 100644
index 0000000000..aeedaef4e7
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+import static org.opensearch.sql.spark.execution.session.SessionModel.initInteractiveSession;
+import static org.opensearch.sql.spark.execution.session.SessionState.DEAD;
+import static org.opensearch.sql.spark.execution.session.SessionState.END_STATE;
+import static org.opensearch.sql.spark.execution.session.SessionState.FAIL;
+import static org.opensearch.sql.spark.execution.statement.StatementId.newStatementId;
+
+import java.util.Optional;
+import lombok.Builder;
+import lombok.Getter;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.client.EMRServerlessClient;
+import org.opensearch.sql.spark.client.StartJobRequest;
+import org.opensearch.sql.spark.execution.statement.QueryRequest;
+import org.opensearch.sql.spark.execution.statement.Statement;
+import org.opensearch.sql.spark.execution.statement.StatementId;
+import org.opensearch.sql.spark.execution.statestore.SessionStorageService;
+import org.opensearch.sql.spark.execution.statestore.StatementStorageService;
+import org.opensearch.sql.spark.utils.TimeProvider;
+
+/**
+ * Interactive session.
+ *
+ * <p>ENTRY_STATE: not_started
+ */
+@Getter
+@Builder
+public class InteractiveSession implements Session {
+  private static final Logger LOG = LogManager.getLogger();
+
+  public static final String SESSION_ID_TAG_KEY = "sid";
+
+  private final String sessionId;
+  private final SessionStorageService sessionStorageService;
+  private final StatementStorageService statementStorageService;
+  private final EMRServerlessClient serverlessClient;
+  private SessionModel sessionModel;
+  // the threshold of elapsed time in milliseconds before we say a session is stale
+  private long sessionInactivityTimeoutMilli;
+  private TimeProvider timeProvider;
+
+  @Override
+  public void open(
+      CreateSessionRequest createSessionRequest,
+      AsyncQueryRequestContext asyncQueryRequestContext) {
+    // append session id;
+    createSessionRequest
+        .getSparkSubmitParametersBuilder()
+        .sessionExecution(sessionId, createSessionRequest.getDatasourceName());
+    createSessionRequest.getTags().put(SESSION_ID_TAG_KEY, sessionId);
+    StartJobRequest startJobRequest = createSessionRequest.getStartJobRequest(sessionId);
+    String jobID = serverlessClient.startJobRun(startJobRequest);
+    String applicationId = startJobRequest.getApplicationId();
+    String accountId = createSessionRequest.getAccountId();
+
+    sessionModel =
+        initInteractiveSession(
+            accountId, applicationId, jobID, sessionId, createSessionRequest.getDatasourceName());
+    sessionStorageService.createSession(sessionModel, asyncQueryRequestContext);
+  }
+
+  /** todo. StatementSweeper will delete doc. */
+  @Override
+  public void close() {
+    Optional<SessionModel> model =
+        sessionStorageService.getSession(sessionModel.getId(), sessionModel.getDatasourceName());
+    if (model.isEmpty()) {
+      throw new IllegalStateException("session does not exist. " + sessionModel.getSessionId());
+    } else {
+      serverlessClient.cancelJobRun(
+          sessionModel.getApplicationId(), sessionModel.getJobId(), false);
+    }
+  }
+
+  /** Submit a statement. On successful submission, the statement is in the waiting state. */
+  public StatementId submit(
+      QueryRequest request, AsyncQueryRequestContext asyncQueryRequestContext) {
+    Optional<SessionModel> model =
+        sessionStorageService.getSession(sessionModel.getId(), sessionModel.getDatasourceName());
+    if (model.isEmpty()) {
+      throw new IllegalStateException("session does not exist. " + sessionModel.getSessionId());
+    } else {
+      sessionModel = model.get();
+      if (!END_STATE.contains(sessionModel.getSessionState())) {
+        String qid = request.getQueryId();
+        StatementId statementId = newStatementId(qid);
+        Statement st =
+            Statement.builder()
+                .sessionId(sessionId)
+                .accountId(sessionModel.getAccountId())
+                .applicationId(sessionModel.getApplicationId())
+                .jobId(sessionModel.getJobId())
+                .statementStorageService(statementStorageService)
+                .statementId(statementId)
+                .langType(request.getLangType())
+                .datasourceName(sessionModel.getDatasourceName())
+                .query(request.getQuery())
+                .queryId(qid)
+                .asyncQueryRequestContext(asyncQueryRequestContext)
+                .build();
+        st.open();
+        return statementId;
+      } else {
+        String errMsg =
+            String.format(
+                "can't submit statement, session should not be in end state, "
+                    + "current session state is: %s",
+                sessionModel.getSessionState().getSessionState());
+        LOG.debug(errMsg);
+        throw new IllegalStateException(errMsg);
+      }
+    }
+  }
+
+  @Override
+  public Optional<Statement> get(StatementId stID) {
+    return statementStorageService
+        .getStatement(stID.getId(), sessionModel.getDatasourceName())
+        .map(
+            model ->
+                Statement.builder()
+                    .sessionId(sessionId)
+                    .accountId(model.getAccountId())
+                    .applicationId(model.getApplicationId())
+                    .jobId(model.getJobId())
+                    .statementId(model.getStatementId())
+                    .langType(model.getLangType())
+                    .query(model.getQuery())
+                    .queryId(model.getQueryId())
+                    .statementStorageService(statementStorageService)
+                    .statementModel(model)
+                    .build());
+  }
+
+  @Override
+  public boolean isOperationalForDataSource(String dataSourceName) {
+    boolean isSessionStateValid =
+        sessionModel.getSessionState() != DEAD && sessionModel.getSessionState() != FAIL;
+    boolean isSessionUpdatedRecently =
+        timeProvider.currentEpochMillis() - sessionModel.getLastUpdateTime()
+            <= sessionInactivityTimeoutMilli;
+
+    return isSessionStateValid && isSessionUpdatedRecently;
+  }
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/Session.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/Session.java
new file mode 100644
index 0000000000..fad097ca1b
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/Session.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+import java.util.Optional;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.execution.statement.QueryRequest;
+import org.opensearch.sql.spark.execution.statement.Statement;
+import org.opensearch.sql.spark.execution.statement.StatementId;
+
+/** A Session defines the statement execution context. Each session is bound to one Spark job. */
+public interface Session {
+  /** open session. */
+  void open(
+      CreateSessionRequest createSessionRequest, AsyncQueryRequestContext asyncQueryRequestContext);
+
+  /** close session. */
+  void close();
+
+  /**
+   * submit {@link QueryRequest}.
+   *
+   * @param request {@link QueryRequest}
+   * @param asyncQueryRequestContext {@link AsyncQueryRequestContext}
+   * @return {@link StatementId}
+   */
+  StatementId submit(QueryRequest request, AsyncQueryRequestContext asyncQueryRequestContext);
+
+  /**
+   * get {@link Statement}.
+   *
+   * @param stID {@link StatementId}
+   * @return {@link Statement}
+   */
+  Optional<Statement> get(StatementId stID);
+
+  SessionModel getSessionModel();
+
+  String getSessionId();
+
+  /** return true if session is ready to use. */
+  boolean isOperationalForDataSource(String dataSourceName);
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionConfigSupplier.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionConfigSupplier.java
new file mode 100644
index 0000000000..4084e0f091
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionConfigSupplier.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+/** Interface to abstract session config */
+public interface SessionConfigSupplier {
+  Long getSessionInactivityTimeoutMillis();
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionIdProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionIdProvider.java
new file mode 100644
index 0000000000..c6636fca0c
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionIdProvider.java
@@ -0,0 +1,11 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+/** Interface for extension point to specify sessionId. Called when new session is created. */
+public interface SessionIdProvider {
+  String getSessionId(CreateSessionRequest createSessionRequest);
+}
diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java
new file mode 100644
index 0000000000..0c0727294b
--- /dev/null
+++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.session;
+
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.client.EMRServerlessClientFactory;
+import org.opensearch.sql.spark.execution.statestore.SessionStorageService;
+import org.opensearch.sql.spark.execution.statestore.StatementStorageService;
+import org.opensearch.sql.spark.utils.RealTimeProvider;
+
+/**
+ * Singleton Class
+ *
+ * <p>

todo. add Session cache and Session sweeper. + */ +@RequiredArgsConstructor +public class SessionManager { + private final SessionStorageService sessionStorageService; + private final StatementStorageService statementStorageService; + private final EMRServerlessClientFactory emrServerlessClientFactory; + private final SessionConfigSupplier sessionConfigSupplier; + private final SessionIdProvider sessionIdProvider; + + public Session createSession( + CreateSessionRequest request, AsyncQueryRequestContext asyncQueryRequestContext) { + InteractiveSession session = + InteractiveSession.builder() + .sessionId(sessionIdProvider.getSessionId(request)) + .sessionStorageService(sessionStorageService) + .statementStorageService(statementStorageService) + .serverlessClient(emrServerlessClientFactory.getClient(request.getAccountId())) + .build(); + session.open(request, asyncQueryRequestContext); + return session; + } + + /** + * Retrieves the session associated with the given session ID. + * + *

This method is particularly used in scenarios where the data source encoded in the session + * ID is deemed untrustworthy. It allows for the safe retrieval of session details based on a + * known and validated session ID, rather than relying on potentially outdated data source + * information. + * + *

For more context on the use case and implementation, refer to the documentation here: + * https://tinyurl.com/bdh6s834 + * + * @param sessionId The unique identifier of the session. It is used to fetch the corresponding + * session details. + * @param dataSourceName The name of the data source. This parameter is utilized in the session + * retrieval process. + * @return An Optional containing the session associated with the provided session ID. Returns an + * empty Optional if no matching session is found. + */ + public Optional getSession(String sessionId, String dataSourceName) { + Optional model = sessionStorageService.getSession(sessionId, dataSourceName); + if (model.isPresent()) { + InteractiveSession session = + InteractiveSession.builder() + .sessionId(sessionId) + .sessionStorageService(sessionStorageService) + .statementStorageService(statementStorageService) + .serverlessClient(emrServerlessClientFactory.getClient(model.get().getAccountId())) + .sessionModel(model.get()) + .sessionInactivityTimeoutMilli( + sessionConfigSupplier.getSessionInactivityTimeoutMillis()) + .timeProvider(new RealTimeProvider()) + .build(); + return Optional.ofNullable(session); + } + return Optional.empty(); + } + + // todo, keep it only for testing, will remove it later. + public boolean isEnabled() { + return true; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java new file mode 100644 index 0000000000..d24cd3f3cd --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java @@ -0,0 +1,92 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import static org.opensearch.sql.spark.execution.session.SessionState.NOT_STARTED; +import static org.opensearch.sql.spark.execution.session.SessionType.INTERACTIVE; + +import com.google.common.collect.ImmutableMap; +import lombok.Data; +import lombok.experimental.SuperBuilder; +import org.opensearch.sql.spark.execution.statestore.StateModel; + +/** Session data in flint.ql.sessions index. 
*/ +@Data +@SuperBuilder +public class SessionModel extends StateModel { + + public static final String UNKNOWN = "unknown"; + + private final String version; + private final SessionType sessionType; + private final String sessionId; + private final SessionState sessionState; + // optional: accountId for EMRS cluster + private final String accountId; + private final String applicationId; + private final String jobId; + private final String datasourceName; + private final String error; + private final long lastUpdateTime; + + public static SessionModel of(SessionModel copy, ImmutableMap metadata) { + return builder() + .version(copy.version) + .sessionType(copy.sessionType) + .sessionId(copy.sessionId) + .sessionState(copy.sessionState) + .datasourceName(copy.datasourceName) + .accountId(copy.accountId) + .applicationId(copy.getApplicationId()) + .jobId(copy.jobId) + .error(UNKNOWN) + .lastUpdateTime(copy.getLastUpdateTime()) + .metadata(metadata) + .build(); + } + + public static SessionModel copyWithState( + SessionModel copy, SessionState state, ImmutableMap metadata) { + return builder() + .version(copy.version) + .sessionType(copy.sessionType) + .sessionId(copy.sessionId) + .sessionState(state) + .datasourceName(copy.datasourceName) + .accountId(copy.getAccountId()) + .applicationId(copy.getApplicationId()) + .jobId(copy.jobId) + .error(UNKNOWN) + .lastUpdateTime(copy.getLastUpdateTime()) + .metadata(metadata) + .build(); + } + + public static SessionModel initInteractiveSession( + String accountId, + String applicationId, + String jobId, + String sessionId, + String datasourceName) { + return builder() + .version("1.0") + .sessionType(INTERACTIVE) + .sessionId(sessionId) + .sessionState(NOT_STARTED) + .datasourceName(datasourceName) + .accountId(accountId) + .applicationId(applicationId) + .jobId(jobId) + .error(UNKNOWN) + .lastUpdateTime(System.currentTimeMillis()) + .build(); + } + + @Override + public String getId() { + return sessionId; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java new file mode 100644 index 0000000000..bd5d14c603 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java @@ -0,0 +1,43 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import com.google.common.collect.ImmutableList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Getter; + +@Getter +public enum SessionState { + NOT_STARTED("not_started"), + RUNNING("running"), + DEAD("dead"), + FAIL("fail"); + + public static List END_STATE = ImmutableList.of(DEAD, FAIL); + + private final String sessionState; + + SessionState(String sessionState) { + this.sessionState = sessionState; + } + + private static Map STATES = + Arrays.stream(SessionState.values()) + .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); + + public static SessionState fromString(String key) { + for (SessionState ss : SessionState.values()) { + if (ss.getSessionState().toLowerCase(Locale.ROOT).equals(key)) { + return ss; + } + } + throw new IllegalArgumentException("Invalid session state: " + key); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java 
b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java new file mode 100644 index 0000000000..10b9ce7bd5 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java @@ -0,0 +1,29 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import java.util.Locale; +import lombok.Getter; + +@Getter +public enum SessionType { + INTERACTIVE("interactive"); + + private final String sessionType; + + SessionType(String sessionType) { + this.sessionType = sessionType; + } + + public static SessionType fromString(String key) { + for (SessionType sType : SessionType.values()) { + if (sType.getSessionType().toLowerCase(Locale.ROOT).equals(key)) { + return sType; + } + } + throw new IllegalArgumentException("Invalid session type: " + key); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java new file mode 100644 index 0000000000..db2e96b3cd --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import lombok.Data; +import org.opensearch.sql.spark.rest.model.LangType; + +@Data +public class QueryRequest { + private final String queryId; + private final LangType langType; + private final String query; +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java new file mode 100644 index 0000000000..3237a5d372 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java @@ -0,0 +1,79 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import static org.opensearch.sql.spark.execution.statement.StatementModel.submitStatement; + +import lombok.Builder; +import lombok.Getter; +import lombok.Setter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.rest.model.LangType; + +/** Statement represent query to execute in session. One statement map to one session. */ +@Getter +@Builder +public class Statement { + private static final Logger LOG = LogManager.getLogger(); + + private final String sessionId; + // optional + private final String accountId; + private final String applicationId; + private final String jobId; + private final StatementId statementId; + private final LangType langType; + private final String datasourceName; + private final String query; + private final String queryId; + private final AsyncQueryRequestContext asyncQueryRequestContext; + private final StatementStorageService statementStorageService; + + @Setter private StatementModel statementModel; + + /** Open a statement. 
*/ + public void open() { + statementModel = + submitStatement( + sessionId, + accountId, + applicationId, + jobId, + statementId, + langType, + datasourceName, + query, + queryId); + statementModel = + statementStorageService.createStatement(statementModel, asyncQueryRequestContext); + } + + /** Cancel a statement. */ + public void cancel() { + StatementState statementState = statementModel.getStatementState(); + + if (statementState.equals(StatementState.SUCCESS) + || statementState.equals(StatementState.FAILED) + || statementState.equals(StatementState.TIMEOUT) + || statementState.equals(StatementState.CANCELLED)) { + String errorMsg = + String.format( + "can't cancel statement in %s state. statement: %s.", + statementState.getState(), statementId); + LOG.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + this.statementModel = + statementStorageService.updateStatementState(statementModel, StatementState.CANCELLED); + } + + public StatementState getStatementState() { + return statementModel.getStatementState(); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java new file mode 100644 index 0000000000..33284c4b3d --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import lombok.Data; + +@Data +public class StatementId { + private final String id; + + // construct statementId from queryId. + public static StatementId newStatementId(String qid) { + return new StatementId(qid); + } + + @Override + public String toString() { + return "statementId=" + id; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java new file mode 100644 index 0000000000..dc34af1d92 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java @@ -0,0 +1,107 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import static org.opensearch.sql.spark.execution.statement.StatementState.WAITING; + +import com.google.common.collect.ImmutableMap; +import lombok.Data; +import lombok.experimental.SuperBuilder; +import org.opensearch.sql.spark.execution.statestore.StateModel; +import org.opensearch.sql.spark.rest.model.LangType; + +/** Statement data in flint.ql.sessions index. 
*/ +@Data +@SuperBuilder +public class StatementModel extends StateModel { + public static final String UNKNOWN = ""; + + private final String version; + private final StatementState statementState; + private final StatementId statementId; + private final String sessionId; + // optional: accountId for EMRS cluster + private final String accountId; + private final String applicationId; + private final String jobId; + private final LangType langType; + private final String datasourceName; + private final String query; + private final String queryId; + private final long submitTime; + private final String error; + + public static StatementModel copy(StatementModel copy, ImmutableMap metadata) { + return builder() + .version("1.0") + .statementState(copy.statementState) + .statementId(copy.statementId) + .sessionId(copy.sessionId) + .accountId(copy.accountId) + .applicationId(copy.applicationId) + .jobId(copy.jobId) + .langType(copy.langType) + .datasourceName(copy.datasourceName) + .query(copy.query) + .queryId(copy.queryId) + .submitTime(copy.submitTime) + .error(copy.error) + .metadata(metadata) + .build(); + } + + public static StatementModel copyWithState( + StatementModel copy, StatementState state, ImmutableMap metadata) { + return builder() + .version("1.0") + .statementState(state) + .statementId(copy.statementId) + .sessionId(copy.sessionId) + .accountId(copy.accountId) + .applicationId(copy.applicationId) + .jobId(copy.jobId) + .langType(copy.langType) + .datasourceName(copy.datasourceName) + .query(copy.query) + .queryId(copy.queryId) + .submitTime(copy.submitTime) + .error(copy.error) + .metadata(metadata) + .build(); + } + + public static StatementModel submitStatement( + String sessionId, + String accountId, + String applicationId, + String jobId, + StatementId statementId, + LangType langType, + String datasourceName, + String query, + String queryId) { + return builder() + .version("1.0") + .statementState(WAITING) + .statementId(statementId) + .sessionId(sessionId) + .accountId(accountId) + .applicationId(applicationId) + .jobId(jobId) + .langType(langType) + .datasourceName(datasourceName) + .query(query) + .queryId(queryId) + .submitTime(System.currentTimeMillis()) + .error(UNKNOWN) + .build(); + } + + @Override + public String getId() { + return statementId.getId(); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java new file mode 100644 index 0000000000..d9103e5c03 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java @@ -0,0 +1,42 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Getter; + +/** {@link Statement} State. 
*/ +@Getter +public enum StatementState { + WAITING("waiting"), + RUNNING("running"), + SUCCESS("success"), + FAILED("failed"), + TIMEOUT("timeout"), + CANCELLED("cancelled"); + + private final String state; + + StatementState(String state) { + this.state = state; + } + + private static Map STATES = + Arrays.stream(StatementState.values()) + .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); + + public static StatementState fromString(String key) { + for (StatementState ss : StatementState.values()) { + if (ss.getState().toLowerCase(Locale.ROOT).equals(key)) { + return ss; + } + } + throw new IllegalArgumentException("Invalid statement state: " + key); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/CopyBuilder.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/CopyBuilder.java new file mode 100644 index 0000000000..e9de7064d5 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/CopyBuilder.java @@ -0,0 +1,13 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import com.google.common.collect.ImmutableMap; + +/** Interface for copying StateModel object. Refer {@link StateStore} */ +public interface CopyBuilder { + T of(T copy, ImmutableMap metadata); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtil.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtil.java new file mode 100644 index 0000000000..da9d166fcf --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtil.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; + +import java.util.Locale; +import lombok.experimental.UtilityClass; + +@UtilityClass +public class OpenSearchStateStoreUtil { + + public static String getIndexName(String datasourceName) { + return String.format( + "%s_%s", SPARK_REQUEST_BUFFER_INDEX_NAME, datasourceName.toLowerCase(Locale.ROOT)); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/SessionStorageService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/SessionStorageService.java new file mode 100644 index 0000000000..476e65714b --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/SessionStorageService.java @@ -0,0 +1,22 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import java.util.Optional; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; + +/** Interface for accessing {@link SessionModel} data storage. 
*/ +public interface SessionStorageService { + + SessionModel createSession( + SessionModel sessionModel, AsyncQueryRequestContext asyncQueryRequestContext); + + Optional getSession(String id, String datasourceName); + + SessionModel updateSessionState(SessionModel sessionModel, SessionState sessionState); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateCopyBuilder.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateCopyBuilder.java new file mode 100644 index 0000000000..1f38e5a1c5 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateCopyBuilder.java @@ -0,0 +1,12 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import com.google.common.collect.ImmutableMap; + +public interface StateCopyBuilder { + T of(T copy, S state, ImmutableMap metadata); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java new file mode 100644 index 0000000000..9d29299818 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java @@ -0,0 +1,37 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import com.google.common.collect.ImmutableMap; +import java.util.Optional; +import lombok.Builder.Default; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.experimental.SuperBuilder; + +@SuperBuilder +public abstract class StateModel { + @Getter @EqualsAndHashCode.Exclude @Default + private final ImmutableMap metadata = ImmutableMap.of(); + + public abstract String getId(); + + public Optional getMetadataItem(String name, Class type) { + if (metadata.containsKey(name)) { + Object value = metadata.get(name); + if (type.isInstance(value)) { + return Optional.of(type.cast(value)); + } else { + throw new RuntimeException( + String.format( + "The metadata field %s is an instance of %s instead of %s", + name, value.getClass(), type)); + } + } else { + return Optional.empty(); + } + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StatementStorageService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StatementStorageService.java new file mode 100644 index 0000000000..39f1ecf704 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/statestore/StatementStorageService.java @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import java.util.Optional; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; + +/** + * Interface for accessing {@link StatementModel} data storage. {@link StatementModel} is an + * abstraction over the query request within a Session. 
+ */ +public interface StatementStorageService { + + StatementModel createStatement( + StatementModel statementModel, AsyncQueryRequestContext asyncQueryRequestContext); + + StatementModel updateStatementState( + StatementModel oldStatementModel, StatementState statementState); + + Optional getStatement(String id, String datasourceName); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtil.java b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtil.java new file mode 100644 index 0000000000..2f8558d723 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtil.java @@ -0,0 +1,14 @@ +package org.opensearch.sql.spark.execution.xcontent; + +import com.google.common.collect.ImmutableMap; +import lombok.experimental.UtilityClass; + +@UtilityClass +public class XContentSerializerUtil { + public static final String SEQ_NO = "seqNo"; + public static final String PRIMARY_TERM = "primaryTerm"; + + public static ImmutableMap buildMetadata(long seqNo, long primaryTerm) { + return ImmutableMap.of(SEQ_NO, seqNo, PRIMARY_TERM, primaryTerm); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexClient.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexClient.java new file mode 100644 index 0000000000..af1a23d8d1 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexClient.java @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +/** Interface to abstract access to the FlintIndex */ +public interface FlintIndexClient { + void deleteIndex(String indexName); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java new file mode 100644 index 0000000000..0b00e8390b --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java @@ -0,0 +1,42 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import java.util.Optional; +import lombok.Builder; +import lombok.Data; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; + +@Data +@Builder +public class FlintIndexMetadata { + + public static final String META_KEY = "_meta"; + public static final String LATEST_ID_KEY = "latestId"; + public static final String KIND_KEY = "kind"; + public static final String INDEXED_COLUMNS_KEY = "indexedColumns"; + public static final String NAME_KEY = "name"; + public static final String OPTIONS_KEY = "options"; + public static final String SOURCE_KEY = "source"; + public static final String VERSION_KEY = "version"; + public static final String PROPERTIES_KEY = "properties"; + public static final String ENV_KEY = "env"; + public static final String SERVERLESS_EMR_JOB_ID = "SERVERLESS_EMR_JOB_ID"; + public static final String APP_ID = "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID"; + + private final String opensearchIndexName; + private final String jobId; + private final String appId; + private final String latestId; + private final String kind; + private final String source; + private final String name; + private final FlintIndexOptions flintIndexOptions; + + public Optional getLatestId() { + return 
Optional.ofNullable(latestId); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataService.java new file mode 100644 index 0000000000..ad274e429e --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataService.java @@ -0,0 +1,30 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import java.util.Map; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; + +/** Interface for FlintIndexMetadataReader */ +public interface FlintIndexMetadataService { + + /** + * Retrieves a map of {@link FlintIndexMetadata} instances matching the specified index pattern. + * + * @param indexPattern indexPattern. + * @return A map of {@link FlintIndexMetadata} instances against indexName, each providing + * metadata access for a matched index. Returns an empty list if no indices match the pattern. + */ + Map getFlintIndexMetadata(String indexPattern); + + /** + * Performs validation and updates flint index to manual refresh. + * + * @param indexName indexName. + * @param flintIndexOptions flintIndexOptions. + */ + void updateIndexToManualRefresh(String indexName, FlintIndexOptions flintIndexOptions); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java new file mode 100644 index 0000000000..3d6532b8ea --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java @@ -0,0 +1,62 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; +import lombok.Getter; + +/** Flint index state. */ +@Getter +public enum FlintIndexState { + // stable state + EMPTY("empty"), + // transitioning state + CREATING("creating"), + // stable state + ACTIVE("active"), + // transitioning state + REFRESHING("refreshing"), + // transitioning state + CANCELLING("cancelling"), + // transitioning state + DELETING("deleting"), + // stable state + DELETED("deleted"), + // transitioning state + RECOVERING("recovering"), + // transitioning state + VACUUMING("vacuuming"), + // transitioning state + UPDATING("updating"), + // stable state + FAILED("failed"), + // unknown state, if some state update in Spark side, not reflect in here. 
+ UNKNOWN("unknown"), + // special state that instructs StateStore to purge the index state doc + NONE("none"); + + private final String state; + + FlintIndexState(String state) { + this.state = state; + } + + private static Map STATES = + Arrays.stream(FlintIndexState.values()) + .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); + + public static FlintIndexState fromString(String key) { + for (FlintIndexState ss : FlintIndexState.values()) { + if (ss.getState().toLowerCase(Locale.ROOT).equals(key)) { + return ss; + } + } + return UNKNOWN; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java new file mode 100644 index 0000000000..3dec869b91 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java @@ -0,0 +1,62 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import com.google.common.collect.ImmutableMap; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.experimental.SuperBuilder; +import org.opensearch.sql.spark.execution.statestore.StateModel; + +/** Flint Index Model maintain the index state. */ +@Getter +@SuperBuilder +@EqualsAndHashCode(callSuper = false) +public class FlintIndexStateModel extends StateModel { + private final FlintIndexState indexState; + private final String accountId; + private final String applicationId; + private final String jobId; + private final String latestId; + private final String datasourceName; + private final long lastUpdateTime; + private final String error; + + public static FlintIndexStateModel copy( + FlintIndexStateModel copy, ImmutableMap metadata) { + return builder() + .indexState(copy.indexState) + .accountId(copy.accountId) + .applicationId(copy.applicationId) + .jobId(copy.jobId) + .latestId(copy.latestId) + .datasourceName(copy.datasourceName) + .lastUpdateTime(copy.lastUpdateTime) + .error(copy.error) + .metadata(metadata) + .build(); + } + + public static FlintIndexStateModel copyWithState( + FlintIndexStateModel copy, FlintIndexState state, ImmutableMap metadata) { + return builder() + .indexState(state) + .accountId(copy.accountId) + .applicationId(copy.applicationId) + .jobId(copy.jobId) + .latestId(copy.latestId) + .datasourceName(copy.datasourceName) + .lastUpdateTime(copy.lastUpdateTime) + .error(copy.error) + .metadata(metadata) + .build(); + } + + @Override + public String getId() { + return latestId; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModelService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModelService.java new file mode 100644 index 0000000000..94647f4e07 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModelService.java @@ -0,0 +1,25 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import java.util.Optional; + +/** + * Abstraction over flint index state storage. Flint index state will maintain the status of each + * flint index. 
+ */ +public interface FlintIndexStateModelService { + FlintIndexStateModel createFlintIndexStateModel(FlintIndexStateModel flintIndexStateModel); + + Optional<FlintIndexStateModel> getFlintIndexStateModel(String id, String datasourceName); + + FlintIndexStateModel updateFlintIndexState( + FlintIndexStateModel flintIndexStateModel, + FlintIndexState flintIndexState, + String datasourceName); + + boolean deleteFlintIndexStateModel(String id, String datasourceName); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexType.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexType.java new file mode 100644 index 0000000000..8922f638e0 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/FlintIndexType.java @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +/** Enum for the Flint index type. */ +public enum FlintIndexType { + SKIPPING("skipping_index"), + COVERING("index"), + MATERIALIZED_VIEW("materialized_view"); + + private final String suffix; + + FlintIndexType(String suffix) { + this.suffix = suffix; + } + + public String getSuffix() { + return this.suffix; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/IndexDMLResultStorageService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/IndexDMLResultStorageService.java new file mode 100644 index 0000000000..9053e5dbc8 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/IndexDMLResultStorageService.java @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; + +/** + * Abstraction over the IndexDMLResult storage. It stores the result of an IndexDML query execution.
+ */ +public interface IndexDMLResultStorageService { + IndexDMLResult createIndexDMLResult( + IndexDMLResult result, AsyncQueryRequestContext asyncQueryRequestContext); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java new file mode 100644 index 0000000000..244f4aee11 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java @@ -0,0 +1,199 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import static org.opensearch.sql.spark.client.EmrServerlessClientImpl.GENERIC_INTERNAL_SERVER_ERROR_MESSAGE; + +import com.amazonaws.services.emrserverless.model.ValidationException; +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import lombok.RequiredArgsConstructor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; +import org.opensearch.sql.spark.client.EMRServerlessClient; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +/** Flint Index Operation. */ +@RequiredArgsConstructor +public abstract class FlintIndexOp { + private static final Logger LOG = LogManager.getLogger(); + + private final FlintIndexStateModelService flintIndexStateModelService; + private final String datasourceName; + private final EMRServerlessClientFactory emrServerlessClientFactory; + + /** Apply operation on {@link FlintIndexMetadata} */ + public void apply(FlintIndexMetadata metadata) { + // todo, remove this logic after IndexState feature is enabled in Flint. + Optional<String> latestId = metadata.getLatestId(); + if (latestId.isEmpty()) { + takeActionWithoutOCC(metadata); + } else { + FlintIndexStateModel initialFlintIndexStateModel = getFlintIndexStateModel(latestId.get()); + // 1. validate the current state. + validateFlintIndexInitialState(initialFlintIndexStateModel); + + // 2. begin: move to the transitioning state. + FlintIndexStateModel transitionedFlintIndexStateModel = + moveToTransitioningState(initialFlintIndexStateModel); + // 3. run the operation. + try { + runOp(metadata, transitionedFlintIndexStateModel); + commit(transitionedFlintIndexStateModel); + } catch (Throwable e) { + LOG.error("Rolling back transient log due to transaction operation failure", e); + try { + flintIndexStateModelService.updateFlintIndexState( + transitionedFlintIndexStateModel, + initialFlintIndexStateModel.getIndexState(), + datasourceName); + } catch (Exception ex) { + LOG.error("Failed to rollback transient log", ex); + } + throw e; + } + } + } + + @NotNull + private FlintIndexStateModel getFlintIndexStateModel(String latestId) { + Optional<FlintIndexStateModel> flintIndexOptional = + flintIndexStateModelService.getFlintIndexStateModel(latestId, datasourceName); + if (flintIndexOptional.isEmpty()) { + String errorMsg = String.format(Locale.ROOT, "no state found. docId: %s", latestId); + LOG.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + return flintIndexOptional.get(); + } + + private void takeActionWithoutOCC(FlintIndexMetadata metadata) { + // take action without OCC (optimistic concurrency control). + FlintIndexStateModel fakeModel = + FlintIndexStateModel.builder() + .indexState(FlintIndexState.REFRESHING) + .applicationId(metadata.getAppId()) + .jobId(metadata.getJobId()) + .latestId("") + .datasourceName(datasourceName) + .lastUpdateTime(System.currentTimeMillis()) + .error("") + .build(); + runOp(metadata, fakeModel); + } + + private void validateFlintIndexInitialState(FlintIndexStateModel flintIndex) { + LOG.debug("Validating the state before the transaction."); + FlintIndexState currentState = flintIndex.getIndexState(); + if (!validate(currentState)) { + String errorMsg = + String.format(Locale.ROOT, "validate failed. unexpected state: [%s]", currentState); + LOG.error(errorMsg); + throw new IllegalStateException("Transaction failed as flint index is not in a valid state."); + } + } + + private FlintIndexStateModel moveToTransitioningState(FlintIndexStateModel flintIndex) { + LOG.debug("Moving to transitioning state before committing."); + FlintIndexState transitioningState = transitioningState(); + try { + flintIndex = + flintIndexStateModelService.updateFlintIndexState( + flintIndex, transitioningState(), datasourceName); + } catch (Exception e) { + String errorMsg = + String.format(Locale.ROOT, "Moving to transitioning state %s failed.", transitioningState); + LOG.error(errorMsg, e); + throw new IllegalStateException(errorMsg, e); + } + return flintIndex; + } + + private void commit(FlintIndexStateModel flintIndex) { + LOG.debug("Committing the transaction and moving to stable state."); + FlintIndexState stableState = stableState(); + try { + if (stableState == FlintIndexState.NONE) { + LOG.info("Deleting index state with docId: " + flintIndex.getLatestId()); + flintIndexStateModelService.deleteFlintIndexStateModel( + flintIndex.getLatestId(), datasourceName); + } else { + flintIndexStateModelService.updateFlintIndexState(flintIndex, stableState, datasourceName); + } + } catch (Exception e) { + String errorMsg = + String.format(Locale.ROOT, "commit failed. target stable state: [%s]", stableState); + LOG.error(errorMsg, e); + throw new IllegalStateException(errorMsg, e); + } + } + + /** + * Common operation shared by Alter (with auto_refresh=false) and Drop, hence implemented in + * FlintIndexOp. + */ + public void cancelStreamingJob(FlintIndexStateModel flintIndexStateModel) + throws InterruptedException, TimeoutException { + String applicationId = flintIndexStateModel.getApplicationId(); + String jobId = flintIndexStateModel.getJobId(); + EMRServerlessClient emrServerlessClient = + emrServerlessClientFactory.getClient(flintIndexStateModel.getAccountId()); + try { + emrServerlessClient.cancelJobRun( + flintIndexStateModel.getApplicationId(), flintIndexStateModel.getJobId(), true); + } catch (ValidationException e) { + // Thrown when the job is not in a cancellable state, i.e. already in a terminal state. + if (e.getMessage().contains("Job run is not in a cancellable state")) { + LOG.error(e); + return; + } else { + throw new RuntimeException(GENERIC_INTERNAL_SERVER_ERROR_MESSAGE); + } + } catch (Exception e) { + LOG.error(e); + throw new RuntimeException(GENERIC_INTERNAL_SERVER_ERROR_MESSAGE); + } + + // poll job state until cancelled or timed out.
+ String jobRunState = ""; + int count = 3; + while (count-- != 0) { + jobRunState = + emrServerlessClient.getJobRunResult(applicationId, jobId).getJobRun().getState(); + if (jobRunState.equalsIgnoreCase("Cancelled")) { + break; + } + TimeUnit.SECONDS.sleep(1); + } + if (!jobRunState.equalsIgnoreCase("Cancelled")) { + String errMsg = + "Cancel job timeout for Application ID: " + applicationId + ", Job ID: " + jobId; + LOG.error(errMsg); + throw new TimeoutException("Cancel job operation timed out."); + } + } + + /** + * Validate expected state. + * + *

return true if validate. + */ + abstract boolean validate(FlintIndexState state); + + /** get transitioningState */ + abstract FlintIndexState transitioningState(); + + abstract void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndex); + + /** get stableState */ + abstract FlintIndexState stableState(); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpAlter.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpAlter.java new file mode 100644 index 0000000000..9955320253 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpAlter.java @@ -0,0 +1,63 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import lombok.SneakyThrows; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +/** + * Index Operation for Altering the flint index. Only handles alter operation when + * auto_refresh=false. + */ +public class FlintIndexOpAlter extends FlintIndexOp { + private static final Logger LOG = LogManager.getLogger(FlintIndexOpAlter.class); + private final FlintIndexMetadataService flintIndexMetadataService; + private final FlintIndexOptions flintIndexOptions; + + public FlintIndexOpAlter( + FlintIndexOptions flintIndexOptions, + FlintIndexStateModelService flintIndexStateModelService, + String datasourceName, + EMRServerlessClientFactory emrServerlessClientFactory, + FlintIndexMetadataService flintIndexMetadataService) { + super(flintIndexStateModelService, datasourceName, emrServerlessClientFactory); + this.flintIndexMetadataService = flintIndexMetadataService; + this.flintIndexOptions = flintIndexOptions; + } + + @Override + protected boolean validate(FlintIndexState state) { + return state == FlintIndexState.ACTIVE || state == FlintIndexState.REFRESHING; + } + + @Override + FlintIndexState transitioningState() { + return FlintIndexState.UPDATING; + } + + @SneakyThrows + @Override + void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndexStateModel) { + LOG.debug( + "Running alter index operation for index: {}", flintIndexMetadata.getOpensearchIndexName()); + this.flintIndexMetadataService.updateIndexToManualRefresh( + flintIndexMetadata.getOpensearchIndexName(), flintIndexOptions); + cancelStreamingJob(flintIndexStateModel); + } + + @Override + FlintIndexState stableState() { + return FlintIndexState.ACTIVE; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java new file mode 100644 index 0000000000..02c8e39c66 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java @@ -0,0 +1,52 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import 
lombok.SneakyThrows; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +/** Cancels the refreshing job when the user cancels a REFRESH query from the UI. */ +public class FlintIndexOpCancel extends FlintIndexOp { + private static final Logger LOG = LogManager.getLogger(); + + public FlintIndexOpCancel( + FlintIndexStateModelService flintIndexStateModelService, + String datasourceName, + EMRServerlessClientFactory emrServerlessClientFactory) { + super(flintIndexStateModelService, datasourceName, emrServerlessClientFactory); + } + + // For a REFRESH query, the job is cancellable only in the refreshing state. + public boolean validate(FlintIndexState state) { + return state == FlintIndexState.REFRESHING; + } + + @Override + FlintIndexState transitioningState() { + return FlintIndexState.CANCELLING; + } + + /** Cancel the EMR-S job and wait (bounded polling) for it to reach the cancelled state. */ + @SneakyThrows + @Override + void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndexStateModel) { + LOG.debug( + "Performing cancel refresh operation for index: {}", + flintIndexMetadata.getOpensearchIndexName()); + cancelStreamingJob(flintIndexStateModel); + } + + @Override + FlintIndexState stableState() { + return FlintIndexState.ACTIVE; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDrop.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDrop.java new file mode 100644 index 0000000000..6613c29870 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDrop.java @@ -0,0 +1,54 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import lombok.SneakyThrows; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +/** Operation to drop a Flint index. */ +public class FlintIndexOpDrop extends FlintIndexOp { + private static final Logger LOG = LogManager.getLogger(); + + public FlintIndexOpDrop( + FlintIndexStateModelService flintIndexStateModelService, + String datasourceName, + EMRServerlessClientFactory emrServerlessClientFactory) { + super(flintIndexStateModelService, datasourceName, emrServerlessClientFactory); + } + + public boolean validate(FlintIndexState state) { + return state == FlintIndexState.REFRESHING + || state == FlintIndexState.EMPTY + || state == FlintIndexState.ACTIVE + || state == FlintIndexState.CREATING; + } + + @Override + FlintIndexState transitioningState() { + return FlintIndexState.DELETING; + } + + /** Cancel the EMR-S job and wait (bounded polling) for it to reach the cancelled state. */ + @SneakyThrows + @Override + void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndexStateModel) { + LOG.debug( + "Performing drop index operation for index: {}", + flintIndexMetadata.getOpensearchIndexName()); + cancelStreamingJob(flintIndexStateModel); + } + + @Override + FlintIndexState stableState() { + return FlintIndexState.DELETED; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactory.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactory.java new file mode 100644 index 0000000000..14cf9fa7c9 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactory.java @@ -0,0 +1,45 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +@RequiredArgsConstructor +public class FlintIndexOpFactory { + private final FlintIndexStateModelService flintIndexStateModelService; + private final FlintIndexClient flintIndexClient; + private final FlintIndexMetadataService flintIndexMetadataService; + private final EMRServerlessClientFactory emrServerlessClientFactory; + + public FlintIndexOpDrop getDrop(String datasource) { + return new FlintIndexOpDrop( + flintIndexStateModelService, datasource, emrServerlessClientFactory); + } + + public FlintIndexOpAlter getAlter(FlintIndexOptions flintIndexOptions, String datasource) { + return new FlintIndexOpAlter( + flintIndexOptions, + flintIndexStateModelService, + datasource, + emrServerlessClientFactory, + flintIndexMetadataService); + } + + public FlintIndexOpVacuum getVacuum(String datasource) { + return new FlintIndexOpVacuum( + flintIndexStateModelService, datasource, flintIndexClient, emrServerlessClientFactory); + } + + public FlintIndexOpCancel getCancel(String datasource) { + return new FlintIndexOpCancel( + flintIndexStateModelService, datasource, emrServerlessClientFactory); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuum.java b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuum.java new file mode 100644 index 0000000000..a0ef955adf --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuum.java @@ -0,0 +1,55 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +/** Flint index vacuum operation.
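+ * Valid only from the DELETED state; deletes the underlying OpenSearch index and then purges the
+ * index state document by committing the special NONE state.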
*/ +public class FlintIndexOpVacuum extends FlintIndexOp { + + private static final Logger LOG = LogManager.getLogger(); + + /** Flint index client used to delete the index. */ + private final FlintIndexClient flintIndexClient; + + public FlintIndexOpVacuum( + FlintIndexStateModelService flintIndexStateModelService, + String datasourceName, + FlintIndexClient flintIndexClient, + EMRServerlessClientFactory emrServerlessClientFactory) { + super(flintIndexStateModelService, datasourceName, emrServerlessClientFactory); + this.flintIndexClient = flintIndexClient; + } + + @Override + boolean validate(FlintIndexState state) { + return state == FlintIndexState.DELETED; + } + + @Override + FlintIndexState transitioningState() { + return FlintIndexState.VACUUMING; + } + + @Override + public void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndex) { + LOG.info("Vacuuming Flint index {}", flintIndexMetadata.getOpensearchIndexName()); + flintIndexClient.deleteIndex(flintIndexMetadata.getOpensearchIndexName()); + } + + @Override + FlintIndexState stableState() { + // Instruct StateStore to purge the index state doc + return FlintIndexState.NONE; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java new file mode 100644 index 0000000000..ab6305c835 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java @@ -0,0 +1,13 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.leasemanager; + +/** Concurrency limit exceeded. */ +public class ConcurrencyLimitExceededException extends RuntimeException { + public ConcurrencyLimitExceededException(String message) { + super(message); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java new file mode 100644 index 0000000000..6cc74ecdc5 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java @@ -0,0 +1,19 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.leasemanager; + +import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; + +/** Lease manager */ +public interface LeaseManager { + + /** + * Borrow a lease from the LeaseManager. Returning without an exception means the lease was + * granted. + * + * @throws ConcurrencyLimitExceededException when the concurrency limit is reached + */ + void borrow(LeaseRequest request); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java new file mode 100644 index 0000000000..190c033198 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.leasemanager.model; + +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.spark.dispatcher.model.JobType; + +/** Lease Request.
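+ * Carries the job type and datasource name that {@link
+ * org.opensearch.sql.spark.leasemanager.LeaseManager#borrow(LeaseRequest)} evaluates when granting
+ * a lease.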
*/ +@Getter +@RequiredArgsConstructor +public class LeaseRequest { + private final JobType jobType; + private final String datasourceName; +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/EmrMetrics.java b/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/EmrMetrics.java new file mode 100644 index 0000000000..2ec587bcc7 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/EmrMetrics.java @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.metrics; + +public enum EmrMetrics { + EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT, + EMR_GET_JOB_RESULT_FAILURE_COUNT, + EMR_START_JOB_REQUEST_FAILURE_COUNT, + EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT, + EMR_STREAMING_QUERY_JOBS_CREATION_COUNT, + EMR_BATCH_QUERY_JOBS_CREATION_COUNT; +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/MetricsService.java b/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/MetricsService.java new file mode 100644 index 0000000000..ca9cb9db4e --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/metrics/MetricsService.java @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.metrics; + +/** Interface to abstract the emission of metrics. */ +public interface MetricsService { + void incrementNumericalMetric(EmrMetrics emrMetrics); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/DataSourceSparkParameterComposer.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/DataSourceSparkParameterComposer.java new file mode 100644 index 0000000000..324889b6e0 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/DataSourceSparkParameterComposer.java @@ -0,0 +1,24 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +/** + * Compose Spark parameters specific to the {@link + * org.opensearch.sql.datasource.model.DataSourceType} based on the {@link DataSourceMetadata}. For + * the parameters not specific to {@link org.opensearch.sql.datasource.model.DataSourceType}, please + * use {@link GeneralSparkParameterComposer}.
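+ * <p>Implementations are registered per datasource type via {@link
+ * SparkParameterComposerCollection#register}.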
+ */ +public interface DataSourceSparkParameterComposer { + void compose( + DataSourceMetadata dataSourceMetadata, + SparkSubmitParameters sparkSubmitParameters, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/GeneralSparkParameterComposer.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/GeneralSparkParameterComposer.java new file mode 100644 index 0000000000..c3d46ba5c6 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/GeneralSparkParameterComposer.java @@ -0,0 +1,31 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +/** + * Compose spark submit parameters based on the request and context. For {@link + * org.opensearch.sql.datasource.model.DataSourceType} specific parameters, please use {@link + * DataSourceSparkParameterComposer}. See {@link SparkParameterComposerCollection}. + */ +public interface GeneralSparkParameterComposer { + + /** + * Modify sparkSubmitParameters based on dispatchQueryRequest and context. + * + * @param sparkSubmitParameters The parameters to compose; implementations of this method modify + *     this object. + * @param dispatchQueryRequest Request. Implementations may refer to it when composing + *     sparkSubmitParameters. + * @param context Context of the request. Implementations may refer to it when composing + *     sparkSubmitParameters. + */ + void compose( + SparkSubmitParameters sparkSubmitParameters, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollection.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollection.java new file mode 100644 index 0000000000..a6a88738bf --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollection.java @@ -0,0 +1,76 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +/** + * Stores Spark parameter composers and dispatches compose requests to each composer. Composers + * should be registered during initialization, for example in a Guice module. + */ +public class SparkParameterComposerCollection { + private Collection<GeneralSparkParameterComposer> generalComposers = new ArrayList<>(); + private Map<DataSourceType, Collection<DataSourceSparkParameterComposer>> datasourceComposers = + new HashMap<>(); + + /** + * Register composers for a specific DataSourceType. The registered composer is called only if + * the request is for that dataSourceType.
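+ * <p>A hypothetical registration, with the lambda serving as the composer (the config key is
+ * illustrative):
+ * <pre>{@code
+ * SparkParameterComposerCollection collection = new SparkParameterComposerCollection();
+ * collection.register(
+ *     DataSourceType.S3GLUE,
+ *     (metadata, params, request, context) ->
+ *         params.setConfigItem("spark.flint.datasource.name", metadata.getName()));
+ * }</pre>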
+ */ + public void register(DataSourceType dataSourceType, DataSourceSparkParameterComposer composer) { + if (!datasourceComposers.containsKey(dataSourceType)) { + datasourceComposers.put(dataSourceType, new LinkedList<>()); + } + datasourceComposers.get(dataSourceType).add(composer); + } + + /** + * Register a general composer. It is called whenever Spark parameters are generated, regardless + * of datasource type. + */ + public void register(GeneralSparkParameterComposer composer) { + generalComposers.add(composer); + } + + /** Execute the composers associated with the datasource type */ + public void composeByDataSource( + DataSourceMetadata dataSourceMetadata, + SparkSubmitParameters sparkSubmitParameters, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context) { + for (DataSourceSparkParameterComposer composer : + getComposersFor(dataSourceMetadata.getConnector())) { + composer.compose(dataSourceMetadata, sparkSubmitParameters, dispatchQueryRequest, context); + } + } + + /** Execute all the registered generic composers */ + public void compose( + SparkSubmitParameters sparkSubmitParameters, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context) { + for (GeneralSparkParameterComposer composer : generalComposers) { + composer.compose(sparkSubmitParameters, dispatchQueryRequest, context); + } + } + + private Collection<DataSourceSparkParameterComposer> getComposersFor(DataSourceType type) { + return datasourceComposers.getOrDefault(type, ImmutableList.of()); + } + + public boolean isComposerRegistered(DataSourceType type) { + return datasourceComposers.containsKey(type); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParameters.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParameters.java new file mode 100644 index 0000000000..2e142ed117 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParameters.java @@ -0,0 +1,52 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_CLASS_NAME; + +import java.util.LinkedHashMap; +import java.util.Map; +import lombok.Setter; + +/** Define Spark Submit Parameters.
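+ * toString() renders " --class {className}" followed by one " --conf key=value" entry per config
+ * item, plus any extra parameters appended at the end.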
*/ +public class SparkSubmitParameters { + public static final String SPACE = " "; + public static final String EQUALS = "="; + + @Setter private String className = DEFAULT_CLASS_NAME; + private Map<String, String> config = new LinkedHashMap<>(); + + /** Extra parameters appended at the end. */ + @Setter private String extraParameters; + + public void setConfigItem(String key, String value) { + config.put(key, value); + } + + public void deleteConfigItem(String key) { + config.remove(key); + } + + @Override + public String toString() { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append(" --class "); + stringBuilder.append(this.className); + stringBuilder.append(SPACE); + for (String key : config.keySet()) { + stringBuilder.append(" --conf "); + stringBuilder.append(key); + stringBuilder.append(EQUALS); + stringBuilder.append(config.get(key)); + stringBuilder.append(SPACE); + } + + if (extraParameters != null) { + stringBuilder.append(extraParameters); + } + return stringBuilder.toString(); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilder.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilder.java new file mode 100644 index 0000000000..3fe7d99373 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilder.java @@ -0,0 +1,173 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import static org.opensearch.sql.spark.data.constants.SparkConstants.AWS_SNAPSHOT_REPOSITORY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_GLUE_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_S3_AWS_CREDENTIALS_PROVIDER_VALUE; +import static org.opensearch.sql.spark.data.constants.SparkConstants.EMR_ASSUME_ROLE_CREDENTIALS_PROVIDER; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_CREDENTIALS_PROVIDER_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DEFAULT_AUTH; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DEFAULT_CLUSTER_NAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DEFAULT_HOST; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DEFAULT_PORT; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DEFAULT_SCHEME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_HOST_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_PORT_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_SCHEME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_QUERY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_REQUEST_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_SESSION_ID; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_PPL_EXTENSION; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_SQL_EXTENSION; +import static org.opensearch.sql.spark.data.constants.SparkConstants.GLUE_HIVE_CATALOG_FACTORY_CLASS; +import static
org.opensearch.sql.spark.data.constants.SparkConstants.HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.HIVE_METASTORE_CLASS_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ICEBERG_GLUE_CATALOG; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ICEBERG_SESSION_CATALOG; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ICEBERG_SPARK_EXTENSION; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ICEBERG_SPARK_RUNTIME_PACKAGE; +import static org.opensearch.sql.spark.data.constants.SparkConstants.JAVA_HOME_LOCATION; +import static org.opensearch.sql.spark.data.constants.SparkConstants.PPL_STANDALONE_PACKAGE; +import static org.opensearch.sql.spark.data.constants.SparkConstants.S3_AWS_CREDENTIALS_PROVIDER_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_CATALOG; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_CATALOG_CATALOG_IMPL; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_DRIVER_ENV_FLINT_CLUSTER_NAME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_DRIVER_ENV_JAVA_HOME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_EXECUTOR_ENV_FLINT_CLUSTER_NAME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_EXECUTOR_ENV_JAVA_HOME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_JARS_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_JAR_PACKAGES_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_JAR_REPOSITORIES_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_LAUNCHER_PACKAGE; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_SQL_EXTENSIONS_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_STANDALONE_PACKAGE; + +import lombok.Getter; +import org.apache.commons.text.StringEscapeUtils; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; + +public class SparkSubmitParametersBuilder { + private final SparkParameterComposerCollection sparkParameterComposerCollection; + @Getter private final SparkSubmitParameters sparkSubmitParameters; + + public SparkSubmitParametersBuilder( + SparkParameterComposerCollection sparkParameterComposerCollection) { + this.sparkParameterComposerCollection = sparkParameterComposerCollection; + sparkSubmitParameters = new SparkSubmitParameters(); + setDefaultConfigs(); + } + + private void setDefaultConfigs() { + setConfigItem(S3_AWS_CREDENTIALS_PROVIDER_KEY, DEFAULT_S3_AWS_CREDENTIALS_PROVIDER_VALUE); + setConfigItem( + HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY, + DEFAULT_GLUE_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY); + setConfigItem(SPARK_JARS_KEY, ICEBERG_SPARK_RUNTIME_PACKAGE); + setConfigItem( + SPARK_JAR_PACKAGES_KEY, + SPARK_STANDALONE_PACKAGE + "," + SPARK_LAUNCHER_PACKAGE + "," + PPL_STANDALONE_PACKAGE); + setConfigItem(SPARK_JAR_REPOSITORIES_KEY, AWS_SNAPSHOT_REPOSITORY); + setConfigItem(SPARK_DRIVER_ENV_JAVA_HOME_KEY, JAVA_HOME_LOCATION); + 
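+ // Executors use the same JAVA_HOME as the driver.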
setConfigItem(SPARK_EXECUTOR_ENV_JAVA_HOME_KEY, JAVA_HOME_LOCATION); + setConfigItem(SPARK_DRIVER_ENV_FLINT_CLUSTER_NAME_KEY, FLINT_DEFAULT_CLUSTER_NAME); + setConfigItem(SPARK_EXECUTOR_ENV_FLINT_CLUSTER_NAME_KEY, FLINT_DEFAULT_CLUSTER_NAME); + setConfigItem(FLINT_INDEX_STORE_HOST_KEY, FLINT_DEFAULT_HOST); + setConfigItem(FLINT_INDEX_STORE_PORT_KEY, FLINT_DEFAULT_PORT); + setConfigItem(FLINT_INDEX_STORE_SCHEME_KEY, FLINT_DEFAULT_SCHEME); + setConfigItem(FLINT_INDEX_STORE_AUTH_KEY, FLINT_DEFAULT_AUTH); + setConfigItem(FLINT_CREDENTIALS_PROVIDER_KEY, EMR_ASSUME_ROLE_CREDENTIALS_PROVIDER); + setConfigItem( + SPARK_SQL_EXTENSIONS_KEY, + ICEBERG_SPARK_EXTENSION + "," + FLINT_SQL_EXTENSION + "," + FLINT_PPL_EXTENSION); + setConfigItem(HIVE_METASTORE_CLASS_KEY, GLUE_HIVE_CATALOG_FACTORY_CLASS); + setConfigItem(SPARK_CATALOG, ICEBERG_SESSION_CATALOG); + setConfigItem(SPARK_CATALOG_CATALOG_IMPL, ICEBERG_GLUE_CATALOG); + } + + private void setConfigItem(String key, String value) { + sparkSubmitParameters.setConfigItem(key, value); + } + + public SparkSubmitParametersBuilder className(String className) { + sparkSubmitParameters.setClassName(className); + return this; + } + + /** clusterName will be used for logging and metrics in Spark */ + public SparkSubmitParametersBuilder clusterName(String clusterName) { + setConfigItem(SPARK_DRIVER_ENV_FLINT_CLUSTER_NAME_KEY, clusterName); + setConfigItem(SPARK_EXECUTOR_ENV_FLINT_CLUSTER_NAME_KEY, clusterName); + return this; + } + + /** + * For query in spark submit parameters to be parsed correctly, escape the characters in the + * query, then wrap the query with double quotes. + */ + public SparkSubmitParametersBuilder query(String query) { + String escapedQuery = StringEscapeUtils.escapeJava(query); + String wrappedQuery = "\"" + escapedQuery + "\""; + setConfigItem(FLINT_JOB_QUERY, wrappedQuery); + return this; + } + + public SparkSubmitParametersBuilder dataSource( + DataSourceMetadata metadata, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context) { + if (sparkParameterComposerCollection.isComposerRegistered(metadata.getConnector())) { + sparkParameterComposerCollection.composeByDataSource( + metadata, sparkSubmitParameters, dispatchQueryRequest, context); + return this; + } else { + throw new UnsupportedOperationException( + String.format( + "UnSupported datasource type for async queries:: %s", metadata.getConnector())); + } + } + + public SparkSubmitParametersBuilder structuredStreaming(Boolean isStructuredStreaming) { + if (isStructuredStreaming) { + setConfigItem("spark.flint.job.type", "streaming"); + } + return this; + } + + public SparkSubmitParametersBuilder extraParameters(String params) { + sparkSubmitParameters.setExtraParameters(params); + return this; + } + + public SparkSubmitParametersBuilder sessionExecution(String sessionId, String datasourceName) { + setConfigItem(FLINT_JOB_REQUEST_INDEX, OpenSearchStateStoreUtil.getIndexName(datasourceName)); + setConfigItem(FLINT_JOB_SESSION_ID, sessionId); + return this; + } + + public SparkSubmitParametersBuilder acceptModifier(SparkSubmitParameterModifier modifier) { + if (modifier != null) { + modifier.modifyParameters(this); + } + return this; + } + + public SparkSubmitParametersBuilder acceptComposers( + DispatchQueryRequest dispatchQueryRequest, AsyncQueryRequestContext context) { + sparkParameterComposerCollection.compose(sparkSubmitParameters, dispatchQueryRequest, context); + return this; + } + + @Override + public String toString() { + return 
sparkSubmitParameters.toString(); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderProvider.java new file mode 100644 index 0000000000..ccc9ffb680 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderProvider.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import lombok.RequiredArgsConstructor; + +/** Provide SparkSubmitParametersBuilder instance with SparkParameterComposerCollection injected */ +@RequiredArgsConstructor +public class SparkSubmitParametersBuilderProvider { + private final SparkParameterComposerCollection sparkParameterComposerCollection; + + public SparkSubmitParametersBuilder getSparkSubmitParametersBuilder() { + return new SparkSubmitParametersBuilder(sparkParameterComposerCollection); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java b/async-query-core/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java new file mode 100644 index 0000000000..e3184b7326 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java @@ -0,0 +1,29 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.response; + +import org.json.JSONObject; + +/** Interface for reading job execution result */ +public interface JobExecutionResponseReader { + /** + * Retrieves the job execution result based on the job ID. + * + * @param jobId The job ID. + * @param resultLocation The location identifier where the result is stored (optional). + * @return A JSONObject containing the result data. + */ + JSONObject getResultWithJobId(String jobId, String resultLocation); + + /** + * Retrieves the job execution result based on the query ID. + * + * @param queryId The query ID. + * @param resultLocation The location identifier where the result is stored (optional). + * @return A JSONObject containing the result data. 
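+ * <p>Counterpart of {@link #getResultWithJobId(String, String)}, looking the result up by query
+ * ID instead.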
+ */ + JSONObject getResultWithQueryId(String queryId, String resultLocation); +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java new file mode 100644 index 0000000000..e3250c7a58 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java @@ -0,0 +1,31 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.rest.model; + +import lombok.Data; +import org.apache.commons.lang3.Validate; + +@Data +public class CreateAsyncQueryRequest { + private String query; + private String datasource; + private LangType lang; + // optional sessionId + private String sessionId; + + public CreateAsyncQueryRequest(String query, String datasource, LangType lang) { + this.query = Validate.notNull(query, "Query can't be null"); + this.datasource = Validate.notNull(datasource, "Datasource can't be null"); + this.lang = Validate.notNull(lang, "lang can't be null"); + } + + public CreateAsyncQueryRequest(String query, String datasource, LangType lang, String sessionId) { + this.query = Validate.notNull(query, "Query can't be null"); + this.datasource = Validate.notNull(datasource, "Datasource can't be null"); + this.lang = Validate.notNull(lang, "lang can't be null"); + this.sessionId = sessionId; + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java new file mode 100644 index 0000000000..2f918308c4 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.rest.model; + +import lombok.AllArgsConstructor; +import lombok.Data; + +@Data +@AllArgsConstructor +public class CreateAsyncQueryResponse { + private String queryId; + // optional sessionId + private String sessionId; +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/LangType.java b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/LangType.java new file mode 100644 index 0000000000..51fa8d2b13 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/rest/model/LangType.java @@ -0,0 +1,36 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.rest.model; + +/** Language type accepted in async query apis. */ +public enum LangType { + SQL("sql"), + PPL("ppl"); + private final String text; + + LangType(String text) { + this.text = text; + } + + public String getText() { + return this.text; + } + + /** + * Get LangType from text. + * + * @param text text. + * @return LangType {@link LangType}. 
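+ * <p>Matching is case-insensitive; e.g. {@code fromString("SQL")} and {@code fromString("sql")}
+ * both return {@link #SQL}.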
+ */ + public static LangType fromString(String text) { + for (LangType langType : LangType.values()) { + if (langType.text.equalsIgnoreCase(text)) { + return langType; + } + } + throw new IllegalArgumentException("No LangType with text " + text + " found"); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java new file mode 100644 index 0000000000..438d2342b4 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java @@ -0,0 +1,25 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import lombok.experimental.UtilityClass; +import org.apache.commons.lang3.RandomStringUtils; + +@UtilityClass +public class IDUtils { + public static final int PREFIX_LEN = 10; + + public static String decode(String id) { + return new String(Base64.getDecoder().decode(id)).substring(PREFIX_LEN); + } + + public static String encode(String datasourceName) { + String randomId = RandomStringUtils.randomAlphanumeric(PREFIX_LEN) + datasourceName; + return Base64.getEncoder().encodeToString(randomId.getBytes(StandardCharsets.UTF_8)); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/utils/RealTimeProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/RealTimeProvider.java new file mode 100644 index 0000000000..b42e30532b --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/RealTimeProvider.java @@ -0,0 +1,13 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +public class RealTimeProvider implements TimeProvider { + @Override + public long currentEpochMillis() { + return System.currentTimeMillis(); + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java new file mode 100644 index 0000000000..a96e203cea --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java @@ -0,0 +1,353 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import lombok.Getter; +import lombok.experimental.UtilityClass; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.tree.ParseTree; +import org.opensearch.sql.common.antlr.CaseInsensitiveCharStream; +import org.opensearch.sql.common.antlr.SyntaxAnalysisErrorListener; +import org.opensearch.sql.common.antlr.SyntaxCheckException; +import org.opensearch.sql.spark.antlr.parser.FlintSparkSqlExtensionsBaseVisitor; +import org.opensearch.sql.spark.antlr.parser.FlintSparkSqlExtensionsLexer; +import org.opensearch.sql.spark.antlr.parser.FlintSparkSqlExtensionsParser; +import org.opensearch.sql.spark.antlr.parser.SqlBaseLexer; +import org.opensearch.sql.spark.antlr.parser.SqlBaseParser; +import org.opensearch.sql.spark.antlr.parser.SqlBaseParser.IdentifierReferenceContext; +import org.opensearch.sql.spark.antlr.parser.SqlBaseParserBaseVisitor; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; +import 
org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.flint.FlintIndexType; + +/** + * This utility class parses Spark SQL queries and provides helper functions to identify the index + * name, table name, and datasource name. + */ +@UtilityClass +public class SQLQueryUtils { + + public static List<FullyQualifiedTableName> extractFullyQualifiedTableNames(String sqlQuery) { + SqlBaseParser sqlBaseParser = + new SqlBaseParser( + new CommonTokenStream(new SqlBaseLexer(new CaseInsensitiveCharStream(sqlQuery)))); + sqlBaseParser.addErrorListener(new SyntaxAnalysisErrorListener()); + SqlBaseParser.StatementContext statement = sqlBaseParser.statement(); + SparkSqlTableNameVisitor sparkSqlTableNameVisitor = new SparkSqlTableNameVisitor(); + statement.accept(sparkSqlTableNameVisitor); + return sparkSqlTableNameVisitor.getFullyQualifiedTableNames(); + } + + public static IndexQueryDetails extractIndexDetails(String sqlQuery) { + FlintSparkSqlExtensionsParser flintSparkSqlExtensionsParser = + new FlintSparkSqlExtensionsParser( + new CommonTokenStream( + new FlintSparkSqlExtensionsLexer(new CaseInsensitiveCharStream(sqlQuery)))); + flintSparkSqlExtensionsParser.addErrorListener(new SyntaxAnalysisErrorListener()); + FlintSparkSqlExtensionsParser.SingleStatementContext singleStatementContext = + flintSparkSqlExtensionsParser.singleStatement(); + FlintSQLIndexDetailsVisitor flintSQLIndexDetailsVisitor = new FlintSQLIndexDetailsVisitor(); + singleStatementContext.accept(flintSQLIndexDetailsVisitor); + return flintSQLIndexDetailsVisitor.getIndexQueryDetailsBuilder().build(); + } + + public static boolean isFlintExtensionQuery(String sqlQuery) { + FlintSparkSqlExtensionsParser flintSparkSqlExtensionsParser = + new FlintSparkSqlExtensionsParser( + new CommonTokenStream( + new FlintSparkSqlExtensionsLexer(new CaseInsensitiveCharStream(sqlQuery)))); + flintSparkSqlExtensionsParser.addErrorListener(new SyntaxAnalysisErrorListener()); + try { + flintSparkSqlExtensionsParser.statement(); + return true; + } catch (SyntaxCheckException syntaxCheckException) { + return false; + } + } + + public static class SparkSqlTableNameVisitor extends SqlBaseParserBaseVisitor<Void> { + + @Getter private List<FullyQualifiedTableName> fullyQualifiedTableNames = new LinkedList<>(); + + public SparkSqlTableNameVisitor() {} + + @Override + public Void visitIdentifierReference(IdentifierReferenceContext ctx) { + fullyQualifiedTableNames.add(new FullyQualifiedTableName(ctx.getText())); + return super.visitIdentifierReference(ctx); + } + + @Override + public Void visitDropTable(SqlBaseParser.DropTableContext ctx) { + for (ParseTree parseTree : ctx.children) { + if (parseTree instanceof SqlBaseParser.IdentifierReferenceContext) { + fullyQualifiedTableNames.add(new FullyQualifiedTableName(parseTree.getText())); + } + } + return super.visitDropTable(ctx); + } + + @Override + public Void visitDescribeRelation(SqlBaseParser.DescribeRelationContext ctx) { + for (ParseTree parseTree : ctx.children) { + if (parseTree instanceof SqlBaseParser.IdentifierReferenceContext) { + fullyQualifiedTableNames.add(new FullyQualifiedTableName(parseTree.getText())); + } + } + return super.visitDescribeRelation(ctx); + } + + // Extract the table name from a CREATE TABLE statement.
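+ // The table name appears inside CreateTableHeaderContext, so scan its children for identifier
+ // references.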
+    @Override
+    public Void visitCreateTableHeader(SqlBaseParser.CreateTableHeaderContext ctx) {
+      for (ParseTree parseTree : ctx.children) {
+        if (parseTree instanceof SqlBaseParser.IdentifierReferenceContext) {
+          fullyQualifiedTableNames.add(new FullyQualifiedTableName(parseTree.getText()));
+        }
+      }
+      return super.visitCreateTableHeader(ctx);
+    }
+  }
+
+  public static class FlintSQLIndexDetailsVisitor
+      extends FlintSparkSqlExtensionsBaseVisitor<Void> {
+
+    @Getter private final IndexQueryDetails.IndexQueryDetailsBuilder indexQueryDetailsBuilder;
+
+    public FlintSQLIndexDetailsVisitor() {
+      this.indexQueryDetailsBuilder = new IndexQueryDetails.IndexQueryDetailsBuilder();
+    }
+
+    @Override
+    public Void visitIndexName(FlintSparkSqlExtensionsParser.IndexNameContext ctx) {
+      indexQueryDetailsBuilder.indexName(ctx.getText());
+      return super.visitIndexName(ctx);
+    }
+
+    @Override
+    public Void visitTableName(FlintSparkSqlExtensionsParser.TableNameContext ctx) {
+      indexQueryDetailsBuilder.fullyQualifiedTableName(new FullyQualifiedTableName(ctx.getText()));
+      return super.visitTableName(ctx);
+    }
+
+    @Override
+    public Void visitCreateSkippingIndexStatement(
+        FlintSparkSqlExtensionsParser.CreateSkippingIndexStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING);
+      visitPropertyList(ctx.propertyList());
+      return super.visitCreateSkippingIndexStatement(ctx);
+    }
+
+    @Override
+    public Void visitCreateCoveringIndexStatement(
+        FlintSparkSqlExtensionsParser.CreateCoveringIndexStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING);
+      visitPropertyList(ctx.propertyList());
+      return super.visitCreateCoveringIndexStatement(ctx);
+    }
+
+    @Override
+    public Void visitCreateMaterializedViewStatement(
+        FlintSparkSqlExtensionsParser.CreateMaterializedViewStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW);
+      indexQueryDetailsBuilder.mvName(ctx.mvName.getText());
+      visitPropertyList(ctx.propertyList());
+      return super.visitCreateMaterializedViewStatement(ctx);
+    }
+
+    @Override
+    public Void visitDropCoveringIndexStatement(
+        FlintSparkSqlExtensionsParser.DropCoveringIndexStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING);
+      return super.visitDropCoveringIndexStatement(ctx);
+    }
+
+    @Override
+    public Void visitDropSkippingIndexStatement(
+        FlintSparkSqlExtensionsParser.DropSkippingIndexStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING);
+      return super.visitDropSkippingIndexStatement(ctx);
+    }
+
+    @Override
+    public Void visitDropMaterializedViewStatement(
+        FlintSparkSqlExtensionsParser.DropMaterializedViewStatementContext ctx) {
+      indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP);
+      indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW);
+      indexQueryDetailsBuilder.mvName(ctx.mvName.getText());
+      return super.visitDropMaterializedViewStatement(ctx);
+    }
+
+    @Override
+    public Void visitVacuumSkippingIndexStatement(
+        FlintSparkSqlExtensionsParser.VacuumSkippingIndexStatementContext ctx) {
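+      // Note: as with the CREATE and DROP visitors above, the VACUUM visitors only
+      // record the action and index type on the builder; execution of the vacuum
+      // itself is handled elsewhere (the query dispatcher and its handlers).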
indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.VACUUM); + indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + return super.visitVacuumSkippingIndexStatement(ctx); + } + + @Override + public Void visitVacuumCoveringIndexStatement( + FlintSparkSqlExtensionsParser.VacuumCoveringIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.VACUUM); + indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + return super.visitVacuumCoveringIndexStatement(ctx); + } + + @Override + public Void visitVacuumMaterializedViewStatement( + FlintSparkSqlExtensionsParser.VacuumMaterializedViewStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.VACUUM); + indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); + indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); + return super.visitVacuumMaterializedViewStatement(ctx); + } + + @Override + public Void visitDescribeCoveringIndexStatement( + FlintSparkSqlExtensionsParser.DescribeCoveringIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); + indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + return super.visitDescribeCoveringIndexStatement(ctx); + } + + @Override + public Void visitDescribeSkippingIndexStatement( + FlintSparkSqlExtensionsParser.DescribeSkippingIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); + indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + return super.visitDescribeSkippingIndexStatement(ctx); + } + + @Override + public Void visitDescribeMaterializedViewStatement( + FlintSparkSqlExtensionsParser.DescribeMaterializedViewStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); + indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); + indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); + return super.visitDescribeMaterializedViewStatement(ctx); + } + + @Override + public Void visitShowCoveringIndexStatement( + FlintSparkSqlExtensionsParser.ShowCoveringIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.SHOW); + indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + return super.visitShowCoveringIndexStatement(ctx); + } + + @Override + public Void visitShowMaterializedViewStatement( + FlintSparkSqlExtensionsParser.ShowMaterializedViewStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.SHOW); + indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); + return super.visitShowMaterializedViewStatement(ctx); + } + + @Override + public Void visitRefreshCoveringIndexStatement( + FlintSparkSqlExtensionsParser.RefreshCoveringIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); + indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + return super.visitRefreshCoveringIndexStatement(ctx); + } + + @Override + public Void visitRefreshSkippingIndexStatement( + FlintSparkSqlExtensionsParser.RefreshSkippingIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); + indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + return super.visitRefreshSkippingIndexStatement(ctx); + } + + @Override + public Void visitRefreshMaterializedViewStatement( + 
FlintSparkSqlExtensionsParser.RefreshMaterializedViewStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); + indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); + indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); + return super.visitRefreshMaterializedViewStatement(ctx); + } + + @Override + public Void visitPropertyList(FlintSparkSqlExtensionsParser.PropertyListContext ctx) { + FlintIndexOptions flintIndexOptions = new FlintIndexOptions(); + if (ctx != null) { + ctx.property() + .forEach( + property -> + flintIndexOptions.setOption( + removeUnwantedQuotes(propertyKey(property.key).toLowerCase(Locale.ROOT)), + removeUnwantedQuotes( + propertyValue(property.value).toLowerCase(Locale.ROOT)))); + } + indexQueryDetailsBuilder.indexOptions(flintIndexOptions); + return null; + } + + @Override + public Void visitAlterCoveringIndexStatement( + FlintSparkSqlExtensionsParser.AlterCoveringIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.ALTER); + indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + visitPropertyList(ctx.propertyList()); + return super.visitAlterCoveringIndexStatement(ctx); + } + + @Override + public Void visitAlterSkippingIndexStatement( + FlintSparkSqlExtensionsParser.AlterSkippingIndexStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.ALTER); + indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + visitPropertyList(ctx.propertyList()); + return super.visitAlterSkippingIndexStatement(ctx); + } + + @Override + public Void visitAlterMaterializedViewStatement( + FlintSparkSqlExtensionsParser.AlterMaterializedViewStatementContext ctx) { + indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.ALTER); + indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); + indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); + visitPropertyList(ctx.propertyList()); + return super.visitAlterMaterializedViewStatement(ctx); + } + + private String propertyKey(FlintSparkSqlExtensionsParser.PropertyKeyContext key) { + if (key.STRING() != null) { + return key.STRING().getText(); + } else { + return key.getText(); + } + } + + private String propertyValue(FlintSparkSqlExtensionsParser.PropertyValueContext value) { + if (value.STRING() != null) { + return value.STRING().getText(); + } else if (value.booleanValue() != null) { + return value.getText(); + } else { + return value.getText(); + } + } + + // TODO: Currently escaping is handled partially. 
+ // Full implementation should mirror this: + // https://github.com/apache/spark/blob/v3.5.0/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/SparkParserUtils.scala#L35 + public String removeUnwantedQuotes(String input) { + return input.replaceAll("^\"|\"$", ""); + } + } +} diff --git a/async-query-core/src/main/java/org/opensearch/sql/spark/utils/TimeProvider.java b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/TimeProvider.java new file mode 100644 index 0000000000..13628af579 --- /dev/null +++ b/async-query-core/src/main/java/org/opensearch/sql/spark/utils/TimeProvider.java @@ -0,0 +1,10 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +public interface TimeProvider { + long currentEpochMillis(); +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/asyncquery/DummyTest.java b/async-query-core/src/test/java/org/opensearch/sql/asyncquery/DummyTest.java new file mode 100644 index 0000000000..8fa1cf49ec --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/asyncquery/DummyTest.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.asyncquery; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class DummyTest { + @Test + public void test() { + Dummy dummy = new Dummy(); + assertEquals("Hello!", dummy.hello()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryCoreIntegTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryCoreIntegTest.java new file mode 100644 index 0000000000..34ededc74d --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryCoreIntegTest.java @@ -0,0 +1,644 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_URI; +import static org.opensearch.sql.spark.dispatcher.IndexDMLHandler.DML_QUERY_JOB_ID; +import static org.opensearch.sql.spark.dispatcher.IndexDMLHandler.DROP_INDEX_JOB_ID; + +import com.amazonaws.services.emrserverless.AWSEMRServerless; +import com.amazonaws.services.emrserverless.model.CancelJobRunRequest; +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunRequest; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.StartJobRunRequest; +import com.amazonaws.services.emrserverless.model.StartJobRunResult; +import com.google.common.collect.ImmutableMap; +import java.util.Optional; +import 
org.json.JSONArray; +import org.json.JSONObject; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.datasources.auth.AuthenticationType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata.AsyncQueryJobMetadataBuilder; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.client.EmrServerlessClientImpl; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.QueryHandlerFactory; +import org.opensearch.sql.spark.dispatcher.QueryIdProvider; +import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; +import org.opensearch.sql.spark.dispatcher.model.JobType; +import org.opensearch.sql.spark.execution.session.CreateSessionRequest; +import org.opensearch.sql.spark.execution.session.SessionConfigSupplier; +import org.opensearch.sql.spark.execution.session.SessionIdProvider; +import org.opensearch.sql.spark.execution.session.SessionManager; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.statement.StatementId; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; +import org.opensearch.sql.spark.flint.IndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import org.opensearch.sql.spark.leasemanager.LeaseManager; +import org.opensearch.sql.spark.metrics.MetricsService; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; + +/** + * This tests async-query-core library end-to-end using mocked implementation of extension points. + * It intends to cover major happy cases. 
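+ * Extension points (the EMR Serverless client, storage services, and the session and
+ * query id providers) are replaced with mocks in {@code setUp()}, so the suite runs
+ * without an OpenSearch cluster or an AWS account.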
+ */
+@ExtendWith(MockitoExtension.class)
+public class AsyncQueryCoreIntegTest {
+
+  public static final String QUERY_ID = "QUERY_ID";
+  public static final String SESSION_ID = "SESSION_ID";
+  public static final String DATASOURCE_NAME = "DATASOURCE_NAME";
+  public static final String INDEX_NAME = "INDEX_NAME";
+  public static final String APPLICATION_ID = "APPLICATION_ID";
+  public static final String JOB_ID = "JOB_ID";
+  public static final String ACCOUNT_ID = "ACCOUNT_ID";
+  public static final String RESULT_INDEX = "RESULT_INDEX";
+  @Mock SparkSubmitParameterModifier sparkSubmitParameterModifier;
+  @Mock SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier;
+  @Mock SessionConfigSupplier sessionConfigSupplier;
+  @Mock LeaseManager leaseManager;
+  @Mock JobExecutionResponseReader jobExecutionResponseReader;
+  @Mock DataSourceService dataSourceService;
+  EMRServerlessClientFactory emrServerlessClientFactory;
+  @Mock AWSEMRServerless awsemrServerless;
+  @Mock SessionIdProvider sessionIdProvider;
+  @Mock QueryIdProvider queryIdProvider;
+  @Mock FlintIndexClient flintIndexClient;
+  @Mock AsyncQueryRequestContext asyncQueryRequestContext;
+  @Mock MetricsService metricsService;
+  @Mock SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider;
+
+  // storage services
+  @Mock AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService;
+  @Mock SessionStorageService sessionStorageService;
+  @Mock StatementStorageService statementStorageService;
+  @Mock FlintIndexMetadataService flintIndexMetadataService;
+  @Mock FlintIndexStateModelService flintIndexStateModelService;
+  @Mock IndexDMLResultStorageService indexDMLResultStorageService;
+
+  @Captor ArgumentCaptor<DispatchQueryRequest> dispatchQueryRequestArgumentCaptor;
+  @Captor ArgumentCaptor<CancelJobRunRequest> cancelJobRunRequestArgumentCaptor;
+  @Captor ArgumentCaptor<GetJobRunRequest> getJobRunRequestArgumentCaptor;
+  @Captor ArgumentCaptor<IndexDMLResult> indexDMLResultArgumentCaptor;
+  @Captor ArgumentCaptor<AsyncQueryJobMetadata> asyncQueryJobMetadataArgumentCaptor;
+  @Captor ArgumentCaptor<FlintIndexOptions> flintIndexOptionsArgumentCaptor;
+  @Captor ArgumentCaptor<StartJobRunRequest> startJobRunRequestArgumentCaptor;
+  @Captor ArgumentCaptor<CreateSessionRequest> createSessionRequestArgumentCaptor;
+
+  AsyncQueryExecutorService asyncQueryExecutorService;
+
+  @BeforeEach
+  public void setUp() {
+    emrServerlessClientFactory =
+        (accountId) -> new EmrServerlessClientImpl(awsemrServerless, metricsService);
+    SparkParameterComposerCollection collection = new SparkParameterComposerCollection();
+    collection.register(
+        DataSourceType.S3GLUE,
+        (dataSourceMetadata, sparkSubmitParameters, dispatchQueryRequest, context) ->
+            sparkSubmitParameters.setConfigItem(
+                "key.from.datasource.composer", "value.from.datasource.composer"));
+    collection.register(
+        (sparkSubmitParameters, dispatchQueryRequest, context) ->
+            sparkSubmitParameters.setConfigItem(
+                "key.from.generic.composer", "value.from.generic.composer"));
+    SessionManager sessionManager =
+        new SessionManager(
+            sessionStorageService,
+            statementStorageService,
+            emrServerlessClientFactory,
+            sessionConfigSupplier,
+            sessionIdProvider);
+    FlintIndexOpFactory flintIndexOpFactory =
+        new FlintIndexOpFactory(
+            flintIndexStateModelService,
+            flintIndexClient,
+            flintIndexMetadataService,
+            emrServerlessClientFactory);
+    QueryHandlerFactory queryHandlerFactory =
+        new QueryHandlerFactory(
+            jobExecutionResponseReader,
+            flintIndexMetadataService,
+            sessionManager,
+            leaseManager,
+            indexDMLResultStorageService,
+            flintIndexOpFactory,
+            emrServerlessClientFactory,
+            metricsService,
+            new
SparkSubmitParametersBuilderProvider(collection)); + SparkQueryDispatcher sparkQueryDispatcher = + new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + asyncQueryExecutorService = + new AsyncQueryExecutorServiceImpl( + asyncQueryJobMetadataStorageService, + sparkQueryDispatcher, + sparkExecutionEngineConfigSupplier); + } + + @Test + public void createDropIndexQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + String indexName = "flint_datasource_name_table_name_index_name_index"; + givenFlintIndexMetadataExists(indexName); + givenCancelJobRunSucceed(); + givenGetJobRunReturnJobRunWithState("Cancelled"); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "DROP INDEX index_name ON table_name", DATASOURCE_NAME, LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verifyCancelJobRunCalled(); + verifyCreateIndexDMLResultCalled(); + verifyStoreJobMetadataCalled(DML_QUERY_JOB_ID); + } + + @Test + public void createVacuumIndexQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + String indexName = "flint_datasource_name_table_name_index_name_index"; + givenFlintIndexMetadataExists(indexName); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "VACUUM INDEX index_name ON table_name", DATASOURCE_NAME, LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verify(flintIndexClient).deleteIndex(indexName); + verifyCreateIndexDMLResultCalled(); + verifyStoreJobMetadataCalled(DML_QUERY_JOB_ID); + } + + @Test + public void createAlterIndexQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + String indexName = "flint_datasource_name_table_name_index_name_index"; + givenFlintIndexMetadataExists(indexName); + givenCancelJobRunSucceed(); + givenGetJobRunReturnJobRunWithState("Cancelled"); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "ALTER INDEX index_name ON table_name WITH (auto_refresh = false)", + DATASOURCE_NAME, + LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verify(flintIndexMetadataService) + .updateIndexToManualRefresh(eq(indexName), flintIndexOptionsArgumentCaptor.capture()); + FlintIndexOptions flintIndexOptions = flintIndexOptionsArgumentCaptor.getValue(); + assertFalse(flintIndexOptions.autoRefresh()); + verifyCancelJobRunCalled(); + verifyCreateIndexDMLResultCalled(); + verifyStoreJobMetadataCalled(DML_QUERY_JOB_ID); + } + + @Test + public void createStreamingQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + when(awsemrServerless.startJobRun(any())) + .thenReturn(new StartJobRunResult().withApplicationId(APPLICATION_ID).withJobRunId(JOB_ID)); + + CreateAsyncQueryResponse 
response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "CREATE INDEX index_name ON table_name(l_orderkey, l_quantity)" + + " WITH (auto_refresh = true)", + DATASOURCE_NAME, + LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verify(leaseManager).borrow(any()); + verifyStartJobRunCalled(); + verifyStoreJobMetadataCalled(JOB_ID); + } + + private void verifyStartJobRunCalled() { + verify(awsemrServerless).startJobRun(startJobRunRequestArgumentCaptor.capture()); + StartJobRunRequest startJobRunRequest = startJobRunRequestArgumentCaptor.getValue(); + assertEquals(APPLICATION_ID, startJobRunRequest.getApplicationId()); + String submitParameters = + startJobRunRequest.getJobDriver().getSparkSubmit().getSparkSubmitParameters(); + assertTrue( + submitParameters.contains("key.from.datasource.composer=value.from.datasource.composer")); + assertTrue(submitParameters.contains("key.from.generic.composer=value.from.generic.composer")); + } + + @Test + public void createCreateIndexQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + when(awsemrServerless.startJobRun(any())) + .thenReturn(new StartJobRunResult().withApplicationId(APPLICATION_ID).withJobRunId(JOB_ID)); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "CREATE INDEX index_name ON table_name(l_orderkey, l_quantity)" + + " WITH (auto_refresh = false)", + DATASOURCE_NAME, + LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verifyStartJobRunCalled(); + verifyStoreJobMetadataCalled(JOB_ID); + } + + @Test + public void createRefreshQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + when(awsemrServerless.startJobRun(any())) + .thenReturn(new StartJobRunResult().withApplicationId(APPLICATION_ID).withJobRunId(JOB_ID)); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "REFRESH INDEX index_name ON table_name", DATASOURCE_NAME, LangType.SQL), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertNull(response.getSessionId()); + verifyGetQueryIdCalled(); + verify(leaseManager).borrow(any()); + verifyStartJobRunCalled(); + verifyStoreJobMetadataCalled(JOB_ID); + } + + @Test + public void createInteractiveQuery() { + givenSparkExecutionEngineConfigIsSupplied(); + givenValidDataSourceMetadataExist(); + givenSessionExists(); + when(queryIdProvider.getQueryId(any())).thenReturn(QUERY_ID); + when(sessionIdProvider.getSessionId(any())).thenReturn(SESSION_ID); + givenSessionExists(); // called twice + when(awsemrServerless.startJobRun(any())) + .thenReturn(new StartJobRunResult().withApplicationId(APPLICATION_ID).withJobRunId(JOB_ID)); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "SELECT * FROM table_name", DATASOURCE_NAME, LangType.SQL, SESSION_ID), + asyncQueryRequestContext); + + assertEquals(QUERY_ID, response.getQueryId()); + assertEquals(SESSION_ID, response.getSessionId()); + verifyGetQueryIdCalled(); + verifyGetSessionIdCalled(); + 
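+    // Unlike the index-DML cases above, an interactive query surfaces the session id in
+    // the response (asserted above) and must both borrow a lease and start an EMR-S job
+    // run for the session, which the remaining verifications cover.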
verify(leaseManager).borrow(any()); + verifyStartJobRunCalled(); + verifyStoreJobMetadataCalled(JOB_ID); + } + + @Test + public void getResultOfInteractiveQuery() { + givenJobMetadataExists( + getBaseAsyncQueryJobMetadataBuilder() + .queryId(QUERY_ID) + .sessionId(SESSION_ID) + .resultIndex(RESULT_INDEX)); + JSONObject result = getValidExecutionResponse(); + when(jobExecutionResponseReader.getResultWithQueryId(QUERY_ID, RESULT_INDEX)) + .thenReturn(result); + + AsyncQueryExecutionResponse response = asyncQueryExecutorService.getAsyncQueryResults(QUERY_ID); + + assertEquals("SUCCESS", response.getStatus()); + assertEquals(SESSION_ID, response.getSessionId()); + assertEquals("{col1:\"value\"}", response.getResults().get(0).toString()); + } + + @Test + public void getResultOfIndexDMLQuery() { + givenJobMetadataExists( + getBaseAsyncQueryJobMetadataBuilder() + .queryId(QUERY_ID) + .jobId(DROP_INDEX_JOB_ID) + .resultIndex(RESULT_INDEX)); + JSONObject result = getValidExecutionResponse(); + when(jobExecutionResponseReader.getResultWithQueryId(QUERY_ID, RESULT_INDEX)) + .thenReturn(result); + + AsyncQueryExecutionResponse response = asyncQueryExecutorService.getAsyncQueryResults(QUERY_ID); + + assertEquals("SUCCESS", response.getStatus()); + assertNull(response.getSessionId()); + assertEquals("{col1:\"value\"}", response.getResults().get(0).toString()); + } + + @Test + public void getResultOfRefreshQuery() { + givenJobMetadataExists( + getBaseAsyncQueryJobMetadataBuilder() + .queryId(QUERY_ID) + .jobId(JOB_ID) + .jobType(JobType.BATCH) + .resultIndex(RESULT_INDEX)); + JSONObject result = getValidExecutionResponse(); + when(jobExecutionResponseReader.getResultWithJobId(JOB_ID, RESULT_INDEX)).thenReturn(result); + + AsyncQueryExecutionResponse response = asyncQueryExecutorService.getAsyncQueryResults(QUERY_ID); + + assertEquals("SUCCESS", response.getStatus()); + assertNull(response.getSessionId()); + assertEquals("{col1:\"value\"}", response.getResults().get(0).toString()); + } + + @Test + public void cancelInteractiveQuery() { + givenJobMetadataExists(getBaseAsyncQueryJobMetadataBuilder().sessionId(SESSION_ID)); + givenSessionExists(); + when(sessionConfigSupplier.getSessionInactivityTimeoutMillis()).thenReturn(100000L); + final StatementModel statementModel = givenStatementExists(); + StatementModel canceledStatementModel = + StatementModel.copyWithState(statementModel, StatementState.CANCELLED, ImmutableMap.of()); + when(statementStorageService.updateStatementState(statementModel, StatementState.CANCELLED)) + .thenReturn(canceledStatementModel); + + String result = asyncQueryExecutorService.cancelQuery(QUERY_ID); + + assertEquals(QUERY_ID, result); + verify(statementStorageService).updateStatementState(statementModel, StatementState.CANCELLED); + } + + @Test + public void cancelIndexDMLQuery() { + givenJobMetadataExists(getBaseAsyncQueryJobMetadataBuilder().jobId(DROP_INDEX_JOB_ID)); + + assertThrows( + IllegalArgumentException.class, () -> asyncQueryExecutorService.cancelQuery(QUERY_ID)); + } + + @Test + public void cancelRefreshQuery() { + givenJobMetadataExists( + getBaseAsyncQueryJobMetadataBuilder().jobType(JobType.BATCH).indexName(INDEX_NAME)); + when(flintIndexMetadataService.getFlintIndexMetadata(INDEX_NAME)) + .thenReturn( + ImmutableMap.of( + INDEX_NAME, + FlintIndexMetadata.builder() + .latestId(null) + .appId(APPLICATION_ID) + .jobId(JOB_ID) + .build())); + givenCancelJobRunSucceed(); + when(awsemrServerless.getJobRun(any())) + .thenReturn( + new GetJobRunResult() + .withJobRun(new 
JobRun().withJobRunId(JOB_ID).withState("Cancelled"))); + + String result = asyncQueryExecutorService.cancelQuery(QUERY_ID); + + assertEquals(QUERY_ID, result); + verifyCancelJobRunCalled(); + verifyGetJobRunRequest(); + } + + @Test + public void cancelStreamingQuery() { + givenJobMetadataExists(getBaseAsyncQueryJobMetadataBuilder().jobType(JobType.STREAMING)); + + assertThrows( + IllegalArgumentException.class, () -> asyncQueryExecutorService.cancelQuery(QUERY_ID)); + } + + @Test + public void cancelBatchQuery() { + givenJobMetadataExists(getBaseAsyncQueryJobMetadataBuilder().jobId(JOB_ID)); + givenCancelJobRunSucceed(); + + String result = asyncQueryExecutorService.cancelQuery(QUERY_ID); + + assertEquals(QUERY_ID, result); + verifyCancelJobRunCalled(); + } + + private void givenSparkExecutionEngineConfigIsSupplied() { + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(asyncQueryRequestContext)) + .thenReturn( + SparkExecutionEngineConfig.builder() + .applicationId(APPLICATION_ID) + .accountId(ACCOUNT_ID) + .sparkSubmitParameterModifier(sparkSubmitParameterModifier) + .build()); + } + + private void givenFlintIndexMetadataExists(String indexName) { + when(flintIndexMetadataService.getFlintIndexMetadata(indexName)) + .thenReturn( + ImmutableMap.of( + indexName, + FlintIndexMetadata.builder() + .appId(APPLICATION_ID) + .jobId(JOB_ID) + .opensearchIndexName(indexName) + .build())); + } + + private void givenValidDataSourceMetadataExist() { + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + DATASOURCE_NAME, asyncQueryRequestContext)) + .thenReturn( + new DataSourceMetadata.Builder() + .setName(DATASOURCE_NAME) + .setConnector(DataSourceType.S3GLUE) + .setProperties( + ImmutableMap.builder() + .put(GLUE_INDEX_STORE_OPENSEARCH_URI, "https://open.search.cluster:9200/") + .put(GLUE_INDEX_STORE_OPENSEARCH_AUTH, AuthenticationType.NOAUTH.getName()) + .build()) + .build()); + } + + private void givenGetJobRunReturnJobRunWithState(String state) { + when(awsemrServerless.getJobRun(any())) + .thenReturn( + new GetJobRunResult() + .withJobRun( + new JobRun() + .withJobRunId(JOB_ID) + .withApplicationId(APPLICATION_ID) + .withState(state))); + } + + private void verifyGetQueryIdCalled() { + verify(queryIdProvider).getQueryId(dispatchQueryRequestArgumentCaptor.capture()); + DispatchQueryRequest dispatchQueryRequest = dispatchQueryRequestArgumentCaptor.getValue(); + assertEquals(ACCOUNT_ID, dispatchQueryRequest.getAccountId()); + assertEquals(APPLICATION_ID, dispatchQueryRequest.getApplicationId()); + } + + private void verifyGetSessionIdCalled() { + verify(sessionIdProvider).getSessionId(createSessionRequestArgumentCaptor.capture()); + CreateSessionRequest createSessionRequest = createSessionRequestArgumentCaptor.getValue(); + assertEquals(ACCOUNT_ID, createSessionRequest.getAccountId()); + assertEquals(APPLICATION_ID, createSessionRequest.getApplicationId()); + } + + private void verifyStoreJobMetadataCalled(String jobId) { + verify(asyncQueryJobMetadataStorageService) + .storeJobMetadata( + asyncQueryJobMetadataArgumentCaptor.capture(), eq(asyncQueryRequestContext)); + AsyncQueryJobMetadata asyncQueryJobMetadata = asyncQueryJobMetadataArgumentCaptor.getValue(); + assertEquals(QUERY_ID, asyncQueryJobMetadata.getQueryId()); + assertEquals(jobId, asyncQueryJobMetadata.getJobId()); + assertEquals(DATASOURCE_NAME, asyncQueryJobMetadata.getDatasourceName()); + } + + private void verifyCreateIndexDMLResultCalled() { + verify(indexDMLResultStorageService) + 
.createIndexDMLResult(indexDMLResultArgumentCaptor.capture(), eq(asyncQueryRequestContext)); + IndexDMLResult indexDMLResult = indexDMLResultArgumentCaptor.getValue(); + assertEquals(QUERY_ID, indexDMLResult.getQueryId()); + assertEquals(DATASOURCE_NAME, indexDMLResult.getDatasourceName()); + assertEquals("SUCCESS", indexDMLResult.getStatus()); + assertEquals("", indexDMLResult.getError()); + } + + private void verifyCancelJobRunCalled() { + verify(awsemrServerless).cancelJobRun(cancelJobRunRequestArgumentCaptor.capture()); + CancelJobRunRequest cancelJobRunRequest = cancelJobRunRequestArgumentCaptor.getValue(); + assertEquals(JOB_ID, cancelJobRunRequest.getJobRunId()); + assertEquals(APPLICATION_ID, cancelJobRunRequest.getApplicationId()); + } + + private void verifyGetJobRunRequest() { + verify(awsemrServerless).getJobRun(getJobRunRequestArgumentCaptor.capture()); + GetJobRunRequest getJobRunRequest = getJobRunRequestArgumentCaptor.getValue(); + assertEquals(APPLICATION_ID, getJobRunRequest.getApplicationId()); + assertEquals(JOB_ID, getJobRunRequest.getJobRunId()); + } + + private StatementModel givenStatementExists() { + StatementModel statementModel = + StatementModel.builder() + .queryId(QUERY_ID) + .statementId(new StatementId(QUERY_ID)) + .statementState(StatementState.RUNNING) + .build(); + when(statementStorageService.getStatement(QUERY_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(statementModel)); + return statementModel; + } + + private void givenSessionExists() { + when(sessionStorageService.getSession(SESSION_ID, DATASOURCE_NAME)) + .thenReturn( + Optional.of( + SessionModel.builder() + .sessionId(SESSION_ID) + .datasourceName(DATASOURCE_NAME) + .jobId(JOB_ID) + .sessionState(SessionState.RUNNING) + .build())); + } + + private AsyncQueryJobMetadataBuilder getBaseAsyncQueryJobMetadataBuilder() { + return AsyncQueryJobMetadata.builder() + .applicationId(APPLICATION_ID) + .queryId(QUERY_ID) + .datasourceName(DATASOURCE_NAME); + } + + private void givenJobMetadataExists(AsyncQueryJobMetadataBuilder metadataBuilder) { + AsyncQueryJobMetadata metadata = metadataBuilder.build(); + when(asyncQueryJobMetadataStorageService.getJobMetadata(metadata.getQueryId())) + .thenReturn(Optional.of(metadata)); + } + + private void givenCancelJobRunSucceed() { + when(awsemrServerless.cancelJobRun(any())) + .thenReturn( + new CancelJobRunResult().withJobRunId(JOB_ID).withApplicationId(APPLICATION_ID)); + } + + private static JSONObject getValidExecutionResponse() { + return new JSONObject() + .put( + "data", + new JSONObject() + .put("status", "SUCCESS") + .put( + "schema", + new JSONArray() + .put( + new JSONObject().put("column_name", "col1").put("data_type", "string"))) + .put("result", new JSONArray().put("{'col1': 'value'}"))); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java new file mode 100644 index 0000000000..dbc51bb0ad --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java @@ -0,0 +1,236 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.utils.TestUtils.getJson; + +import com.amazonaws.services.emrserverless.model.JobRunState; +import java.io.IOException; +import java.util.HashMap; +import java.util.Optional; +import org.json.JSONObject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; +import org.opensearch.sql.spark.dispatcher.model.JobType; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; + +@ExtendWith(MockitoExtension.class) +public class AsyncQueryExecutorServiceImplTest { + + @Mock private SparkQueryDispatcher sparkQueryDispatcher; + @Mock private AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService; + private AsyncQueryExecutorService jobExecutorService; + + @Mock private SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier; + @Mock private SparkSubmitParameterModifier sparkSubmitParameterModifier; + @Mock private AsyncQueryRequestContext asyncQueryRequestContext; + private final String QUERY_ID = "QUERY_ID"; + + @BeforeEach + void setUp() { + jobExecutorService = + new AsyncQueryExecutorServiceImpl( + asyncQueryJobMetadataStorageService, + sparkQueryDispatcher, + sparkExecutionEngineConfigSupplier); + } + + @Test + void testCreateAsyncQuery() { + CreateAsyncQueryRequest createAsyncQueryRequest = + new CreateAsyncQueryRequest( + "select * from my_glue.default.http_logs", "my_glue", LangType.SQL); + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())) + .thenReturn( + SparkExecutionEngineConfig.builder() + .applicationId(EMRS_APPLICATION_ID) + .region("eu-west-1") + .executionRoleARN(EMRS_EXECUTION_ROLE) + .sparkSubmitParameterModifier(sparkSubmitParameterModifier) + .clusterName(TEST_CLUSTER_NAME) + .build()); + DispatchQueryRequest expectedDispatchQueryRequest = + DispatchQueryRequest.builder() + .applicationId(EMRS_APPLICATION_ID) + .query("select * from my_glue.default.http_logs") + .datasource("my_glue") + .langType(LangType.SQL) + .executionRoleARN(EMRS_EXECUTION_ROLE) + .clusterName(TEST_CLUSTER_NAME) + 
.sparkSubmitParameterModifier(sparkSubmitParameterModifier)
+            .build();
+    when(sparkQueryDispatcher.dispatch(expectedDispatchQueryRequest, asyncQueryRequestContext))
+        .thenReturn(
+            DispatchQueryResponse.builder()
+                .queryId(QUERY_ID)
+                .jobId(EMR_JOB_ID)
+                .jobType(JobType.INTERACTIVE)
+                .build());
+
+    CreateAsyncQueryResponse createAsyncQueryResponse =
+        jobExecutorService.createAsyncQuery(createAsyncQueryRequest, asyncQueryRequestContext);
+
+    verify(asyncQueryJobMetadataStorageService, times(1))
+        .storeJobMetadata(getAsyncQueryJobMetadata(), asyncQueryRequestContext);
+    verify(sparkExecutionEngineConfigSupplier, times(1))
+        .getSparkExecutionEngineConfig(asyncQueryRequestContext);
+    verify(sparkQueryDispatcher, times(1))
+        .dispatch(expectedDispatchQueryRequest, asyncQueryRequestContext);
+    Assertions.assertEquals(QUERY_ID, createAsyncQueryResponse.getQueryId());
+  }
+
+  @Test
+  void testCreateAsyncQueryWithExtraSparkSubmitParameter() {
+    SparkSubmitParameterModifier modifier =
+        (builder) -> builder.extraParameters("--conf spark.dynamicAllocation.enabled=false");
+    when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any()))
+        .thenReturn(
+            SparkExecutionEngineConfig.builder()
+                .applicationId(EMRS_APPLICATION_ID)
+                .region("eu-west-1")
+                .executionRoleARN(EMRS_EXECUTION_ROLE)
+                .sparkSubmitParameterModifier(modifier)
+                .clusterName(TEST_CLUSTER_NAME)
+                .build());
+    when(sparkQueryDispatcher.dispatch(any(), any()))
+        .thenReturn(
+            DispatchQueryResponse.builder()
+                .queryId(QUERY_ID)
+                .jobId(EMR_JOB_ID)
+                .jobType(JobType.INTERACTIVE)
+                .build());
+
+    jobExecutorService.createAsyncQuery(
+        new CreateAsyncQueryRequest(
+            "select * from my_glue.default.http_logs", "my_glue", LangType.SQL),
+        asyncQueryRequestContext);
+
+    verify(sparkQueryDispatcher, times(1))
+        .dispatch(
+            argThat(actualReq -> actualReq.getSparkSubmitParameterModifier().equals(modifier)),
+            eq(asyncQueryRequestContext));
+  }
+
+  @Test
+  void testGetAsyncQueryResultsWithJobNotFoundException() {
+    when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID))
+        .thenReturn(Optional.empty());
+
+    AsyncQueryNotFoundException asyncQueryNotFoundException =
+        Assertions.assertThrows(
+            AsyncQueryNotFoundException.class,
+            () -> jobExecutorService.getAsyncQueryResults(EMR_JOB_ID));
+
+    Assertions.assertEquals(
+        "QueryId: " + EMR_JOB_ID + " not found", asyncQueryNotFoundException.getMessage());
+    verifyNoInteractions(sparkQueryDispatcher);
+    verifyNoInteractions(sparkExecutionEngineConfigSupplier);
+  }
+
+  @Test
+  void testGetAsyncQueryResultsWithInProgressJob() {
+    when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID))
+        .thenReturn(Optional.of(getAsyncQueryJobMetadata()));
+    JSONObject jobResult = new JSONObject();
+    jobResult.put("status", JobRunState.PENDING.toString());
+    when(sparkQueryDispatcher.getQueryResponse(getAsyncQueryJobMetadata())).thenReturn(jobResult);
+
+    AsyncQueryExecutionResponse asyncQueryExecutionResponse =
+        jobExecutorService.getAsyncQueryResults(EMR_JOB_ID);
+
+    Assertions.assertNull(asyncQueryExecutionResponse.getResults());
+    Assertions.assertNull(asyncQueryExecutionResponse.getSchema());
+    Assertions.assertEquals("PENDING", asyncQueryExecutionResponse.getStatus());
+    verifyNoInteractions(sparkExecutionEngineConfigSupplier);
+  }
+
+  @Test
+  void
testGetAsyncQueryResultsWithSuccessJob() throws IOException { + when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) + .thenReturn(Optional.of(getAsyncQueryJobMetadata())); + JSONObject jobResult = new JSONObject(getJson("select_query_response.json")); + jobResult.put("status", JobRunState.SUCCESS.toString()); + when(sparkQueryDispatcher.getQueryResponse(getAsyncQueryJobMetadata())).thenReturn(jobResult); + + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + jobExecutorService.getAsyncQueryResults(EMR_JOB_ID); + + Assertions.assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + Assertions.assertEquals(1, asyncQueryExecutionResponse.getSchema().getColumns().size()); + Assertions.assertEquals( + "1", asyncQueryExecutionResponse.getSchema().getColumns().get(0).getName()); + Assertions.assertEquals( + 1, + ((HashMap) asyncQueryExecutionResponse.getResults().get(0).value()) + .get("1")); + verifyNoInteractions(sparkExecutionEngineConfigSupplier); + } + + @Test + void testCancelJobWithJobNotFound() { + when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) + .thenReturn(Optional.empty()); + + AsyncQueryNotFoundException asyncQueryNotFoundException = + Assertions.assertThrows( + AsyncQueryNotFoundException.class, () -> jobExecutorService.cancelQuery(EMR_JOB_ID)); + + Assertions.assertEquals( + "QueryId: " + EMR_JOB_ID + " not found", asyncQueryNotFoundException.getMessage()); + verifyNoInteractions(sparkQueryDispatcher); + verifyNoInteractions(sparkExecutionEngineConfigSupplier); + } + + @Test + void testCancelJob() { + when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) + .thenReturn(Optional.of(getAsyncQueryJobMetadata())); + when(sparkQueryDispatcher.cancelJob(getAsyncQueryJobMetadata())).thenReturn(EMR_JOB_ID); + + String jobId = jobExecutorService.cancelQuery(EMR_JOB_ID); + + Assertions.assertEquals(EMR_JOB_ID, jobId); + verifyNoInteractions(sparkExecutionEngineConfigSupplier); + } + + private AsyncQueryJobMetadata getAsyncQueryJobMetadata() { + return AsyncQueryJobMetadata.builder() + .queryId(QUERY_ID) + .applicationId(EMRS_APPLICATION_ID) + .jobId(EMR_JOB_ID) + .build(); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImplTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImplTest.java new file mode 100644 index 0000000000..309d29c600 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/client/EMRServerlessClientFactoryImplTest.java @@ -0,0 +1,105 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.client; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier; +import org.opensearch.sql.spark.constants.TestConstants; +import org.opensearch.sql.spark.metrics.MetricsService; + +@ExtendWith(MockitoExtension.class) +public class EMRServerlessClientFactoryImplTest { + + public static final String ACCOUNT_ID = "accountId"; + @Mock private SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier; + @Mock private 
MetricsService metricsService; + + @Test + public void testGetClient() { + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())) + .thenReturn(createSparkExecutionEngineConfig()); + EMRServerlessClientFactory emrServerlessClientFactory = + new EMRServerlessClientFactoryImpl(sparkExecutionEngineConfigSupplier, metricsService); + + EMRServerlessClient emrserverlessClient = emrServerlessClientFactory.getClient(ACCOUNT_ID); + + Assertions.assertNotNull(emrserverlessClient); + } + + @Test + public void testGetClientWithChangeInSetting() { + SparkExecutionEngineConfig sparkExecutionEngineConfig = createSparkExecutionEngineConfig(); + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())) + .thenReturn(sparkExecutionEngineConfig); + EMRServerlessClientFactory emrServerlessClientFactory = + new EMRServerlessClientFactoryImpl(sparkExecutionEngineConfigSupplier, metricsService); + EMRServerlessClient emrserverlessClient = emrServerlessClientFactory.getClient(ACCOUNT_ID); + Assertions.assertNotNull(emrserverlessClient); + + EMRServerlessClient emrServerlessClient1 = emrServerlessClientFactory.getClient(ACCOUNT_ID); + Assertions.assertEquals(emrServerlessClient1, emrserverlessClient); + + sparkExecutionEngineConfig.setRegion(TestConstants.US_WEST_REGION); + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())) + .thenReturn(sparkExecutionEngineConfig); + EMRServerlessClient emrServerlessClient2 = emrServerlessClientFactory.getClient(ACCOUNT_ID); + Assertions.assertNotEquals(emrServerlessClient2, emrserverlessClient); + Assertions.assertNotEquals(emrServerlessClient2, emrServerlessClient1); + } + + @Test + public void testGetClientWithException() { + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())).thenReturn(null); + EMRServerlessClientFactory emrServerlessClientFactory = + new EMRServerlessClientFactoryImpl(sparkExecutionEngineConfigSupplier, metricsService); + + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, () -> emrServerlessClientFactory.getClient(ACCOUNT_ID)); + + Assertions.assertEquals( + "Async Query APIs are disabled. Please configure plugins.query.executionengine.spark.config" + + " in cluster settings to enable them.", + illegalArgumentException.getMessage()); + } + + @Test + public void testGetClientWithExceptionWithNullRegion() { + SparkExecutionEngineConfig sparkExecutionEngineConfig = + SparkExecutionEngineConfig.builder().build(); + when(sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(any())) + .thenReturn(sparkExecutionEngineConfig); + EMRServerlessClientFactory emrServerlessClientFactory = + new EMRServerlessClientFactoryImpl(sparkExecutionEngineConfigSupplier, metricsService); + + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, () -> emrServerlessClientFactory.getClient(ACCOUNT_ID)); + + Assertions.assertEquals( + "Async Query APIs are disabled. 
Please configure plugins.query.executionengine.spark.config" + + " in cluster settings to enable them.", + illegalArgumentException.getMessage()); + } + + private SparkExecutionEngineConfig createSparkExecutionEngineConfig() { + return SparkExecutionEngineConfig.builder() + .region(TestConstants.US_EAST_REGION) + .executionRoleARN(TestConstants.EMRS_EXECUTION_ROLE) + .sparkSubmitParameterModifier((sparkSubmitParameters) -> {}) + .clusterName(TestConstants.TEST_CLUSTER_NAME) + .applicationId(TestConstants.EMRS_APPLICATION_ID) + .build(); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java new file mode 100644 index 0000000000..42d703f9ac --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java @@ -0,0 +1,253 @@ +/* Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.client; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_JOB_NAME; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; +import static org.opensearch.sql.spark.constants.TestConstants.ENTRY_POINT_START_JAR; +import static org.opensearch.sql.spark.constants.TestConstants.QUERY; +import static org.opensearch.sql.spark.constants.TestConstants.SPARK_SUBMIT_PARAMETERS; + +import com.amazonaws.services.emrserverless.AWSEMRServerless; +import com.amazonaws.services.emrserverless.model.AWSEMRServerlessException; +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.StartJobRunRequest; +import com.amazonaws.services.emrserverless.model.StartJobRunResult; +import com.amazonaws.services.emrserverless.model.ValidationException; +import java.util.HashMap; +import java.util.List; +import org.apache.commons.lang3.RandomStringUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.metrics.MetricsService; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilder; + +@ExtendWith(MockitoExtension.class) +public class EmrServerlessClientImplTest { + @Mock private AWSEMRServerless emrServerless; + @Mock private MetricsService metricsService; + + @Captor private ArgumentCaptor startJobRunRequestArgumentCaptor; + + @InjectMocks EmrServerlessClientImpl emrServerlessClient; + + @Test + void testStartJobRun() { + StartJobRunResult response = new StartJobRunResult(); + 
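+    // The stubbed result is deliberately empty: the assertions below only inspect the
+    // captured StartJobRunRequest (application id, execution role, entry point and
+    // entry-point arguments, and that the spark-submit parameters embed the query text).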
when(emrServerless.startJobRun(any())).thenReturn(response); + + String parameters = + new SparkSubmitParametersBuilder(new SparkParameterComposerCollection()) + .query(QUERY) + .toString(); + + emrServerlessClient.startJobRun( + new StartJobRequest( + EMRS_JOB_NAME, + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + parameters, + new HashMap<>(), + false, + DEFAULT_RESULT_INDEX)); + verify(emrServerless, times(1)).startJobRun(startJobRunRequestArgumentCaptor.capture()); + StartJobRunRequest startJobRunRequest = startJobRunRequestArgumentCaptor.getValue(); + Assertions.assertEquals(EMRS_APPLICATION_ID, startJobRunRequest.getApplicationId()); + Assertions.assertEquals(EMRS_EXECUTION_ROLE, startJobRunRequest.getExecutionRoleArn()); + Assertions.assertEquals( + ENTRY_POINT_START_JAR, startJobRunRequest.getJobDriver().getSparkSubmit().getEntryPoint()); + Assertions.assertEquals( + List.of(DEFAULT_RESULT_INDEX), + startJobRunRequest.getJobDriver().getSparkSubmit().getEntryPointArguments()); + Assertions.assertTrue( + startJobRunRequest + .getJobDriver() + .getSparkSubmit() + .getSparkSubmitParameters() + .contains(QUERY)); + } + + @Test + void testStartJobRunWithErrorMetric() { + doThrow(new AWSEMRServerlessException("Couldn't start job")) + .when(emrServerless) + .startJobRun(any()); + RuntimeException runtimeException = + Assertions.assertThrows( + RuntimeException.class, + () -> + emrServerlessClient.startJobRun( + new StartJobRequest( + EMRS_JOB_NAME, + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + SPARK_SUBMIT_PARAMETERS, + new HashMap<>(), + false, + null))); + Assertions.assertEquals("Internal Server Error.", runtimeException.getMessage()); + } + + @Test + void testStartJobRunResultIndex() { + StartJobRunResult response = new StartJobRunResult(); + when(emrServerless.startJobRun(any())).thenReturn(response); + + emrServerlessClient.startJobRun( + new StartJobRequest( + EMRS_JOB_NAME, + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + SPARK_SUBMIT_PARAMETERS, + new HashMap<>(), + false, + "foo")); + } + + @Test + void testGetJobRunState() { + JobRun jobRun = new JobRun(); + jobRun.setState("Running"); + GetJobRunResult response = new GetJobRunResult(); + response.setJobRun(jobRun); + when(emrServerless.getJobRun(any())).thenReturn(response); + emrServerlessClient.getJobRunResult(EMRS_APPLICATION_ID, "123"); + } + + @Test + void testGetJobRunStateWithErrorMetric() { + doThrow(new ValidationException("Not a good job")).when(emrServerless).getJobRun(any()); + RuntimeException runtimeException = + Assertions.assertThrows( + RuntimeException.class, + () -> emrServerlessClient.getJobRunResult(EMRS_APPLICATION_ID, "123")); + Assertions.assertEquals("Internal Server Error.", runtimeException.getMessage()); + } + + @Test + void testCancelJobRun() { + when(emrServerless.cancelJobRun(any())) + .thenReturn(new CancelJobRunResult().withJobRunId(EMR_JOB_ID)); + + CancelJobRunResult cancelJobRunResult = + emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, false); + + Assertions.assertEquals(EMR_JOB_ID, cancelJobRunResult.getJobRunId()); + } + + @Test + void testCancelJobRunWithErrorMetric() { + doThrow(new RuntimeException()).when(emrServerless).cancelJobRun(any()); + + Assertions.assertThrows( + RuntimeException.class, + () -> emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, "123", false)); + } + + @Test + void testCancelJobRunWithValidationException() { + doThrow(new ValidationException("Error")).when(emrServerless).cancelJobRun(any()); + + RuntimeException 
runtimeException = + Assertions.assertThrows( + RuntimeException.class, + () -> emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, false)); + + Assertions.assertEquals("Internal Server Error.", runtimeException.getMessage()); + } + + @Test + void testCancelJobRunWithNativeEMRExceptionWithValidationException() { + doThrow(new ValidationException("Error")).when(emrServerless).cancelJobRun(any()); + + ValidationException validationException = + Assertions.assertThrows( + ValidationException.class, + () -> emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, true)); + + Assertions.assertTrue(validationException.getMessage().contains("Error")); + } + + @Test + void testCancelJobRunWithNativeEMRException() { + when(emrServerless.cancelJobRun(any())) + .thenReturn(new CancelJobRunResult().withJobRunId(EMR_JOB_ID)); + + CancelJobRunResult cancelJobRunResult = + emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, true); + + Assertions.assertEquals(EMR_JOB_ID, cancelJobRunResult.getJobRunId()); + } + + @Test + void testStartJobRunWithLongJobName() { + StartJobRunResult response = new StartJobRunResult(); + when(emrServerless.startJobRun(any())).thenReturn(response); + + emrServerlessClient.startJobRun( + new StartJobRequest( + RandomStringUtils.random(300), + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + SPARK_SUBMIT_PARAMETERS, + new HashMap<>(), + false, + DEFAULT_RESULT_INDEX)); + + verify(emrServerless, times(1)).startJobRun(startJobRunRequestArgumentCaptor.capture()); + StartJobRunRequest startJobRunRequest = startJobRunRequestArgumentCaptor.getValue(); + Assertions.assertEquals(255, startJobRunRequest.getName().length()); + } + + @Test + void testStartJobRunThrowsValidationException() { + when(emrServerless.startJobRun(any())).thenThrow(new ValidationException("Unmatched quote")); + + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + emrServerlessClient.startJobRun( + new StartJobRequest( + EMRS_JOB_NAME, + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + SPARK_SUBMIT_PARAMETERS, + new HashMap<>(), + false, + DEFAULT_RESULT_INDEX)), + "Expected IllegalArgumentException to be thrown"); + + // Verify that the message in the exception is correct + Assertions.assertEquals( + "The input fails to satisfy the constraints specified by AWS EMR Serverless.", + exception.getMessage()); + + // Verify that the job run submission was attempted exactly once + verify(emrServerless, times(1)).startJobRun(any()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/client/StartJobRequestTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/client/StartJobRequestTest.java new file mode 100644 index 0000000000..ac5b0dd750 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/client/StartJobRequestTest.java @@ -0,0 +1,29 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.client; + +import static org.junit.jupiter.api.Assertions.*; +import static org.opensearch.sql.spark.client.StartJobRequest.DEFAULT_JOB_TIMEOUT; + +import java.util.Map; +import org.junit.jupiter.api.Test; + +class StartJobRequestTest { + + @Test + void executionTimeout() { + assertEquals(DEFAULT_JOB_TIMEOUT, onDemandJob().executionTimeout()); + assertEquals(0L, streamingJob().executionTimeout()); + } + + private StartJobRequest onDemandJob() { + return new StartJobRequest("", null, "", "", "", Map.of(), false, null); + } + +
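+ // The boolean constructor argument marks the job as streaming: streaming jobs report an execution timeout of 0L, while on-demand jobs fall back to DEFAULT_JOB_TIMEOUT.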
private StartJobRequest streamingJob() { + return new StartJobRequest("", null, "", "", "", Map.of(), true, null); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java b/async-query-core/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java new file mode 100644 index 0000000000..295c74dcee --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.constants; + +public class TestConstants { + public static final String QUERY = "select 1"; + public static final String EMR_JOB_ID = "job-123xxx"; + public static final String EMRS_APPLICATION_ID = "app-xxxxx"; + public static final String EMRS_EXECUTION_ROLE = "execution_role"; + public static final String EMRS_JOB_NAME = "job_name"; + public static final String SPARK_SUBMIT_PARAMETERS = "--conf org.flint.sql.SQLJob"; + public static final String TEST_CLUSTER_NAME = "TEST_CLUSTER"; + public static final String MOCK_SESSION_ID = "s-0123456"; + public static final String MOCK_STATEMENT_ID = "st-0123456"; + public static final String ENTRY_POINT_START_JAR = + "file:///home/hadoop/.ivy2/jars/org.opensearch_opensearch-spark-sql-application_2.12-0.3.0-SNAPSHOT.jar"; + public static final String DEFAULT_RESULT_INDEX = "query_execution_result_ds1"; + public static final String US_EAST_REGION = "us-east-1"; + public static final String US_WEST_REGION = "us-west-1"; +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java new file mode 100644 index 0000000000..877d6ec32b --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.dispatcher; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.datasource.model.DataSourceStatus.ACTIVE; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; + +import java.util.HashMap; +import org.json.JSONObject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; +import 
org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.flint.IndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.rest.model.LangType; + +@ExtendWith(MockitoExtension.class) +class IndexDMLHandlerTest { + + private static final String QUERY_ID = "QUERY_ID"; + @Mock private JobExecutionResponseReader jobExecutionResponseReader; + @Mock private FlintIndexMetadataService flintIndexMetadataService; + @Mock private IndexDMLResultStorageService indexDMLResultStorageService; + @Mock private FlintIndexOpFactory flintIndexOpFactory; + @Mock private SparkSubmitParameterModifier sparkSubmitParameterModifier; + + @InjectMocks IndexDMLHandler indexDMLHandler; + + private static final DataSourceMetadata metadata = + new DataSourceMetadata.Builder() + .setName("mys3") + .setDescription("test description") + .setConnector(DataSourceType.S3GLUE) + .setDataSourceStatus(ACTIVE) + .build(); + + @Test + public void getResponseFromExecutor() { + JSONObject result = new IndexDMLHandler(null, null, null, null).getResponseFromExecutor(null); + + assertEquals("running", result.getString(STATUS_FIELD)); + assertEquals("", result.getString(ERROR_FIELD)); + } + + @Test + public void testWhenIndexDetailsAreNotFound() { + DispatchQueryRequest dispatchQueryRequest = getDispatchQueryRequest("DROP INDEX"); + IndexQueryDetails indexQueryDetails = + IndexQueryDetails.builder() + .mvName("mys3.default.http_logs_metrics") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build(); + DispatchQueryContext dispatchQueryContext = + DispatchQueryContext.builder() + .queryId(QUERY_ID) + .dataSourceMetadata(metadata) + .indexQueryDetails(indexQueryDetails) + .build(); + Mockito.when(flintIndexMetadataService.getFlintIndexMetadata(any())) + .thenReturn(new HashMap<>()); + + DispatchQueryResponse dispatchQueryResponse = + indexDMLHandler.submit(dispatchQueryRequest, dispatchQueryContext); + + Assertions.assertNotNull(dispatchQueryResponse.getQueryId()); + } + + @Test + public void testWhenIndexDetailsWithInvalidQueryActionType() { + FlintIndexMetadata flintIndexMetadata = mock(FlintIndexMetadata.class); + DispatchQueryRequest dispatchQueryRequest = getDispatchQueryRequest("CREATE INDEX"); + IndexQueryDetails indexQueryDetails = + IndexQueryDetails.builder() + .mvName("mys3.default.http_logs_metrics") + .indexQueryActionType(IndexQueryActionType.CREATE) + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build(); + DispatchQueryContext dispatchQueryContext = + DispatchQueryContext.builder() + .queryId(QUERY_ID) + .dataSourceMetadata(metadata) + .indexQueryDetails(indexQueryDetails) + .build(); + HashMap<String, FlintIndexMetadata> flintMetadataMap = new HashMap<>(); + flintMetadataMap.put(indexQueryDetails.openSearchIndexName(), flintIndexMetadata); + when(flintIndexMetadataService.getFlintIndexMetadata(indexQueryDetails.openSearchIndexName())) + .thenReturn(flintMetadataMap); + + indexDMLHandler.submit(dispatchQueryRequest, dispatchQueryContext); + } + + private DispatchQueryRequest getDispatchQueryRequest(String query) { + return DispatchQueryRequest.builder() + .applicationId(EMRS_APPLICATION_ID) + .query(query) +
.datasource("my_glue") + .langType(LangType.SQL) + .executionRoleARN(EMRS_EXECUTION_ROLE) + .clusterName(TEST_CLUSTER_NAME) + .sparkSubmitParameterModifier(sparkSubmitParameterModifier) + .build(); + } + + @Test + public void testStaticMethods() { + Assertions.assertTrue(IndexDMLHandler.isIndexDMLQuery("dropIndexJobId")); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java new file mode 100644 index 0000000000..16c38dbe62 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java @@ -0,0 +1,1145 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.dispatcher; + +import static org.mockito.Answers.RETURNS_DEEP_STUBS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; +import static org.opensearch.sql.spark.constants.TestConstants.MOCK_SESSION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.MOCK_STATEMENT_ID; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_HOST_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_PORT_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_SCHEME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; +import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.CLUSTER_NAME_TAG_KEY; +import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.DATASOURCE_TAG_KEY; +import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.INDEX_TAG_KEY; +import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.JobRunState; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.json.JSONObject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; 
+import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClient; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.client.StartJobRequest; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; +import org.opensearch.sql.spark.dispatcher.model.JobType; +import org.opensearch.sql.spark.execution.session.Session; +import org.opensearch.sql.spark.execution.session.SessionManager; +import org.opensearch.sql.spark.execution.statement.Statement; +import org.opensearch.sql.spark.execution.statement.StatementId; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.IndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import org.opensearch.sql.spark.leasemanager.LeaseManager; +import org.opensearch.sql.spark.metrics.MetricsService; +import org.opensearch.sql.spark.parameter.DataSourceSparkParameterComposer; +import org.opensearch.sql.spark.parameter.GeneralSparkParameterComposer; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.rest.model.LangType; + +@ExtendWith(MockitoExtension.class) +public class SparkQueryDispatcherTest { + + public static final String MY_GLUE = "my_glue"; + public static final String KEY_FROM_COMPOSER = "key.from.composer"; + public static final String VALUE_FROM_COMPOSER = "value.from.composer"; + public static final String KEY_FROM_DATASOURCE_COMPOSER = "key.from.datasource.composer"; + public static final String VALUE_FROM_DATASOURCE_COMPOSER = "value.from.datasource.composer"; + @Mock private EMRServerlessClient emrServerlessClient; + @Mock private EMRServerlessClientFactory emrServerlessClientFactory; + @Mock private DataSourceService dataSourceService; + @Mock private JobExecutionResponseReader jobExecutionResponseReader; + @Mock private FlintIndexMetadataService flintIndexMetadataService; + @Mock private SessionManager sessionManager; + @Mock private LeaseManager leaseManager; + @Mock private IndexDMLResultStorageService indexDMLResultStorageService; + @Mock private FlintIndexOpFactory flintIndexOpFactory; + @Mock private SparkSubmitParameterModifier sparkSubmitParameterModifier; + @Mock private QueryIdProvider queryIdProvider; + @Mock private AsyncQueryRequestContext asyncQueryRequestContext; + @Mock private MetricsService metricsService; + private DataSourceSparkParameterComposer dataSourceSparkParameterComposer = + (datasourceMetadata, sparkSubmitParameters, dispatchQueryRequest, context) -> { + 
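+ // Test-only composer: pins the Flint index store settings so the spark-submit string assembled in constructExpectedSparkSubmitParameterString() is deterministic.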
sparkSubmitParameters.setConfigItem(FLINT_INDEX_STORE_AUTH_KEY, "basic"); + sparkSubmitParameters.setConfigItem(FLINT_INDEX_STORE_HOST_KEY, "HOST"); + sparkSubmitParameters.setConfigItem(FLINT_INDEX_STORE_PORT_KEY, "PORT"); + sparkSubmitParameters.setConfigItem(FLINT_INDEX_STORE_SCHEME_KEY, "SCHEMA"); + sparkSubmitParameters.setConfigItem( + KEY_FROM_DATASOURCE_COMPOSER, VALUE_FROM_DATASOURCE_COMPOSER); + }; + + private GeneralSparkParameterComposer generalSparkParameterComposer = + (sparkSubmitParameters, dispatchQueryRequest, context) -> { + sparkSubmitParameters.setConfigItem(KEY_FROM_COMPOSER, VALUE_FROM_COMPOSER); + }; + + private SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider; + + @Mock(answer = RETURNS_DEEP_STUBS) + private Session session; + + @Mock(answer = RETURNS_DEEP_STUBS) + private Statement statement; + + private SparkQueryDispatcher sparkQueryDispatcher; + + private final String QUERY_ID = "QUERY_ID"; + + @Captor ArgumentCaptor<StartJobRequest> startJobRequestArgumentCaptor; + + @BeforeEach + void setUp() { + SparkParameterComposerCollection collection = new SparkParameterComposerCollection(); + collection.register(DataSourceType.S3GLUE, dataSourceSparkParameterComposer); + collection.register(generalSparkParameterComposer); + sparkSubmitParametersBuilderProvider = new SparkSubmitParametersBuilderProvider(collection); + QueryHandlerFactory queryHandlerFactory = + new QueryHandlerFactory( + jobExecutionResponseReader, + flintIndexMetadataService, + sessionManager, + leaseManager, + indexDMLResultStorageService, + flintIndexOpFactory, + emrServerlessClientFactory, + metricsService, + sparkSubmitParametersBuilderProvider); + sparkQueryDispatcher = + new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + } + + @Test + void testDispatchSelectQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "select * from my_glue.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + DispatchQueryRequest.builder() + .applicationId(EMRS_APPLICATION_ID) + .query(query) + .datasource(MY_GLUE) + .langType(LangType.SQL) + .executionRoleARN(EMRS_EXECUTION_ROLE) + .clusterName(TEST_CLUSTER_NAME) + .sparkSubmitParameterModifier(sparkSubmitParameterModifier) + .build(), + asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchSelectQueryWithLakeFormation() {
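+ // Identical expectations to testDispatchSelectQuery; only the data source metadata differs (lake formation enabled).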
when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "select * from my_glue.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadataWithLakeFormation(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchSelectQueryWithBasicAuthIndexStoreDatasource() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "select * from my_glue.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadataWithBasicAuth(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchSelectQueryCreateNewSession() { + String query = "select * from my_glue.default.http_logs"; + DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, null); + + doReturn(true).when(sessionManager).isEnabled(); + doReturn(session).when(sessionManager).createSession(any(), any()); + doReturn(MOCK_SESSION_ID).when(session).getSessionId(); + doReturn(new StatementId(MOCK_STATEMENT_ID)).when(session).submit(any(), any()); + when(session.getSessionModel().getJobId()).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) +
.thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(queryRequest, asyncQueryRequestContext); + + verifyNoInteractions(emrServerlessClient); + verify(sessionManager, never()).getSession(any(), any()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + Assertions.assertEquals(MOCK_SESSION_ID, dispatchQueryResponse.getSessionId()); + } + + @Test + void testDispatchSelectQueryReuseSession() { + String query = "select * from my_glue.default.http_logs"; + DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, MOCK_SESSION_ID); + + doReturn(true).when(sessionManager).isEnabled(); + doReturn(Optional.of(session)) + .when(sessionManager) + .getSession(eq(MOCK_SESSION_ID), eq(MY_GLUE)); + doReturn(MOCK_SESSION_ID).when(session).getSessionId(); + doReturn(new StatementId(MOCK_STATEMENT_ID)).when(session).submit(any(), any()); + when(session.getSessionModel().getJobId()).thenReturn(EMR_JOB_ID); + when(session.isOperationalForDataSource(any())).thenReturn(true); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(queryRequest, asyncQueryRequestContext); + + verifyNoInteractions(emrServerlessClient); + verify(sessionManager, never()).createSession(any(), any()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + Assertions.assertEquals(MOCK_SESSION_ID, dispatchQueryResponse.getSessionId()); + } + + @Test + void testDispatchSelectQueryFailedCreateSession() { + String query = "select * from my_glue.default.http_logs"; + DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, null); + + doReturn(true).when(sessionManager).isEnabled(); + doThrow(RuntimeException.class).when(sessionManager).createSession(any(), any()); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + Assertions.assertThrows( + RuntimeException.class, + () -> sparkQueryDispatcher.dispatch(queryRequest, asyncQueryRequestContext)); + + verifyNoInteractions(emrServerlessClient); + } + + @Test + void testDispatchCreateAutoRefreshIndexQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(INDEX_TAG_KEY, "flint_my_glue_default_http_logs_elb_and_requesturi_index"); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); + String query = + "CREATE INDEX elb_and_requestUri ON my_glue.default.http_logs(l_orderkey, l_quantity) WITH" + + " (auto_refresh = true)"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query, "streaming"); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:streaming:flint_my_glue_default_http_logs_elb_and_requesturi_index", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + true, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); +
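+ // Auto-refresh index creation is dispatched as a streaming job; the expected job name above encodes cluster, job type, and Flint index name.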
when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchCreateManualRefreshIndexQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, "my_glue"); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = + "CREATE INDEX elb_and_requestUri ON my_glue.default.http_logs(l_orderkey, l_quantity) WITH" + + " (auto_refresh = false)"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_glue", asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchWithPPLQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "source = my_glue.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + getBaseDispatchQueryRequestBuilder(query).langType(LangType.PPL).build(), + asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test +
void testDispatchQueryWithoutATableAndDataSourceName() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "show tables"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchIndexQueryWithoutADatasourceName() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(INDEX_TAG_KEY, "flint_my_glue_default_http_logs_elb_and_requesturi_index"); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); + String query = + "CREATE INDEX elb_and_requestUri ON default.http_logs(l_orderkey, l_quantity) WITH" + + " (auto_refresh = true)"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query, "streaming"); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:streaming:flint_my_glue_default_http_logs_elb_and_requesturi_index", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + true, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchMaterializedViewQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(INDEX_TAG_KEY, "flint_mv_1"); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); + String query = + "CREATE MATERIALIZED VIEW mv_1 AS select * from logs WITH" + " (auto_refresh = true)"; + String
sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query, "streaming"); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:streaming:flint_mv_1", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + true, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchShowMVQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "SHOW MATERIALIZED VIEW IN mys3.default"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testRefreshIndexQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "REFRESH SKIPPING INDEX ON my_glue.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query),
asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchDescribeIndexQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, MY_GLUE); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); + String query = "DESCRIBE SKIPPING INDEX ON mys3.default.http_logs"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:batch", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + false, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchAlterToAutoRefreshIndexQuery() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + HashMap<String, String> tags = new HashMap<>(); + tags.put(DATASOURCE_TAG_KEY, "my_glue"); + tags.put(INDEX_TAG_KEY, "flint_my_glue_default_http_logs_elb_and_requesturi_index"); + tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); + tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); + String query = + "ALTER INDEX elb_and_requestUri ON my_glue.default.http_logs WITH" + + " (auto_refresh = true)"; + String sparkSubmitParameters = constructExpectedSparkSubmitParameterString(query, "streaming"); + StartJobRequest expected = + new StartJobRequest( + "TEST_CLUSTER:streaming:flint_my_glue_default_http_logs_elb_and_requesturi_index", + null, + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + sparkSubmitParameters, + tags, + true, + "query_execution_result_my_glue"); + when(emrServerlessClient.startJobRun(expected)).thenReturn(EMR_JOB_ID); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_glue", asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); + Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + verifyNoInteractions(flintIndexMetadataService); + } + + @Test + void testDispatchAlterToManualRefreshIndexQuery() { + QueryHandlerFactory queryHandlerFactory = mock(QueryHandlerFactory.class); +
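+ // Swap in a mocked QueryHandlerFactory so the test can verify that ALTER with auto_refresh = false is routed to the IndexDMLHandler.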
sparkQueryDispatcher = + new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + + String query = + "ALTER INDEX elb_and_requestUri ON my_glue.default.http_logs WITH" + + " (auto_refresh = false)"; + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_glue", asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + when(queryHandlerFactory.getIndexDMLHandler()) + .thenReturn( + new IndexDMLHandler( + jobExecutionResponseReader, + flintIndexMetadataService, + indexDMLResultStorageService, + flintIndexOpFactory)); + + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + verify(queryHandlerFactory, times(1)).getIndexDMLHandler(); + } + + @Test + void testDispatchDropIndexQuery() { + QueryHandlerFactory queryHandlerFactory = mock(QueryHandlerFactory.class); + sparkQueryDispatcher = + new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + + String query = "DROP INDEX elb_and_requestUri ON my_glue.default.http_logs"; + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_glue", asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + when(queryHandlerFactory.getIndexDMLHandler()) + .thenReturn( + new IndexDMLHandler( + jobExecutionResponseReader, + flintIndexMetadataService, + indexDMLResultStorageService, + flintIndexOpFactory)); + + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + verify(queryHandlerFactory, times(1)).getIndexDMLHandler(); + } + + @Test + void testDispatchVacuumIndexQuery() { + QueryHandlerFactory queryHandlerFactory = mock(QueryHandlerFactory.class); + sparkQueryDispatcher = + new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + + String query = "VACUUM INDEX elb_and_requestUri ON my_glue.default.http_logs"; + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_glue", asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + when(queryHandlerFactory.getIndexDMLHandler()) + .thenReturn( + new IndexDMLHandler( + jobExecutionResponseReader, + flintIndexMetadataService, + indexDMLResultStorageService, + flintIndexOpFactory)); + + sparkQueryDispatcher.dispatch(getBaseDispatchQueryRequest(query), asyncQueryRequestContext); + verify(queryHandlerFactory, times(1)).getIndexDMLHandler(); + } + + @Test + void testDispatchWithUnSupportedDataSourceType() { + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + "my_prometheus", asyncQueryRequestContext)) + .thenReturn(constructPrometheusDataSourceType()); + String query = "select * from my_prometheus.default.http_logs"; + + UnsupportedOperationException unsupportedOperationException = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + sparkQueryDispatcher.dispatch( + getBaseDispatchQueryRequestBuilder(query).datasource("my_prometheus").build(), + asyncQueryRequestContext)); + + Assertions.assertEquals( + "UnSupported datasource type for async queries:: PROMETHEUS", + unsupportedOperationException.getMessage()); + } + + @Test + void testCancelJob() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + 
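+ // With no session id in the metadata, cancellation goes directly through the EMR Serverless client.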
when(emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, false)) + .thenReturn( + new CancelJobRunResult() + .withJobRunId(EMR_JOB_ID) + .withApplicationId(EMRS_APPLICATION_ID)); + + String queryId = sparkQueryDispatcher.cancelJob(asyncQueryJobMetadata()); + + Assertions.assertEquals(QUERY_ID, queryId); + } + + @Test + void testCancelQueryWithSession() { + doReturn(Optional.of(session)).when(sessionManager).getSession(MOCK_SESSION_ID, MY_GLUE); + doReturn(Optional.of(statement)).when(session).get(any()); + doNothing().when(statement).cancel(); + + String queryId = + sparkQueryDispatcher.cancelJob( + asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID)); + + verifyNoInteractions(emrServerlessClient); + verify(statement, times(1)).cancel(); + Assertions.assertEquals(MOCK_STATEMENT_ID, queryId); + } + + @Test + void testCancelQueryWithInvalidSession() { + doReturn(Optional.empty()).when(sessionManager).getSession("invalid", MY_GLUE); + + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + sparkQueryDispatcher.cancelJob( + asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, "invalid"))); + + verifyNoInteractions(emrServerlessClient); + verifyNoInteractions(session); + Assertions.assertEquals("no session found. invalid", exception.getMessage()); + } + + @Test + void testCancelQueryWithInvalidStatementId() { + doReturn(Optional.of(session)).when(sessionManager).getSession(MOCK_SESSION_ID, MY_GLUE); + + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + sparkQueryDispatcher.cancelJob( + asyncQueryJobMetadataWithSessionId("invalid", MOCK_SESSION_ID))); + + verifyNoInteractions(emrServerlessClient); + verifyNoInteractions(statement); + Assertions.assertEquals( + "no statement found. 
" + new StatementId("invalid"), exception.getMessage()); + } + + @Test + void testCancelQueryWithNoSessionId() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + when(emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID, false)) + .thenReturn( + new CancelJobRunResult() + .withJobRunId(EMR_JOB_ID) + .withApplicationId(EMRS_APPLICATION_ID)); + + String queryId = sparkQueryDispatcher.cancelJob(asyncQueryJobMetadata()); + + Assertions.assertEquals(QUERY_ID, queryId); + } + + @Test + void testGetQueryResponse() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + when(emrServerlessClient.getJobRunResult(EMRS_APPLICATION_ID, EMR_JOB_ID)) + .thenReturn(new GetJobRunResult().withJobRun(new JobRun().withState(JobRunState.PENDING))); + // simulate result index is not created yet + when(jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, null)) + .thenReturn(new JSONObject()); + + JSONObject result = sparkQueryDispatcher.getQueryResponse(asyncQueryJobMetadata()); + + Assertions.assertEquals("PENDING", result.get("status")); + } + + @Test + void testGetQueryResponseWithSession() { + doReturn(Optional.of(session)).when(sessionManager).getSession(MOCK_SESSION_ID, MY_GLUE); + doReturn(Optional.of(statement)).when(session).get(any()); + when(statement.getStatementModel().getError()).thenReturn("mock error"); + doReturn(StatementState.WAITING).when(statement).getStatementState(); + doReturn(new JSONObject()) + .when(jobExecutionResponseReader) + .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); + + JSONObject result = + sparkQueryDispatcher.getQueryResponse( + asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID)); + + verifyNoInteractions(emrServerlessClient); + Assertions.assertEquals("waiting", result.get("status")); + } + + @Test + void testGetQueryResponseWithInvalidSession() { + doReturn(Optional.empty()).when(sessionManager).getSession(MOCK_SESSION_ID, MY_GLUE); + doReturn(new JSONObject()) + .when(jobExecutionResponseReader) + .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); + + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + sparkQueryDispatcher.getQueryResponse( + asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID))); + + verifyNoInteractions(emrServerlessClient); + Assertions.assertEquals("no session found. " + MOCK_SESSION_ID, exception.getMessage()); + } + + @Test + void testGetQueryResponseWithStatementNotExist() { + doReturn(Optional.of(session)).when(sessionManager).getSession(MOCK_SESSION_ID, MY_GLUE); + doReturn(Optional.empty()).when(session).get(any()); + doReturn(new JSONObject()) + .when(jobExecutionResponseReader) + .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); + + IllegalArgumentException exception = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> + sparkQueryDispatcher.getQueryResponse( + asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID))); + + verifyNoInteractions(emrServerlessClient); + Assertions.assertEquals( + "no statement found. 
" + new StatementId(MOCK_STATEMENT_ID), exception.getMessage()); + } + + @Test + void testGetQueryResponseWithSuccess() { + JSONObject queryResult = new JSONObject(); + Map resultMap = new HashMap<>(); + resultMap.put(STATUS_FIELD, "SUCCESS"); + resultMap.put(ERROR_FIELD, ""); + queryResult.put(DATA_FIELD, resultMap); + when(jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, null)).thenReturn(queryResult); + + JSONObject result = sparkQueryDispatcher.getQueryResponse(asyncQueryJobMetadata()); + + verify(jobExecutionResponseReader, times(1)).getResultWithJobId(EMR_JOB_ID, null); + Assertions.assertEquals( + new HashSet<>(Arrays.asList(DATA_FIELD, STATUS_FIELD, ERROR_FIELD)), result.keySet()); + JSONObject dataJson = new JSONObject(); + dataJson.put(ERROR_FIELD, ""); + dataJson.put(STATUS_FIELD, "SUCCESS"); + // JSONObject.similar() compares if two JSON objects are the same, but having perhaps a + // different order of its attributes. + // The equals() will compare each string caracter, one-by-one checking if it is the same, having + // the same order. + // We need similar. + Assertions.assertTrue(dataJson.similar(result.get(DATA_FIELD))); + Assertions.assertEquals("SUCCESS", result.get(STATUS_FIELD)); + verifyNoInteractions(emrServerlessClient); + } + + @Test + void testDispatchQueryWithExtraSparkSubmitParameters() { + when(emrServerlessClientFactory.getClient(any())).thenReturn(emrServerlessClient); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.verifyDataSourceAccessAndGetRawMetadata( + MY_GLUE, asyncQueryRequestContext)) + .thenReturn(dataSourceMetadata); + + String extraParameters = "--conf spark.dynamicAllocation.enabled=false"; + DispatchQueryRequest[] requests = { + // SQL direct query + constructDispatchQueryRequest( + "select * from my_glue.default.http_logs", LangType.SQL, extraParameters), + // SQL index query + constructDispatchQueryRequest( + "create skipping index on my_glue.default.http_logs (status VALUE_SET)", + LangType.SQL, + extraParameters), + // PPL query + constructDispatchQueryRequest( + "source = my_glue.default.http_logs", LangType.PPL, extraParameters) + }; + + for (DispatchQueryRequest request : requests) { + when(emrServerlessClient.startJobRun(any())).thenReturn(EMR_JOB_ID); + sparkQueryDispatcher.dispatch(request, asyncQueryRequestContext); + + verify(emrServerlessClient, times(1)) + .startJobRun( + argThat( + actualReq -> actualReq.getSparkSubmitParams().endsWith(" " + extraParameters))); + reset(emrServerlessClient); + } + } + + private String constructExpectedSparkSubmitParameterString(String query) { + return constructExpectedSparkSubmitParameterString(query, null); + } + + private String constructExpectedSparkSubmitParameterString(String query, String jobType) { + query = "\"" + query + "\""; + return " --class org.apache.spark.sql.FlintJob " + + getConfParam( + "spark.hadoop.fs.s3.customAWSCredentialsProvider=com.amazonaws.emr.AssumeRoleAWSCredentialsProvider", + "spark.hadoop.aws.catalog.credentials.provider.factory.class=com.amazonaws.glue.catalog.metastore.STSAssumeRoleSessionCredentialsProviderFactory", + "spark.jars=/usr/share/aws/iceberg/lib/iceberg-spark3-runtime.jar", + "spark.jars.packages=org.opensearch:opensearch-spark-standalone_2.12:0.3.0-SNAPSHOT,org.opensearch:opensearch-spark-sql-application_2.12:0.3.0-SNAPSHOT,org.opensearch:opensearch-spark-ppl_2.12:0.3.0-SNAPSHOT", + "spark.jars.repositories=https://aws.oss.sonatype.org/content/repositories/snapshots", + 
"spark.emr-serverless.driverEnv.JAVA_HOME=/usr/lib/jvm/java-17-amazon-corretto.x86_64/", + "spark.executorEnv.JAVA_HOME=/usr/lib/jvm/java-17-amazon-corretto.x86_64/", + "spark.emr-serverless.driverEnv.FLINT_CLUSTER_NAME=TEST_CLUSTER", + "spark.executorEnv.FLINT_CLUSTER_NAME=TEST_CLUSTER", + "spark.datasource.flint.host=HOST", + "spark.datasource.flint.port=PORT", + "spark.datasource.flint.scheme=SCHEMA", + "spark.datasource.flint.auth=basic", + "spark.datasource.flint.customAWSCredentialsProvider=com.amazonaws.emr.AssumeRoleAWSCredentialsProvider", + "spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions,org.opensearch.flint.spark.FlintSparkExtensions,org.opensearch.flint.spark.FlintPPLSparkExtensions", + "spark.hadoop.hive.metastore.client.factory.class=com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory", + "spark.sql.catalog.spark_catalog=org.apache.iceberg.spark.SparkSessionCatalog", + "spark.sql.catalog.spark_catalog.catalog-impl=org.apache.iceberg.aws.glue.GlueCatalog") + + getConfParam("spark.flint.job.query=" + query) + + (jobType != null ? getConfParam("spark.flint.job.type=" + jobType) : "") + + getConfParam( + KEY_FROM_DATASOURCE_COMPOSER + "=" + VALUE_FROM_DATASOURCE_COMPOSER, + KEY_FROM_COMPOSER + "=" + VALUE_FROM_COMPOSER); + } + + private String getConfParam(String... params) { + return Arrays.stream(params) + .map(param -> String.format(" --conf %s ", param)) + .collect(Collectors.joining()); + } + + private DataSourceMetadata constructMyGlueDataSourceMetadata() { + Map properties = new HashMap<>(); + properties.put("glue.auth.type", "iam_role"); + properties.put( + "glue.auth.role_arn", "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole"); + properties.put( + "glue.indexstore.opensearch.uri", + "https://search-flint-dp-benchmark-cf5crj5mj2kfzvgwdeynkxnefy.eu-west-1.es.amazonaws.com"); + properties.put("glue.indexstore.opensearch.auth", "awssigv4"); + properties.put("glue.indexstore.opensearch.region", "eu-west-1"); + return new DataSourceMetadata.Builder() + .setName(MY_GLUE) + .setConnector(DataSourceType.S3GLUE) + .setProperties(properties) + .build(); + } + + private DataSourceMetadata constructMyGlueDataSourceMetadataWithBasicAuth() { + Map properties = new HashMap<>(); + properties.put("glue.auth.type", "iam_role"); + properties.put( + "glue.auth.role_arn", "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole"); + properties.put( + "glue.indexstore.opensearch.uri", + "https://search-flint-dp-benchmark-cf5crj5mj2kfzvgwdeynkxnefy.eu-west-1.es.amazonaws.com"); + properties.put("glue.indexstore.opensearch.auth", "basicauth"); + properties.put("glue.indexstore.opensearch.auth.username", "username"); + properties.put("glue.indexstore.opensearch.auth.password", "password"); + return new DataSourceMetadata.Builder() + .setName(MY_GLUE) + .setConnector(DataSourceType.S3GLUE) + .setProperties(properties) + .build(); + } + + private DataSourceMetadata constructMyGlueDataSourceMetadataWithLakeFormation() { + + Map properties = new HashMap<>(); + properties.put("glue.auth.type", "iam_role"); + properties.put( + "glue.auth.role_arn", "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole"); + properties.put( + "glue.indexstore.opensearch.uri", + "https://search-flint-dp-benchmark-cf5crj5mj2kfzvgwdeynkxnefy.eu-west-1.es.amazonaws.com"); + properties.put("glue.indexstore.opensearch.auth", "awssigv4"); + properties.put("glue.indexstore.opensearch.region", "eu-west-1"); + 
properties.put("glue.lakeformation.enabled", "true"); + return new DataSourceMetadata.Builder() + .setName(MY_GLUE) + .setConnector(DataSourceType.S3GLUE) + .setProperties(properties) + .build(); + } + + private DataSourceMetadata constructPrometheusDataSourceType() { + return new DataSourceMetadata.Builder() + .setName("my_prometheus") + .setConnector(DataSourceType.PROMETHEUS) + .build(); + } + + private DispatchQueryRequest getBaseDispatchQueryRequest(String query) { + return getBaseDispatchQueryRequestBuilder(query).build(); + } + + private DispatchQueryRequest.DispatchQueryRequestBuilder getBaseDispatchQueryRequestBuilder( + String query) { + return DispatchQueryRequest.builder() + .applicationId(EMRS_APPLICATION_ID) + .query(query) + .datasource(MY_GLUE) + .langType(LangType.SQL) + .executionRoleARN(EMRS_EXECUTION_ROLE) + .clusterName(TEST_CLUSTER_NAME) + .sparkSubmitParameterModifier(sparkSubmitParameterModifier); + } + + private DispatchQueryRequest constructDispatchQueryRequest( + String query, LangType langType, String extraParameters) { + return getBaseDispatchQueryRequestBuilder(query) + .langType(langType) + .sparkSubmitParameterModifier((builder) -> builder.extraParameters(extraParameters)) + .build(); + } + + private DispatchQueryRequest dispatchQueryRequestWithSessionId(String query, String sessionId) { + return getBaseDispatchQueryRequestBuilder(query).sessionId(sessionId).build(); + } + + private AsyncQueryJobMetadata asyncQueryJobMetadata() { + return AsyncQueryJobMetadata.builder() + .queryId(QUERY_ID) + .applicationId(EMRS_APPLICATION_ID) + .jobId(EMR_JOB_ID) + .datasourceName(MY_GLUE) + .build(); + } + + private AsyncQueryJobMetadata asyncQueryJobMetadataWithSessionId( + String statementId, String sessionId) { + return AsyncQueryJobMetadata.builder() + .queryId(statementId) + .applicationId(EMRS_APPLICATION_ID) + .jobId(EMR_JOB_ID) + .sessionId(sessionId) + .datasourceName(MY_GLUE) + .build(); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java new file mode 100644 index 0000000000..0490c619bb --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java @@ -0,0 +1,63 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; + +@ExtendWith(MockitoExtension.class) +public class SessionManagerTest { + @Mock private SessionStorageService sessionStorageService; + @Mock private StatementStorageService statementStorageService; + @Mock private EMRServerlessClientFactory emrServerlessClientFactory; + @Mock private SessionConfigSupplier sessionConfigSupplier; + @Mock private SessionIdProvider sessionIdProvider; + + @Test + public void sessionEnable() { + SessionManager sessionManager = + new SessionManager( + 
sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + sessionIdProvider); + + Assertions.assertTrue(sessionManager.isEnabled()); + } + + public static org.opensearch.sql.common.setting.Settings sessionSetting() { + Map<Settings.Key, Object> settings = new HashMap<>(); + settings.put(Settings.Key.SPARK_EXECUTION_SESSION_LIMIT, 100); + settings.put( + org.opensearch.sql.common.setting.Settings.Key.SESSION_INACTIVITY_TIMEOUT_MILLIS, 10000L); + return settings(settings); + } + + public static Settings settings(Map<Settings.Key, Object> settings) { + return new Settings() { + @Override + public <T> T getSettingValue(Key key) { + return (T) settings.get(key); + } + + @Override + public List<?> getSettings() { + return (List<?>) settings; + } + }; + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java new file mode 100644 index 0000000000..a987c80d59 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import org.junit.jupiter.api.Test; + +class SessionStateTest { + @Test + public void invalidSessionState() { + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, () -> SessionState.fromString("invalid")); + assertEquals("Invalid session state: invalid", exception.getMessage()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java new file mode 100644 index 0000000000..a2ab43e709 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import org.junit.jupiter.api.Test; + +class SessionTypeTest { + @Test + public void invalidSessionType() { + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, () -> SessionType.fromString("invalid")); + assertEquals("Invalid session type: invalid", exception.getMessage()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java new file mode 100644 index 0000000000..b7af1123ba --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import static org.junit.Assert.assertThrows; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +class StatementStateTest { + @Test + public void invalidStatementState() { + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, () -> StatementState.fromString("invalid")); +
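// The exception message should name the unrecognized state verbatim. +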
Assertions.assertEquals("Invalid statement state: invalid", exception.getMessage()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtilTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtilTest.java new file mode 100644 index 0000000000..318080ff2d --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStateStoreUtilTest.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import static org.junit.Assert.assertEquals; + +import org.junit.jupiter.api.Test; + +public class OpenSearchStateStoreUtilTest { + + @Test + void getIndexName() { + String result = OpenSearchStateStoreUtil.getIndexName("DATASOURCE"); + + assertEquals(".query_execution_request_datasource", result); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/StateModelTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/StateModelTest.java new file mode 100644 index 0000000000..15d1ec2ecc --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/statestore/StateModelTest.java @@ -0,0 +1,49 @@ +package org.opensearch.sql.spark.execution.statestore; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.google.common.collect.ImmutableMap; +import java.util.Optional; +import lombok.Data; +import lombok.experimental.SuperBuilder; +import org.junit.jupiter.api.Test; + +class StateModelTest { + + public static final String METADATA_KEY = "KEY"; + public static final String METADATA_VALUE = "VALUE"; + public static final String UNKNOWN_KEY = "UNKNOWN_KEY"; + + @Data + @SuperBuilder + static class ConcreteStateModel extends StateModel { + @Override + public String getId() { + return null; + } + } + + ConcreteStateModel model = + ConcreteStateModel.builder().metadata(ImmutableMap.of(METADATA_KEY, METADATA_VALUE)).build(); + + @Test + public void whenMetadataExist() { + Optional result = model.getMetadataItem(METADATA_KEY, String.class); + + assertEquals(METADATA_VALUE, result.get()); + } + + @Test + public void whenMetadataNotExist() { + Optional result = model.getMetadataItem(UNKNOWN_KEY, String.class); + + assertFalse(result.isPresent()); + } + + @Test + public void whenTypeDoNotMatch() { + assertThrows(RuntimeException.class, () -> model.getMetadataItem(METADATA_KEY, Long.class)); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtilTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtilTest.java new file mode 100644 index 0000000000..5bd8795663 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerUtilTest.java @@ -0,0 +1,17 @@ +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import com.google.common.collect.ImmutableMap; +import org.junit.jupiter.api.Test; + +class XContentSerializerUtilTest { + @Test + public void testBuildMetadata() { + ImmutableMap result = XContentSerializerUtil.buildMetadata(1, 2); + + assertEquals(2, result.size()); + 
assertEquals(1L, result.get(XContentSerializerUtil.SEQ_NO)); + assertEquals(2L, result.get(XContentSerializerUtil.PRIMARY_TERM)); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java new file mode 100644 index 0000000000..acd76fa11a --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import static org.junit.jupiter.api.Assertions.*; +import static org.opensearch.sql.spark.flint.FlintIndexState.UNKNOWN; + +import org.junit.jupiter.api.Test; + +class FlintIndexStateTest { + @Test + public void unknownState() { + assertEquals(UNKNOWN, FlintIndexState.fromString("noSupported")); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java new file mode 100644 index 0000000000..4d52baee92 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java @@ -0,0 +1,122 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; + +public class IndexQueryDetailsTest { + @Test + public void skippingIndexName() { + assertEquals( + "flint_mys3_default_http_logs_skipping_index", + IndexQueryDetails.builder() + .indexName("invalid") + .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) + .indexOptions(new FlintIndexOptions()) + .indexQueryActionType(IndexQueryActionType.DROP) + .indexType(FlintIndexType.SKIPPING) + .build() + .openSearchIndexName()); + } + + @Test + public void coveringIndexName() { + assertEquals( + "flint_mys3_default_http_logs_idx_status_index", + IndexQueryDetails.builder() + .indexName("idx_status") + .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) + .indexType(FlintIndexType.COVERING) + .build() + .openSearchIndexName()); + } + + @Test + public void materializedViewIndexName() { + assertEquals( + "flint_mys3_default_http_logs_metrics", + IndexQueryDetails.builder() + .mvName("mys3.default.http_logs_metrics") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + } + + @Test + public void materializedViewIndexNameWithBackticks() { + assertEquals( + "flint_mys3_default_http_logs_metrics", + IndexQueryDetails.builder() + .mvName("`mys3`.`default`.`http_logs_metrics`") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + } + + @Test + public void materializedViewIndexNameWithDots() { + assertEquals( + "flint_mys3_default_http_logs_metrics.1026", + IndexQueryDetails.builder() + .mvName("`mys3`.`default`.`http_logs_metrics.1026`") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + } + + @Test + public void materializedViewIndexNameWithDotsInCatalogName() 
{ + // FIXME: should not use ctx.getText which is hard to split + assertEquals( + "flint_mys3_1026_default`.`http_logs_metrics", + IndexQueryDetails.builder() + .mvName("`mys3.1026`.`default`.`http_logs_metrics`") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + } + + @Test + public void materializedViewIndexNameNotFullyQualified() { + // Normally this should not happen; a precondition check can be added once it is confirmed. + assertEquals( + "flint_default_http_logs_metrics", + IndexQueryDetails.builder() + .mvName("default.http_logs_metrics") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + + assertEquals( + "flint_http_logs_metrics", + IndexQueryDetails.builder() + .mvName("http_logs_metrics") + .indexType(FlintIndexType.MATERIALIZED_VIEW) + .build() + .openSearchIndexName()); + } + + @Test + public void sanitizedIndexName() { + assertEquals( + "flint_mys3_default_test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c_skipping_index", + IndexQueryDetails.builder() + .indexName("invalid") + .fullyQualifiedTableName( + new FullyQualifiedTableName("mys3.default.`test ,:\"+/\\|?#><`")) + .indexOptions(new FlintIndexOptions()) + .indexQueryActionType(IndexQueryActionType.DROP) + .indexType(FlintIndexType.SKIPPING) + .build() + .openSearchIndexName()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactoryTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactoryTest.java new file mode 100644 index 0000000000..3bf438aeb9 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpFactoryTest.java @@ -0,0 +1,51 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +@ExtendWith(MockitoExtension.class) +class FlintIndexOpFactoryTest { + public static final String DATASOURCE_NAME = "DATASOURCE_NAME"; + + @Mock private FlintIndexStateModelService flintIndexStateModelService; + @Mock private FlintIndexClient flintIndexClient; + @Mock private FlintIndexMetadataService flintIndexMetadataService; + @Mock private EMRServerlessClientFactory emrServerlessClientFactory; + + @InjectMocks FlintIndexOpFactory flintIndexOpFactory; + + @Test + void getDrop() { + assertNotNull(flintIndexOpFactory.getDrop(DATASOURCE_NAME)); + } + + @Test + void getAlter() { + assertNotNull(flintIndexOpFactory.getAlter(new FlintIndexOptions(), DATASOURCE_NAME)); + } + + @Test + void getVacuum() { + assertNotNull(flintIndexOpFactory.getVacuum(DATASOURCE_NAME)); + } + + @Test + void getCancel() { + assertNotNull(flintIndexOpFactory.getCancel(DATASOURCE_NAME)); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java
new file mode 100644 index 0000000000..0c82733ae6 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.Optional; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.xcontent.XContentSerializerUtil; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +@ExtendWith(MockitoExtension.class) +public class FlintIndexOpTest { + + @Mock private FlintIndexStateModelService flintIndexStateModelService; + @Mock private EMRServerlessClientFactory mockEmrServerlessClientFactory; + + @Test + public void testApplyWithTransitioningStateFailure() { + FlintIndexMetadata metadata = mock(FlintIndexMetadata.class); + when(metadata.getLatestId()).thenReturn(Optional.of("latestId")); + FlintIndexStateModel fakeModel = getFlintIndexStateModel(metadata); + when(flintIndexStateModelService.getFlintIndexStateModel(eq("latestId"), any())) + .thenReturn(Optional.of(fakeModel)); + when(flintIndexStateModelService.updateFlintIndexState(any(), any(), any())) + .thenThrow(new RuntimeException("Transitioning state failed")); + FlintIndexOp flintIndexOp = + new TestFlintIndexOp(flintIndexStateModelService, "myS3", mockEmrServerlessClientFactory); + + IllegalStateException illegalStateException = + Assertions.assertThrows(IllegalStateException.class, () -> flintIndexOp.apply(metadata)); + + Assertions.assertEquals( + "Moving to transition state:DELETING failed.", illegalStateException.getMessage()); + } + + @Test + public void testApplyWithCommitFailure() { + FlintIndexMetadata metadata = mock(FlintIndexMetadata.class); + when(metadata.getLatestId()).thenReturn(Optional.of("latestId")); + FlintIndexStateModel fakeModel = getFlintIndexStateModel(metadata); + when(flintIndexStateModelService.getFlintIndexStateModel(eq("latestId"), any())) + .thenReturn(Optional.of(fakeModel)); + when(flintIndexStateModelService.updateFlintIndexState(any(), any(), any())) + .thenReturn( + FlintIndexStateModel.copy(fakeModel, XContentSerializerUtil.buildMetadata(1, 2))) + .thenThrow(new RuntimeException("Commit state failed")) + .thenReturn( + FlintIndexStateModel.copy(fakeModel, XContentSerializerUtil.buildMetadata(1, 3))); + FlintIndexOp flintIndexOp = + new TestFlintIndexOp(flintIndexStateModelService, "myS3", mockEmrServerlessClientFactory); + + IllegalStateException illegalStateException = + Assertions.assertThrows(IllegalStateException.class, () -> flintIndexOp.apply(metadata)); + + Assertions.assertEquals( + "commit failed. 
target stable state: [DELETED]", illegalStateException.getMessage()); + } + + @Test + public void testApplyWithRollBackFailure() { + FlintIndexMetadata metadata = mock(FlintIndexMetadata.class); + when(metadata.getLatestId()).thenReturn(Optional.of("latestId")); + FlintIndexStateModel fakeModel = getFlintIndexStateModel(metadata); + when(flintIndexStateModelService.getFlintIndexStateModel(eq("latestId"), any())) + .thenReturn(Optional.of(fakeModel)); + when(flintIndexStateModelService.updateFlintIndexState(any(), any(), any())) + .thenReturn( + FlintIndexStateModel.copy(fakeModel, XContentSerializerUtil.buildMetadata(1, 2))) + .thenThrow(new RuntimeException("Commit state failed")) + .thenThrow(new RuntimeException("Rollback failure")); + FlintIndexOp flintIndexOp = + new TestFlintIndexOp(flintIndexStateModelService, "myS3", mockEmrServerlessClientFactory); + + IllegalStateException illegalStateException = + Assertions.assertThrows(IllegalStateException.class, () -> flintIndexOp.apply(metadata)); + + Assertions.assertEquals( + "commit failed. target stable state: [DELETED]", illegalStateException.getMessage()); + } + + private FlintIndexStateModel getFlintIndexStateModel(FlintIndexMetadata metadata) { + return FlintIndexStateModel.builder() + .indexState(FlintIndexState.ACTIVE) + .applicationId(metadata.getAppId()) + .jobId(metadata.getJobId()) + .latestId("latestId") + .datasourceName("myS3") + .lastUpdateTime(System.currentTimeMillis()) + .error("") + .build(); + } + + static class TestFlintIndexOp extends FlintIndexOp { + + public TestFlintIndexOp( + FlintIndexStateModelService flintIndexStateModelService, + String datasourceName, + EMRServerlessClientFactory emrServerlessClientFactory) { + super(flintIndexStateModelService, datasourceName, emrServerlessClientFactory); + } + + @Override + boolean validate(FlintIndexState state) { + return state == FlintIndexState.ACTIVE || state == FlintIndexState.EMPTY; + } + + @Override + FlintIndexState transitioningState() { + return FlintIndexState.DELETING; + } + + @Override + void runOp(FlintIndexMetadata flintIndexMetadata, FlintIndexStateModel flintIndex) {} + + @Override + FlintIndexState stableState() { + return FlintIndexState.DELETED; + } + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuumTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuumTest.java new file mode 100644 index 0000000000..60fa13dc93 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpVacuumTest.java @@ -0,0 +1,164 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint.operation; + +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import 
org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +@ExtendWith(MockitoExtension.class) +class FlintIndexOpVacuumTest { + + public static final String DATASOURCE_NAME = "DATASOURCE_NAME"; + public static final String LATEST_ID = "LATEST_ID"; + public static final String INDEX_NAME = "INDEX_NAME"; + public static final FlintIndexMetadata FLINT_INDEX_METADATA_WITH_LATEST_ID = + FlintIndexMetadata.builder().latestId(LATEST_ID).opensearchIndexName(INDEX_NAME).build(); + public static final FlintIndexMetadata FLINT_INDEX_METADATA_WITHOUT_LATEST_ID = + FlintIndexMetadata.builder().opensearchIndexName(INDEX_NAME).build(); + @Mock FlintIndexClient flintIndexClient; + @Mock FlintIndexStateModelService flintIndexStateModelService; + @Mock EMRServerlessClientFactory emrServerlessClientFactory; + @Mock FlintIndexStateModel flintIndexStateModel; + @Mock FlintIndexStateModel transitionedFlintIndexStateModel; + + RuntimeException testException = new RuntimeException("Test Exception"); + + FlintIndexOpVacuum flintIndexOpVacuum; + + @BeforeEach + public void setUp() { + flintIndexOpVacuum = + new FlintIndexOpVacuum( + flintIndexStateModelService, + DATASOURCE_NAME, + flintIndexClient, + emrServerlessClientFactory); + } + + @Test + public void testApplyWithEmptyLatestId() { + flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITHOUT_LATEST_ID); + + verify(flintIndexClient).deleteIndex(INDEX_NAME); + } + + @Test + public void testApplyWithFlintIndexStateNotFound() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.empty()); + + assertThrows( + IllegalStateException.class, + () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + } + + @Test + public void testApplyWithNotDeletedState() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.ACTIVE); + + assertThrows( + IllegalStateException.class, + () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + } + + @Test + public void testApplyWithUpdateFlintIndexStateThrow() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.DELETED); + when(flintIndexStateModelService.updateFlintIndexState( + flintIndexStateModel, FlintIndexState.VACUUMING, DATASOURCE_NAME)) + .thenThrow(testException); + + assertThrows( + IllegalStateException.class, + () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + } + + @Test + public void testApplyWithRunOpThrow() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.DELETED); + when(flintIndexStateModelService.updateFlintIndexState( + flintIndexStateModel, FlintIndexState.VACUUMING, DATASOURCE_NAME)) + .thenReturn(transitionedFlintIndexStateModel); + doThrow(testException).when(flintIndexClient).deleteIndex(INDEX_NAME); + + assertThrows( + Exception.class, () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + + verify(flintIndexStateModelService) + .updateFlintIndexState( + transitionedFlintIndexStateModel, FlintIndexState.DELETED, DATASOURCE_NAME); + } + + @Test + public void testApplyWithRunOpThrowAndRollbackThrow() { + 
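// Both the index deletion and the rollback update fail; apply() must still surface an exception. +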
when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.DELETED); + when(flintIndexStateModelService.updateFlintIndexState( + flintIndexStateModel, FlintIndexState.VACUUMING, DATASOURCE_NAME)) + .thenReturn(transitionedFlintIndexStateModel); + doThrow(testException).when(flintIndexClient).deleteIndex(INDEX_NAME); + when(flintIndexStateModelService.updateFlintIndexState( + transitionedFlintIndexStateModel, FlintIndexState.DELETED, DATASOURCE_NAME)) + .thenThrow(testException); + + assertThrows( + Exception.class, () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + } + + @Test + public void testApplyWithDeleteFlintIndexStateModelThrow() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.DELETED); + when(flintIndexStateModelService.updateFlintIndexState( + flintIndexStateModel, FlintIndexState.VACUUMING, DATASOURCE_NAME)) + .thenReturn(transitionedFlintIndexStateModel); + when(flintIndexStateModelService.deleteFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenThrow(testException); + + assertThrows( + IllegalStateException.class, + () -> flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID)); + } + + @Test + public void testApplyHappyPath() { + when(flintIndexStateModelService.getFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME)) + .thenReturn(Optional.of(flintIndexStateModel)); + when(flintIndexStateModel.getIndexState()).thenReturn(FlintIndexState.DELETED); + when(flintIndexStateModelService.updateFlintIndexState( + flintIndexStateModel, FlintIndexState.VACUUMING, DATASOURCE_NAME)) + .thenReturn(transitionedFlintIndexStateModel); + when(transitionedFlintIndexStateModel.getLatestId()).thenReturn(LATEST_ID); + + flintIndexOpVacuum.apply(FLINT_INDEX_METADATA_WITH_LATEST_ID); + + verify(flintIndexStateModelService).deleteFlintIndexStateModel(LATEST_ID, DATASOURCE_NAME); + verify(flintIndexClient).deleteIndex(INDEX_NAME); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededExceptionTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededExceptionTest.java new file mode 100644 index 0000000000..c0591eaf66 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededExceptionTest.java @@ -0,0 +1,19 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.leasemanager; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +class ConcurrencyLimitExceededExceptionTest { + @Test + public void test() { + ConcurrencyLimitExceededException e = new ConcurrencyLimitExceededException("Test"); + + assertEquals("Test", e.getMessage()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollectionTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollectionTest.java new file mode 100644 index 0000000000..c0c97caa58 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkParameterComposerCollectionTest.java @@ -0,0 +1,93 @@ +/* + * Copyright OpenSearch 
Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +@ExtendWith(MockitoExtension.class) +class SparkParameterComposerCollectionTest { + + @Mock DataSourceSparkParameterComposer composer1; + @Mock DataSourceSparkParameterComposer composer2; + @Mock DataSourceSparkParameterComposer composer3; + @Mock GeneralSparkParameterComposer generalComposer; + @Mock DispatchQueryRequest dispatchQueryRequest; + @Mock AsyncQueryRequestContext asyncQueryRequestContext; + + DataSourceType type1 = new DataSourceType("TYPE1"); + DataSourceType type2 = new DataSourceType("TYPE2"); + DataSourceType type3 = new DataSourceType("TYPE3"); + + SparkParameterComposerCollection collection; + + @BeforeEach + void setUp() { + collection = new SparkParameterComposerCollection(); + collection.register(type1, composer1); + collection.register(type1, composer2); + collection.register(type2, composer3); + collection.register(generalComposer); + } + + @Test + void isComposerRegistered() { + assertTrue(collection.isComposerRegistered(type1)); + assertTrue(collection.isComposerRegistered(type2)); + assertFalse(collection.isComposerRegistered(type3)); + } + + @Test + void composeByDataSourceWithRegisteredType() { + DataSourceMetadata metadata = + new DataSourceMetadata.Builder().setConnector(type1).setName("name").build(); + SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters(); + + collection.composeByDataSource( + metadata, sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + + verify(composer1) + .compose(metadata, sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + verify(composer2) + .compose(metadata, sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + verifyNoInteractions(composer3); + } + + @Test + void composeByDataSourceWithUnregisteredType() { + DataSourceMetadata metadata = + new DataSourceMetadata.Builder().setConnector(type3).setName("name").build(); + SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters(); + + collection.composeByDataSource( + metadata, sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + + verifyNoInteractions(composer1, composer2, composer3); + } + + @Test + void compose() { + SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters(); + + collection.compose(sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + + verify(generalComposer) + .compose(sparkSubmitParameters, dispatchQueryRequest, asyncQueryRequestContext); + verifyNoInteractions(composer1, composer2, composer3); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderTest.java new file mode 100644 
index 0000000000..8947cb61f7 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/parameter/SparkSubmitParametersBuilderTest.java @@ -0,0 +1,206 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.data.constants.SparkConstants.HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_JARS_KEY; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.config.SparkSubmitParameterModifier; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +@ExtendWith(MockitoExtension.class) +public class SparkSubmitParametersBuilderTest { + + @Mock SparkParameterComposerCollection sparkParameterComposerCollection; + @Mock SparkSubmitParameterModifier sparkSubmitParameterModifier; + @Mock AsyncQueryRequestContext asyncQueryRequestContext; + @Mock DispatchQueryRequest dispatchQueryRequest; + + @InjectMocks SparkSubmitParametersBuilder sparkSubmitParametersBuilder; + + @Test + public void testBuildWithoutExtraParameters() { + String params = sparkSubmitParametersBuilder.toString(); + + assertNotNull(params); + } + + @Test + public void testBuildWithExtraParameters() { + String params = sparkSubmitParametersBuilder.extraParameters("--conf A=1").toString(); + + // Assert the conf is included with a space + assertTrue(params.endsWith(" --conf A=1")); + } + + @Test + public void testBuildQueryString() { + String rawQuery = "SHOW tables LIKE \"%\";"; + String expectedQueryInParams = "\"SHOW tables LIKE \\\"%\\\";\""; + String params = sparkSubmitParametersBuilder.query(rawQuery).toString(); + assertTrue(params.contains(expectedQueryInParams)); + } + + @Test + public void testBuildQueryStringNestedQuote() { + String rawQuery = "SELECT '\"1\"'"; + String expectedQueryInParams = "\"SELECT '\\\"1\\\"'\""; + String params = sparkSubmitParametersBuilder.query(rawQuery).toString(); + assertTrue(params.contains(expectedQueryInParams)); + } + + @Test + public void testBuildQueryStringSpecialCharacter() { + String rawQuery = "SELECT '{\"test ,:+\\\"inner\\\"/\\|?#><\"}'"; + String expectedQueryInParams = "SELECT '{\\\"test ,:+\\\\\\\"inner\\\\\\\"/\\\\|?#><\\\"}'"; + String params = sparkSubmitParametersBuilder.query(rawQuery).toString(); + assertTrue(params.contains(expectedQueryInParams)); + } + + @Test + public void testClassName() { + String params = sparkSubmitParametersBuilder.className("CLASS_NAME").toString(); + assertTrue(params.contains("--class CLASS_NAME")); + } + + @Test + public void testClusterName() { + String params = sparkSubmitParametersBuilder.clusterName("CLUSTER_NAME").toString(); + assertTrue(params.contains("spark.emr-serverless.driverEnv.FLINT_CLUSTER_NAME=CLUSTER_NAME")); 
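// The cluster name must reach both the driver and the executor environment.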
+ assertTrue(params.contains("spark.executorEnv.FLINT_CLUSTER_NAME=CLUSTER_NAME")); + } + + @Test + public void testOverrideConfigItem() { + SparkSubmitParameters params = sparkSubmitParametersBuilder.getSparkSubmitParameters(); + params.setConfigItem(SPARK_JARS_KEY, "Overridden"); + String result = params.toString(); + + assertTrue(result.contains(String.format("%s=Overridden", SPARK_JARS_KEY))); + } + + @Test + public void testDeleteConfigItem() { + SparkSubmitParameters params = sparkSubmitParametersBuilder.getSparkSubmitParameters(); + params.deleteConfigItem(HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY); + String result = params.toString(); + + assertFalse(result.contains(HADOOP_CATALOG_CREDENTIALS_PROVIDER_FACTORY_KEY)); + } + + @Test + public void testAddConfigItem() { + SparkSubmitParameters params = sparkSubmitParametersBuilder.getSparkSubmitParameters(); + params.setConfigItem("AdditionalKey", "Value"); + String result = params.toString(); + + assertTrue(result.contains("AdditionalKey=Value")); + } + + @Test + public void testStructuredStreaming() { + SparkSubmitParameters params = + sparkSubmitParametersBuilder.structuredStreaming(true).getSparkSubmitParameters(); + String result = params.toString(); + + assertTrue(result.contains("spark.flint.job.type=streaming")); + } + + @Test + public void testNonStructuredStreaming() { + SparkSubmitParameters params = + sparkSubmitParametersBuilder.structuredStreaming(false).getSparkSubmitParameters(); + String result = params.toString(); + + assertFalse(result.contains("spark.flint.job.type=streaming")); + } + + @Test + public void testSessionExecution() { + SparkSubmitParameters params = + sparkSubmitParametersBuilder + .sessionExecution("SESSION_ID", "DATASOURCE_NAME") + .getSparkSubmitParameters(); + String result = params.toString(); + + assertTrue( + result.contains("spark.flint.job.requestIndex=.query_execution_request_datasource_name")); + assertTrue(result.contains("spark.flint.job.sessionId=SESSION_ID")); + } + + @Test + public void testAcceptModifier() { + sparkSubmitParametersBuilder.acceptModifier(sparkSubmitParameterModifier); + + verify(sparkSubmitParameterModifier).modifyParameters(sparkSubmitParametersBuilder); + } + + @Test + public void testAcceptNullModifier() { + sparkSubmitParametersBuilder.acceptModifier(null); + } + + @Test + public void testDataSource() { + when(sparkParameterComposerCollection.isComposerRegistered(DataSourceType.S3GLUE)) + .thenReturn(true); + + DataSourceMetadata metadata = + new DataSourceMetadata.Builder() + .setConnector(DataSourceType.S3GLUE) + .setName("name") + .build(); + SparkSubmitParameters params = + sparkSubmitParametersBuilder + .dataSource(metadata, dispatchQueryRequest, asyncQueryRequestContext) + .getSparkSubmitParameters(); + + verify(sparkParameterComposerCollection) + .composeByDataSource(metadata, params, dispatchQueryRequest, asyncQueryRequestContext); + } + + @Test + public void testUnsupportedDataSource() { + when(sparkParameterComposerCollection.isComposerRegistered(DataSourceType.S3GLUE)) + .thenReturn(false); + + DataSourceMetadata metadata = + new DataSourceMetadata.Builder() + .setConnector(DataSourceType.S3GLUE) + .setName("name") + .build(); + assertThrows( + UnsupportedOperationException.class, + () -> + sparkSubmitParametersBuilder.dataSource( + metadata, dispatchQueryRequest, asyncQueryRequestContext)); + } + + @Test + public void testAcceptComposers() { + SparkSubmitParameters params = + sparkSubmitParametersBuilder + .acceptComposers(dispatchQueryRequest, 
asyncQueryRequestContext) + .getSparkSubmitParameters(); + + verify(sparkParameterComposerCollection) + .compose(params, dispatchQueryRequest, asyncQueryRequestContext); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/utils/IDUtilsTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/IDUtilsTest.java new file mode 100644 index 0000000000..1893256c39 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/IDUtilsTest.java @@ -0,0 +1,33 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.junit.jupiter.api.Test; + +class IDUtilsTest { + public static final String DATASOURCE_NAME = "DATASOURCE_NAME"; + + @Test + public void encodeAndDecode() { + String id = IDUtils.encode(DATASOURCE_NAME); + String decoded = IDUtils.decode(id); + + assertTrue(id.length() > IDUtils.PREFIX_LEN); + assertEquals(DATASOURCE_NAME, decoded); + } + + @Test + public void generateUniqueIds() { + String id1 = IDUtils.encode(DATASOURCE_NAME); + String id2 = IDUtils.encode(DATASOURCE_NAME); + + assertNotEquals(id1, id2); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/utils/MockTimeProvider.java b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/MockTimeProvider.java new file mode 100644 index 0000000000..2f4c960ec0 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/MockTimeProvider.java @@ -0,0 +1,19 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +public class MockTimeProvider implements TimeProvider { + private final long fixedTime; + + public MockTimeProvider(long fixedTime) { + this.fixedTime = fixedTime; + } + + @Override + public long currentEpochMillis() { + return fixedTime; + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/utils/RealTimeProviderTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/RealTimeProviderTest.java new file mode 100644 index 0000000000..7eb5a56cfe --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/RealTimeProviderTest.java @@ -0,0 +1,19 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.junit.jupiter.api.Test; + +class RealTimeProviderTest { + @Test + public void testCurrentEpochMillis() { + RealTimeProvider realTimeProvider = new RealTimeProvider(); + + assertTrue(realTimeProvider.currentEpochMillis() > 0); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java new file mode 100644 index 0000000000..0d7c43fc0d --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java @@ -0,0 +1,436 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.opensearch.sql.spark.utils.SQLQueryUtilsTest.IndexQuery.index; +import static org.opensearch.sql.spark.utils.SQLQueryUtilsTest.IndexQuery.mv; +import static org.opensearch.sql.spark.utils.SQLQueryUtilsTest.IndexQuery.skippingIndex; + +import java.util.List; +import lombok.Getter; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; +import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.flint.FlintIndexType; + +@ExtendWith(MockitoExtension.class) +public class SQLQueryUtilsTest { + + @Test + void testExtractionOfTableNameFromSQLQueries() { + String sqlQuery = "select * from my_glue.default.http_logs"; + FullyQualifiedTableName fullyQualifiedTableName = + SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName("my_glue", "default", "http_logs", fullyQualifiedTableName); + + sqlQuery = "select * from my_glue.db.http_logs"; + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFullyQualifiedTableName("my_glue", "db", "http_logs", fullyQualifiedTableName); + + sqlQuery = "select * from my_glue.http_logs"; + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName(null, "my_glue", "http_logs", fullyQualifiedTableName); + + sqlQuery = "select * from http_logs"; + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName(null, null, "http_logs", fullyQualifiedTableName); + + sqlQuery = "DROP TABLE myS3.default.alb_logs"; + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName("myS3", "default", "alb_logs", fullyQualifiedTableName); + + sqlQuery = "DESCRIBE TABLE myS3.default.alb_logs"; + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName("myS3", "default", "alb_logs", fullyQualifiedTableName); + + sqlQuery = + "CREATE EXTERNAL TABLE\n" + + "myS3.default.alb_logs\n" + + "[ PARTITIONED BY (col_name [, … ] ) ]\n" + + "[ ROW FORMAT DELIMITED row_format ]\n" + + "STORED AS file_format\n" + + "LOCATION { 's3://bucket/folder/' }"; + fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery).get(0); + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName("myS3", "default", "alb_logs", fullyQualifiedTableName); + } + + @Test + void testMultipleTables() { + String[] sqlQueries = { + "SELECT * FROM my_glue.default.http_logs, my_glue.default.access_logs", + "SELECT * FROM my_glue.default.http_logs LEFT JOIN my_glue.default.access_logs", + "SELECT table1.id, table2.id FROM 
my_glue.default.http_logs table1 LEFT OUTER JOIN" + + " (SELECT * FROM my_glue.default.access_logs) table2 ON table1.tag = table2.tag", + "SELECT table1.id, table2.id FROM my_glue.default.http_logs FOR VERSION AS OF 1 table1" + + " LEFT OUTER JOIN" + + " (SELECT * FROM my_glue.default.access_logs) table2" + + " ON table1.tag = table2.tag" + }; + + for (String sqlQuery : sqlQueries) { + List fullyQualifiedTableNames = + SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery); + + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertEquals(2, fullyQualifiedTableNames.size()); + assertFullyQualifiedTableName( + "my_glue", "default", "http_logs", fullyQualifiedTableNames.get(0)); + assertFullyQualifiedTableName( + "my_glue", "default", "access_logs", fullyQualifiedTableNames.get(1)); + } + } + + @Test + void testMultipleTablesWithJoin() { + String sqlQuery = + "select * from my_glue.default.http_logs LEFT JOIN my_glue.default.access_logs"; + + List fullyQualifiedTableNames = + SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery); + + assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery)); + assertFullyQualifiedTableName( + "my_glue", "default", "http_logs", fullyQualifiedTableNames.get(0)); + assertFullyQualifiedTableName( + "my_glue", "default", "access_logs", fullyQualifiedTableNames.get(1)); + } + + @Test + void testNoFullyQualifiedTableName() { + String sqlQuery = "SHOW tables"; + + List fullyQualifiedTableNames = + SQLQueryUtils.extractFullyQualifiedTableNames(sqlQuery); + + assertEquals(0, fullyQualifiedTableNames.size()); + } + + @Test + void testExtractionFromFlintSkippingIndexQueries() { + String[] createSkippingIndexQueries = { + "CREATE SKIPPING INDEX ON myS3.default.alb_logs (l_orderkey VALUE_SET)", + "CREATE SKIPPING INDEX IF NOT EXISTS" + + " ON myS3.default.alb_logs (l_orderkey VALUE_SET) " + + " WITH (auto_refresh = true)", + "CREATE SKIPPING INDEX ON myS3.default.alb_logs(l_orderkey VALUE_SET)" + + " WITH (auto_refresh = true)", + "CREATE SKIPPING INDEX ON myS3.default.alb_logs(l_orderkey VALUE_SET) " + + " WHERE elb_status_code = 500 " + + " WITH (auto_refresh = true)", + "DROP SKIPPING INDEX ON myS3.default.alb_logs", + "VACUUM SKIPPING INDEX ON myS3.default.alb_logs", + "ALTER SKIPPING INDEX ON myS3.default.alb_logs WITH (auto_refresh = false)", + }; + + for (String query : createSkippingIndexQueries) { + assertTrue(SQLQueryUtils.isFlintExtensionQuery(query), "Failed query: " + query); + + IndexQueryDetails indexQueryDetails = SQLQueryUtils.extractIndexDetails(query); + FullyQualifiedTableName fullyQualifiedTableName = + indexQueryDetails.getFullyQualifiedTableName(); + + assertNull(indexQueryDetails.getIndexName()); + assertFullyQualifiedTableName("myS3", "default", "alb_logs", fullyQualifiedTableName); + } + } + + @Test + void testExtractionFromFlintCoveringIndexQueries() { + String[] coveringIndexQueries = { + "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity)", + "CREATE INDEX IF NOT EXISTS elb_and_requestUri " + + " ON myS3.default.alb_logs(l_orderkey, l_quantity) " + + " WITH (auto_refresh = true)", + "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity)" + + " WITH (auto_refresh = true)", + "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity) " + + " WHERE elb_status_code = 500 " + + " WITH (auto_refresh = true)", + "DROP INDEX elb_and_requestUri ON myS3.default.alb_logs", + "VACUUM INDEX elb_and_requestUri ON myS3.default.alb_logs", + "ALTER INDEX 
elb_and_requestUri ON myS3.default.alb_logs WITH (auto_refresh = false)" + }; + + for (String query : coveringIndexQueries) { + assertTrue(SQLQueryUtils.isFlintExtensionQuery(query), "Failed query: " + query); + + IndexQueryDetails indexQueryDetails = SQLQueryUtils.extractIndexDetails(query); + FullyQualifiedTableName fullyQualifiedTableName = + indexQueryDetails.getFullyQualifiedTableName(); + + assertEquals("elb_and_requestUri", indexQueryDetails.getIndexName()); + assertFullyQualifiedTableName("myS3", "default", "alb_logs", fullyQualifiedTableName); + } + } + + @Test + void testExtractionFromFlintMVQuery() { + String[] mvQueries = { + "CREATE MATERIALIZED VIEW mv_1 AS query=select * from my_glue.default.logs WITH" + + " (auto_refresh = true)", + "DROP MATERIALIZED VIEW mv_1", + "VACUUM MATERIALIZED VIEW mv_1", + "ALTER MATERIALIZED VIEW mv_1 WITH (auto_refresh = false)", + }; + + for (String query : mvQueries) { + assertTrue(SQLQueryUtils.isFlintExtensionQuery(query)); + + IndexQueryDetails indexQueryDetails = SQLQueryUtils.extractIndexDetails(query); + FullyQualifiedTableName fullyQualifiedTableName = + indexQueryDetails.getFullyQualifiedTableName(); + + assertNull(indexQueryDetails.getIndexName()); + assertNull(fullyQualifiedTableName); + assertEquals("mv_1", indexQueryDetails.getMvName()); + } + } + + @Test + void testDescSkippingIndex() { + String descSkippingIndex = "DESC SKIPPING INDEX ON mys3.default.http_logs"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(descSkippingIndex)); + IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(descSkippingIndex); + FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertNotNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.SKIPPING, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType()); + + String descCoveringIndex = "DESC INDEX cv1 ON mys3.default.http_logs"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(descCoveringIndex)); + indexDetails = SQLQueryUtils.extractIndexDetails(descCoveringIndex); + fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertEquals("cv1", indexDetails.getIndexName()); + assertNotNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.COVERING, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType()); + + String descMv = "DESC MATERIALIZED VIEW mv1"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(descMv)); + indexDetails = SQLQueryUtils.extractIndexDetails(descMv); + fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertEquals("mv1", indexDetails.getMvName()); + assertNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType()); + } + + @Test + void testShowIndex() { + String showCoveringIndex = " SHOW INDEX ON myS3.default.http_logs"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(showCoveringIndex)); + IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(showCoveringIndex); + FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertNull(indexDetails.getMvName()); + assertNotNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.COVERING, 
indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.SHOW, indexDetails.getIndexQueryActionType()); + + String showMV = "SHOW MATERIALIZED VIEW IN my_glue.default"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(showMV)); + indexDetails = SQLQueryUtils.extractIndexDetails(showMV); + fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertNull(indexDetails.getMvName()); + assertNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.SHOW, indexDetails.getIndexQueryActionType()); + } + + @Test + void testRefreshIndex() { + String refreshSkippingIndex = "REFRESH SKIPPING INDEX ON mys3.default.http_logs"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshSkippingIndex)); + IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(refreshSkippingIndex); + FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertNotNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.SKIPPING, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType()); + + String refreshCoveringIndex = "REFRESH INDEX cv1 ON mys3.default.http_logs"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshCoveringIndex)); + indexDetails = SQLQueryUtils.extractIndexDetails(refreshCoveringIndex); + fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertEquals("cv1", indexDetails.getIndexName()); + assertNotNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.COVERING, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType()); + + String refreshMV = "REFRESH MATERIALIZED VIEW mv1"; + assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshMV)); + indexDetails = SQLQueryUtils.extractIndexDetails(refreshMV); + fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName(); + assertNull(indexDetails.getIndexName()); + assertEquals("mv1", indexDetails.getMvName()); + assertNull(fullyQualifiedTableName); + assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType()); + assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType()); + } + + /** https://github.com/opensearch-project/sql/issues/2206 */ + @Test + void testAutoRefresh() { + assertFalse( + SQLQueryUtils.extractIndexDetails(skippingIndex().getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("auto_refresh", "false").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("auto_refresh", "true").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("auto_refresh", "true").withSemicolon().getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("\"auto_refresh\"", "true").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("\"auto_refresh\"", "true").withSemicolon().getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + 
skippingIndex().withProperty("\"auto_refresh\"", "\"true\"").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + skippingIndex() + .withProperty("\"auto_refresh\"", "\"true\"") + .withSemicolon() + .getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("auto_refresh", "1").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails(skippingIndex().withProperty("interval", "1").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails( + skippingIndex().withProperty("\"\"", "\"true\"").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails(index().getQuery()).getFlintIndexOptions().autoRefresh()); + + assertFalse( + SQLQueryUtils.extractIndexDetails(index().withProperty("auto_refresh", "false").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails(index().withProperty("auto_refresh", "true").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + index().withProperty("auto_refresh", "true").withSemicolon().getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails(mv().withProperty("auto_refresh", "true").getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + + assertTrue( + SQLQueryUtils.extractIndexDetails( + mv().withProperty("auto_refresh", "true").withSemicolon().getQuery()) + .getFlintIndexOptions() + .autoRefresh()); + } + + @Getter + protected static class IndexQuery { + private String query; + + private IndexQuery(String query) { + this.query = query; + } + + public static IndexQuery skippingIndex() { + return new IndexQuery( + "CREATE SKIPPING INDEX ON myS3.default.alb_logs" + "(l_orderkey VALUE_SET)"); + } + + public static IndexQuery index() { + return new IndexQuery( + "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, " + "l_quantity)"); + } + + public static IndexQuery mv() { + return new IndexQuery( + "CREATE MATERIALIZED VIEW mv_1 AS query=select * from my_glue.default.logs"); + } + + public IndexQuery withProperty(String key, String value) { + query = String.format("%s with (%s = %s)", query, key, value); + return this; + } + + public IndexQuery withSemicolon() { + query += ";"; + return this; + } + } + + private void assertFullyQualifiedTableName( + String expectedDatasourceName, + String expectedSchemaName, + String expectedTableName, + FullyQualifiedTableName fullyQualifiedTableName) { + assertEquals(expectedDatasourceName, fullyQualifiedTableName.getDatasourceName()); + assertEquals(expectedSchemaName, fullyQualifiedTableName.getSchemaName()); + assertEquals(expectedTableName, fullyQualifiedTableName.getTableName()); + } +} diff --git a/async-query-core/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java new file mode 100644 index 0000000000..4336b13aa9 --- /dev/null +++ b/async-query-core/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import java.io.IOException; +import java.util.Objects; + +public class TestUtils { + public static String 
getJson(String filename) throws IOException { + ClassLoader classLoader = TestUtils.class.getClassLoader(); + return new String( + Objects.requireNonNull(classLoader.getResourceAsStream(filename)).readAllBytes()); + } +} diff --git a/async-query-core/src/test/resources/invalid_response.json b/async-query-core/src/test/resources/invalid_response.json new file mode 100644 index 0000000000..53222e0560 --- /dev/null +++ b/async-query-core/src/test/resources/invalid_response.json @@ -0,0 +1,12 @@ +{ + "content": { + "result": [ + "{'1':1}" + ], + "schema": [ + "{'column_name':'1','data_type':'integer'}" + ], + "stepId": "s-123456789", + "applicationId": "application-abc" + } +} diff --git a/async-query-core/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/async-query-core/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker new file mode 100644 index 0000000000..ca6ee9cea8 --- /dev/null +++ b/async-query-core/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -0,0 +1 @@ +mock-maker-inline \ No newline at end of file diff --git a/async-query-core/src/test/resources/select_query_response.json b/async-query-core/src/test/resources/select_query_response.json new file mode 100644 index 0000000000..24cb06b49e --- /dev/null +++ b/async-query-core/src/test/resources/select_query_response.json @@ -0,0 +1,12 @@ +{ + "data": { + "result": [ + "{'1':1}" + ], + "schema": [ + "{'column_name':'1','data_type':'integer'}" + ], + "stepId": "s-123456789", + "applicationId": "application-abc" + } +} diff --git a/async-query/.gitignore b/async-query/.gitignore new file mode 100644 index 0000000000..689cc5c548 --- /dev/null +++ b/async-query/.gitignore @@ -0,0 +1,42 @@ +.gradle +build/ +!gradle/wrapper/gradle-wrapper.jar +!src/main/**/build/ +!src/test/**/build/ + +### IntelliJ IDEA ### +.idea/modules.xml +.idea/jarRepositories.xml +.idea/compiler.xml +.idea/libraries/ +*.iws +*.iml +*.ipr +out/ +!src/main/**/out/ +!src/test/**/out/ + +### Eclipse ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache +bin/ +!src/main/**/bin/ +!src/test/**/bin/ + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ + +### VS Code ### +.vscode/ + +### Mac OS ### +.DS_Store \ No newline at end of file diff --git a/async-query/build.gradle b/async-query/build.gradle new file mode 100644 index 0000000000..abda6161d3 --- /dev/null +++ b/async-query/build.gradle @@ -0,0 +1,122 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +plugins { + id 'java-library' + id "io.freefair.lombok" + id 'jacoco' + id 'antlr' +} + +repositories { + mavenCentral() +} + + +dependencies { + implementation "org.opensearch:opensearch-job-scheduler-spi:${opensearch_build}" + + api project(':core') + api project(':async-query-core') + implementation project(':protocol') + implementation project(':datasources') + implementation project(':legacy') + + implementation group: 'org.opensearch', name: 'opensearch', version: "${opensearch_version}" + implementation group: 'org.json', name: 'json', version: '20231013' + api group: 'com.amazonaws', name: 'aws-java-sdk-emr', version: "${aws_java_sdk_version}" + api group: 'com.amazonaws', name: 'aws-java-sdk-emrserverless', version: "${aws_java_sdk_version}" + implementation group: 'commons-io', name: 'commons-io', version: '2.8.0' + + testImplementation(platform("org.junit:junit-bom:5.9.3")) + + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.9.3' + 
testImplementation group: 'org.mockito', name: 'mockito-core', version: '5.7.0' + testImplementation group: 'org.mockito', name: 'mockito-junit-jupiter', version: '5.7.0' + + testCompileOnly('junit:junit:4.13.1') { + exclude group: 'org.hamcrest', module: 'hamcrest-core' + } + testRuntimeOnly("org.junit.vintage:junit-vintage-engine") { + exclude group: 'org.hamcrest', module: 'hamcrest-core' + } + testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") { + exclude group: 'org.hamcrest', module: 'hamcrest-core' + } + testRuntimeOnly("org.junit.platform:junit-platform-launcher") { + because 'allows tests to run from IDEs that bundle older version of launcher' + } + testImplementation("org.opensearch.test:framework:${opensearch_version}") + testImplementation project(':opensearch') +} + +test { + useJUnitPlatform { + includeEngines("junit-jupiter") + } + testLogging { + events "failed" + exceptionFormat "full" + } +} +task junit4(type: Test) { + useJUnitPlatform { + includeEngines("junit-vintage") + } + systemProperty 'tests.security.manager', 'false' + testLogging { + events "failed" + exceptionFormat "full" + } +} + +jacocoTestReport { + dependsOn test, junit4 + executionData test, junit4 + reports { + html.required = true + xml.required = true + } + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: ['**/antlr/parser/**']) + })) + } +} + +jacocoTestCoverageVerification { + dependsOn test, junit4 + executionData test, junit4 + violationRules { + rule { + element = 'CLASS' + excludes = [ + 'org.opensearch.sql.spark.cluster.ClusterManagerEventListener*', + 'org.opensearch.sql.spark.cluster.FlintIndexRetention', + 'org.opensearch.sql.spark.cluster.IndexCleanup', + // ignore because XContext IOException + 'org.opensearch.sql.spark.execution.statestore.StateStore', + 'org.opensearch.sql.spark.rest.*', + 'org.opensearch.sql.spark.scheduler.OpenSearchRefreshIndexJobRequestParser', + 'org.opensearch.sql.spark.transport.model.*' + ] + limit { + counter = 'LINE' + minimum = 1.0 + } + limit { + counter = 'BRANCH' + minimum = 1.0 + } + } + } + afterEvaluate { + classDirectories.setFrom(files(classDirectories.files.collect { + fileTree(dir: it, exclude: ['**/antlr/parser/**']) + })) + } +} +check.dependsOn jacocoTestCoverageVerification +jacocoTestCoverageVerification.dependsOn jacocoTestReport diff --git a/async-query/src/main/java/org/opensearch/sql/asyncquery/DummyConsumer.java b/async-query/src/main/java/org/opensearch/sql/asyncquery/DummyConsumer.java new file mode 100644 index 0000000000..9b1641e559 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/asyncquery/DummyConsumer.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.asyncquery; + +import lombok.AllArgsConstructor; + +// This is a dummy class for scaffolding and should be deleted later +@AllArgsConstructor +public class DummyConsumer { + Dummy dummy; + + public String hello() { + return dummy.hello(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageService.java b/async-query/src/main/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageService.java new file mode 100644 index 0000000000..4847c8e00f --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageService.java @@ -0,0 +1,58 @@ +/* + * Copyright OpenSearch 
Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import java.util.Optional; +import lombok.RequiredArgsConstructor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.xcontent.AsyncQueryJobMetadataXContentSerializer; +import org.opensearch.sql.spark.utils.IDUtils; + +/** OpenSearch implementation of {@link AsyncQueryJobMetadataStorageService} */ +@RequiredArgsConstructor +public class OpenSearchAsyncQueryJobMetadataStorageService + implements AsyncQueryJobMetadataStorageService { + + private final StateStore stateStore; + private final AsyncQueryJobMetadataXContentSerializer asyncQueryJobMetadataXContentSerializer; + + private static final Logger LOGGER = + LogManager.getLogger(OpenSearchAsyncQueryJobMetadataStorageService.class); + + @Override + public void storeJobMetadata( + AsyncQueryJobMetadata asyncQueryJobMetadata, + AsyncQueryRequestContext asyncQueryRequestContext) { + stateStore.create( + mapIdToDocumentId(asyncQueryJobMetadata.getId()), + asyncQueryJobMetadata, + AsyncQueryJobMetadata::copy, + OpenSearchStateStoreUtil.getIndexName(asyncQueryJobMetadata.getDatasourceName())); + } + + private String mapIdToDocumentId(String id) { + return "qid" + id; + } + + @Override + public Optional getJobMetadata(String queryId) { + try { + return stateStore.get( + mapIdToDocumentId(queryId), + asyncQueryJobMetadataXContentSerializer::fromXContent, + OpenSearchStateStoreUtil.getIndexName(IDUtils.decode(queryId))); + } catch (Exception e) { + LOGGER.error("Error while fetching the job metadata.", e); + throw new AsyncQueryNotFoundException(String.format("Invalid QueryId: %s", queryId)); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java b/async-query/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java new file mode 100644 index 0000000000..6c660f073c --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java @@ -0,0 +1,196 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.cluster; + +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; + +import com.google.common.annotations.VisibleForTesting; +import java.time.Clock; +import java.time.Duration; +import java.util.Arrays; +import java.util.List; +import org.opensearch.client.Client; +import org.opensearch.cluster.LocalNodeClusterManagerListener; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.LifecycleListener; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import 
org.opensearch.threadpool.Scheduler.Cancellable;
+import org.opensearch.threadpool.ThreadPool;
+
+public class ClusterManagerEventListener implements LocalNodeClusterManagerListener {
+
+  private Cancellable flintIndexRetentionCron;
+  private Cancellable flintStreamingJobHouseKeeperCron;
+  private ClusterService clusterService;
+  private ThreadPool threadPool;
+  private Client client;
+  private Clock clock;
+  private DataSourceService dataSourceService;
+  private FlintIndexMetadataService flintIndexMetadataService;
+  private FlintIndexOpFactory flintIndexOpFactory;
+  private Duration sessionTtlDuration;
+  private Duration resultTtlDuration;
+  private TimeValue streamingJobHouseKeepingInterval;
+  private boolean isAutoIndexManagementEnabled;
+
+  public ClusterManagerEventListener(
+      ClusterService clusterService,
+      ThreadPool threadPool,
+      Client client,
+      Clock clock,
+      Setting<TimeValue> sessionTtl,
+      Setting<TimeValue> resultTtl,
+      Setting<TimeValue> streamingJobHouseKeepingInterval,
+      Setting<Boolean> isAutoIndexManagementEnabledSetting,
+      Settings settings,
+      DataSourceService dataSourceService,
+      FlintIndexMetadataService flintIndexMetadataService,
+      FlintIndexOpFactory flintIndexOpFactory) {
+    this.clusterService = clusterService;
+    this.threadPool = threadPool;
+    this.client = client;
+    this.clusterService.addLocalNodeClusterManagerListener(this);
+    this.clock = clock;
+    this.dataSourceService = dataSourceService;
+    this.flintIndexMetadataService = flintIndexMetadataService;
+    this.flintIndexOpFactory = flintIndexOpFactory;
+    this.sessionTtlDuration = toDuration(sessionTtl.get(settings));
+    this.resultTtlDuration = toDuration(resultTtl.get(settings));
+    this.streamingJobHouseKeepingInterval = streamingJobHouseKeepingInterval.get(settings);
+
+    clusterService
+        .getClusterSettings()
+        .addSettingsUpdateConsumer(
+            sessionTtl,
+            it -> {
+              this.sessionTtlDuration = toDuration(it);
+              cancel(flintIndexRetentionCron);
+              reInitializeFlintIndexRetention();
+            });
+
+    clusterService
+        .getClusterSettings()
+        .addSettingsUpdateConsumer(
+            resultTtl,
+            it -> {
+              this.resultTtlDuration = toDuration(it);
+              cancel(flintIndexRetentionCron);
+              reInitializeFlintIndexRetention();
+            });
+
+    isAutoIndexManagementEnabled = isAutoIndexManagementEnabledSetting.get(settings);
+    clusterService
+        .getClusterSettings()
+        .addSettingsUpdateConsumer(
+            isAutoIndexManagementEnabledSetting,
+            it -> {
+              if (isAutoIndexManagementEnabled != it) {
+                this.isAutoIndexManagementEnabled = it;
+                if (it) {
+                  onClusterManager();
+                } else {
+                  offClusterManager();
+                }
+              }
+            });
+
+    clusterService
+        .getClusterSettings()
+        .addSettingsUpdateConsumer(
+            streamingJobHouseKeepingInterval,
+            it -> {
+              this.streamingJobHouseKeepingInterval = it;
+              cancel(flintStreamingJobHouseKeeperCron);
+              initializeStreamingJobHouseKeeperCron();
+            });
+  }
+
+  @Override
+  public void onClusterManager() {
+
+    if (isAutoIndexManagementEnabled && flintIndexRetentionCron == null) {
+      reInitializeFlintIndexRetention();
+
+      clusterService.addLifecycleListener(
+          new LifecycleListener() {
+            @Override
+            public void beforeStop() {
+              cancel(flintIndexRetentionCron);
+              flintIndexRetentionCron = null;
+            }
+          });
+    }
+
+    if (flintStreamingJobHouseKeeperCron == null) {
+      initializeStreamingJobHouseKeeperCron();
+      clusterService.addLifecycleListener(
+          new LifecycleListener() {
+            @Override
+            public void beforeStop() {
+              cancel(flintStreamingJobHouseKeeperCron);
+              flintStreamingJobHouseKeeperCron = null;
+            }
+          });
+    }
+  }
+
+  private void initializeStreamingJobHouseKeeperCron() {
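+    // The housekeeper runs on a fixed delay; the settings-update consumer registered in the
+    // constructor cancels and reschedules it whenever the interval setting changes.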
flintStreamingJobHouseKeeperCron = + threadPool.scheduleWithFixedDelay( + new FlintStreamingJobHouseKeeperTask( + dataSourceService, flintIndexMetadataService, flintIndexOpFactory), + streamingJobHouseKeepingInterval, + executorName()); + } + + private void reInitializeFlintIndexRetention() { + IndexCleanup indexCleanup = new IndexCleanup(client, clusterService); + flintIndexRetentionCron = + threadPool.scheduleWithFixedDelay( + new FlintIndexRetention( + sessionTtlDuration, + resultTtlDuration, + clock, + indexCleanup, + SPARK_REQUEST_BUFFER_INDEX_NAME + "*", + DataSourceMetadata.DEFAULT_RESULT_INDEX + "*"), + TimeValue.timeValueHours(24), + executorName()); + } + + @Override + public void offClusterManager() { + cancel(flintIndexRetentionCron); + flintIndexRetentionCron = null; + cancel(flintStreamingJobHouseKeeperCron); + flintStreamingJobHouseKeeperCron = null; + } + + private void cancel(Cancellable cron) { + if (cron != null) { + cron.cancel(); + } + } + + @VisibleForTesting + public List getFlintIndexRetentionCron() { + return Arrays.asList(flintIndexRetentionCron); + } + + private String executorName() { + return ThreadPool.Names.GENERIC; + } + + public static Duration toDuration(TimeValue timeValue) { + return Duration.ofMillis(timeValue.millis()); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java b/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java new file mode 100644 index 0000000000..628b578ae9 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java @@ -0,0 +1,148 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.cluster; + +import static org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer.SUBMIT_TIME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.LAST_UPDATE_TIME; + +import java.time.Clock; +import java.time.Duration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.time.FormatNames; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.query.QueryBuilders; + +public class FlintIndexRetention implements Runnable { + private static final Logger LOG = LogManager.getLogger(FlintIndexRetention.class); + + static final String SESSION_INDEX_NOT_EXIST_MSG = "Checkpoint index does not exist."; + + static final String RESULT_INDEX_NOT_EXIST_MSG = "Result index does not exist."; + + // timestamp field in result index + static final String UPDATE_TIME_FIELD = "updateTime"; + + private final Duration defaultSessionTtl; + private final Duration defaultResultTtl; + private final Clock clock; + private final IndexCleanup indexCleanup; + private final String sessionIndexNameRegex; + private final String resultIndexNameRegex; + + public FlintIndexRetention( + Duration defaultSessionTtl, + Duration defaultResultTtl, + Clock clock, + IndexCleanup indexCleanup, + String sessionIndexNameRegex, + String resultIndexNameRegex) { + this.defaultSessionTtl = defaultSessionTtl; + this.defaultResultTtl = defaultResultTtl; + this.clock = clock; + this.indexCleanup = indexCleanup; + this.sessionIndexNameRegex = sessionIndexNameRegex; + this.resultIndexNameRegex = resultIndexNameRegex; + } + + @Override + public void run() { + purgeSessionIndex(); + 
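+    // The session, statement, and result purges run as a chain: each response/error handler
+    // below kicks off the next purge, so one failing stage does not block the rest.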
} + + private void purgeSessionIndex() { + purgeIndex( + sessionIndexNameRegex, + defaultSessionTtl, + LAST_UPDATE_TIME, + this::handleSessionPurgeResponse, + this::handleSessionPurgeError); + } + + private void handleSessionPurgeResponse(Long response) { + purgeStatementIndex(); + } + + private void handleSessionPurgeError(Exception exception) { + handlePurgeError(SESSION_INDEX_NOT_EXIST_MSG, "session index", exception); + purgeStatementIndex(); + } + + private void purgeStatementIndex() { + purgeIndex( + sessionIndexNameRegex, + defaultSessionTtl, + SUBMIT_TIME, + this::handleStatementPurgeResponse, + this::handleStatementPurgeError); + } + + private void handleStatementPurgeResponse(Long response) { + purgeResultIndex(); + } + + private void handleStatementPurgeError(Exception exception) { + handlePurgeError(SESSION_INDEX_NOT_EXIST_MSG, "session index", exception); + purgeResultIndex(); + } + + private void purgeResultIndex() { + purgeIndex( + resultIndexNameRegex, + defaultResultTtl, + UPDATE_TIME_FIELD, + this::handleResultPurgeResponse, + this::handleResultPurgeError); + } + + private void handleResultPurgeResponse(Long response) { + LOG.debug("purge result index done"); + } + + private void handleResultPurgeError(Exception exception) { + handlePurgeError(RESULT_INDEX_NOT_EXIST_MSG, "result index", exception); + } + + private void handlePurgeError(String notExistMsg, String indexType, Exception exception) { + if (exception instanceof IndexNotFoundException) { + LOG.debug(notExistMsg); + } else { + LOG.error("delete docs by query fails for " + indexType, exception); + } + } + + private void purgeIndex( + String indexName, + Duration ttl, + String timeStampField, + CheckedConsumer successHandler, + CheckedConsumer errorHandler) { + indexCleanup.deleteDocsByQuery( + indexName, + QueryBuilders.boolQuery() + .filter( + QueryBuilders.rangeQuery(timeStampField) + .lte(clock.millis() - ttl.toMillis()) + .format(FormatNames.EPOCH_MILLIS.getSnakeCaseName())), + ActionListener.wrap( + response -> { + try { + successHandler.accept(response); + } catch (Exception e) { + LOG.error("Error handling response for index " + indexName, e); + } + }, + ex -> { + try { + errorHandler.accept(ex); + } catch (Exception e) { + LOG.error("Error handling error for index " + indexName, e); + } + })); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTask.java b/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTask.java new file mode 100644 index 0000000000..31b1ecb49c --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTask.java @@ -0,0 +1,127 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.cluster; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceStatus; +import org.opensearch.sql.datasources.exceptions.DataSourceNotFoundException; +import org.opensearch.sql.legacy.metrics.MetricName; +import org.opensearch.sql.legacy.metrics.Metrics; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import 
org.opensearch.sql.spark.flint.FlintIndexMetadata;
+import org.opensearch.sql.spark.flint.FlintIndexMetadataService;
+import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory;
+
+/** Cleaner task which alters the active streaming jobs of a disabled datasource. */
+@RequiredArgsConstructor
+public class FlintStreamingJobHouseKeeperTask implements Runnable {
+
+  private final DataSourceService dataSourceService;
+  private final FlintIndexMetadataService flintIndexMetadataService;
+  private final FlintIndexOpFactory flintIndexOpFactory;
+
+  private static final Logger LOGGER = LogManager.getLogger(FlintStreamingJobHouseKeeperTask.class);
+  protected static final AtomicBoolean isRunning = new AtomicBoolean(false);
+
+  @Override
+  public void run() {
+    if (!isRunning.compareAndSet(false, true)) {
+      LOGGER.info("Previous task is still running. Skipping this execution.");
+      return;
+    }
+    try {
+      LOGGER.info("Starting housekeeping task for auto refresh streaming jobs.");
+      Map<String, FlintIndexMetadata> autoRefreshFlintIndicesMap = getAllAutoRefreshIndices();
+      autoRefreshFlintIndicesMap.forEach(
+          (autoRefreshIndex, flintIndexMetadata) -> {
+            try {
+              String datasourceName = getDataSourceName(flintIndexMetadata);
+              try {
+                DataSourceMetadata dataSourceMetadata =
+                    this.dataSourceService.getDataSourceMetadata(datasourceName);
+                if (dataSourceMetadata.getStatus() == DataSourceStatus.DISABLED) {
+                  LOGGER.info("Datasource is disabled for autoRefreshIndex: {}", autoRefreshIndex);
+                  alterAutoRefreshIndex(autoRefreshIndex, flintIndexMetadata, datasourceName);
+                } else {
+                  LOGGER.debug("Datasource is enabled for autoRefreshIndex: {}", autoRefreshIndex);
+                }
+              } catch (DataSourceNotFoundException exception) {
+                LOGGER.info("Datasource is deleted for autoRefreshIndex: {}", autoRefreshIndex);
+                try {
+                  dropAutoRefreshIndex(autoRefreshIndex, flintIndexMetadata, datasourceName);
+                } catch (IllegalStateException illegalStateException) {
+                  LOGGER.debug(
+                      "AutoRefresh index: {} is not in valid state for deletion.",
+                      autoRefreshIndex);
+                }
+              }
+            } catch (Exception exception) {
+              LOGGER.error(
+                  "Failed to alter/cancel index {}: {}",
+                  autoRefreshIndex,
+                  exception.getMessage(),
+                  exception);
+              Metrics.getInstance()
+                  .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT)
+                  .increment();
+            }
+          });
+      LOGGER.info("Finished housekeeping task for auto refresh streaming jobs.");
+    } catch (Throwable error) {
+      LOGGER.error("Error while running the streaming job cleaner task: {}", error.getMessage());
+      Metrics.getInstance()
+          .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT)
+          .increment();
+    } finally {
+      isRunning.set(false);
+    }
+  }
+
+  private void dropAutoRefreshIndex(
+      String autoRefreshIndex, FlintIndexMetadata flintIndexMetadata, String datasourceName) {
+    // Invoked when the datasource has been deleted; possibly replace with a VACUUM operation later.
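+    // Dropping (rather than merely disabling auto refresh) is appropriate here because the
+    // owning datasource no longer exists.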
+ LOGGER.info("Attempting to drop auto refresh index: {}", autoRefreshIndex); + flintIndexOpFactory.getDrop(datasourceName).apply(flintIndexMetadata); + LOGGER.info("Successfully dropped index: {}", autoRefreshIndex); + } + + private void alterAutoRefreshIndex( + String autoRefreshIndex, FlintIndexMetadata flintIndexMetadata, String datasourceName) { + LOGGER.info("Attempting to alter index: {}", autoRefreshIndex); + FlintIndexOptions flintIndexOptions = new FlintIndexOptions(); + flintIndexOptions.setOption(FlintIndexOptions.AUTO_REFRESH, "false"); + flintIndexOpFactory.getAlter(flintIndexOptions, datasourceName).apply(flintIndexMetadata); + LOGGER.info("Successfully altered index: {}", autoRefreshIndex); + } + + private String getDataSourceName(FlintIndexMetadata flintIndexMetadata) { + String kind = flintIndexMetadata.getKind(); + switch (kind) { + case "mv": + return flintIndexMetadata.getName().split("\\.")[0]; + case "skipping": + case "covering": + return flintIndexMetadata.getSource().split("\\.")[0]; + default: + throw new IllegalArgumentException(String.format("Unknown flint index kind: %s", kind)); + } + } + + private Map getAllAutoRefreshIndices() { + Map flintIndexMetadataHashMap = + flintIndexMetadataService.getFlintIndexMetadata("flint_*"); + return flintIndexMetadataHashMap.entrySet().stream() + .filter(entry -> entry.getValue().getFlintIndexOptions().autoRefresh()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java b/async-query/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java new file mode 100644 index 0000000000..562f12b69e --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java @@ -0,0 +1,64 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.cluster; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.reindex.DeleteByQueryAction; +import org.opensearch.index.reindex.DeleteByQueryRequest; + +/** Clean up the old docs for indices. 
*/ +public class IndexCleanup { + private static final Logger LOG = LogManager.getLogger(IndexCleanup.class); + + private final Client client; + private final ClusterService clusterService; + + public IndexCleanup(Client client, ClusterService clusterService) { + this.client = client; + this.clusterService = clusterService; + } + + /** + * Delete docs based on query request + * + * @param indexName index name + * @param queryForDeleteByQueryRequest query request + * @param listener action listener + */ + public void deleteDocsByQuery( + String indexName, QueryBuilder queryForDeleteByQueryRequest, ActionListener listener) { + DeleteByQueryRequest deleteRequest = + new DeleteByQueryRequest(indexName) + .setQuery(queryForDeleteByQueryRequest) + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) + .setRefresh(true); + + try (ThreadContext.StoredContext context = + client.threadPool().getThreadContext().stashContext()) { + client.execute( + DeleteByQueryAction.INSTANCE, + deleteRequest, + ActionListener.wrap( + response -> { + long deleted = response.getDeleted(); + if (deleted > 0) { + // if 0 docs get deleted, it means our query cannot find any matching doc + // or the index does not exist at all + LOG.info("{} docs are deleted for index:{}", deleted, indexName); + } + listener.onResponse(response.getDeleted()); + }, + listener::onFailure)); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposer.java b/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposer.java new file mode 100644 index 0000000000..1925ada46e --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposer.java @@ -0,0 +1,30 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.parameter.GeneralSparkParameterComposer; +import org.opensearch.sql.spark.parameter.SparkSubmitParameters; + +/** Load extra parameters from settings and add to Spark submit parameters */ +@RequiredArgsConstructor +public class OpenSearchExtraParameterComposer implements GeneralSparkParameterComposer { + private final SparkExecutionEngineConfigClusterSettingLoader settingLoader; + + @Override + public void compose( + SparkSubmitParameters sparkSubmitParameters, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context) { + settingLoader + .load() + .ifPresent( + settings -> + sparkSubmitParameters.setExtraParameters(settings.getSparkSubmitParameters())); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchSparkSubmitParameterModifier.java b/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchSparkSubmitParameterModifier.java new file mode 100644 index 0000000000..117d161440 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/config/OpenSearchSparkSubmitParameterModifier.java @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import lombok.AllArgsConstructor; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilder; + +@AllArgsConstructor +public class OpenSearchSparkSubmitParameterModifier implements 
SparkSubmitParameterModifier { + + private String extraParameters; + + @Override + public void modifyParameters(SparkSubmitParametersBuilder builder) { + builder.extraParameters(this.extraParameters); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSetting.java b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSetting.java new file mode 100644 index 0000000000..adaaa57d31 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSetting.java @@ -0,0 +1,34 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.google.gson.Gson; +import lombok.Builder; +import lombok.Data; + +/** + * This POJO is just for reading stringified json in `plugins.query.executionengine.spark.config` + * setting. + */ +@Data +@Builder +@JsonIgnoreProperties(ignoreUnknown = true) +public class SparkExecutionEngineConfigClusterSetting { + // optional + private String accountId; + private String applicationId; + private String region; + private String executionRoleARN; + + /** Additional Spark submit parameters to append to request. */ + private String sparkSubmitParameters; + + public static SparkExecutionEngineConfigClusterSetting toSparkExecutionEngineConfig( + String jsonString) { + return new Gson().fromJson(jsonString, SparkExecutionEngineConfigClusterSetting.class); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoader.java b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoader.java new file mode 100644 index 0000000000..73b057ca5c --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoader.java @@ -0,0 +1,36 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_ENGINE_CONFIG; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Optional; +import lombok.RequiredArgsConstructor; +import org.apache.commons.lang3.StringUtils; +import org.opensearch.sql.common.setting.Settings; + +/** Load SparkExecutionEngineConfigClusterSetting from settings with privilege check. 
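+ * The load runs inside AccessController.doPrivileged because Gson parses the JSON reflectively, which the security manager would otherwise deny to less-privileged calling code.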
*/ +@RequiredArgsConstructor +public class SparkExecutionEngineConfigClusterSettingLoader { + private final Settings settings; + + public Optional load() { + String sparkExecutionEngineConfigSettingString = + this.settings.getSettingValue(SPARK_EXECUTION_ENGINE_CONFIG); + if (!StringUtils.isBlank(sparkExecutionEngineConfigSettingString)) { + return Optional.of( + AccessController.doPrivileged( + (PrivilegedAction) + () -> + SparkExecutionEngineConfigClusterSetting.toSparkExecutionEngineConfig( + sparkExecutionEngineConfigSettingString))); + } else { + return Optional.empty(); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImpl.java b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImpl.java new file mode 100644 index 0000000000..66ad964ad1 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImpl.java @@ -0,0 +1,40 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.opensearch.sql.common.setting.Settings.Key.CLUSTER_NAME; + +import lombok.AllArgsConstructor; +import org.opensearch.cluster.ClusterName; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; + +@AllArgsConstructor +public class SparkExecutionEngineConfigSupplierImpl implements SparkExecutionEngineConfigSupplier { + + private final Settings settings; + private final SparkExecutionEngineConfigClusterSettingLoader settingLoader; + + @Override + public SparkExecutionEngineConfig getSparkExecutionEngineConfig( + AsyncQueryRequestContext asyncQueryRequestContext) { + ClusterName clusterName = settings.getSettingValue(CLUSTER_NAME); + return getBuilderFromSettingsIfAvailable().clusterName(clusterName.value()).build(); + } + + private SparkExecutionEngineConfig.SparkExecutionEngineConfigBuilder + getBuilderFromSettingsIfAvailable() { + return settingLoader + .load() + .map( + setting -> + SparkExecutionEngineConfig.builder() + .applicationId(setting.getApplicationId()) + .executionRoleARN(setting.getExecutionRoleARN()) + .region(setting.getRegion())) + .orElse(SparkExecutionEngineConfig.builder()); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/session/OpenSearchSessionConfigSupplier.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/session/OpenSearchSessionConfigSupplier.java new file mode 100644 index 0000000000..7bad399df8 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/session/OpenSearchSessionConfigSupplier.java @@ -0,0 +1,19 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.common.setting.Settings; + +@RequiredArgsConstructor +public class OpenSearchSessionConfigSupplier implements SessionConfigSupplier { + private final Settings settings; + + @Override + public Long getSessionInactivityTimeoutMillis() { + return settings.getSettingValue(Settings.Key.SESSION_INACTIVITY_TIMEOUT_MILLIS); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/FromXContent.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/FromXContent.java new file mode 100644 index 0000000000..0f691fc9c0 --- 
/dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/FromXContent.java
@@ -0,0 +1,12 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.statestore;
+
+import org.opensearch.core.xcontent.XContentParser;
+
+public interface FromXContent<T extends StateModel> {
+  T fromXContent(XContentParser parser, long seqNo, long primaryTerm);
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchSessionStorageService.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchSessionStorageService.java
new file mode 100644
index 0000000000..db5ded46b5
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchSessionStorageService.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.statestore;
+
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.index.engine.VersionConflictEngineException;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.execution.session.SessionModel;
+import org.opensearch.sql.spark.execution.session.SessionState;
+import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer;
+
+@RequiredArgsConstructor
+public class OpenSearchSessionStorageService implements SessionStorageService {
+  private static final Logger LOG = LogManager.getLogger();
+
+  private final StateStore stateStore;
+  private final SessionModelXContentSerializer serializer;
+
+  @Override
+  public SessionModel createSession(
+      SessionModel sessionModel, AsyncQueryRequestContext asyncQueryRequestContext) {
+    try {
+      return stateStore.create(
+          sessionModel.getId(),
+          sessionModel,
+          SessionModel::of,
+          OpenSearchStateStoreUtil.getIndexName(sessionModel.getDatasourceName()));
+    } catch (VersionConflictEngineException e) {
+      String errorMsg = "session already exists. " + sessionModel.getSessionId();
+      LOG.error(errorMsg);
+      throw new IllegalStateException(errorMsg);
+    }
+  }
+
+  @Override
+  public Optional<SessionModel> getSession(String id, String datasourceName) {
+    return stateStore.get(
+        id, serializer::fromXContent, OpenSearchStateStoreUtil.getIndexName(datasourceName));
+  }
+
+  @Override
+  public SessionModel updateSessionState(SessionModel sessionModel, SessionState sessionState) {
+    return stateStore.updateState(
+        sessionModel,
+        sessionState,
+        SessionModel::copyWithState,
+        OpenSearchStateStoreUtil.getIndexName(sessionModel.getDatasourceName()));
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStatementStorageService.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStatementStorageService.java
new file mode 100644
index 0000000000..67d0609ca5
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/OpenSearchStatementStorageService.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.execution.statestore;
+
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.index.engine.DocumentMissingException;
+import org.opensearch.index.engine.VersionConflictEngineException;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext;
+import org.opensearch.sql.spark.execution.statement.StatementModel;
+import org.opensearch.sql.spark.execution.statement.StatementState;
+import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer;
+
+@RequiredArgsConstructor
+public class OpenSearchStatementStorageService implements StatementStorageService {
+  private static final Logger LOG = LogManager.getLogger();
+
+  private final StateStore stateStore;
+  private final StatementModelXContentSerializer serializer;
+
+  @Override
+  public StatementModel createStatement(
+      StatementModel statementModel, AsyncQueryRequestContext asyncQueryRequestContext) {
+    try {
+      return stateStore.create(
+          statementModel.getId(),
+          statementModel,
+          StatementModel::copy,
+          OpenSearchStateStoreUtil.getIndexName(statementModel.getDatasourceName()));
+    } catch (VersionConflictEngineException e) {
+      String errorMsg = "statement already exists. " + statementModel.getStatementId();
+      LOG.error(errorMsg);
+      throw new IllegalStateException(errorMsg);
+    }
+  }
+
+  @Override
+  public Optional<StatementModel> getStatement(String id, String datasourceName) {
+    return stateStore.get(
+        id, serializer::fromXContent, OpenSearchStateStoreUtil.getIndexName(datasourceName));
+  }
+
+  @Override
+  public StatementModel updateStatementState(
+      StatementModel oldStatementModel, StatementState statementState) {
+    try {
+      return stateStore.updateState(
+          oldStatementModel,
+          statementState,
+          StatementModel::copyWithState,
+          OpenSearchStateStoreUtil.getIndexName(oldStatementModel.getDatasourceName()));
+    } catch (DocumentMissingException e) {
+      String errorMsg =
+          String.format(
+              "cancel statement failed. no statement found. 
statement: %s.", + oldStatementModel.getStatementId()); + LOG.error(errorMsg); + throw new IllegalStateException(errorMsg); + } catch (VersionConflictEngineException e) { + StatementModel statementModel = + getStatement(oldStatementModel.getId(), oldStatementModel.getDatasourceName()) + .orElse(oldStatementModel); + String errorMsg = + String.format( + "cancel statement failed. current statementState: %s " + "statement: %s.", + statementModel.getStatementState(), statementModel.getStatementId()); + LOG.error(errorMsg); + throw new IllegalStateException(errorMsg); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java new file mode 100644 index 0000000000..8d57198277 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java @@ -0,0 +1,336 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statestore; + +import com.google.common.annotations.VisibleForTesting; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.Optional; +import java.util.function.Supplier; +import lombok.RequiredArgsConstructor; +import org.apache.commons.io.IOUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.get.GetRequest; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.session.SessionType; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.execution.xcontent.AsyncQueryJobMetadataXContentSerializer; +import 
org.opensearch.sql.spark.execution.xcontent.FlintIndexStateModelXContentSerializer;
+import org.opensearch.sql.spark.execution.xcontent.IndexDMLResultXContentSerializer;
+import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer;
+import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer;
+import org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes;
+import org.opensearch.sql.spark.execution.xcontent.XContentSerializer;
+import org.opensearch.sql.spark.execution.xcontent.XContentSerializerUtil;
+import org.opensearch.sql.spark.flint.FlintIndexState;
+import org.opensearch.sql.spark.flint.FlintIndexStateModel;
+
+/**
+ * StateStore maintains the state of sessions and statements. StateStore creates/updates/gets docs
+ * on the state index regardless of the user's FGAC permissions.
+ */
+@RequiredArgsConstructor
+public class StateStore {
+  public static String SETTINGS_FILE_NAME = "query_execution_request_settings.yml";
+  public static String MAPPING_FILE_NAME = "query_execution_request_mapping.yml";
+  public static String ALL_DATASOURCE = "*";
+
+  private static final Logger LOG = LogManager.getLogger();
+
+  private final Client client;
+  private final ClusterService clusterService;
+
+  @VisibleForTesting
+  public <T extends StateModel> T create(
+      String docId, T st, CopyBuilder<T> builder, String indexName) {
+    try {
+      if (!this.clusterService.state().routingTable().hasIndex(indexName)) {
+        createIndex(indexName);
+      }
+      XContentSerializer<T> serializer = getXContentSerializer(st);
+      IndexRequest indexRequest =
+          new IndexRequest(indexName)
+              .id(docId)
+              .source(serializer.toXContent(st, ToXContent.EMPTY_PARAMS))
+              .setIfSeqNo(getSeqNo(st))
+              .setIfPrimaryTerm(getPrimaryTerm(st))
+              .create(true)
+              .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
+      try (ThreadContext.StoredContext ignored =
+          client.threadPool().getThreadContext().stashContext()) {
+        IndexResponse indexResponse = client.index(indexRequest).actionGet();
+        if (indexResponse.getResult().equals(DocWriteResponse.Result.CREATED)) {
+          LOG.debug("Successfully created doc. id: {}", st.getId());
+          return builder.of(
+              st,
+              XContentSerializerUtil.buildMetadata(
+                  indexResponse.getSeqNo(), indexResponse.getPrimaryTerm()));
+        } else {
+          throw new RuntimeException(
+              String.format(
+                  Locale.ROOT,
+                  "Failed to create doc. 
id: %s, error: %s",
+                  st.getId(),
+                  indexResponse.getResult().getLowercase()));
+        }
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @VisibleForTesting
+  public <T extends StateModel> Optional<T> get(
+      String sid, FromXContent<T> builder, String indexName) {
+    try {
+      if (!this.clusterService.state().routingTable().hasIndex(indexName)) {
+        createIndex(indexName);
+        return Optional.empty();
+      }
+      GetRequest getRequest = new GetRequest().index(indexName).id(sid).refresh(true);
+      try (ThreadContext.StoredContext ignored =
+          client.threadPool().getThreadContext().stashContext()) {
+        GetResponse getResponse = client.get(getRequest).actionGet();
+        if (getResponse.isExists()) {
+          XContentParser parser =
+              XContentType.JSON
+                  .xContent()
+                  .createParser(
+                      NamedXContentRegistry.EMPTY,
+                      LoggingDeprecationHandler.INSTANCE,
+                      getResponse.getSourceAsString());
+          parser.nextToken();
+          return Optional.of(
+              builder.fromXContent(parser, getResponse.getSeqNo(), getResponse.getPrimaryTerm()));
+        } else {
+          return Optional.empty();
+        }
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @VisibleForTesting
+  public <T extends StateModel, S> T updateState(
+      T st, S state, StateCopyBuilder<T, S> builder, String indexName) {
+    try {
+      T model = builder.of(st, state, st.getMetadata());
+      XContentSerializer<T> serializer = getXContentSerializer(st);
+      UpdateRequest updateRequest =
+          new UpdateRequest()
+              .index(indexName)
+              .id(model.getId())
+              .setIfSeqNo(getSeqNo(model))
+              .setIfPrimaryTerm(getPrimaryTerm(model))
+              .doc(serializer.toXContent(model, ToXContent.EMPTY_PARAMS))
+              .fetchSource(true)
+              .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
+      try (ThreadContext.StoredContext ignored =
+          client.threadPool().getThreadContext().stashContext()) {
+        UpdateResponse updateResponse = client.update(updateRequest).actionGet();
+        LOG.debug("Successfully updated doc. id: {}", st.getId());
+        return builder.of(
+            model,
+            state,
+            XContentSerializerUtil.buildMetadata(
+                updateResponse.getSeqNo(), updateResponse.getPrimaryTerm()));
+      }
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private long getSeqNo(StateModel model) {
+    return model.getMetadataItem("seqNo", Long.class).orElse(SequenceNumbers.UNASSIGNED_SEQ_NO);
+  }
+
+  private long getPrimaryTerm(StateModel model) {
+    return model
+        .getMetadataItem("primaryTerm", Long.class)
+        .orElse(SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
+  }
+
+  /**
+   * Delete the index state document with the given ID. 
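+   * A missing index is treated as already deleted, so the method returns true in that case.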
+ * + * @param sid index state doc ID + * @param indexName index store index name + * @return true if deleted, otherwise false + */ + @VisibleForTesting + public boolean delete(String sid, String indexName) { + try { + // No action if the index doesn't exist + if (!this.clusterService.state().routingTable().hasIndex(indexName)) { + return true; + } + + try (ThreadContext.StoredContext ignored = + client.threadPool().getThreadContext().stashContext()) { + DeleteRequest deleteRequest = new DeleteRequest(indexName, sid); + DeleteResponse deleteResponse = client.delete(deleteRequest).actionGet(); + return deleteResponse.getResult() == DocWriteResponse.Result.DELETED; + } + } catch (Exception e) { + throw new RuntimeException( + String.format("Failed to delete index state doc %s in index %s", sid, indexName), e); + } + } + + private void createIndex(String indexName) { + try { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); + createIndexRequest + .mapping(loadConfigFromResource(MAPPING_FILE_NAME), XContentType.YAML) + .settings(loadConfigFromResource(SETTINGS_FILE_NAME), XContentType.YAML); + ActionFuture createIndexResponseActionFuture; + try (ThreadContext.StoredContext ignored = + client.threadPool().getThreadContext().stashContext()) { + createIndexResponseActionFuture = client.admin().indices().create(createIndexRequest); + } + CreateIndexResponse createIndexResponse = createIndexResponseActionFuture.actionGet(); + if (createIndexResponse.isAcknowledged()) { + LOG.info("Index: {} creation Acknowledged", indexName); + } else { + throw new RuntimeException("Index creation is not acknowledged."); + } + } catch (Throwable e) { + throw new RuntimeException( + "Internal server error while creating" + indexName + " index:: " + e.getMessage()); + } + } + + private long count(String indexName, QueryBuilder query) { + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(query); + searchSourceBuilder.size(0); + + // https://github.com/opensearch-project/sql/issues/1801. 
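+    // "_primary_first" routes the count to primary shards so it reflects the most recent writes
+    // (see the issue linked above).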
+    SearchRequest searchRequest =
+        new SearchRequest()
+            .indices(indexName)
+            .preference("_primary_first")
+            .source(searchSourceBuilder);
+
+    ActionFuture<SearchResponse> searchResponseActionFuture;
+    try (ThreadContext.StoredContext ignored =
+        client.threadPool().getThreadContext().stashContext()) {
+      searchResponseActionFuture = client.search(searchRequest);
+    }
+    SearchResponse searchResponse = searchResponseActionFuture.actionGet();
+    if (searchResponse.status().getStatus() != 200) {
+      throw new RuntimeException(
+          "Fetching job metadata information failed with status : " + searchResponse.status());
+    } else {
+      return searchResponse.getHits().getTotalHits().value;
+    }
+  }
+
+  private String loadConfigFromResource(String fileName) throws IOException {
+    InputStream fileStream = StateStore.class.getClassLoader().getResourceAsStream(fileName);
+    return IOUtils.toString(fileStream, StandardCharsets.UTF_8);
+  }
+
+  public static Supplier<Long> activeSessionsCount(StateStore stateStore, String datasourceName) {
+    return () ->
+        stateStore.count(
+            OpenSearchStateStoreUtil.getIndexName(datasourceName),
+            QueryBuilders.boolQuery()
+                .must(
+                    QueryBuilders.termQuery(
+                        XContentCommonAttributes.TYPE,
+                        SessionModelXContentSerializer.SESSION_DOC_TYPE))
+                .must(
+                    QueryBuilders.termQuery(
+                        SessionModelXContentSerializer.SESSION_TYPE,
+                        SessionType.INTERACTIVE.getSessionType()))
+                .must(
+                    QueryBuilders.termQuery(
+                        XContentCommonAttributes.STATE, SessionState.RUNNING.getSessionState())));
+  }
+
+  public static Supplier<Long> activeRefreshJobCount(StateStore stateStore, String datasourceName) {
+    return () ->
+        stateStore.count(
+            OpenSearchStateStoreUtil.getIndexName(datasourceName),
+            QueryBuilders.boolQuery()
+                .must(
+                    QueryBuilders.termQuery(
+                        XContentCommonAttributes.TYPE,
+                        FlintIndexStateModelXContentSerializer.FLINT_INDEX_DOC_TYPE))
+                .must(
+                    QueryBuilders.termQuery(
+                        XContentCommonAttributes.STATE, FlintIndexState.REFRESHING.getState())));
+  }
+
+  public static Supplier<Long> activeStatementsCount(StateStore stateStore, String datasourceName) {
+    return () ->
+        stateStore.count(
+            OpenSearchStateStoreUtil.getIndexName(datasourceName),
+            QueryBuilders.boolQuery()
+                .must(
+                    QueryBuilders.termQuery(
+                        XContentCommonAttributes.TYPE,
+                        StatementModelXContentSerializer.STATEMENT_DOC_TYPE))
+                .should(
+                    QueryBuilders.termsQuery(
+                        XContentCommonAttributes.STATE,
+                        StatementState.RUNNING.getState(),
+                        StatementState.WAITING.getState())));
+  }
+
+  @SuppressWarnings("unchecked")
+  private <T extends StateModel> XContentSerializer<T> getXContentSerializer(T st) {
+    if (st instanceof StatementModel) {
+      return (XContentSerializer<T>) new StatementModelXContentSerializer();
+    } else if (st instanceof SessionModel) {
+      return (XContentSerializer<T>) new SessionModelXContentSerializer();
+    } else if (st instanceof FlintIndexStateModel) {
+      return (XContentSerializer<T>) new FlintIndexStateModelXContentSerializer();
+    } else if (st instanceof AsyncQueryJobMetadata) {
+      return (XContentSerializer<T>) new AsyncQueryJobMetadataXContentSerializer();
+    } else if (st instanceof IndexDMLResult) {
+      return (XContentSerializer<T>) new IndexDMLResultXContentSerializer();
+    } else {
+      throw new IllegalArgumentException(
+          "Unsupported StateModel subclass: " + st.getClass().getSimpleName());
+    }
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializer.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializer.java
new file mode 100644
index 
0000000000..1ae20a01b6 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializer.java @@ -0,0 +1,104 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ACCOUNT_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.APPLICATION_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.JOB_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.QUERY_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.TYPE; + +import java.io.IOException; +import java.util.Locale; +import lombok.SneakyThrows; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.dispatcher.model.JobType; + +public class AsyncQueryJobMetadataXContentSerializer + implements XContentSerializer { + public static final String TYPE_JOBMETA = "jobmeta"; + public static final String JOB_TYPE = "jobType"; + public static final String INDEX_NAME = "indexName"; + public static final String RESULT_INDEX = "resultIndex"; + public static final String SESSION_ID = "sessionId"; + + @Override + public XContentBuilder toXContent(AsyncQueryJobMetadata jobMetadata, ToXContent.Params params) + throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field(QUERY_ID, jobMetadata.getQueryId()) + .field(TYPE, TYPE_JOBMETA) + .field(JOB_ID, jobMetadata.getJobId()) + .field(ACCOUNT_ID, jobMetadata.getAccountId()) + .field(APPLICATION_ID, jobMetadata.getApplicationId()) + .field(RESULT_INDEX, jobMetadata.getResultIndex()) + .field(SESSION_ID, jobMetadata.getSessionId()) + .field(DATASOURCE_NAME, jobMetadata.getDatasourceName()) + .field(JOB_TYPE, jobMetadata.getJobType().getText().toLowerCase(Locale.ROOT)) + .field(INDEX_NAME, jobMetadata.getIndexName()) + .endObject(); + } + + @Override + @SneakyThrows + public AsyncQueryJobMetadata fromXContent(XContentParser parser, long seqNo, long primaryTerm) { + AsyncQueryJobMetadata.AsyncQueryJobMetadataBuilder builder = AsyncQueryJobMetadata.builder(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { + String fieldName = parser.currentName(); + parser.nextToken(); + switch (fieldName) { + case QUERY_ID: + builder.queryId(parser.textOrNull()); + break; + case JOB_ID: + builder.jobId(parser.textOrNull()); + break; + case ACCOUNT_ID: + builder.accountId(parser.textOrNull()); + break; + case APPLICATION_ID: + builder.applicationId(parser.textOrNull()); + break; + case RESULT_INDEX: + builder.resultIndex(parser.textOrNull()); + break; + case SESSION_ID: + builder.sessionId(parser.textOrNull()); + break; + case DATASOURCE_NAME: + builder.datasourceName(parser.textOrNull()); + break; + case JOB_TYPE: + String jobTypeStr = 
parser.textOrNull(); + builder.jobType( + Strings.isNullOrEmpty(jobTypeStr) ? null : JobType.fromString(jobTypeStr)); + break; + case INDEX_NAME: + builder.indexName(parser.textOrNull()); + break; + case TYPE: + break; + default: + throw new IllegalArgumentException("Unknown field: " + fieldName); + } + } + builder.metadata(XContentSerializerUtil.buildMetadata(seqNo, primaryTerm)); + AsyncQueryJobMetadata result = builder.build(); + if (result.getJobId() == null || result.getApplicationId() == null) { + throw new IllegalArgumentException("jobId and applicationId are required fields."); + } + return result; + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializer.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializer.java new file mode 100644 index 0000000000..9b1530dddf --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializer.java @@ -0,0 +1,91 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ACCOUNT_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.APPLICATION_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ERROR; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.JOB_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.LAST_UPDATE_TIME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.STATE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.TYPE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.VERSION; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.VERSION_1_0; + +import java.io.IOException; +import lombok.SneakyThrows; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; + +public class FlintIndexStateModelXContentSerializer + implements XContentSerializer<FlintIndexStateModel> { + public static final String FLINT_INDEX_DOC_TYPE = "flintindexstate"; + public static final String LATEST_ID = "latestId"; + + @Override + public XContentBuilder toXContent( + FlintIndexStateModel flintIndexStateModel, ToXContent.Params params) throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field(VERSION, VERSION_1_0) + .field(TYPE, FLINT_INDEX_DOC_TYPE) + .field(STATE, flintIndexStateModel.getIndexState().getState()) + .field(ACCOUNT_ID, flintIndexStateModel.getAccountId()) + .field(APPLICATION_ID, flintIndexStateModel.getApplicationId()) + .field(JOB_ID, flintIndexStateModel.getJobId()) + .field(LATEST_ID, flintIndexStateModel.getLatestId()) + .field(DATASOURCE_NAME, flintIndexStateModel.getDatasourceName()) + .field(LAST_UPDATE_TIME, flintIndexStateModel.getLastUpdateTime()) + .field(ERROR,
flintIndexStateModel.getError()) + .endObject(); + } + + @Override + @SneakyThrows + public FlintIndexStateModel fromXContent(XContentParser parser, long seqNo, long primaryTerm) { + FlintIndexStateModel.FlintIndexStateModelBuilder builder = FlintIndexStateModel.builder(); + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { + String fieldName = parser.currentName(); + parser.nextToken(); + switch (fieldName) { + case STATE: + builder.indexState(FlintIndexState.fromString(parser.text())); + break; + case ACCOUNT_ID: + builder.accountId(parser.textOrNull()); + break; + case APPLICATION_ID: + builder.applicationId(parser.text()); + break; + case JOB_ID: + builder.jobId(parser.text()); + break; + case LATEST_ID: + builder.latestId(parser.text()); + break; + case DATASOURCE_NAME: + builder.datasourceName(parser.text()); + break; + case LAST_UPDATE_TIME: + builder.lastUpdateTime(parser.longValue()); + break; + case ERROR: + builder.error(parser.text()); + break; + } + } + builder.metadata(XContentSerializerUtil.buildMetadata(seqNo, primaryTerm)); + return builder.build(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializer.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializer.java new file mode 100644 index 0000000000..505533157d --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializer.java @@ -0,0 +1,44 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ERROR; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.QUERY_ID; + +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; + +public class IndexDMLResultXContentSerializer implements XContentSerializer { + public static final String QUERY_RUNTIME = "queryRunTime"; + public static final String UPDATE_TIME = "updateTime"; + + @Override + public XContentBuilder toXContent(IndexDMLResult dmlResult, ToXContent.Params params) + throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field(QUERY_ID, dmlResult.getQueryId()) + .field("status", dmlResult.getStatus()) + .field(ERROR, dmlResult.getError()) + .field(DATASOURCE_NAME, dmlResult.getDatasourceName()) + .field(QUERY_RUNTIME, dmlResult.getQueryRunTime()) + .field(UPDATE_TIME, dmlResult.getUpdateTime()) + .field("result", ImmutableList.of()) + .field("schema", ImmutableList.of()) + .endObject(); + } + + @Override + public IndexDMLResult fromXContent(XContentParser parser, long seqNo, long primaryTerm) { + throw new UnsupportedOperationException("IndexDMLResult to fromXContent Not supported"); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializer.java 
b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializer.java new file mode 100644 index 0000000000..c36fc1ffc0 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializer.java @@ -0,0 +1,101 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ACCOUNT_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.APPLICATION_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ERROR; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.JOB_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.LAST_UPDATE_TIME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.STATE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.TYPE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.VERSION; + +import java.io.IOException; +import lombok.SneakyThrows; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.session.SessionType; + +public class SessionModelXContentSerializer implements XContentSerializer { + public static final String SESSION_DOC_TYPE = "session"; + public static final String SESSION_TYPE = "sessionType"; + public static final String SESSION_ID = "sessionId"; + + @Override + public XContentBuilder toXContent(SessionModel sessionModel, ToXContent.Params params) + throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field(VERSION, sessionModel.getVersion()) + .field(TYPE, SESSION_DOC_TYPE) + .field(SESSION_TYPE, sessionModel.getSessionType().getSessionType()) + .field(SESSION_ID, sessionModel.getSessionId()) + .field(STATE, sessionModel.getSessionState().getSessionState()) + .field(DATASOURCE_NAME, sessionModel.getDatasourceName()) + .field(ACCOUNT_ID, sessionModel.getAccountId()) + .field(APPLICATION_ID, sessionModel.getApplicationId()) + .field(JOB_ID, sessionModel.getJobId()) + .field(LAST_UPDATE_TIME, sessionModel.getLastUpdateTime()) + .field(ERROR, sessionModel.getError()) + .endObject(); + } + + @Override + @SneakyThrows + public SessionModel fromXContent(XContentParser parser, long seqNo, long primaryTerm) { + SessionModel.SessionModelBuilder builder = SessionModel.builder(); + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { + String fieldName = parser.currentName(); + parser.nextToken(); + switch (fieldName) { + case VERSION: + builder.version(parser.text()); + break; + case SESSION_TYPE: + builder.sessionType(SessionType.fromString(parser.text())); + break; + case SESSION_ID: + builder.sessionId(parser.text()); + break; + case STATE: + 
builder.sessionState(SessionState.fromString(parser.text())); + break; + case DATASOURCE_NAME: + builder.datasourceName(parser.text()); + break; + case ERROR: + builder.error(parser.text()); + break; + case ACCOUNT_ID: + builder.accountId(parser.textOrNull()); + break; + case APPLICATION_ID: + builder.applicationId(parser.text()); + break; + case JOB_ID: + builder.jobId(parser.text()); + break; + case LAST_UPDATE_TIME: + builder.lastUpdateTime(parser.longValue()); + break; + case TYPE: + // do nothing. + break; + } + } + builder.metadata(XContentSerializerUtil.buildMetadata(seqNo, primaryTerm)); + return builder.build(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializer.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializer.java new file mode 100644 index 0000000000..07f018f90c --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializer.java @@ -0,0 +1,120 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer.SESSION_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ACCOUNT_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.APPLICATION_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.ERROR; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.JOB_ID; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.STATE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.TYPE; +import static org.opensearch.sql.spark.execution.xcontent.XContentCommonAttributes.VERSION; + +import java.io.IOException; +import lombok.SneakyThrows; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; +import org.opensearch.sql.spark.execution.statement.StatementId; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.rest.model.LangType; + +public class StatementModelXContentSerializer implements XContentSerializer { + public static final String STATEMENT_DOC_TYPE = "statement"; + public static final String STATEMENT_ID = "statementId"; + public static final String LANG = "lang"; + public static final String QUERY = "query"; + public static final String QUERY_ID = "queryId"; + public static final String SUBMIT_TIME = "submitTime"; + public static final String UNKNOWN = ""; + + @Override + public XContentBuilder toXContent(StatementModel statementModel, ToXContent.Params params) + throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field(VERSION, statementModel.getVersion()) + .field(TYPE, STATEMENT_DOC_TYPE) + .field(STATE, statementModel.getStatementState().getState()) + .field(STATEMENT_ID, statementModel.getStatementId().getId()) + .field(SESSION_ID, statementModel.getSessionId()) + 
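/* Editor's note: a minimal round-trip sketch for these serializers, assuming the
   caller advances the parser onto START_OBJECT first (as the state store read path
   does); seqNo and primaryTerm stand in for values taken from the index response:

     XContentSerializer<StatementModel> ser = new StatementModelXContentSerializer();
     XContentBuilder builder = ser.toXContent(model, ToXContent.EMPTY_PARAMS);
     String json = BytesReference.bytes(builder).utf8ToString();
     XContentParser parser =
         XContentType.JSON
             .xContent()
             .createParser(
                 NamedXContentRegistry.EMPTY,
                 DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                 json);
     parser.nextToken(); // position on START_OBJECT before fromXContent
     StatementModel copy = ser.fromXContent(parser, seqNo, primaryTerm);
*/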
.field(ACCOUNT_ID, statementModel.getAccountId()) + .field(APPLICATION_ID, statementModel.getApplicationId()) + .field(JOB_ID, statementModel.getJobId()) + .field(LANG, statementModel.getLangType().getText()) + .field(DATASOURCE_NAME, statementModel.getDatasourceName()) + .field(QUERY, statementModel.getQuery()) + .field(QUERY_ID, statementModel.getQueryId()) + .field(SUBMIT_TIME, statementModel.getSubmitTime()) + .field(ERROR, statementModel.getError()) + .endObject(); + } + + @Override + @SneakyThrows + public StatementModel fromXContent(XContentParser parser, long seqNo, long primaryTerm) { + StatementModel.StatementModelBuilder builder = StatementModel.builder(); + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { + String fieldName = parser.currentName(); + parser.nextToken(); + switch (fieldName) { + case VERSION: + builder.version(parser.text()); + break; + case TYPE: + // do nothing + break; + case STATE: + builder.statementState(StatementState.fromString(parser.text())); + break; + case STATEMENT_ID: + builder.statementId(new StatementId(parser.text())); + break; + case SESSION_ID: + builder.sessionId(parser.text()); + break; + case ACCOUNT_ID: + builder.accountId(parser.textOrNull()); + break; + case APPLICATION_ID: + builder.applicationId(parser.text()); + break; + case JOB_ID: + builder.jobId(parser.text()); + break; + case LANG: + builder.langType(LangType.fromString(parser.text())); + break; + case DATASOURCE_NAME: + builder.datasourceName(parser.text()); + break; + case QUERY: + builder.query(parser.text()); + break; + case QUERY_ID: + builder.queryId(parser.text()); + break; + case SUBMIT_TIME: + builder.submitTime(parser.longValue()); + break; + case ERROR: + builder.error(parser.text()); + break; + default: + throw new IllegalArgumentException("Unexpected field: " + fieldName); + } + } + builder.metadata(XContentSerializerUtil.buildMetadata(seqNo, primaryTerm)); + return builder.build(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentCommonAttributes.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentCommonAttributes.java new file mode 100644 index 0000000000..d8f17bf9d6 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentCommonAttributes.java @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import lombok.experimental.UtilityClass; + +@UtilityClass +public class XContentCommonAttributes { + public static final String VERSION = "version"; + public static final String VERSION_1_0 = "1.0"; + public static final String TYPE = "type"; + public static final String QUERY_ID = "queryId"; + public static final String STATE = "state"; + public static final String LAST_UPDATE_TIME = "lastUpdateTime"; + public static final String ACCOUNT_ID = "accountId"; + public static final String APPLICATION_ID = "applicationId"; + public static final String DATASOURCE_NAME = "dataSourceName"; + public static final String JOB_ID = "jobId"; + public static final String ERROR = "error"; +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializer.java b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializer.java new file mode 100644 index 
0000000000..d8cbcdbe29 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializer.java @@ -0,0 +1,36 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import java.io.IOException; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.execution.statestore.StateModel; + +/** Serializes state models to XContent and deserializes them back. */ +public interface XContentSerializer<T extends StateModel> { + + /** + * Serializes the given object to an XContentBuilder using the specified parameters. + * + * @param object The object to serialize. + * @param params The parameters to use for serialization. + * @return An XContentBuilder containing the serialized representation of the object. + * @throws IOException If an I/O error occurs during serialization. + */ + XContentBuilder toXContent(T object, ToXContent.Params params) throws IOException; + + /** + * Deserializes an object from an XContentParser. + * + * @param parser The XContentParser to read the object from. + * @param seqNo The sequence number associated with the object. + * @param primaryTerm The primary term associated with the object. + * @return The deserialized object. + */ + T fromXContent(XContentParser parser, long seqNo, long primaryTerm); +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImpl.java b/async-query/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImpl.java new file mode 100644 index 0000000000..893b33b39d --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImpl.java @@ -0,0 +1,168 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import static org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions.AUTO_REFRESH; +import static org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions.CHECKPOINT_LOCATION; +import static org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions.INCREMENTAL_REFRESH; +import static org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions.WATERMARK_DELAY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.APP_ID; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.ENV_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.KIND_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.LATEST_ID_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.META_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.NAME_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.OPTIONS_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.PROPERTIES_KEY; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.SERVERLESS_EMR_JOB_ID; +import static org.opensearch.sql.spark.flint.FlintIndexMetadata.SOURCE_KEY; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import lombok.AllArgsConstructor; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import
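/* Editor's note: fromMetadata below walks the Flint "_meta" section of the index
   mapping. An illustrative shape, with the field values being assumptions and the
   env keys named by the constants the code reads:

     "_meta": {
       "kind": "mv",
       "name": "spark_catalog.default.my_mv",
       "source": "spark_catalog.default.source_table",
       "latestId": "...",
       "options": { "auto_refresh": "true", "watermark_delay": "1 Minute" },
       "properties": { "env": { <SERVERLESS_EMR_JOB_ID>: "...", <APP_ID>: "..." } }
     }
*/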
org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.opensearch.client.Client; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; + +/** Implementation of {@link FlintIndexMetadataService} */ +@AllArgsConstructor +public class FlintIndexMetadataServiceImpl implements FlintIndexMetadataService { + + private static final Logger LOGGER = LogManager.getLogger(FlintIndexMetadataServiceImpl.class); + + private final Client client; + public static final Set ALTER_TO_FULL_REFRESH_ALLOWED_OPTIONS = + new LinkedHashSet<>(Arrays.asList(AUTO_REFRESH, INCREMENTAL_REFRESH)); + public static final Set ALTER_TO_INCREMENTAL_REFRESH_ALLOWED_OPTIONS = + new LinkedHashSet<>( + Arrays.asList(AUTO_REFRESH, INCREMENTAL_REFRESH, WATERMARK_DELAY, CHECKPOINT_LOCATION)); + + @Override + public Map getFlintIndexMetadata(String indexPattern) { + GetMappingsResponse mappingsResponse = + client.admin().indices().prepareGetMappings().setIndices(indexPattern).get(); + Map indexMetadataMap = new HashMap<>(); + mappingsResponse + .getMappings() + .forEach( + (indexName, mappingMetadata) -> { + try { + Map mappingSourceMap = mappingMetadata.getSourceAsMap(); + FlintIndexMetadata metadata = + fromMetadata(indexName, (Map) mappingSourceMap.get(META_KEY)); + indexMetadataMap.put(indexName, metadata); + } catch (Exception exception) { + LOGGER.error( + "Exception while building index details for index: {} due to: {}", + indexName, + exception.getMessage()); + } + }); + return indexMetadataMap; + } + + @Override + public void updateIndexToManualRefresh(String indexName, FlintIndexOptions flintIndexOptions) { + GetMappingsResponse mappingsResponse = + client.admin().indices().prepareGetMappings().setIndices(indexName).get(); + Map flintMetadataMap = + mappingsResponse.getMappings().get(indexName).getSourceAsMap(); + Map meta = (Map) flintMetadataMap.get("_meta"); + String kind = (String) meta.get("kind"); + Map options = (Map) meta.get("options"); + Map newOptions = flintIndexOptions.getProvidedOptions(); + validateFlintIndexOptions(kind, options, newOptions); + options.putAll(newOptions); + client.admin().indices().preparePutMapping(indexName).setSource(flintMetadataMap).get(); + } + + private void validateFlintIndexOptions( + String kind, Map existingOptions, Map newOptions) { + if ((newOptions.containsKey(INCREMENTAL_REFRESH) + && Boolean.parseBoolean(newOptions.get(INCREMENTAL_REFRESH))) + || ((!newOptions.containsKey(INCREMENTAL_REFRESH) + && Boolean.parseBoolean((String) existingOptions.get(INCREMENTAL_REFRESH))))) { + validateConversionToIncrementalRefresh(kind, existingOptions, newOptions); + } else { + validateConversionToFullRefresh(newOptions); + } + } + + private void validateConversionToFullRefresh(Map newOptions) { + if (!ALTER_TO_FULL_REFRESH_ALLOWED_OPTIONS.containsAll(newOptions.keySet())) { + throw new IllegalArgumentException( + String.format( + "Altering to full refresh only allows: %s options", + ALTER_TO_FULL_REFRESH_ALLOWED_OPTIONS)); + } + } + + private void validateConversionToIncrementalRefresh( + String kind, Map existingOptions, Map newOptions) { + if (!ALTER_TO_INCREMENTAL_REFRESH_ALLOWED_OPTIONS.containsAll(newOptions.keySet())) { + throw new IllegalArgumentException( + String.format( + "Altering to incremental refresh only allows: %s options", + ALTER_TO_INCREMENTAL_REFRESH_ALLOWED_OPTIONS)); + } + HashMap mergedOptions = new HashMap<>(); + mergedOptions.putAll(existingOptions); + mergedOptions.putAll(newOptions); + List missingAttributes = new 
ArrayList<>(); + if (!mergedOptions.containsKey(CHECKPOINT_LOCATION) + || StringUtils.isEmpty((String) mergedOptions.get(CHECKPOINT_LOCATION))) { + missingAttributes.add(CHECKPOINT_LOCATION); + } + if (kind.equals("mv") + && (!mergedOptions.containsKey(WATERMARK_DELAY) + || StringUtils.isEmpty((String) mergedOptions.get(WATERMARK_DELAY)))) { + missingAttributes.add(WATERMARK_DELAY); + } + if (missingAttributes.size() > 0) { + String errorMessage = + "Conversion to incremental refresh index cannot proceed due to missing attributes: " + + String.join(", ", missingAttributes) + + "."; + LOGGER.error(errorMessage); + throw new IllegalArgumentException(errorMessage); + } + } + + private FlintIndexMetadata fromMetadata(String indexName, Map metaMap) { + FlintIndexMetadata.FlintIndexMetadataBuilder flintIndexMetadataBuilder = + FlintIndexMetadata.builder(); + Map propertiesMap = (Map) metaMap.get(PROPERTIES_KEY); + Map envMap = (Map) propertiesMap.get(ENV_KEY); + Map options = (Map) metaMap.get(OPTIONS_KEY); + FlintIndexOptions flintIndexOptions = new FlintIndexOptions(); + for (String key : options.keySet()) { + flintIndexOptions.setOption(key, (String) options.get(key)); + } + String jobId = (String) envMap.get(SERVERLESS_EMR_JOB_ID); + String appId = (String) envMap.getOrDefault(APP_ID, null); + String latestId = (String) metaMap.getOrDefault(LATEST_ID_KEY, null); + String kind = (String) metaMap.getOrDefault(KIND_KEY, null); + String name = (String) metaMap.getOrDefault(NAME_KEY, null); + String source = (String) metaMap.getOrDefault(SOURCE_KEY, null); + flintIndexMetadataBuilder.jobId(jobId); + flintIndexMetadataBuilder.appId(appId); + flintIndexMetadataBuilder.latestId(latestId); + flintIndexMetadataBuilder.name(name); + flintIndexMetadataBuilder.kind(kind); + flintIndexMetadataBuilder.source(source); + flintIndexMetadataBuilder.opensearchIndexName(indexName); + flintIndexMetadataBuilder.flintIndexOptions(flintIndexOptions); + return flintIndexMetadataBuilder.build(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexClient.java b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexClient.java new file mode 100644 index 0000000000..7a655f0678 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexClient.java @@ -0,0 +1,27 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import lombok.RequiredArgsConstructor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.Client; + +@RequiredArgsConstructor +public class OpenSearchFlintIndexClient implements FlintIndexClient { + private static final Logger LOG = LogManager.getLogger(); + + private final Client client; + + @Override + public void deleteIndex(String indexName) { + DeleteIndexRequest request = new DeleteIndexRequest().indices(indexName); + AcknowledgedResponse response = client.admin().indices().delete(request).actionGet(); + LOG.info("OpenSearch index delete result: {}", response.isAcknowledged()); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelService.java b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelService.java new file mode 100644 index 
0000000000..5781c3e44b --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelService.java @@ -0,0 +1,51 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import java.util.Optional; +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.xcontent.FlintIndexStateModelXContentSerializer; + +@RequiredArgsConstructor +public class OpenSearchFlintIndexStateModelService implements FlintIndexStateModelService { + private final StateStore stateStore; + private final FlintIndexStateModelXContentSerializer serializer; + + @Override + public FlintIndexStateModel updateFlintIndexState( + FlintIndexStateModel flintIndexStateModel, + FlintIndexState flintIndexState, + String datasourceName) { + return stateStore.updateState( + flintIndexStateModel, + flintIndexState, + FlintIndexStateModel::copyWithState, + OpenSearchStateStoreUtil.getIndexName(datasourceName)); + } + + @Override + public Optional getFlintIndexStateModel(String id, String datasourceName) { + return stateStore.get( + id, serializer::fromXContent, OpenSearchStateStoreUtil.getIndexName(datasourceName)); + } + + @Override + public FlintIndexStateModel createFlintIndexStateModel( + FlintIndexStateModel flintIndexStateModel) { + return stateStore.create( + flintIndexStateModel.getId(), + flintIndexStateModel, + FlintIndexStateModel::copy, + OpenSearchStateStoreUtil.getIndexName(flintIndexStateModel.getDatasourceName())); + } + + @Override + public boolean deleteFlintIndexStateModel(String id, String datasourceName) { + return stateStore.delete(id, OpenSearchStateStoreUtil.getIndexName(datasourceName)); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchIndexDMLResultStorageService.java b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchIndexDMLResultStorageService.java new file mode 100644 index 0000000000..3be44ba410 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/flint/OpenSearchIndexDMLResultStorageService.java @@ -0,0 +1,36 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; +import org.opensearch.sql.spark.execution.statestore.StateStore; + +@RequiredArgsConstructor +public class OpenSearchIndexDMLResultStorageService implements IndexDMLResultStorageService { + + private final DataSourceService dataSourceService; + private final StateStore stateStore; + + @Override + public IndexDMLResult createIndexDMLResult( + IndexDMLResult result, AsyncQueryRequestContext asyncQueryRequestContexts) { + DataSourceMetadata dataSourceMetadata = + dataSourceService.getDataSourceMetadata(result.getDatasourceName()); + return stateStore.create( + mapIdToDocumentId(result.getId()), + result, + IndexDMLResult::copy, + dataSourceMetadata.getResultIndex()); + } + + private String mapIdToDocumentId(String id) { + return "index" + id; + } +} diff --git 
a/async-query/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java b/async-query/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java new file mode 100644 index 0000000000..375fa7b11e --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java @@ -0,0 +1,105 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.leasemanager; + +import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_REFRESH_JOB_LIMIT; +import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_SESSION_LIMIT; +import static org.opensearch.sql.spark.execution.statestore.StateStore.ALL_DATASOURCE; +import static org.opensearch.sql.spark.execution.statestore.StateStore.activeRefreshJobCount; +import static org.opensearch.sql.spark.execution.statestore.StateStore.activeSessionsCount; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.function.Predicate; +import lombok.RequiredArgsConstructor; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.spark.dispatcher.model.JobType; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; + +/** + * Default Lease Manager + *
<ul> + *   <li>QueryHandler borrows a lease before executing the query. + *   <li>LeaseManagerService checks the request against the domain-level concurrency limit. + *   <li>LeaseManagerService runs on the data node and checks limits based on cluster settings. + * </ul> + */ +public class DefaultLeaseManager implements LeaseManager { + + private final List<Rule<LeaseRequest>> concurrentLimitRules; + private final Settings settings; + private final StateStore stateStore; + + public DefaultLeaseManager(Settings settings, StateStore stateStore) { + this.settings = settings; + this.stateStore = stateStore; + this.concurrentLimitRules = + Arrays.asList( + new ConcurrentSessionRule(settings, stateStore), + new ConcurrentRefreshJobRule(settings, stateStore)); + } + + @Override + public void borrow(LeaseRequest request) { + for (Rule<LeaseRequest> rule : concurrentLimitRules) { + if (!rule.test(request)) { + throw new ConcurrencyLimitExceededException(rule.description()); + } + } + } + + interface Rule<T> extends Predicate<T> { + String description(); + } + + @RequiredArgsConstructor + public static class ConcurrentSessionRule implements Rule<LeaseRequest> { + private final Settings settings; + private final StateStore stateStore; + + @Override + public String description() { + return String.format( + Locale.ROOT, "domain concurrent active session cannot exceed %d", sessionMaxLimit()); + } + + @Override + public boolean test(LeaseRequest leaseRequest) { + if (leaseRequest.getJobType() != JobType.INTERACTIVE) { + return true; + } + return activeSessionsCount(stateStore, ALL_DATASOURCE).get() < sessionMaxLimit(); + } + + public int sessionMaxLimit() { + return settings.getSettingValue(SPARK_EXECUTION_SESSION_LIMIT); + } + } + + @RequiredArgsConstructor + public static class ConcurrentRefreshJobRule implements Rule<LeaseRequest> { + private final Settings settings; + private final StateStore stateStore; + + @Override + public String description() { + return String.format( + Locale.ROOT, "domain concurrent refresh job cannot exceed %d", refreshJobLimit()); + } + + @Override + public boolean test(LeaseRequest leaseRequest) { + if (leaseRequest.getJobType() == JobType.INTERACTIVE) { + return true; + } + return activeRefreshJobCount(stateStore, ALL_DATASOURCE).get() < refreshJobLimit(); + } + + public int refreshJobLimit() { + return settings.getSettingValue(SPARK_EXECUTION_REFRESH_JOB_LIMIT); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/metrics/OpenSearchMetricsService.java b/async-query/src/main/java/org/opensearch/sql/spark/metrics/OpenSearchMetricsService.java new file mode 100644 index 0000000000..316ab536bc --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/metrics/OpenSearchMetricsService.java @@ -0,0 +1,32 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.metrics; + +import com.google.common.collect.ImmutableMap; +import java.util.Map; +import org.opensearch.sql.legacy.metrics.MetricName; +import org.opensearch.sql.legacy.utils.MetricUtils; + +public class OpenSearchMetricsService implements MetricsService { + private static final Map<EmrMetrics, MetricName> mapping = + ImmutableMap.of( + EmrMetrics.EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT, + MetricName.EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT, + EmrMetrics.EMR_GET_JOB_RESULT_FAILURE_COUNT, MetricName.EMR_GET_JOB_RESULT_FAILURE_COUNT, + EmrMetrics.EMR_START_JOB_REQUEST_FAILURE_COUNT, + MetricName.EMR_START_JOB_REQUEST_FAILURE_COUNT, + EmrMetrics.EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT, + MetricName.EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT, + EmrMetrics.EMR_STREAMING_QUERY_JOBS_CREATION_COUNT, + MetricName.EMR_STREAMING_QUERY_JOBS_CREATION_COUNT, + EmrMetrics.EMR_BATCH_QUERY_JOBS_CREATION_COUNT,
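/* Editor's note: an illustrative borrow/deny flow for the DefaultLeaseManager
   defined above (a minimal sketch; the LeaseRequest constructor arguments are
   assumptions):

     LeaseManager leaseManager = new DefaultLeaseManager(settings, stateStore);
     try {
       leaseManager.borrow(new LeaseRequest(JobType.INTERACTIVE, "my_glue"));
       // under the limit: proceed to dispatch the query
     } catch (ConcurrencyLimitExceededException e) {
       // over the limit: the REST layer reports this as HTTP 429 (TOO_MANY_REQUESTS)
     }
*/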
+ MetricName.EMR_BATCH_QUERY_JOBS_CREATION_COUNT); + + @Override + public void incrementNumericalMetric(EmrMetrics metricName) { + MetricUtils.incrementNumericalMetric(mapping.get(metricName)); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposer.java b/async-query/src/main/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposer.java new file mode 100644 index 0000000000..26dbf3529a --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposer.java @@ -0,0 +1,108 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.parameter; + +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH_PASSWORD; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH_USERNAME; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_REGION; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_URI; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_LAKEFORMATION_ENABLED; +import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_ROLE_ARN; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DRIVER_ENV_ASSUME_ROLE_ARN_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.EMR_LAKEFORMATION_OPTION; +import static org.opensearch.sql.spark.data.constants.SparkConstants.EXECUTOR_ENV_ASSUME_ROLE_ARN_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_ACCELERATE_USING_COVERING_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DATA_SOURCE_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_DELEGATE_CATALOG; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_PASSWORD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_USERNAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AWSREGION_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_HOST_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_PORT_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_SCHEME_KEY; +import static org.opensearch.sql.spark.data.constants.SparkConstants.HIVE_METASTORE_GLUE_ARN_KEY; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.function.Supplier; +import org.apache.commons.lang3.BooleanUtils; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasources.auth.AuthenticationType; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; + +public class S3GlueDataSourceSparkParameterComposer implements DataSourceSparkParameterComposer { + public static final String FLINT_BASIC_AUTH = "basic"; + + @Override + public void compose( + DataSourceMetadata metadata, + 
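/* Editor's note: for a data source named, say, "my_glue", the method below would
   emit Spark configuration along these lines (an illustrative sketch; actual
   values come from the Glue data source properties):

     spark.sql.catalog.my_glue    -> FLINT_DELEGATE_CATALOG
     spark.flint.datasource.name  -> my_glue

   plus the assume-role ARNs and the Flint index-store host, port, scheme, and
   auth settings derived from the OpenSearch URI and auth type handled below. */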
SparkSubmitParameters params, + DispatchQueryRequest dispatchQueryRequest, + AsyncQueryRequestContext context) { + String roleArn = metadata.getProperties().get(GLUE_ROLE_ARN); + + params.setConfigItem(DRIVER_ENV_ASSUME_ROLE_ARN_KEY, roleArn); + params.setConfigItem(EXECUTOR_ENV_ASSUME_ROLE_ARN_KEY, roleArn); + params.setConfigItem(HIVE_METASTORE_GLUE_ARN_KEY, roleArn); + params.setConfigItem("spark.sql.catalog." + metadata.getName(), FLINT_DELEGATE_CATALOG); + params.setConfigItem(FLINT_DATA_SOURCE_KEY, metadata.getName()); + + final boolean lakeFormationEnabled = + BooleanUtils.toBoolean(metadata.getProperties().get(GLUE_LAKEFORMATION_ENABLED)); + params.setConfigItem(EMR_LAKEFORMATION_OPTION, Boolean.toString(lakeFormationEnabled)); + params.setConfigItem( + FLINT_ACCELERATE_USING_COVERING_INDEX, Boolean.toString(!lakeFormationEnabled)); + + setFlintIndexStoreHost( + params, + parseUri( + metadata.getProperties().get(GLUE_INDEX_STORE_OPENSEARCH_URI), metadata.getName())); + setFlintIndexStoreAuthProperties( + params, + metadata.getProperties().get(GLUE_INDEX_STORE_OPENSEARCH_AUTH), + () -> metadata.getProperties().get(GLUE_INDEX_STORE_OPENSEARCH_AUTH_USERNAME), + () -> metadata.getProperties().get(GLUE_INDEX_STORE_OPENSEARCH_AUTH_PASSWORD), + () -> metadata.getProperties().get(GLUE_INDEX_STORE_OPENSEARCH_REGION)); + params.setConfigItem("spark.flint.datasource.name", metadata.getName()); + } + + private void setFlintIndexStoreHost(SparkSubmitParameters params, URI uri) { + params.setConfigItem(FLINT_INDEX_STORE_HOST_KEY, uri.getHost()); + params.setConfigItem(FLINT_INDEX_STORE_PORT_KEY, String.valueOf(uri.getPort())); + params.setConfigItem(FLINT_INDEX_STORE_SCHEME_KEY, uri.getScheme()); + } + + private void setFlintIndexStoreAuthProperties( + SparkSubmitParameters params, + String authType, + Supplier<String> userName, + Supplier<String> password, + Supplier<String> region) { + if (AuthenticationType.get(authType).equals(AuthenticationType.BASICAUTH)) { + params.setConfigItem(FLINT_INDEX_STORE_AUTH_KEY, FLINT_BASIC_AUTH); + params.setConfigItem(FLINT_INDEX_STORE_AUTH_USERNAME, userName.get()); + params.setConfigItem(FLINT_INDEX_STORE_AUTH_PASSWORD, password.get()); + } else if (AuthenticationType.get(authType).equals(AuthenticationType.AWSSIGV4AUTH)) { + params.setConfigItem(FLINT_INDEX_STORE_AUTH_KEY, "sigv4"); + params.setConfigItem(FLINT_INDEX_STORE_AWSREGION_KEY, region.get()); + } else { + params.setConfigItem(FLINT_INDEX_STORE_AUTH_KEY, authType); + } + } + + private URI parseUri(String opensearchUri, String datasourceName) { + try { + return new URI(opensearchUri); + } catch (URISyntaxException e) { + throw new IllegalArgumentException( + String.format( + "Bad URI in index store configuration of the %s data source.", datasourceName), + e); + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReader.java b/async-query/src/main/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReader.java new file mode 100644 index 0000000000..10113ece8d --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReader.java @@ -0,0 +1,77 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.response; + +import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD; +import static
org.opensearch.sql.spark.data.constants.SparkConstants.JOB_ID_FIELD; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONObject; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.SearchSourceBuilder; + +/** JobExecutionResponseReader implementation for reading response from OpenSearch index. */ +public class OpenSearchJobExecutionResponseReader implements JobExecutionResponseReader { + private final Client client; + private static final Logger LOG = LogManager.getLogger(); + + public OpenSearchJobExecutionResponseReader(Client client) { + this.client = client; + } + + @Override + public JSONObject getResultWithJobId(String jobId, String resultLocation) { + return searchInSparkIndex(QueryBuilders.termQuery(JOB_ID_FIELD, jobId), resultLocation); + } + + @Override + public JSONObject getResultWithQueryId(String queryId, String resultLocation) { + return searchInSparkIndex(QueryBuilders.termQuery("queryId", queryId), resultLocation); + } + + private JSONObject searchInSparkIndex(QueryBuilder query, String resultIndex) { + SearchRequest searchRequest = new SearchRequest(); + String searchResultIndex = resultIndex == null ? DEFAULT_RESULT_INDEX : resultIndex; + searchRequest.indices(searchResultIndex); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(query); + searchRequest.source(searchSourceBuilder); + ActionFuture searchResponseActionFuture; + JSONObject data = new JSONObject(); + try { + searchResponseActionFuture = client.search(searchRequest); + } catch (IndexNotFoundException e) { + // if there is no result index (e.g., EMR-S hasn't created the index yet), we return empty + // json + LOG.info(resultIndex + " is not created yet."); + return data; + } catch (Exception e) { + throw new RuntimeException(e); + } + SearchResponse searchResponse = searchResponseActionFuture.actionGet(); + if (searchResponse.status().getStatus() != 200) { + throw new RuntimeException( + "Fetching result from " + + searchResultIndex + + " index failed with status : " + + searchResponse.status()); + } else { + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + data.put(DATA_FIELD, searchHit.getSourceAsMap()); + } + return data; + } + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementAction.java b/async-query/src/main/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementAction.java new file mode 100644 index 0000000000..c188cf693f --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementAction.java @@ -0,0 +1,302 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.rest; + +import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; +import static org.opensearch.core.rest.RestStatus.INTERNAL_SERVER_ERROR; +import static org.opensearch.core.rest.RestStatus.TOO_MANY_REQUESTS; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.POST; + 
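/* Editor's note: the handler below wires three endpoints under
   /_plugins/_async_query. An illustrative interaction (the request-body field
   names are assumptions; see CreateAsyncQueryRequest for the exact shape):

     POST   /_plugins/_async_query            {"datasource": "my_glue", "lang": "sql", "query": "select 1"}
     GET    /_plugins/_async_query/{queryId}  fetches the result or current status
     DELETE /_plugins/_async_query/{queryId}  cancels the query
*/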
+import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import lombok.RequiredArgsConstructor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.client.node.NodeClient; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.datasources.exceptions.DataSourceClientException; +import org.opensearch.sql.datasources.exceptions.ErrorMessage; +import org.opensearch.sql.datasources.utils.Scheduler; +import org.opensearch.sql.legacy.metrics.MetricName; +import org.opensearch.sql.legacy.utils.MetricUtils; +import org.opensearch.sql.opensearch.setting.OpenSearchSettings; +import org.opensearch.sql.opensearch.util.RestRequestUtil; +import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; +import org.opensearch.sql.spark.leasemanager.ConcurrencyLimitExceededException; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.transport.TransportCancelAsyncQueryRequestAction; +import org.opensearch.sql.spark.transport.TransportCreateAsyncQueryRequestAction; +import org.opensearch.sql.spark.transport.TransportGetAsyncQueryResultAction; +import org.opensearch.sql.spark.transport.format.CreateAsyncQueryRequestConverter; +import org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionRequest; +import org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionResponse; +import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionRequest; +import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionResponse; +import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionRequest; +import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionResponse; + +@RequiredArgsConstructor +public class RestAsyncQueryManagementAction extends BaseRestHandler { + + public static final String ASYNC_QUERY_ACTIONS = "async_query_actions"; + public static final String BASE_ASYNC_QUERY_ACTION_URL = "/_plugins/_async_query"; + + private static final Logger LOG = LogManager.getLogger(RestAsyncQueryManagementAction.class); + + private final OpenSearchSettings settings; + + @Override + public String getName() { + return ASYNC_QUERY_ACTIONS; + } + + @Override + public List routes() { + return ImmutableList.of( + + /* + * + * Create a new async query using spark execution engine. + * Request URL: POST + * Request body: + * Ref [org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionRequest] + * Response body: + * Ref [org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionResponse] + */ + new Route(POST, BASE_ASYNC_QUERY_ACTION_URL), + + /* + * + * GET Async Query result with in spark execution engine. + * Request URL: GET + * Request body: + * Ref [org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionRequest] + * Response body: + * Ref [org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionResponse] + */ + new Route( + GET, String.format(Locale.ROOT, "%s/{%s}", BASE_ASYNC_QUERY_ACTION_URL, "queryId")), + + /* + * + * Cancel a job within spark execution engine. 
+ * Request URL: DELETE + * Request body: + * Ref [org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionRequest] + * Response body: + * Ref [org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionResponse] + */ + new Route( + DELETE, String.format(Locale.ROOT, "%s/{%s}", BASE_ASYNC_QUERY_ACTION_URL, "queryId"))); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient nodeClient) + throws IOException { + if (!dataSourcesEnabled()) { + return dataSourcesDisabledError(restRequest); + } + switch (restRequest.method()) { + case POST: + return executePostRequest(restRequest, nodeClient); + case GET: + return executeGetAsyncQueryResultRequest(restRequest, nodeClient); + case DELETE: + return executeDeleteRequest(restRequest, nodeClient); + default: + return restChannel -> + restChannel.sendResponse( + new BytesRestResponse( + RestStatus.METHOD_NOT_ALLOWED, String.valueOf(restRequest.method()))); + } + } + + private RestChannelConsumer executePostRequest(RestRequest restRequest, NodeClient nodeClient) { + return restChannel -> { + try { + MetricUtils.incrementNumericalMetric(MetricName.ASYNC_QUERY_CREATE_API_REQUEST_COUNT); + CreateAsyncQueryRequest submitJobRequest = + CreateAsyncQueryRequestConverter.fromXContentParser(restRequest.contentParser()); + Scheduler.schedule( + nodeClient, + () -> + nodeClient.execute( + TransportCreateAsyncQueryRequestAction.ACTION_TYPE, + new CreateAsyncQueryActionRequest(submitJobRequest), + new ActionListener<>() { + @Override + public void onResponse( + CreateAsyncQueryActionResponse createAsyncQueryActionResponse) { + restChannel.sendResponse( + new BytesRestResponse( + RestStatus.CREATED, + "application/json; charset=UTF-8", + createAsyncQueryActionResponse.getResult())); + } + + @Override + public void onFailure(Exception e) { + handleException(e, restChannel, restRequest.method()); + } + })); + } catch (Exception e) { + handleException(e, restChannel, restRequest.method()); + } + }; + } + + private RestChannelConsumer executeGetAsyncQueryResultRequest( + RestRequest restRequest, NodeClient nodeClient) { + MetricUtils.incrementNumericalMetric(MetricName.ASYNC_QUERY_GET_API_REQUEST_COUNT); + String queryId = restRequest.param("queryId"); + return restChannel -> + Scheduler.schedule( + nodeClient, + () -> + nodeClient.execute( + TransportGetAsyncQueryResultAction.ACTION_TYPE, + new GetAsyncQueryResultActionRequest(queryId), + new ActionListener<>() { + @Override + public void onResponse( + GetAsyncQueryResultActionResponse getAsyncQueryResultActionResponse) { + restChannel.sendResponse( + new BytesRestResponse( + RestStatus.OK, + "application/json; charset=UTF-8", + getAsyncQueryResultActionResponse.getResult())); + } + + @Override + public void onFailure(Exception e) { + handleException(e, restChannel, restRequest.method()); + } + })); + } + + private void handleException( + Exception e, RestChannel restChannel, RestRequest.Method requestMethod) { + if (e instanceof OpenSearchException) { + OpenSearchException exception = (OpenSearchException) e; + reportError(restChannel, exception, exception.status()); + addCustomerErrorMetric(requestMethod); + } else if (e instanceof ConcurrencyLimitExceededException) { + LOG.error("Too many request", e); + reportError(restChannel, e, TOO_MANY_REQUESTS); + addCustomerErrorMetric(requestMethod); + } else { + LOG.error("Error happened during request handling", e); + if (isClientError(e)) { + reportError(restChannel, e, BAD_REQUEST); + 
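/* Client-side failures (see isClientError below) are reported as HTTP 400 and
   counted against the customer failure metrics; everything else falls through to
   HTTP 500 and increments the system failure metrics instead. */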
addCustomerErrorMetric(requestMethod); + } else { + reportError(restChannel, e, INTERNAL_SERVER_ERROR); + addSystemErrorMetric(requestMethod); + } + } + } + + private RestChannelConsumer executeDeleteRequest(RestRequest restRequest, NodeClient nodeClient) { + MetricUtils.incrementNumericalMetric(MetricName.ASYNC_QUERY_CANCEL_API_REQUEST_COUNT); + String queryId = restRequest.param("queryId"); + return restChannel -> + Scheduler.schedule( + nodeClient, + () -> + nodeClient.execute( + TransportCancelAsyncQueryRequestAction.ACTION_TYPE, + new CancelAsyncQueryActionRequest(queryId), + new ActionListener<>() { + @Override + public void onResponse( + CancelAsyncQueryActionResponse cancelAsyncQueryActionResponse) { + restChannel.sendResponse( + new BytesRestResponse( + RestStatus.NO_CONTENT, + "application/json; charset=UTF-8", + cancelAsyncQueryActionResponse.getResult())); + } + + @Override + public void onFailure(Exception e) { + handleException(e, restChannel, restRequest.method()); + } + })); + } + + private void reportError(final RestChannel channel, final Exception e, final RestStatus status) { + channel.sendResponse( + new BytesRestResponse(status, new ErrorMessage(e, status.getStatus()).toString())); + } + + private static boolean isClientError(Exception e) { + return e instanceof IllegalArgumentException + || e instanceof IllegalStateException + || e instanceof DataSourceClientException + || e instanceof AsyncQueryNotFoundException + || e instanceof IllegalAccessException; + } + + private void addSystemErrorMetric(RestRequest.Method requestMethod) { + switch (requestMethod) { + case POST: + MetricUtils.incrementNumericalMetric( + MetricName.ASYNC_QUERY_CREATE_API_FAILED_REQ_COUNT_SYS); + break; + case GET: + MetricUtils.incrementNumericalMetric(MetricName.ASYNC_QUERY_GET_API_FAILED_REQ_COUNT_SYS); + break; + case DELETE: + MetricUtils.incrementNumericalMetric( + MetricName.ASYNC_QUERY_CANCEL_API_FAILED_REQ_COUNT_SYS); + break; + } + } + + private void addCustomerErrorMetric(RestRequest.Method requestMethod) { + switch (requestMethod) { + case POST: + MetricUtils.incrementNumericalMetric( + MetricName.ASYNC_QUERY_CREATE_API_FAILED_REQ_COUNT_CUS); + break; + case GET: + MetricUtils.incrementNumericalMetric(MetricName.ASYNC_QUERY_GET_API_FAILED_REQ_COUNT_CUS); + break; + case DELETE: + MetricUtils.incrementNumericalMetric( + MetricName.ASYNC_QUERY_CANCEL_API_FAILED_REQ_COUNT_CUS); + break; + } + } + + private boolean dataSourcesEnabled() { + return settings.getSettingValue(Settings.Key.DATASOURCES_ENABLED); + } + + private RestChannelConsumer dataSourcesDisabledError(RestRequest request) { + + RestRequestUtil.consumeAllRequestParameters(request); + + return channel -> { + reportError( + channel, + new IllegalAccessException( + String.format("%s setting is false", Settings.Key.DATASOURCES_ENABLED.getKeyValue())), + BAD_REQUEST); + }; + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQueryScheduler.java b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQueryScheduler.java new file mode 100644 index 0000000000..c7a66fc6be --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQueryScheduler.java @@ -0,0 +1,197 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler; + +import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; + +import com.google.common.annotations.VisibleForTesting; +import 
java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import org.apache.commons.io.IOUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.engine.DocumentMissingException; +import org.opensearch.index.engine.VersionConflictEngineException; +import org.opensearch.jobscheduler.spi.ScheduledJobRunner; +import org.opensearch.sql.spark.scheduler.job.OpenSearchRefreshIndexJob; +import org.opensearch.sql.spark.scheduler.model.OpenSearchRefreshIndexJobRequest; +import org.opensearch.threadpool.ThreadPool; + +/** Scheduler class for managing asynchronous query jobs. */ +public class OpenSearchAsyncQueryScheduler { + public static final String SCHEDULER_INDEX_NAME = ".async-query-scheduler"; + public static final String SCHEDULER_PLUGIN_JOB_TYPE = "async-query-scheduler"; + private static final String SCHEDULER_INDEX_MAPPING_FILE_NAME = + "async-query-scheduler-index-mapping.yml"; + private static final String SCHEDULER_INDEX_SETTINGS_FILE_NAME = + "async-query-scheduler-index-settings.yml"; + private static final Logger LOG = LogManager.getLogger(); + + private Client client; + private ClusterService clusterService; + + /** Loads job resources, setting up required services and job runner instance. */ + public void loadJobResource(Client client, ClusterService clusterService, ThreadPool threadPool) { + this.client = client; + this.clusterService = clusterService; + OpenSearchRefreshIndexJob openSearchRefreshIndexJob = + OpenSearchRefreshIndexJob.getJobRunnerInstance(); + openSearchRefreshIndexJob.setClusterService(clusterService); + openSearchRefreshIndexJob.setThreadPool(threadPool); + openSearchRefreshIndexJob.setClient(client); + } + + /** Schedules a new job by indexing it into the job index. 
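+   *
+   * <p>A minimal usage sketch (illustrative values; the job name doubles as the document id, so
+   * scheduling a duplicate name raises {@link IllegalArgumentException}):
+   *
+   * <pre>{@code
+   * scheduler.scheduleJob(
+   *     OpenSearchRefreshIndexJobRequest.builder()
+   *         .jobName("refresh_flint_index_example") // hypothetical job name
+   *         .jobType("interval")
+   *         .schedule(new IntervalSchedule(Instant.now(), 1, ChronoUnit.MINUTES))
+   *         .enabled(true)
+   *         .lastUpdateTime(Instant.now())
+   *         .build());
+   * }</pre>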
+   */
+  public void scheduleJob(OpenSearchRefreshIndexJobRequest request) {
+    if (!this.clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)) {
+      createAsyncQuerySchedulerIndex();
+    }
+    IndexRequest indexRequest = new IndexRequest(SCHEDULER_INDEX_NAME);
+    indexRequest.id(request.getName());
+    indexRequest.opType(DocWriteRequest.OpType.CREATE);
+    indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+    IndexResponse indexResponse;
+    try {
+      indexRequest.source(request.toXContent(JsonXContent.contentBuilder(), EMPTY_PARAMS));
+      ActionFuture<IndexResponse> indexResponseActionFuture = client.index(indexRequest);
+      indexResponse = indexResponseActionFuture.actionGet();
+    } catch (VersionConflictEngineException exception) {
+      throw new IllegalArgumentException("A job already exists with name: " + request.getName());
+    } catch (Throwable e) {
+      LOG.error("Failed to schedule job: {}", request.getName(), e);
+      throw new RuntimeException(e);
+    }
+
+    if (indexResponse.getResult().equals(DocWriteResponse.Result.CREATED)) {
+      LOG.debug("Job: {} successfully created", request.getName());
+    } else {
+      throw new RuntimeException(
+          "Schedule job failed with result: " + indexResponse.getResult().getLowercase());
+    }
+  }
+
+  /** Unschedules a job by marking it as disabled and updating its last update time. */
+  public void unscheduleJob(String jobId) throws IOException {
+    assertIndexExists();
+    OpenSearchRefreshIndexJobRequest request =
+        OpenSearchRefreshIndexJobRequest.builder()
+            .jobName(jobId)
+            .enabled(false)
+            .lastUpdateTime(Instant.now())
+            .build();
+    updateJob(request);
+  }
+
+  /** Updates an existing job with new parameters. */
+  public void updateJob(OpenSearchRefreshIndexJobRequest request) throws IOException {
+    assertIndexExists();
+    UpdateRequest updateRequest = new UpdateRequest(SCHEDULER_INDEX_NAME, request.getName());
+    updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+    updateRequest.doc(request.toXContent(JsonXContent.contentBuilder(), EMPTY_PARAMS));
+    UpdateResponse updateResponse;
+    try {
+      ActionFuture<UpdateResponse> updateResponseActionFuture = client.update(updateRequest);
+      updateResponse = updateResponseActionFuture.actionGet();
+    } catch (DocumentMissingException exception) {
+      throw new IllegalArgumentException("Job: " + request.getName() + " doesn't exist");
+    } catch (Throwable e) {
+      LOG.error("Failed to update job: {}", request.getName(), e);
+      throw new RuntimeException(e);
+    }
+
+    if (updateResponse.getResult().equals(DocWriteResponse.Result.UPDATED)
+        || updateResponse.getResult().equals(DocWriteResponse.Result.NOOP)) {
+      LOG.debug("Job: {} successfully updated", request.getName());
+    } else {
+      throw new RuntimeException(
+          "Update job failed with result: " + updateResponse.getResult().getLowercase());
+    }
+  }
+
+  /** Removes a job by deleting its document from the index.
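+   *
+   * <p>The delete is issued with {@code RefreshPolicy.IMMEDIATE} and is not idempotent: a
+   * missing job document raises {@link IllegalArgumentException}.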
+   */
+  public void removeJob(String jobId) {
+    assertIndexExists();
+    DeleteRequest deleteRequest = new DeleteRequest(SCHEDULER_INDEX_NAME, jobId);
+    deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+    ActionFuture<DeleteResponse> deleteResponseActionFuture = client.delete(deleteRequest);
+    DeleteResponse deleteResponse = deleteResponseActionFuture.actionGet();
+
+    if (deleteResponse.getResult().equals(DocWriteResponse.Result.DELETED)) {
+      LOG.debug("Job: {} successfully deleted", jobId);
+    } else if (deleteResponse.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) {
+      throw new IllegalArgumentException("Job: " + jobId + " doesn't exist");
+    } else {
+      throw new RuntimeException(
+          "Remove job failed with result: " + deleteResponse.getResult().getLowercase());
+    }
+  }
+
+  /** Creates the async query scheduler index with specified mappings and settings. */
+  @VisibleForTesting
+  void createAsyncQuerySchedulerIndex() {
+    try {
+      InputStream mappingFileStream =
+          OpenSearchAsyncQueryScheduler.class
+              .getClassLoader()
+              .getResourceAsStream(SCHEDULER_INDEX_MAPPING_FILE_NAME);
+      InputStream settingsFileStream =
+          OpenSearchAsyncQueryScheduler.class
+              .getClassLoader()
+              .getResourceAsStream(SCHEDULER_INDEX_SETTINGS_FILE_NAME);
+      CreateIndexRequest createIndexRequest = new CreateIndexRequest(SCHEDULER_INDEX_NAME);
+      createIndexRequest.mapping(
+          IOUtils.toString(mappingFileStream, StandardCharsets.UTF_8), XContentType.YAML);
+      createIndexRequest.settings(
+          IOUtils.toString(settingsFileStream, StandardCharsets.UTF_8), XContentType.YAML);
+      ActionFuture<CreateIndexResponse> createIndexResponseActionFuture =
+          client.admin().indices().create(createIndexRequest);
+      CreateIndexResponse createIndexResponse = createIndexResponseActionFuture.actionGet();
+
+      if (createIndexResponse.isAcknowledged()) {
+        LOG.debug("Index: {} creation acknowledged", SCHEDULER_INDEX_NAME);
+      } else {
+        throw new RuntimeException("Index creation is not acknowledged.");
+      }
+    } catch (Throwable e) {
+      LOG.error("Error creating index: {}", SCHEDULER_INDEX_NAME, e);
+      throw new RuntimeException(
+          "Internal server error while creating "
+              + SCHEDULER_INDEX_NAME
+              + " index: "
+              + e.getMessage(),
+          e);
+    }
+  }
+
+  private void assertIndexExists() {
+    if (!this.clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)) {
+      throw new IllegalStateException("Job index does not exist.");
+    }
+  }
+
+  /** Returns the job runner instance for the scheduler.
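+   *
+   * <p>Intended for the JobScheduler SPI wiring; a sketch of how a {@code JobSchedulerExtension}
+   * implementation might delegate here (assuming the SQL plugin declares such an extension):
+   *
+   * <pre>{@code
+   * @Override
+   * public ScheduledJobRunner getJobRunner() {
+   *   return OpenSearchAsyncQueryScheduler.getJobRunner();
+   * }
+   * }</pre>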
*/ + public static ScheduledJobRunner getJobRunner() { + return OpenSearchRefreshIndexJob.getJobRunnerInstance(); + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchRefreshIndexJobRequestParser.java b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchRefreshIndexJobRequestParser.java new file mode 100644 index 0000000000..0422e7c015 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/OpenSearchRefreshIndexJobRequestParser.java @@ -0,0 +1,71 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler; + +import java.io.IOException; +import java.time.Instant; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; +import org.opensearch.jobscheduler.spi.ScheduledJobParser; +import org.opensearch.jobscheduler.spi.schedule.ScheduleParser; +import org.opensearch.sql.spark.scheduler.model.OpenSearchRefreshIndexJobRequest; + +public class OpenSearchRefreshIndexJobRequestParser { + + private static Instant parseInstantValue(XContentParser parser) throws IOException { + if (XContentParser.Token.VALUE_NULL.equals(parser.currentToken())) { + return null; + } + if (parser.currentToken().isValue()) { + return Instant.ofEpochMilli(parser.longValue()); + } + XContentParserUtils.throwUnknownToken(parser.currentToken(), parser.getTokenLocation()); + return null; + } + + public static ScheduledJobParser getJobParser() { + return (parser, id, jobDocVersion) -> { + OpenSearchRefreshIndexJobRequest.OpenSearchRefreshIndexJobRequestBuilder builder = + OpenSearchRefreshIndexJobRequest.builder(); + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + + while (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) { + String fieldName = parser.currentName(); + parser.nextToken(); + switch (fieldName) { + case OpenSearchRefreshIndexJobRequest.JOB_NAME_FIELD: + builder.jobName(parser.text()); + break; + case OpenSearchRefreshIndexJobRequest.JOB_TYPE_FIELD: + builder.jobType(parser.text()); + break; + case OpenSearchRefreshIndexJobRequest.ENABLED_FIELD: + builder.enabled(parser.booleanValue()); + break; + case OpenSearchRefreshIndexJobRequest.ENABLED_TIME_FIELD: + builder.enabledTime(parseInstantValue(parser)); + break; + case OpenSearchRefreshIndexJobRequest.LAST_UPDATE_TIME_FIELD: + builder.lastUpdateTime(parseInstantValue(parser)); + break; + case OpenSearchRefreshIndexJobRequest.SCHEDULE_FIELD: + builder.schedule(ScheduleParser.parse(parser)); + break; + case OpenSearchRefreshIndexJobRequest.LOCK_DURATION_SECONDS: + builder.lockDurationSeconds(parser.longValue()); + break; + case OpenSearchRefreshIndexJobRequest.JITTER: + builder.jitter(parser.doubleValue()); + break; + default: + XContentParserUtils.throwUnknownToken(parser.currentToken(), parser.getTokenLocation()); + } + } + return builder.build(); + }; + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJob.java b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJob.java new file mode 100644 index 0000000000..e465a8790f --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJob.java @@ -0,0 +1,93 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler.job; + 
+import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.jobscheduler.spi.JobExecutionContext; +import org.opensearch.jobscheduler.spi.ScheduledJobParameter; +import org.opensearch.jobscheduler.spi.ScheduledJobRunner; +import org.opensearch.plugins.Plugin; +import org.opensearch.sql.spark.scheduler.model.OpenSearchRefreshIndexJobRequest; +import org.opensearch.threadpool.ThreadPool; + +/** + * The job runner class for scheduling refresh index query. + * + *

<p>The job runner should be a singleton class if it uses an OpenSearch client or other objects
+ * passed in from OpenSearch. When the job runner is registered with the JobScheduler plugin,
+ * OpenSearch has not yet invoked the plugins' createComponents() method, so the plugin is not
+ * fully initialized and the OpenSearch {@link org.opensearch.client.Client}, {@link
+ * ClusterService} and other objects are not yet available to the plugin or this job runner.
+ *
+ *

<p>So we have to move this job runner initialization into the {@link Plugin} createComponents()
+ * method, and use a singleton job runner to ensure that the instance registered with the
+ * JobScheduler plugin is a usable one.
+ */
+public class OpenSearchRefreshIndexJob implements ScheduledJobRunner {
+
+  private static final Logger log = LogManager.getLogger(OpenSearchRefreshIndexJob.class);
+
+  public static OpenSearchRefreshIndexJob INSTANCE = new OpenSearchRefreshIndexJob();
+
+  public static OpenSearchRefreshIndexJob getJobRunnerInstance() {
+    return INSTANCE;
+  }
+
+  private ClusterService clusterService;
+  private ThreadPool threadPool;
+  private Client client;
+
+  private OpenSearchRefreshIndexJob() {
+    // Singleton class, use getJobRunnerInstance method instead of constructor
+  }
+
+  public void setClusterService(ClusterService clusterService) {
+    this.clusterService = clusterService;
+  }
+
+  public void setThreadPool(ThreadPool threadPool) {
+    this.threadPool = threadPool;
+  }
+
+  public void setClient(Client client) {
+    this.client = client;
+  }
+
+  @Override
+  public void runJob(ScheduledJobParameter jobParameter, JobExecutionContext context) {
+    if (!(jobParameter instanceof OpenSearchRefreshIndexJobRequest)) {
+      throw new IllegalStateException(
+          "Job parameter is not instance of OpenSearchRefreshIndexJobRequest, type: "
+              + jobParameter.getClass().getCanonicalName());
+    }
+
+    if (this.clusterService == null) {
+      throw new IllegalStateException("ClusterService is not initialized.");
+    }
+
+    if (this.threadPool == null) {
+      throw new IllegalStateException("ThreadPool is not initialized.");
+    }
+
+    if (this.client == null) {
+      throw new IllegalStateException("Client is not initialized.");
+    }
+
+    Runnable runnable =
+        () -> {
+          doRefresh(jobParameter.getName());
+        };
+    threadPool.generic().submit(runnable);
+  }
+
+  void doRefresh(String refreshIndex) {
+    // TODO: add logic to refresh index
+    log.info("Scheduled refresh index job on: {}", refreshIndex);
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequest.java b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequest.java
new file mode 100644
index 0000000000..7eaa4e2d29
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequest.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.scheduler.model;
+
+import java.io.IOException;
+import java.time.Instant;
+import lombok.Builder;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.jobscheduler.spi.ScheduledJobParameter;
+import org.opensearch.jobscheduler.spi.schedule.Schedule;
+
+/** Represents a job request to refresh an index.
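+ *
+ * <p>Serialized through {@link #toXContent}; a sketch of the resulting document (illustrative
+ * values, optional fields omitted when unset):
+ *
+ * <pre>{@code
+ * {
+ *   "jobName": "refresh_flint_index_example",
+ *   "enabled": true,
+ *   "schedule": { "interval": { "start_time": 1700000000000, "period": 1, "unit": "Minutes" } },
+ *   "jobType": "interval",
+ *   "lastUpdateTime": 1700000000000
+ * }
+ * }</pre>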
*/ +@Builder +public class OpenSearchRefreshIndexJobRequest implements ScheduledJobParameter { + // Constant fields for JSON serialization + public static final String JOB_NAME_FIELD = "jobName"; + public static final String JOB_TYPE_FIELD = "jobType"; + public static final String LAST_UPDATE_TIME_FIELD = "lastUpdateTime"; + public static final String LAST_UPDATE_TIME_FIELD_READABLE = "last_update_time_field"; + public static final String SCHEDULE_FIELD = "schedule"; + public static final String ENABLED_TIME_FIELD = "enabledTime"; + public static final String ENABLED_TIME_FIELD_READABLE = "enabled_time_field"; + public static final String LOCK_DURATION_SECONDS = "lockDurationSeconds"; + public static final String JITTER = "jitter"; + public static final String ENABLED_FIELD = "enabled"; + + // name is doc id + private final String jobName; + private final String jobType; + private final Schedule schedule; + private final boolean enabled; + private final Instant lastUpdateTime; + private final Instant enabledTime; + private final Long lockDurationSeconds; + private final Double jitter; + + @Override + public String getName() { + return jobName; + } + + public String getJobType() { + return jobType; + } + + @Override + public Schedule getSchedule() { + return schedule; + } + + @Override + public boolean isEnabled() { + return enabled; + } + + @Override + public Instant getLastUpdateTime() { + return lastUpdateTime; + } + + @Override + public Instant getEnabledTime() { + return enabledTime; + } + + @Override + public Long getLockDurationSeconds() { + return lockDurationSeconds; + } + + @Override + public Double getJitter() { + return jitter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) + throws IOException { + builder.startObject(); + builder.field(JOB_NAME_FIELD, getName()).field(ENABLED_FIELD, isEnabled()); + if (getSchedule() != null) { + builder.field(SCHEDULE_FIELD, getSchedule()); + } + if (getJobType() != null) { + builder.field(JOB_TYPE_FIELD, getJobType()); + } + if (getEnabledTime() != null) { + builder.timeField( + ENABLED_TIME_FIELD, ENABLED_TIME_FIELD_READABLE, getEnabledTime().toEpochMilli()); + } + builder.timeField( + LAST_UPDATE_TIME_FIELD, + LAST_UPDATE_TIME_FIELD_READABLE, + getLastUpdateTime().toEpochMilli()); + if (this.lockDurationSeconds != null) { + builder.field(LOCK_DURATION_SECONDS, this.lockDurationSeconds); + } + if (this.jitter != null) { + builder.field(JITTER, this.jitter); + } + builder.endObject(); + return builder; + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestAction.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestAction.java new file mode 100644 index 0000000000..232a280db5 --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestAction.java @@ -0,0 +1,52 @@ +/* + * + * * Copyright OpenSearch Contributors + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.opensearch.sql.spark.transport; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl; +import org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionRequest; +import 
org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionResponse;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportService;
+
+public class TransportCancelAsyncQueryRequestAction
+    extends HandledTransportAction<CancelAsyncQueryActionRequest, CancelAsyncQueryActionResponse> {
+
+  public static final String NAME = "cluster:admin/opensearch/ql/async_query/delete";
+  private final AsyncQueryExecutorServiceImpl asyncQueryExecutorService;
+  public static final ActionType<CancelAsyncQueryActionResponse> ACTION_TYPE =
+      new ActionType<>(NAME, CancelAsyncQueryActionResponse::new);
+
+  @Inject
+  public TransportCancelAsyncQueryRequestAction(
+      TransportService transportService,
+      ActionFilters actionFilters,
+      AsyncQueryExecutorServiceImpl asyncQueryExecutorService) {
+    super(NAME, transportService, actionFilters, CancelAsyncQueryActionRequest::new);
+    this.asyncQueryExecutorService = asyncQueryExecutorService;
+  }
+
+  @Override
+  protected void doExecute(
+      Task task,
+      CancelAsyncQueryActionRequest request,
+      ActionListener<CancelAsyncQueryActionResponse> listener) {
+    try {
+      String jobId = asyncQueryExecutorService.cancelQuery(request.getQueryId());
+      listener.onResponse(
+          new CancelAsyncQueryActionResponse(
+              String.format("Deleted async query with id: %s", jobId)));
+    } catch (Exception e) {
+      listener.onFailure(e);
+    }
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestAction.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestAction.java
new file mode 100644
index 0000000000..bef3b29987
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestAction.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport;
+
+import java.util.Locale;
+import org.opensearch.action.ActionType;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.sql.common.setting.Settings;
+import org.opensearch.sql.opensearch.setting.OpenSearchSettings;
+import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl;
+import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse;
+import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionRequest;
+import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionResponse;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportService;
+
+public class TransportCreateAsyncQueryRequestAction
+    extends HandledTransportAction<CreateAsyncQueryActionRequest, CreateAsyncQueryActionResponse> {
+
+  private final AsyncQueryExecutorService asyncQueryExecutorService;
+  private final OpenSearchSettings pluginSettings;
+
+  public static final String NAME = "cluster:admin/opensearch/ql/async_query/create";
+  public static final ActionType<CreateAsyncQueryActionResponse> ACTION_TYPE =
+      new ActionType<>(NAME, CreateAsyncQueryActionResponse::new);
+
+  @Inject
+  public TransportCreateAsyncQueryRequestAction(
+      TransportService transportService,
+      ActionFilters actionFilters,
+      AsyncQueryExecutorServiceImpl jobManagementService,
+      OpenSearchSettings pluginSettings) {
+    super(NAME, transportService, actionFilters, CreateAsyncQueryActionRequest::new);
+    this.asyncQueryExecutorService = jobManagementService;
+    this.pluginSettings = pluginSettings;
+  }
+
+  @Override
+  protected void doExecute(
+      Task task,
+      CreateAsyncQueryActionRequest request,
+      ActionListener<CreateAsyncQueryActionResponse> listener) {
+    try {
+      if (!(Boolean) pluginSettings.getSettingValue(Settings.Key.ASYNC_QUERY_ENABLED)) {
+        listener.onFailure(
+            new IllegalAccessException(
+                String.format(
+                    Locale.ROOT,
+                    "%s setting is false",
+                    Settings.Key.ASYNC_QUERY_ENABLED.getKeyValue())));
+        return;
+      }
+
+      CreateAsyncQueryRequest createAsyncQueryRequest = request.getCreateAsyncQueryRequest();
+      CreateAsyncQueryResponse createAsyncQueryResponse =
+          asyncQueryExecutorService.createAsyncQuery(
+              createAsyncQueryRequest, new NullAsyncQueryRequestContext());
+      String responseContent =
+          new JsonResponseFormatter<CreateAsyncQueryResponse>(JsonResponseFormatter.Style.PRETTY) {
+            @Override
+            protected Object buildJsonObject(CreateAsyncQueryResponse response) {
+              return response;
+            }
+          }.format(createAsyncQueryResponse);
+      listener.onResponse(new CreateAsyncQueryActionResponse(responseContent));
+    } catch (Exception e) {
+      listener.onFailure(e);
+    }
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultAction.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultAction.java
new file mode 100644
index 0000000000..0e9da0c13c
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultAction.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.sql.executor.pagination.Cursor;
+import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
+import org.opensearch.sql.protocol.response.format.ResponseFormatter;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl;
+import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse;
+import org.opensearch.sql.spark.transport.format.AsyncQueryResultResponseFormatter;
+import org.opensearch.sql.spark.transport.model.AsyncQueryResult;
+import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionRequest;
+import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionResponse;
+import org.opensearch.tasks.Task;
+import org.opensearch.transport.TransportService;
+
+public class TransportGetAsyncQueryResultAction
+    extends HandledTransportAction<
+        GetAsyncQueryResultActionRequest, GetAsyncQueryResultActionResponse> {
+
+  private final AsyncQueryExecutorService asyncQueryExecutorService;
+
+  public static final String NAME = "cluster:admin/opensearch/ql/async_query/result";
+  public static final ActionType<GetAsyncQueryResultActionResponse> ACTION_TYPE =
+      new ActionType<>(NAME, GetAsyncQueryResultActionResponse::new);
+
+  @Inject
+  public TransportGetAsyncQueryResultAction(
+      TransportService transportService,
+      ActionFilters actionFilters,
+      AsyncQueryExecutorServiceImpl jobManagementService) {
+    super(NAME, transportService, actionFilters, GetAsyncQueryResultActionRequest::new);
+    this.asyncQueryExecutorService = jobManagementService;
+  }
+
+  @Override
+  protected void doExecute(
+      Task task,
+      GetAsyncQueryResultActionRequest request,
+      ActionListener<GetAsyncQueryResultActionResponse> listener) {
+    try {
+      String jobId = request.getQueryId();
+      AsyncQueryExecutionResponse asyncQueryExecutionResponse =
+          asyncQueryExecutorService.getAsyncQueryResults(jobId);
+      ResponseFormatter<AsyncQueryResult> formatter =
+          new AsyncQueryResultResponseFormatter(JsonResponseFormatter.Style.PRETTY);
+      String responseContent =
+          formatter.format(
+              new AsyncQueryResult(
+                  asyncQueryExecutionResponse.getStatus(),
+                  asyncQueryExecutionResponse.getSchema(),
+                  asyncQueryExecutionResponse.getResults(),
+                  Cursor.None,
+                  asyncQueryExecutionResponse.getError()));
+      listener.onResponse(new GetAsyncQueryResultActionResponse(responseContent));
+    } catch (Exception e) {
+      listener.onFailure(e);
+    }
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModule.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModule.java
new file mode 100644
index 0000000000..05f7d1095c
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModule.java
@@ -0,0 +1,256 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.config;
+
+import static org.opensearch.sql.spark.execution.statestore.StateStore.ALL_DATASOURCE;
+
+import lombok.RequiredArgsConstructor;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.AbstractModule;
+import org.opensearch.common.inject.Provides;
+import org.opensearch.common.inject.Singleton;
+import org.opensearch.sql.common.setting.Settings;
+import org.opensearch.sql.datasource.DataSourceService;
+import org.opensearch.sql.datasource.model.DataSourceType;
+import org.opensearch.sql.legacy.metrics.GaugeMetric;
+import org.opensearch.sql.legacy.metrics.Metrics;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl;
+import org.opensearch.sql.spark.asyncquery.AsyncQueryJobMetadataStorageService;
+import org.opensearch.sql.spark.asyncquery.OpenSearchAsyncQueryJobMetadataStorageService;
+import org.opensearch.sql.spark.client.EMRServerlessClientFactory;
+import org.opensearch.sql.spark.client.EMRServerlessClientFactoryImpl;
+import org.opensearch.sql.spark.config.OpenSearchExtraParameterComposer;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfigClusterSettingLoader;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier;
+import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplierImpl;
+import org.opensearch.sql.spark.dispatcher.DatasourceEmbeddedQueryIdProvider;
+import org.opensearch.sql.spark.dispatcher.QueryHandlerFactory;
+import org.opensearch.sql.spark.dispatcher.QueryIdProvider;
+import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher;
+import org.opensearch.sql.spark.execution.session.DatasourceEmbeddedSessionIdProvider;
+import org.opensearch.sql.spark.execution.session.OpenSearchSessionConfigSupplier;
+import org.opensearch.sql.spark.execution.session.SessionConfigSupplier;
+import org.opensearch.sql.spark.execution.session.SessionManager;
+import org.opensearch.sql.spark.execution.statestore.OpenSearchSessionStorageService;
+import
org.opensearch.sql.spark.execution.statestore.OpenSearchStatementStorageService; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.execution.xcontent.AsyncQueryJobMetadataXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.FlintIndexStateModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadataServiceImpl; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; +import org.opensearch.sql.spark.flint.IndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.OpenSearchFlintIndexClient; +import org.opensearch.sql.spark.flint.OpenSearchFlintIndexStateModelService; +import org.opensearch.sql.spark.flint.OpenSearchIndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import org.opensearch.sql.spark.leasemanager.DefaultLeaseManager; +import org.opensearch.sql.spark.metrics.MetricsService; +import org.opensearch.sql.spark.metrics.OpenSearchMetricsService; +import org.opensearch.sql.spark.parameter.S3GlueDataSourceSparkParameterComposer; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.response.OpenSearchJobExecutionResponseReader; + +@RequiredArgsConstructor +public class AsyncExecutorServiceModule extends AbstractModule { + + @Override + protected void configure() {} + + @Provides + public AsyncQueryExecutorService asyncQueryExecutorService( + AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService, + SparkQueryDispatcher sparkQueryDispatcher, + SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier) { + return new AsyncQueryExecutorServiceImpl( + asyncQueryJobMetadataStorageService, + sparkQueryDispatcher, + sparkExecutionEngineConfigSupplier); + } + + @Provides + public AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService( + StateStore stateStore, AsyncQueryJobMetadataXContentSerializer serializer) { + return new OpenSearchAsyncQueryJobMetadataStorageService(stateStore, serializer); + } + + @Provides + @Singleton + public StateStore stateStore(NodeClient client, ClusterService clusterService) { + StateStore stateStore = new StateStore(client, clusterService); + registerStateStoreMetrics(stateStore); + return stateStore; + } + + @Provides + public SparkQueryDispatcher sparkQueryDispatcher( + DataSourceService dataSourceService, + SessionManager sessionManager, + QueryHandlerFactory queryHandlerFactory, + QueryIdProvider queryIdProvider) { + return new SparkQueryDispatcher( + dataSourceService, sessionManager, queryHandlerFactory, queryIdProvider); + } + + @Provides + public QueryIdProvider queryIdProvider() { + return new DatasourceEmbeddedQueryIdProvider(); + } + + @Provides + public QueryHandlerFactory queryhandlerFactory( + JobExecutionResponseReader openSearchJobExecutionResponseReader, + FlintIndexMetadataServiceImpl flintIndexMetadataReader, + SessionManager sessionManager, + DefaultLeaseManager 
defaultLeaseManager, + IndexDMLResultStorageService indexDMLResultStorageService, + FlintIndexOpFactory flintIndexOpFactory, + EMRServerlessClientFactory emrServerlessClientFactory, + MetricsService metricsService, + SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider) { + return new QueryHandlerFactory( + openSearchJobExecutionResponseReader, + flintIndexMetadataReader, + sessionManager, + defaultLeaseManager, + indexDMLResultStorageService, + flintIndexOpFactory, + emrServerlessClientFactory, + metricsService, + sparkSubmitParametersBuilderProvider); + } + + @Provides + public FlintIndexOpFactory flintIndexOpFactory( + FlintIndexStateModelService flintIndexStateModelService, + FlintIndexClient flintIndexClient, + FlintIndexMetadataServiceImpl flintIndexMetadataService, + EMRServerlessClientFactory emrServerlessClientFactory) { + return new FlintIndexOpFactory( + flintIndexStateModelService, + flintIndexClient, + flintIndexMetadataService, + emrServerlessClientFactory); + } + + @Provides + public FlintIndexClient flintIndexClient(NodeClient nodeClient) { + return new OpenSearchFlintIndexClient(nodeClient); + } + + @Provides + public FlintIndexStateModelService flintIndexStateModelService( + StateStore stateStore, FlintIndexStateModelXContentSerializer serializer) { + return new OpenSearchFlintIndexStateModelService(stateStore, serializer); + } + + @Provides + public SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider( + Settings settings, SparkExecutionEngineConfigClusterSettingLoader clusterSettingLoader) { + SparkParameterComposerCollection collection = new SparkParameterComposerCollection(); + collection.register(DataSourceType.S3GLUE, new S3GlueDataSourceSparkParameterComposer()); + collection.register(new OpenSearchExtraParameterComposer(clusterSettingLoader)); + return new SparkSubmitParametersBuilderProvider(collection); + } + + @Provides + public IndexDMLResultStorageService indexDMLResultStorageService( + DataSourceService dataSourceService, StateStore stateStore) { + return new OpenSearchIndexDMLResultStorageService(dataSourceService, stateStore); + } + + @Provides + public SessionManager sessionManager( + SessionStorageService sessionStorageService, + StatementStorageService statementStorageService, + EMRServerlessClientFactory emrServerlessClientFactory, + SessionConfigSupplier sessionConfigSupplier) { + return new SessionManager( + sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + new DatasourceEmbeddedSessionIdProvider()); + } + + @Provides + public SessionStorageService sessionStorageService( + StateStore stateStore, SessionModelXContentSerializer serializer) { + return new OpenSearchSessionStorageService(stateStore, serializer); + } + + @Provides + public StatementStorageService statementStorageService( + StateStore stateStore, StatementModelXContentSerializer serializer) { + return new OpenSearchStatementStorageService(stateStore, serializer); + } + + @Provides + public DefaultLeaseManager defaultLeaseManager(Settings settings, StateStore stateStore) { + return new DefaultLeaseManager(settings, stateStore); + } + + @Provides + public EMRServerlessClientFactory createEMRServerlessClientFactory( + SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier, + MetricsService metricsService) { + return new EMRServerlessClientFactoryImpl(sparkExecutionEngineConfigSupplier, metricsService); + } + + @Provides + public MetricsService metricsService() { + return new 
OpenSearchMetricsService();
+  }
+
+  @Provides
+  public SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier(
+      Settings settings, SparkExecutionEngineConfigClusterSettingLoader clusterSettingLoader) {
+    return new SparkExecutionEngineConfigSupplierImpl(settings, clusterSettingLoader);
+  }
+
+  @Provides
+  public SparkExecutionEngineConfigClusterSettingLoader
+      sparkExecutionEngineConfigClusterSettingLoader(Settings settings) {
+    return new SparkExecutionEngineConfigClusterSettingLoader(settings);
+  }
+
+  @Provides
+  @Singleton
+  public FlintIndexMetadataServiceImpl flintIndexMetadataReader(NodeClient client) {
+    return new FlintIndexMetadataServiceImpl(client);
+  }
+
+  @Provides
+  public JobExecutionResponseReader jobExecutionResponseReader(NodeClient client) {
+    return new OpenSearchJobExecutionResponseReader(client);
+  }
+
+  @Provides
+  public SessionConfigSupplier sessionConfigSupplier(Settings settings) {
+    return new OpenSearchSessionConfigSupplier(settings);
+  }
+
+  private void registerStateStoreMetrics(StateStore stateStore) {
+    GaugeMetric<Long> activeSessionMetric =
+        new GaugeMetric<>(
+            "active_async_query_sessions_count",
+            StateStore.activeSessionsCount(stateStore, ALL_DATASOURCE));
+    GaugeMetric<Long> activeStatementMetric =
+        new GaugeMetric<>(
+            "active_async_query_statements_count",
+            StateStore.activeStatementsCount(stateStore, ALL_DATASOURCE));
+    Metrics.getInstance().registerMetric(activeSessionMetric);
+    Metrics.getInstance().registerMetric(activeStatementMetric);
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatter.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatter.java
new file mode 100644
index 0000000000..afa6797694
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatter.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.format;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import java.util.List;
+import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import org.opensearch.core.common.Strings;
+import org.opensearch.sql.protocol.response.QueryResult;
+import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
+import org.opensearch.sql.spark.transport.model.AsyncQueryResult;
+
+/**
+ * JSON response format with schema header and data rows. For example,
+ *
+ * <pre>

    + *  {
    + *      "schema": [
    + *          {
    + *              "name": "name",
    + *              "type": "string"
    + *          }
    + *      ],
    + *      "datarows": [
    + *          ["John"],
    + *          ["Smith"]
    + *      ],
    + *      "total": 2,
    + *      "size": 2
    + *  }
+ * </pre>
+ */
+public class AsyncQueryResultResponseFormatter extends JsonResponseFormatter<AsyncQueryResult> {
+
+  public AsyncQueryResultResponseFormatter(Style style) {
+    super(style);
+  }
+
+  @Override
+  public Object buildJsonObject(AsyncQueryResult response) {
+    JsonResponse.JsonResponseBuilder json = JsonResponse.builder();
+    if (response.getStatus().equalsIgnoreCase("success")) {
+      json.total(response.size()).size(response.size());
+      json.schema(
+          response.columnNameTypes().entrySet().stream()
+              .map((entry) -> new Column(entry.getKey(), entry.getValue()))
+              .collect(Collectors.toList()));
+      json.datarows(fetchDataRows(response));
+    }
+    json.status(response.getStatus());
+    if (!Strings.isEmpty(response.getError())) {
+      json.error(response.getError());
+    }
+
+    return json.build();
+  }
+
+  private Object[][] fetchDataRows(QueryResult response) {
+    Object[][] rows = new Object[response.size()][];
+    int i = 0;
+    for (Object[] values : response) {
+      rows[i++] = values;
+    }
+    return rows;
+  }
+
+  /** org.json requires these inner data classes be public (and static) */
+  @Builder
+  @Getter
+  @JsonIgnoreProperties(ignoreUnknown = true)
+  public static class JsonResponse {
+
+    private final String status;
+
+    private final List<Column> schema;
+
+    private final Object[][] datarows;
+
+    private Integer total;
+    private Integer size;
+    private final String error;
+  }
+
+  @RequiredArgsConstructor
+  @Getter
+  public static class Column {
+    private final String name;
+    private final String type;
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverter.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverter.java
new file mode 100644
index 0000000000..c22c2da24d
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverter.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.format;
+
+import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken;
+
+import lombok.experimental.UtilityClass;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
+import org.opensearch.sql.spark.rest.model.LangType;
+
+@UtilityClass
+public class CreateAsyncQueryRequestConverter {
+  public static CreateAsyncQueryRequest fromXContentParser(XContentParser parser) {
+    String query = null;
+    LangType lang = null;
+    String datasource = null;
+    String sessionId = null;
+    try {
+      ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
+      while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+        String fieldName = parser.currentName();
+        parser.nextToken();
+        if (fieldName.equals("query")) {
+          query = parser.textOrNull();
+        } else if (fieldName.equals("lang")) {
+          String langString = parser.textOrNull();
+          lang = LangType.fromString(langString);
+        } else if (fieldName.equals("datasource")) {
+          datasource = parser.textOrNull();
+        } else if (fieldName.equals("sessionId")) {
+          sessionId = parser.textOrNull();
+        } else {
+          throw new IllegalArgumentException("Unknown field: " + fieldName);
+        }
+      }
+      return new CreateAsyncQueryRequest(query, datasource, lang, sessionId);
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          String.format("Error while parsing the request body: %s", e.getMessage()));
+    }
+  }
+}
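A usage sketch for the converter above (the JSON body and datasource name are illustrative; parser construction uses OpenSearch's JsonXContent utilities, the same path the REST handler takes through restRequest.contentParser()):

// Parse a JSON request body into a CreateAsyncQueryRequest, as prepareRequest does.
String body = "{\"query\": \"select 1\", \"datasource\": \"my_glue\", \"lang\": \"sql\"}";
try (XContentParser parser =
    JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, body)) {
  CreateAsyncQueryRequest request = CreateAsyncQueryRequestConverter.fromXContentParser(parser);
}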
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/AsyncQueryResult.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/AsyncQueryResult.java
new file mode 100644
index 0000000000..712cebf7e1
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/AsyncQueryResult.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.util.Collection;
+import lombok.Getter;
+import org.opensearch.sql.data.model.ExprValue;
+import org.opensearch.sql.executor.ExecutionEngine;
+import org.opensearch.sql.executor.pagination.Cursor;
+import org.opensearch.sql.protocol.response.QueryResult;
+
+/** AsyncQueryResult for async query APIs. */
+public class AsyncQueryResult extends QueryResult {
+
+  @Getter private final String status;
+  @Getter private final String error;
+
+  public AsyncQueryResult(
+      String status,
+      ExecutionEngine.Schema schema,
+      Collection<ExprValue> exprValues,
+      Cursor cursor,
+      String error) {
+    super(schema, exprValues, cursor);
+    this.status = status;
+    this.error = error;
+  }
+
+  public AsyncQueryResult(
+      String status,
+      ExecutionEngine.Schema schema,
+      Collection<ExprValue> exprValues,
+      String error) {
+    super(schema, exprValues);
+    this.status = status;
+    this.error = error;
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionRequest.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionRequest.java
new file mode 100644
index 0000000000..8a5f31646f
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionRequest.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.io.IOException;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.core.common.io.stream.StreamInput;
+
+@AllArgsConstructor
+@Getter
+public class CancelAsyncQueryActionRequest extends ActionRequest {
+
+  private String queryId;
+
+  /** Constructor of CancelAsyncQueryActionRequest from StreamInput.
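+   *
+   * <p>Note that {@code queryId} is not read from the stream; only the superclass state is
+   * deserialized here.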
+   */
+  public CancelAsyncQueryActionRequest(StreamInput in) throws IOException {
+    super(in);
+  }
+
+  @Override
+  public ActionRequestValidationException validate() {
+    return null;
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionResponse.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionResponse.java
new file mode 100644
index 0000000000..a73430603f
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CancelAsyncQueryActionResponse.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.io.IOException;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+@RequiredArgsConstructor
+public class CancelAsyncQueryActionResponse extends ActionResponse {
+
+  @Getter private final String result;
+
+  public CancelAsyncQueryActionResponse(StreamInput in) throws IOException {
+    super(in);
+    result = in.readString();
+  }
+
+  @Override
+  public void writeTo(StreamOutput streamOutput) throws IOException {
+    streamOutput.writeString(result);
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionRequest.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionRequest.java
new file mode 100644
index 0000000000..d003990311
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionRequest.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.io.IOException;
+import lombok.Getter;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest;
+
+public class CreateAsyncQueryActionRequest extends ActionRequest {
+
+  @Getter private CreateAsyncQueryRequest createAsyncQueryRequest;
+
+  /** Constructor of CreateAsyncQueryActionRequest from StreamInput.
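+   *
+   * <p>As with the cancel request, no payload is read from the stream; the REST layer builds
+   * this request directly from a parsed {@link CreateAsyncQueryRequest}.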
+   */
+  public CreateAsyncQueryActionRequest(StreamInput in) throws IOException {
+    super(in);
+  }
+
+  public CreateAsyncQueryActionRequest(CreateAsyncQueryRequest createAsyncQueryRequest) {
+    this.createAsyncQueryRequest = createAsyncQueryRequest;
+  }
+
+  @Override
+  public ActionRequestValidationException validate() {
+    return null;
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionResponse.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionResponse.java
new file mode 100644
index 0000000000..17a4a73ed7
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/CreateAsyncQueryActionResponse.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.io.IOException;
+import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import org.opensearch.core.action.ActionResponse;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+@RequiredArgsConstructor
+public class CreateAsyncQueryActionResponse extends ActionResponse {
+
+  @Getter private final String result;
+
+  public CreateAsyncQueryActionResponse(StreamInput in) throws IOException {
+    super(in);
+    result = in.readString();
+  }
+
+  @Override
+  public void writeTo(StreamOutput streamOutput) throws IOException {
+    streamOutput.writeString(result);
+  }
+}
diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionRequest.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionRequest.java
new file mode 100644
index 0000000000..f30decbb4d
--- /dev/null
+++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionRequest.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.transport.model;
+
+import java.io.IOException;
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import org.opensearch.action.ActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.core.common.io.stream.StreamInput;
+
+@AllArgsConstructor
+public class GetAsyncQueryResultActionRequest extends ActionRequest {
+
+  @Getter private String queryId;
+
+  /** Constructor of GetAsyncQueryResultActionRequest from StreamInput.
*/ + public GetAsyncQueryResultActionRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionResponse.java b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionResponse.java new file mode 100644 index 0000000000..b2bbedd9ef --- /dev/null +++ b/async-query/src/main/java/org/opensearch/sql/spark/transport/model/GetAsyncQueryResultActionResponse.java @@ -0,0 +1,29 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.transport.model; + +import java.io.IOException; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +@RequiredArgsConstructor +public class GetAsyncQueryResultActionResponse extends ActionResponse { + + @Getter private final String result; + + public GetAsyncQueryResultActionResponse(StreamInput in) throws IOException { + super(in); + result = in.readString(); + } + + @Override + public void writeTo(StreamOutput streamOutput) throws IOException { + streamOutput.writeString(result); + } +} diff --git a/async-query/src/main/resources/async-query-scheduler-index-mapping.yml b/async-query/src/main/resources/async-query-scheduler-index-mapping.yml new file mode 100644 index 0000000000..36bd1b873e --- /dev/null +++ b/async-query/src/main/resources/async-query-scheduler-index-mapping.yml @@ -0,0 +1,41 @@ +--- +## +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +## + +# Schema file for the .async-query-scheduler index +# Also "dynamic" is set to "false" so that other fields cannot be added. 
+dynamic: false +properties: + name: + type: keyword + jobType: + type: keyword + lastUpdateTime: + type: date + format: epoch_millis + enabledTime: + type: date + format: epoch_millis + schedule: + properties: + initialDelay: + type: long + interval: + properties: + start_time: + type: date + format: "strict_date_time||epoch_millis" + period: + type: integer + unit: + type: keyword + enabled: + type: boolean + lockDurationSeconds: + type: long + null_value: -1 + jitter: + type: double + null_value: 0.0 \ No newline at end of file diff --git a/async-query/src/main/resources/async-query-scheduler-index-settings.yml b/async-query/src/main/resources/async-query-scheduler-index-settings.yml new file mode 100644 index 0000000000..386f1f4f34 --- /dev/null +++ b/async-query/src/main/resources/async-query-scheduler-index-settings.yml @@ -0,0 +1,11 @@ +--- +## +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 +## + +# Settings file for the .async-query-scheduler index +index: + number_of_shards: "1" + auto_expand_replicas: "0-2" + number_of_replicas: "0" \ No newline at end of file diff --git a/async-query/src/test/java/org/opensearch/sql/asyncquery/DummyConsumerTest.java b/async-query/src/test/java/org/opensearch/sql/asyncquery/DummyConsumerTest.java new file mode 100644 index 0000000000..a08dbae736 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/asyncquery/DummyConsumerTest.java @@ -0,0 +1,28 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.asyncquery; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.when; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class DummyConsumerTest { + + @Mock Dummy dummy; + + @Test + public void test() { + DummyConsumer dummyConsumer = new DummyConsumer(dummy); + when(dummy.hello()).thenReturn("Hello from mock"); + + assertEquals("Hello from mock", dummyConsumer.hello()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java new file mode 100644 index 0000000000..3ff806bf50 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java @@ -0,0 +1,635 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_CLASS_NAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_REQUEST_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_SESSION_ID; +import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_SESSION_CLASS_NAME; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; +import static org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer.SESSION_DOC_TYPE; +import static org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer.SESSION_ID; +import static org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer.STATEMENT_DOC_TYPE; + +import 
com.google.common.collect.ImmutableMap; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.core.common.Strings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceStatus; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.datasources.exceptions.DatasourceDisabledException; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.leasemanager.ConcurrencyLimitExceededException; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; +import org.opensearch.sql.spark.utils.IDUtils; + +public class AsyncQueryExecutorServiceImplSpecTest extends AsyncQueryExecutorServiceSpec { + AsyncQueryRequestContext asyncQueryRequestContext = new NullAsyncQueryRequestContext(); + + @Disabled("batch query is unsupported") + public void withoutSessionCreateAsyncQueryThenGetResultThenCancel() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // disable session + enableSession(false); + + // 1. create async query. + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertFalse(clusterService().state().routingTable().hasIndex(SPARK_REQUEST_BUFFER_INDEX_NAME)); + emrsClient.startJobRunCalled(1); + + // 2. fetch async query result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("RUNNING", asyncQueryResults.getStatus()); + emrsClient.getJobRunResultCalled(1); + + // 3. cancel async query. + String cancelQueryId = asyncQueryExecutorService.cancelQuery(response.getQueryId()); + assertEquals(response.getQueryId(), cancelQueryId); + emrsClient.cancelJobRunCalled(1); + } + + @Disabled("batch query is unsupported") + public void sessionLimitNotImpactBatchQuery() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // disable session + enableSession(false); + setSessionLimit(0); + + // 1. create async query. 
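+ // (sessions are disabled and the session limit is 0, so batch submission must be unaffected)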
+ CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + emrsClient.startJobRunCalled(1); + + CreateAsyncQueryResponse resp2 = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + emrsClient.startJobRunCalled(2); + } + + @Disabled("batch query is unsupported") + public void createAsyncQueryCreateJobWithCorrectParameters() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + enableSession(false); + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + String params = emrsClient.getJobRequest().getSparkSubmitParams(); + assertNull(response.getSessionId()); + assertTrue(params.contains(String.format("--class %s", DEFAULT_CLASS_NAME))); + assertFalse( + params.contains( + String.format("%s=%s", FLINT_JOB_REQUEST_INDEX, SPARK_REQUEST_BUFFER_INDEX_NAME))); + assertFalse( + params.contains(String.format("%s=%s", FLINT_JOB_SESSION_ID, response.getSessionId()))); + + // enable session + enableSession(true); + response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + params = emrsClient.getJobRequest().getSparkSubmitParams(); + assertTrue(params.contains(String.format("--class %s", FLINT_SESSION_CLASS_NAME))); + assertTrue( + params.contains( + String.format("%s=%s", FLINT_JOB_REQUEST_INDEX, SPARK_REQUEST_BUFFER_INDEX_NAME))); + assertTrue( + params.contains(String.format("%s=%s", FLINT_JOB_SESSION_ID, response.getSessionId()))); + } + + @Test + public void withSessionCreateAsyncQueryThenGetResultThenCancel() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // 1. create async query. + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(response.getSessionId()); + Optional<StatementModel> statementModel = + statementStorageService.getStatement(response.getQueryId(), MYS3_DATASOURCE); + assertTrue(statementModel.isPresent()); + assertEquals(StatementState.WAITING, statementModel.get().getStatementState()); + + // 2. fetch async query result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("", asyncQueryResults.getError()); + assertTrue(Strings.isEmpty(asyncQueryResults.getError())); + assertEquals(StatementState.WAITING.getState(), asyncQueryResults.getStatus()); + + // 3. cancel async query.
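+ // (cancellation is acknowledged by echoing the cancelled query id back)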
+ String cancelQueryId = asyncQueryExecutorService.cancelQuery(response.getQueryId()); + assertEquals(response.getQueryId(), cancelQueryId); + } + + @Test + public void reuseSessionWhenCreateAsyncQuery() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + + // 2. reuse session id + CreateAsyncQueryResponse second = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, first.getSessionId()), + asyncQueryRequestContext); + + assertEquals(first.getSessionId(), second.getSessionId()); + assertNotEquals(first.getQueryId(), second.getQueryId()); + // one session doc. + assertEquals( + 1, + search( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("type", SESSION_DOC_TYPE)) + .must(QueryBuilders.termQuery(SESSION_ID, first.getSessionId())))); + // two statement docs have the same sessionId. + assertEquals( + 2, + search( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("type", STATEMENT_DOC_TYPE)) + .must(QueryBuilders.termQuery(SESSION_ID, first.getSessionId())))); + + Optional<StatementModel> firstModel = + statementStorageService.getStatement(first.getQueryId(), MYS3_DATASOURCE); + assertTrue(firstModel.isPresent()); + assertEquals(StatementState.WAITING, firstModel.get().getStatementState()); + assertEquals(first.getQueryId(), firstModel.get().getStatementId().getId()); + assertEquals(first.getQueryId(), firstModel.get().getQueryId()); + Optional<StatementModel> secondModel = + statementStorageService.getStatement(second.getQueryId(), MYS3_DATASOURCE); + assertEquals(StatementState.WAITING, secondModel.get().getStatementState()); + assertEquals(second.getQueryId(), secondModel.get().getStatementId().getId()); + assertEquals(second.getQueryId(), secondModel.get().getQueryId()); + } + + @Disabled("batch query is unsupported") + public void batchQueryHasTimeout() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + enableSession(false); + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + assertEquals(120L, (long) emrsClient.getJobRequest().executionTimeout()); + } + + @Test + public void interactiveQueryNoTimeout() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertEquals(0L, (long) emrsClient.getJobRequest().executionTimeout()); + } + + @Ignore( "flaky test,
java.lang.IllegalArgumentException: Right now only AES/GCM/NoPadding is" + " supported") + @Test + public void datasourceWithBasicAuth() { + Map<String, String> properties = new HashMap<>(); + properties.put("glue.auth.type", "iam_role"); + properties.put( + "glue.auth.role_arn", "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole"); + properties.put("glue.indexstore.opensearch.uri", "http://localhost:9200"); + properties.put("glue.indexstore.opensearch.auth", "basicauth"); + properties.put("glue.indexstore.opensearch.auth.username", "username"); + properties.put("glue.indexstore.opensearch.auth.password", "password"); + + dataSourceService.createDataSource( + new DataSourceMetadata.Builder() + .setName("mybasicauth") + .setConnector(DataSourceType.S3GLUE) + .setProperties(properties) + .build()); + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", "mybasicauth", LangType.SQL, null), + asyncQueryRequestContext); + String params = emrsClient.getJobRequest().getSparkSubmitParams(); + assertTrue(params.contains(String.format("--conf spark.datasource.flint.auth=basic"))); + assertTrue( + params.contains(String.format("--conf spark.datasource.flint.auth.username=username"))); + assertTrue( + params.contains(String.format("--conf spark.datasource.flint.auth.password=password"))); + } + + @Test + public void withSessionCreateAsyncQueryFailed() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query. + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("myselect 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(response.getSessionId()); + Optional<StatementModel> statementModel = + statementStorageService.getStatement(response.getQueryId(), MYS3_DATASOURCE); + assertTrue(statementModel.isPresent()); + assertEquals(StatementState.WAITING, statementModel.get().getStatementState()); + + // 2. fetch async query result. no result is written to DEFAULT_RESULT_INDEX yet. + // mock failed statement.
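+ // (copy the stored statement, attach an error message, and force it into FAILED state)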
+ StatementModel submitted = statementModel.get(); + StatementModel mocked = + StatementModel.builder() + .version("1.0") + .statementState(submitted.getStatementState()) + .statementId(submitted.getStatementId()) + .sessionId(submitted.getSessionId()) + .applicationId(submitted.getApplicationId()) + .jobId(submitted.getJobId()) + .langType(submitted.getLangType()) + .datasourceName(submitted.getDatasourceName()) + .query(submitted.getQuery()) + .queryId(submitted.getQueryId()) + .submitTime(submitted.getSubmitTime()) + .error("mock error") + .metadata(submitted.getMetadata()) + .build(); + statementStorageService.updateStatementState(mocked, StatementState.FAILED); + + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals(StatementState.FAILED.getState(), asyncQueryResults.getStatus()); + assertEquals("mock error", asyncQueryResults.getError()); + } + + // https://github.com/opensearch-project/sql/issues/2344 + @Test + public void createSessionMoreThanLimitFailed() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + // only allow one session in domain. + setSessionLimit(1); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + setSessionState(first.getSessionId(), SessionState.RUNNING); + + // 2. create async query without session. + ConcurrencyLimitExceededException exception = + assertThrows( + ConcurrencyLimitExceededException.class, + () -> + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext)); + assertEquals("domain concurrent active session can not exceed 1", exception.getMessage()); + } + + // https://github.com/opensearch-project/sql/issues/2360 + @Test + public void recreateSessionIfNotReady() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + + // set sessionState to FAIL + setSessionState(first.getSessionId(), SessionState.FAIL); + + // 2. reuse session id + CreateAsyncQueryResponse second = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, first.getSessionId()), + asyncQueryRequestContext); + + assertNotEquals(first.getSessionId(), second.getSessionId()); + + // set sessionState to DEAD + setSessionState(second.getSessionId(), SessionState.DEAD); + + // 3.
reuse session id + CreateAsyncQueryResponse third = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, second.getSessionId()), + asyncQueryRequestContext); + assertNotEquals(second.getSessionId(), third.getSessionId()); + } + + @Test + public void submitQueryWithDifferentDataSourceSessionWillCreateNewSession() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "SHOW SCHEMAS IN " + MYS3_DATASOURCE, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + + // set sessionState to RUNNING + setSessionState(first.getSessionId(), SessionState.RUNNING); + + // 2. reuse session id + CreateAsyncQueryResponse second = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "SHOW SCHEMAS IN " + MYS3_DATASOURCE, + MYS3_DATASOURCE, + LangType.SQL, + first.getSessionId()), + asyncQueryRequestContext); + + assertEquals(first.getSessionId(), second.getSessionId()); + + // set sessionState to RUNNING + setSessionState(second.getSessionId(), SessionState.RUNNING); + + // 3. given different source, create a new session id + CreateAsyncQueryResponse third = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "SHOW SCHEMAS IN " + MYGLUE_DATASOURCE, + MYGLUE_DATASOURCE, + LangType.SQL, + second.getSessionId()), + asyncQueryRequestContext); + assertNotEquals(second.getSessionId(), third.getSessionId()); + } + + @Test + public void recreateSessionIfStale() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + + // set sessionState to RUNNING + setSessionState(first.getSessionId(), SessionState.RUNNING); + + // 2. reuse session id + CreateAsyncQueryResponse second = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, first.getSessionId()), + asyncQueryRequestContext); + + assertEquals(first.getSessionId(), second.getSessionId()); + + try { + // set timeout setting to 0 + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + org.opensearch.common.settings.Settings settings = + org.opensearch.common.settings.Settings.builder() + .put(Settings.Key.SESSION_INACTIVITY_TIMEOUT_MILLIS.getKeyValue(), 0) + .build(); + request.transientSettings(settings); + client().admin().cluster().updateSettings(request).actionGet(60000); + + // 3. 
not reuse session id + CreateAsyncQueryResponse third = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, second.getSessionId()), + asyncQueryRequestContext); + assertNotEquals(second.getSessionId(), third.getSessionId()); + } finally { + // reset timeout setting + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + org.opensearch.common.settings.Settings settings = + org.opensearch.common.settings.Settings.builder() + .putNull(Settings.Key.SESSION_INACTIVITY_TIMEOUT_MILLIS.getKeyValue()) + .build(); + request.transientSettings(settings); + client().admin().cluster().updateSettings(request).actionGet(60000); + } + } + + @Test + public void submitQueryInInvalidSessionWillCreateNewSession() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + // 1. create async query with unknown sessionId + String unknownSessionId = IDUtils.encode(MYS3_DATASOURCE); + CreateAsyncQueryResponse asyncQuery = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "select 1", MYS3_DATASOURCE, LangType.SQL, unknownSessionId), + asyncQueryRequestContext); + + assertNotNull(asyncQuery.getSessionId()); + assertNotEquals(unknownSessionId, asyncQuery.getSessionId()); + } + + @Test + public void datasourceNameIncludeUppercase() { + dataSourceService.createDataSource( + new DataSourceMetadata.Builder() + .setName("TESTS3") + .setConnector(DataSourceType.S3GLUE) + .setProperties( + ImmutableMap.of( + "glue.auth.type", + "iam_role", + "glue.auth.role_arn", + "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", + "glue.indexstore.opensearch.uri", + "http://localhost:9200", + "glue.indexstore.opensearch.auth", + "noauth")) + .build()); + + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // enable session + enableSession(true); + + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", "TESTS3", LangType.SQL, null), + asyncQueryRequestContext); + String params = emrsClient.getJobRequest().getSparkSubmitParams(); + + assertNotNull(response.getSessionId()); + assertTrue( + params.contains( + "--conf spark.sql.catalog.TESTS3=org.opensearch.sql.FlintDelegatingSessionCatalog")); + } + + @Test + public void concurrentSessionLimitIsDomainLevel() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // only allow one session in domain. + setSessionLimit(1); + + // 1. create async query. + CreateAsyncQueryResponse first = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNotNull(first.getSessionId()); + setSessionState(first.getSessionId(), SessionState.RUNNING); + + // 2. create async query without session.
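+ // (the limit is enforced per domain, so a query against a different datasource still trips it)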
+ ConcurrencyLimitExceededException exception = + assertThrows( + ConcurrencyLimitExceededException.class, + () -> + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYGLUE_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext)); + assertEquals("domain concurrent active session can not exceed 1", exception.getMessage()); + } + + @Test + public void testDatasourceDisabled() { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Disable Datasource + HashMap<String, Object> datasourceMap = new HashMap<>(); + datasourceMap.put("name", MYS3_DATASOURCE); + datasourceMap.put("status", DataSourceStatus.DISABLED); + this.dataSourceService.patchDataSource(datasourceMap); + + // 1. create async query. + try { + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest("select 1", MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + fail("It should have thrown DatasourceDisabledException"); + } catch (DatasourceDisabledException exception) { + Assertions.assertEquals("Datasource mys3 is disabled.", exception.getMessage()); + } + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java new file mode 100644 index 0000000000..d8e3b80175 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java @@ -0,0 +1,497 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.DATASOURCE_URI_HOSTS_DENY_LIST; +import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING; +import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_SESSION_LIMIT_SETTING; +import static org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil.getIndexName; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.JobRunState; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.io.Resources; +import java.net.URL; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import lombok.SneakyThrows; +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import
org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.sql.datasource.model.DataSourceMetadata; +import org.opensearch.sql.datasource.model.DataSourceType; +import org.opensearch.sql.datasources.encryptor.EncryptorImpl; +import org.opensearch.sql.datasources.glue.GlueDataSourceFactory; +import org.opensearch.sql.datasources.service.DataSourceMetadataStorage; +import org.opensearch.sql.datasources.service.DataSourceServiceImpl; +import org.opensearch.sql.datasources.storage.OpenSearchDataSourceMetadataStorage; +import org.opensearch.sql.legacy.esdomain.LocalClusterState; +import org.opensearch.sql.legacy.metrics.Metrics; +import org.opensearch.sql.opensearch.setting.OpenSearchSettings; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClient; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.client.StartJobRequest; +import org.opensearch.sql.spark.config.OpenSearchSparkSubmitParameterModifier; +import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; +import org.opensearch.sql.spark.dispatcher.DatasourceEmbeddedQueryIdProvider; +import org.opensearch.sql.spark.dispatcher.QueryHandlerFactory; +import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher; +import org.opensearch.sql.spark.execution.session.DatasourceEmbeddedSessionIdProvider; +import org.opensearch.sql.spark.execution.session.OpenSearchSessionConfigSupplier; +import org.opensearch.sql.spark.execution.session.SessionConfigSupplier; +import org.opensearch.sql.spark.execution.session.SessionIdProvider; +import org.opensearch.sql.spark.execution.session.SessionManager; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.statestore.OpenSearchSessionStorageService; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStatementStorageService; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.execution.xcontent.AsyncQueryJobMetadataXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.FlintIndexStateModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer; +import org.opensearch.sql.spark.flint.FlintIndexClient; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexMetadataServiceImpl; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.flint.OpenSearchFlintIndexClient; +import org.opensearch.sql.spark.flint.OpenSearchFlintIndexStateModelService; +import org.opensearch.sql.spark.flint.OpenSearchIndexDMLResultStorageService; +import org.opensearch.sql.spark.flint.operation.FlintIndexOpFactory; +import org.opensearch.sql.spark.leasemanager.DefaultLeaseManager; +import org.opensearch.sql.spark.metrics.OpenSearchMetricsService; +import 
org.opensearch.sql.spark.parameter.S3GlueDataSourceSparkParameterComposer; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilderProvider; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.response.OpenSearchJobExecutionResponseReader; +import org.opensearch.sql.storage.DataSourceFactory; +import org.opensearch.test.OpenSearchIntegTestCase; + +public class AsyncQueryExecutorServiceSpec extends OpenSearchIntegTestCase { + + public static final String MYS3_DATASOURCE = "mys3"; + public static final String MYGLUE_DATASOURCE = "my_glue"; + + protected ClusterService clusterService; + protected org.opensearch.sql.common.setting.Settings pluginSettings; + protected SessionConfigSupplier sessionConfigSupplier; + protected NodeClient client; + protected FlintIndexClient flintIndexClient; + protected DataSourceServiceImpl dataSourceService; + protected ClusterSettings clusterSettings; + protected FlintIndexMetadataService flintIndexMetadataService; + protected FlintIndexStateModelService flintIndexStateModelService; + protected StateStore stateStore; + protected SessionStorageService sessionStorageService; + protected StatementStorageService statementStorageService; + protected AsyncQueryRequestContext asyncQueryRequestContext; + protected SessionIdProvider sessionIdProvider = new DatasourceEmbeddedSessionIdProvider(); + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(TestSettingPlugin.class); + } + + public static class TestSettingPlugin extends Plugin { + + @Override + public List<Setting<?>> getSettings() { + return OpenSearchSettings.pluginSettings(); + } + } + + @Before + public void setup() { + clusterService = clusterService(); + clusterSettings = clusterService.getClusterSettings(); + pluginSettings = new OpenSearchSettings(clusterSettings); + LocalClusterState.state().setClusterService(clusterService); + LocalClusterState.state().setPluginSettings((OpenSearchSettings) pluginSettings); + sessionConfigSupplier = new OpenSearchSessionConfigSupplier(pluginSettings); + Metrics.getInstance().registerDefaultMetrics(); + client = (NodeClient) cluster().client(); + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .putList(DATASOURCE_URI_HOSTS_DENY_LIST.getKey(), Collections.emptyList()) + .build()) + .get(); + flintIndexClient = new OpenSearchFlintIndexClient(client); + dataSourceService = createDataSourceService(); + DataSourceMetadata dm = + new DataSourceMetadata.Builder() + .setName(MYS3_DATASOURCE) + .setConnector(DataSourceType.S3GLUE) + .setProperties( + ImmutableMap.of( + "glue.auth.type", + "iam_role", + "glue.auth.role_arn", + "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", + "glue.indexstore.opensearch.uri", + "http://localhost:9200", + "glue.indexstore.opensearch.auth", + "noauth")) + .build(); + dataSourceService.createDataSource(dm); + DataSourceMetadata otherDm = + new DataSourceMetadata.Builder() + .setName(MYGLUE_DATASOURCE) + .setConnector(DataSourceType.S3GLUE) + .setProperties( + ImmutableMap.of( + "glue.auth.type", + "iam_role", + "glue.auth.role_arn", + "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", + "glue.indexstore.opensearch.uri", + "http://localhost:9200", + "glue.indexstore.opensearch.auth", + "noauth")) + .build(); + dataSourceService.createDataSource(otherDm); + stateStore = new StateStore(client, clusterService); +
createIndexWithMappings(dm.getResultIndex(), loadResultIndexMappings()); + createIndexWithMappings(otherDm.getResultIndex(), loadResultIndexMappings()); + flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + flintIndexStateModelService = + new OpenSearchFlintIndexStateModelService( + stateStore, new FlintIndexStateModelXContentSerializer()); + sessionStorageService = + new OpenSearchSessionStorageService(stateStore, new SessionModelXContentSerializer()); + statementStorageService = + new OpenSearchStatementStorageService(stateStore, new StatementModelXContentSerializer()); + } + + protected FlintIndexOpFactory getFlintIndexOpFactory( + EMRServerlessClientFactory emrServerlessClientFactory) { + return new FlintIndexOpFactory( + flintIndexStateModelService, + flintIndexClient, + flintIndexMetadataService, + emrServerlessClientFactory); + } + + @After + public void clean() { + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().putNull(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey()).build()) + .get(); + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().putNull(SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING.getKey()).build()) + .get(); + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().putNull(DATASOURCE_URI_HOSTS_DENY_LIST.getKey()).build()) + .get(); + } + + private DataSourceServiceImpl createDataSourceService() { + String masterKey = "a57d991d9b573f75b9bba1df"; + DataSourceMetadataStorage dataSourceMetadataStorage = + new OpenSearchDataSourceMetadataStorage( + client, + clusterService, + new EncryptorImpl(masterKey), + (OpenSearchSettings) pluginSettings); + return new DataSourceServiceImpl( + new ImmutableSet.Builder<DataSourceFactory>() + .add(new GlueDataSourceFactory(pluginSettings)) + .build(), + dataSourceMetadataStorage, + meta -> {}); + } + + protected AsyncQueryExecutorService createAsyncQueryExecutorService( + EMRServerlessClientFactory emrServerlessClientFactory) { + return createAsyncQueryExecutorService( + emrServerlessClientFactory, new OpenSearchJobExecutionResponseReader(client)); + } + + /** Pass a custom response reader which can mock interaction between PPL plugin and EMR-S job.
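+ * (AsyncQueryGetResultSpecTest relies on this hook to interleave plugin result reads with simulated EMR-S writes.)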
*/ + protected AsyncQueryExecutorService createAsyncQueryExecutorService( + EMRServerlessClientFactory emrServerlessClientFactory, + JobExecutionResponseReader jobExecutionResponseReader) { + StateStore stateStore = new StateStore(client, clusterService); + AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService = + new OpenSearchAsyncQueryJobMetadataStorageService( + stateStore, new AsyncQueryJobMetadataXContentSerializer()); + SparkParameterComposerCollection sparkParameterComposerCollection = + new SparkParameterComposerCollection(); + sparkParameterComposerCollection.register( + DataSourceType.S3GLUE, new S3GlueDataSourceSparkParameterComposer()); + SparkSubmitParametersBuilderProvider sparkSubmitParametersBuilderProvider = + new SparkSubmitParametersBuilderProvider(sparkParameterComposerCollection); + QueryHandlerFactory queryHandlerFactory = + new QueryHandlerFactory( + jobExecutionResponseReader, + new FlintIndexMetadataServiceImpl(client), + new SessionManager( + sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + sessionIdProvider), + new DefaultLeaseManager(pluginSettings, stateStore), + new OpenSearchIndexDMLResultStorageService(dataSourceService, stateStore), + new FlintIndexOpFactory( + flintIndexStateModelService, + flintIndexClient, + new FlintIndexMetadataServiceImpl(client), + emrServerlessClientFactory), + emrServerlessClientFactory, + new OpenSearchMetricsService(), + sparkSubmitParametersBuilderProvider); + SparkQueryDispatcher sparkQueryDispatcher = + new SparkQueryDispatcher( + this.dataSourceService, + new SessionManager( + sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + sessionIdProvider), + queryHandlerFactory, + new DatasourceEmbeddedQueryIdProvider()); + return new AsyncQueryExecutorServiceImpl( + asyncQueryJobMetadataStorageService, + sparkQueryDispatcher, + this::sparkExecutionEngineConfig); + } + + public static class LocalEMRSClient implements EMRServerlessClient { + + private int startJobRunCalled = 0; + private int cancelJobRunCalled = 0; + private int getJobResult = 0; + private JobRunState jobState = JobRunState.RUNNING; + + @Getter private StartJobRequest jobRequest; + + @Override + public String startJobRun(StartJobRequest startJobRequest) { + jobRequest = startJobRequest; + startJobRunCalled++; + return "jobId"; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + getJobResult++; + JobRun jobRun = new JobRun(); + jobRun.setState(jobState.toString()); + return new GetJobRunResult().withJobRun(jobRun); + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + cancelJobRunCalled++; + return new CancelJobRunResult().withJobRunId(jobId); + } + + public void startJobRunCalled(int expectedTimes) { + assertEquals(expectedTimes, startJobRunCalled); + } + + public void cancelJobRunCalled(int expectedTimes) { + assertEquals(expectedTimes, cancelJobRunCalled); + } + + public void getJobRunResultCalled(int expectedTimes) { + assertEquals(expectedTimes, getJobResult); + } + + public void setJobState(JobRunState jobState) { + this.jobState = jobState; + } + } + + protected LocalEMRSClient getCancelledLocalEmrsClient() { + return new LocalEMRSClient() { + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + 
jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + } + + public static class LocalEMRServerlessClientFactory implements EMRServerlessClientFactory { + + @Override + public EMRServerlessClient getClient(String accountId) { + return new LocalEMRSClient(); + } + } + + public SparkExecutionEngineConfig sparkExecutionEngineConfig( + AsyncQueryRequestContext asyncQueryRequestContext) { + return SparkExecutionEngineConfig.builder() + .applicationId("appId") + .region("us-west-2") + .executionRoleARN("roleArn") + .sparkSubmitParameterModifier(new OpenSearchSparkSubmitParameterModifier("")) + .clusterName("myCluster") + .build(); + } + + public void enableSession(boolean enabled) { + // doNothing + } + + public void setSessionLimit(long limit) { + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey(), limit).build()) + .get(); + } + + public void setConcurrentRefreshJob(long limit) { + client + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING.getKey(), limit) + .build()) + .get(); + } + + int search(QueryBuilder query) { + SearchRequest searchRequest = new SearchRequest(); + searchRequest.indices(getIndexName(MYS3_DATASOURCE)); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(query); + searchRequest.source(searchSourceBuilder); + SearchResponse searchResponse = client.search(searchRequest).actionGet(); + + return searchResponse.getHits().getHits().length; + } + + void setSessionState(String sessionId, SessionState sessionState) { + Optional model = sessionStorageService.getSession(sessionId, MYS3_DATASOURCE); + SessionModel updated = sessionStorageService.updateSessionState(model.get(), sessionState); + assertEquals(sessionState, updated.getSessionState()); + } + + @SneakyThrows + public String loadResultIndexMappings() { + URL url = Resources.getResource("query_execution_result_mapping.json"); + return Resources.toString(url, Charsets.UTF_8); + } + + @RequiredArgsConstructor + public class FlintDatasetMock { + + final String query; + final String refreshQuery; + final FlintIndexType indexType; + final String indexName; + boolean isLegacy = false; + boolean isSpecialCharacter = false; + String latestId; + + public FlintDatasetMock isLegacy(boolean isLegacy) { + this.isLegacy = isLegacy; + return this; + } + + FlintDatasetMock isSpecialCharacter(boolean isSpecialCharacter) { + this.isSpecialCharacter = isSpecialCharacter; + return this; + } + + public FlintDatasetMock latestId(String latestId) { + this.latestId = latestId; + return this; + } + + public void createIndex() { + String pathPrefix = isLegacy ? 
"flint-index-mappings" : "flint-index-mappings/0.1.1"; + if (isSpecialCharacter) { + createIndexWithMappings( + indexName, loadMappings(pathPrefix + "/" + "flint_special_character_index.json")); + return; + } + switch (indexType) { + case SKIPPING: + createIndexWithMappings( + indexName, loadMappings(pathPrefix + "/" + "flint_skipping_index.json")); + break; + case COVERING: + createIndexWithMappings( + indexName, loadMappings(pathPrefix + "/" + "flint_covering_index.json")); + break; + case MATERIALIZED_VIEW: + createIndexWithMappings(indexName, loadMappings(pathPrefix + "/" + "flint_mv.json")); + break; + } + } + + @SneakyThrows + public void deleteIndex() { + client().admin().indices().delete(new DeleteIndexRequest().indices(indexName)).get(); + } + } + + @SneakyThrows + public static String loadMappings(String path) { + URL url = Resources.getResource(path); + return Resources.toString(url, Charsets.UTF_8); + } + + public void createIndexWithMappings(String indexName, String metadata) { + CreateIndexRequest request = new CreateIndexRequest(indexName); + request.mapping(metadata, XContentType.JSON); + client().admin().indices().create(request).actionGet(); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryGetResultSpecTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryGetResultSpecTest.java new file mode 100644 index 0000000000..e0f04761c7 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryGetResultSpecTest.java @@ -0,0 +1,568 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; +import static org.opensearch.sql.data.model.ExprValueUtils.tupleValue; +import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; + +import com.amazonaws.services.emrserverless.model.JobRunState; +import com.google.common.collect.ImmutableList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.json.JSONObject; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.sql.data.model.ExprValue; +import org.opensearch.sql.executor.pagination.Cursor; +import org.opensearch.sql.protocol.response.format.JsonResponseFormatter; +import org.opensearch.sql.protocol.response.format.ResponseFormatter; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.asyncquery.model.MockFlintSparkJob; +import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.response.JobExecutionResponseReader; +import org.opensearch.sql.spark.response.OpenSearchJobExecutionResponseReader; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; +import org.opensearch.sql.spark.transport.format.AsyncQueryResultResponseFormatter; +import 
org.opensearch.sql.spark.transport.model.AsyncQueryResult; + +public class AsyncQueryGetResultSpecTest extends AsyncQueryExecutorServiceSpec { + AsyncQueryRequestContext asyncQueryRequestContext = new NullAsyncQueryRequestContext(); + + /** Mock Flint index and index state */ + private final FlintDatasetMock mockIndex = + new FlintDatasetMock( + "DROP SKIPPING INDEX ON mys3.default.http_logs", + "REFRESH SKIPPING INDEX ON mys3.default.http_logs", + FlintIndexType.SKIPPING, + "flint_mys3_default_http_logs_skipping_index") + .latestId("ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19za2lwcGluZ19pbmRleA=="); + + private MockFlintSparkJob mockIndexState; + + @Before + public void doSetUp() { + mockIndexState = + new MockFlintSparkJob(flintIndexStateModelService, mockIndex.latestId, MYS3_DATASOURCE); + } + + @Test + public void testInteractiveQueryGetResult() { + createAsyncQuery("SELECT 1") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc(createResultDoc(interaction.queryId)); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertQueryResults("SUCCESS", ImmutableList.of(tupleValue(Map.of("1", 1)))); + } + + @Test + public void testInteractiveQueryGetResultWithConcurrentEmrJobUpdate() { + createAsyncQuery("SELECT 1") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + JSONObject result = interaction.pluginSearchQueryResult(); + interaction.emrJobWriteResultDoc(createResultDoc(interaction.queryId)); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return result; + }) + .assertQueryResults("running", null) + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("SUCCESS", ImmutableList.of(tupleValue(Map.of("1", 1)))); + } + + @Test + public void testBatchQueryGetResult() { + createAsyncQuery("REFRESH SKIPPING INDEX ON test") + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc(createEmptyResultDoc(interaction.queryId)); + interaction.emrJobUpdateJobState(JobRunState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } + + @Test + public void testBatchQueryGetResultWithConcurrentEmrJobUpdate() { + createAsyncQuery("REFRESH SKIPPING INDEX ON test") + .withInteraction( + interaction -> { + JSONObject result = interaction.pluginSearchQueryResult(); + interaction.emrJobWriteResultDoc(createEmptyResultDoc(interaction.queryId)); + interaction.emrJobUpdateJobState(JobRunState.SUCCESS); + return result; + }) + .assertQueryResults("running", null) + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } + + @Test + public void testStreamingQueryGetResult() { + // Create mock index with index state refreshing + mockIndex.createIndex(); + mockIndexState.refreshing(); + try { + createAsyncQuery( + "CREATE SKIPPING INDEX ON mys3.default.http_logs " + + "(l_orderkey VALUE_SET) WITH (auto_refresh = true)") + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc(createEmptyResultDoc(interaction.queryId)); + interaction.emrJobUpdateJobState(JobRunState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } finally { + mockIndex.deleteIndex(); + 
mockIndexState.deleted(); + } + } + + @Test + public void testStreamingQueryGetResultWithConcurrentEmrJobUpdate() { + // Create mock index with index state refreshing + mockIndex.createIndex(); + mockIndexState.refreshing(); + try { + createAsyncQuery( + "CREATE SKIPPING INDEX ON mys3.default.http_logs " + + "(l_orderkey VALUE_SET) WITH (auto_refresh = true)") + .withInteraction( + interaction -> { + JSONObject result = interaction.pluginSearchQueryResult(); + interaction.emrJobWriteResultDoc(createEmptyResultDoc(interaction.queryId)); + interaction.emrJobUpdateJobState(JobRunState.SUCCESS); + return result; + }) + .assertQueryResults("running", null) + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } finally { + mockIndex.deleteIndex(); + mockIndexState.deleted(); + } + } + + @Test + public void testDropIndexQueryGetResult() { + // Create mock index with index state refreshing + mockIndex.createIndex(); + mockIndexState.refreshing(); + + LocalEMRSClient emrClient = new LocalEMRSClient(); + emrClient.setJobState(JobRunState.CANCELLED); + createAsyncQuery(mockIndex.query, emrClient) + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } + + @Test + public void testDropIndexQueryGetResultWithResultDocRefreshDelay() { + // Create mock index with index state refreshing + mockIndex.createIndex(); + mockIndexState.refreshing(); + + LocalEMRSClient emrClient = new LocalEMRSClient(); + emrClient.setJobState(JobRunState.CANCELLED); + createAsyncQuery(mockIndex.query, emrClient) + .withInteraction(interaction -> new JSONObject()) // simulate result index refresh delay + .assertQueryResults("running", null) + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("SUCCESS", ImmutableList.of()); + } + + @Test + public void testInteractiveQueryResponse() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc(createResultDoc(interaction.queryId)); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"1\"," + + "\"type\":\"integer\"}],\"datarows\":[[1]],\"total\":1,\"size\":1}"); + } + + @Test + public void testInteractiveQueryResponseBasicType() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{'column1': 'value1', 'column2': 123, 'column3': true}", + "{'column1': 'value2', 'column2': 456, 'column3': false}"), + ImmutableList.of( + "{'column_name': 'column1', 'data_type': 'string'}", + "{'column_name': 'column2', 'data_type': 'integer'}", + "{'column_name': 'column3', 'data_type': 'boolean'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"column1\",\"type\":\"string\"},{\"name\":\"column2\",\"type\":\"integer\"},{\"name\":\"column3\",\"type\":\"boolean\"}],\"datarows\":[[\"value1\",123,true],[\"value2\",456,false]],\"total\":2,\"size\":2}"); + } + + @Test + 
public void testInteractiveQueryResponseJsonArray() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{ 'attributes': [{ 'key': 'telemetry.sdk.language', 'value': {" + + " 'stringValue': 'python' }}, { 'key': 'telemetry.sdk.name'," + + " 'value': { 'stringValue': 'opentelemetry' }}, { 'key':" + + " 'telemetry.sdk.version', 'value': { 'stringValue': '1.19.0' }}, {" + + " 'key': 'service.namespace', 'value': { 'stringValue':" + + " 'opentelemetry-demo' }}, { 'key': 'service.name', 'value': {" + + " 'stringValue': 'recommendationservice' }}, { 'key':" + + " 'telemetry.auto.version', 'value': { 'stringValue': '0.40b0'" + + " }}]}"), + ImmutableList.of("{'column_name':'attributes','data_type':'array'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"attributes\",\"type\":\"array\"}],\"datarows\":[[[{\"value\":{\"stringValue\":\"python\"},\"key\":\"telemetry.sdk.language\"},{\"value\":{\"stringValue\":\"opentelemetry\"},\"key\":\"telemetry.sdk.name\"},{\"value\":{\"stringValue\":\"1.19.0\"},\"key\":\"telemetry.sdk.version\"},{\"value\":{\"stringValue\":\"opentelemetry-demo\"},\"key\":\"service.namespace\"},{\"value\":{\"stringValue\":\"recommendationservice\"},\"key\":\"service.name\"},{\"value\":{\"stringValue\":\"0.40b0\"},\"key\":\"telemetry.auto.version\"}]]],\"total\":1,\"size\":1}"); + } + + @Test + public void testInteractiveQueryResponseJsonNested() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{\n" + + " 'resourceSpans': {\n" + + " 'scopeSpans': {\n" + + " 'spans': {\n" + + " 'key': 'rpc.system',\n" + + " 'value': {\n" + + " 'stringValue': 'grpc'\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"), + ImmutableList.of("{'column_name':'resourceSpans','data_type':'struct'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"resourceSpans\",\"type\":\"struct\"}],\"datarows\":[[{\"scopeSpans\":{\"spans\":{\"value\":{\"stringValue\":\"grpc\"},\"key\":\"rpc.system\"}}}]],\"total\":1,\"size\":1}"); + } + + @Test + public void testInteractiveQueryResponseJsonNestedObjectArray() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{\n" + + " 'resourceSpans': \n" + + " {\n" + + " 'scopeSpans': \n" + + " {\n" + + " 'spans': \n" + + " [\n" + + " {\n" + + " 'attribute': {\n" + + " 'key': 'rpc.system',\n" + + " 'value': {\n" + + " 'stringValue': 'grpc'\n" + + " }\n" + + " }\n" + + " },\n" + + " {\n" + + " 'attribute': {\n" + + " 'key': 'rpc.system',\n" + + " 'value': {\n" + + " 'stringValue': 'grpc'\n" + + " }\n" + + " }\n" + + " }\n" + + " ]\n" + + " }\n" + + " }\n" + + "}"), + 
ImmutableList.of("{'column_name':'resourceSpans','data_type':'struct'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"resourceSpans\",\"type\":\"struct\"}],\"datarows\":[[{\"scopeSpans\":{\"spans\":[{\"attribute\":{\"value\":{\"stringValue\":\"grpc\"},\"key\":\"rpc.system\"}},{\"attribute\":{\"value\":{\"stringValue\":\"grpc\"},\"key\":\"rpc.system\"}}]}}]],\"total\":1,\"size\":1}"); + } + + @Test + public void testExplainResponse() { + createAsyncQuery("EXPLAIN SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of("{'plan':'== Physical Plan ==\\nAdaptiveSparkPlan'}"), + ImmutableList.of("{'column_name':'plan','data_type':'string'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"plan\",\"type\":\"string\"}],\"datarows\":[[\"==" + + " Physical Plan ==\\n" + + "AdaptiveSparkPlan\"]],\"total\":1,\"size\":1}"); + } + + @Test + public void testInteractiveQueryEmptyResponseIssue2367() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{'srcPort':20641}", + "{'srcPort':20641}", + "{}", + "{}", + "{'srcPort':20641}", + "{'srcPort':20641}"), + ImmutableList.of("{'column_name':'srcPort','data_type':'long'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + "{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"srcPort\",\"type\":\"long\"}],\"datarows\":[[20641],[20641],[null],[null],[20641],[20641]],\"total\":6,\"size\":6}"); + } + + @Test + public void testInteractiveQueryArrayResponseIssue2367() { + createAsyncQuery("SELECT * FROM TABLE") + .withInteraction(InteractionStep::pluginSearchQueryResult) + .assertQueryResults("waiting", null) + .withInteraction( + interaction -> { + interaction.emrJobWriteResultDoc( + createResultDoc( + interaction.queryId, + ImmutableList.of( + "{'resourceSpans':[{'resource':{'attributes':[{'key':'telemetry.sdk.language','value':{'stringValue':'python'}},{'key':'telemetry.sdk.name','value':{'stringValue':'opentelemetry'}}]},'scopeSpans':[{'scope':{'name':'opentelemetry.instrumentation.grpc','version':'0.40b0'},'spans':[{'attributes':[{'key':'rpc.system','value':{'stringValue':'grpc'}},{'key':'rpc.grpc.status_code','value':{'intValue':'0'}}],'kind':3},{'attributes':[{'key':'rpc.system','value':{'stringValue':'grpc'}},{'key':'rpc.grpc.status_code','value':{'intValue':'0'}}],'kind':3}]}]}]}"), + ImmutableList.of("{'column_name':'resourceSpans','data_type':'array'}"))); + interaction.emrJobUpdateStatementState(StatementState.SUCCESS); + return interaction.pluginSearchQueryResult(); + }) + .assertFormattedQueryResults( + 
"{\"status\":\"SUCCESS\",\"schema\":[{\"name\":\"resourceSpans\",\"type\":\"array\"}],\"datarows\":[[[{\"resource\":{\"attributes\":[{\"value\":{\"stringValue\":\"python\"},\"key\":\"telemetry.sdk.language\"},{\"value\":{\"stringValue\":\"opentelemetry\"},\"key\":\"telemetry.sdk.name\"}]},\"scopeSpans\":[{\"spans\":[{\"kind\":3,\"attributes\":[{\"value\":{\"stringValue\":\"grpc\"},\"key\":\"rpc.system\"},{\"value\":{\"intValue\":\"0\"},\"key\":\"rpc.grpc.status_code\"}]},{\"kind\":3,\"attributes\":[{\"value\":{\"stringValue\":\"grpc\"},\"key\":\"rpc.system\"},{\"value\":{\"intValue\":\"0\"},\"key\":\"rpc.grpc.status_code\"}]}],\"scope\":{\"name\":\"opentelemetry.instrumentation.grpc\",\"version\":\"0.40b0\"}}]}]]],\"total\":1,\"size\":1}"); + } + + private AssertionHelper createAsyncQuery(String query) { + return new AssertionHelper(query, new LocalEMRSClient()); + } + + private AssertionHelper createAsyncQuery(String query, LocalEMRSClient emrClient) { + return new AssertionHelper(query, emrClient); + } + + private class AssertionHelper { + private final AsyncQueryExecutorService queryService; + private final CreateAsyncQueryResponse createQueryResponse; + private Interaction interaction; + + AssertionHelper(String query, LocalEMRSClient emrClient) { + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrClient; + this.queryService = + createAsyncQueryExecutorService( + emrServerlessClientFactory, + /* + * Custom reader that intercepts get results call and inject extra steps defined in + * current interaction. Intercept both get methods for different query handler which + * will only call either of them. + */ + new JobExecutionResponseReader() { + @Override + public JSONObject getResultWithJobId(String jobId, String resultIndex) { + return interaction.interact(new InteractionStep(emrClient, jobId, resultIndex)); + } + + @Override + public JSONObject getResultWithQueryId(String queryId, String resultIndex) { + return interaction.interact(new InteractionStep(emrClient, queryId, resultIndex)); + } + }); + this.createQueryResponse = + queryService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + } + + AssertionHelper withInteraction(Interaction interaction) { + this.interaction = interaction; + return this; + } + + AssertionHelper assertQueryResults(String status, List data) { + AsyncQueryExecutionResponse results = + queryService.getAsyncQueryResults(createQueryResponse.getQueryId()); + assertEquals(status, results.getStatus()); + assertEquals(data, results.getResults()); + return this; + } + + AssertionHelper assertFormattedQueryResults(String expected) { + AsyncQueryExecutionResponse results = + queryService.getAsyncQueryResults(createQueryResponse.getQueryId()); + + ResponseFormatter formatter = + new AsyncQueryResultResponseFormatter(JsonResponseFormatter.Style.COMPACT); + assertEquals( + expected, + formatter.format( + new AsyncQueryResult( + results.getStatus(), + results.getSchema(), + results.getResults(), + Cursor.None, + results.getError()))); + return this; + } + } + + /** Define an interaction between PPL plugin and EMR-S job. */ + private interface Interaction { + + JSONObject interact(InteractionStep interaction); + } + + /** + * Each method in this class is one step that can happen in an interaction. These methods are + * called in any order to simulate concurrent scenario. 
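+ * For example, the EMR-S result-doc write step may run before or after the plugin polls for results.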
+ */ + private class InteractionStep { + private final LocalEMRSClient emrClient; + final String queryId; + final String resultIndex; + + private InteractionStep(LocalEMRSClient emrClient, String queryId, String resultIndex) { + this.emrClient = emrClient; + this.queryId = queryId; + this.resultIndex = resultIndex == null ? DEFAULT_RESULT_INDEX : resultIndex; + } + + /** Simulate PPL plugin search query_execution_result */ + JSONObject pluginSearchQueryResult() { + return new OpenSearchJobExecutionResponseReader(client) + .getResultWithQueryId(queryId, resultIndex); + } + + /** Simulate EMR-S bulk writes query_execution_result with refresh = wait_for */ + void emrJobWriteResultDoc(Map resultDoc) { + try { + IndexRequest request = + new IndexRequest().index(resultIndex).setRefreshPolicy(WAIT_UNTIL).source(resultDoc); + client.index(request).get(); + } catch (Exception e) { + Assert.fail("Failed to write result doc: " + e.getMessage()); + } + } + + /** Simulate EMR-S updates query_execution_request with state */ + void emrJobUpdateStatementState(StatementState newState) { + StatementModel stmt = statementStorageService.getStatement(queryId, MYS3_DATASOURCE).get(); + statementStorageService.updateStatementState(stmt, newState); + } + + void emrJobUpdateJobState(JobRunState jobState) { + emrClient.setJobState(jobState); + } + } + + private Map createEmptyResultDoc(String queryId) { + Map document = new HashMap<>(); + document.put("result", ImmutableList.of()); + document.put("schema", ImmutableList.of()); + document.put("jobRunId", "XXX"); + document.put("applicationId", "YYY"); + document.put("dataSourceName", MYS3_DATASOURCE); + document.put("status", "SUCCESS"); + document.put("error", ""); + document.put("queryId", queryId); + document.put("queryText", "SELECT 1"); + document.put("sessionId", "ZZZ"); + document.put("updateTime", 1699124602715L); + document.put("queryRunTime", 123); + return document; + } + + private Map createResultDoc(String queryId) { + return createResultDoc( + queryId, + ImmutableList.of("{'1':1}"), + ImmutableList.of("{'column_name" + "':'1','data_type':'integer'}")); + } + + private Map createResultDoc( + String queryId, List result, List schema) { + Map document = new HashMap<>(); + document.put("result", result); + document.put("schema", schema); + document.put("jobRunId", "XXX"); + document.put("applicationId", "YYY"); + document.put("dataSourceName", MYS3_DATASOURCE); + document.put("status", "SUCCESS"); + document.put("error", ""); + document.put("queryId", queryId); + document.put("queryText", "SELECT 1"); + document.put("sessionId", "ZZZ"); + document.put("updateTime", 1699124602715L); + document.put("queryRunTime", 123); + return document; + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecAlterTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecAlterTest.java new file mode 100644 index 0000000000..70a43e42d5 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecAlterTest.java @@ -0,0 +1,1110 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.ValidationException; +import 
com.google.common.collect.ImmutableList; +import java.util.HashMap; +import java.util.Map; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.MockFlintIndex; +import org.opensearch.sql.spark.asyncquery.model.MockFlintSparkJob; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.client.StartJobRequest; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; + +public class IndexQuerySpecAlterTest extends AsyncQueryExecutorServiceSpec { + + @Test + public void testAlterIndexQueryConvertingToManualRefresh() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=false) "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
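fetch result: expect SUCCESS with the streaming job cancelled once and no new job started + //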
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryConvertingToManualRefreshWithNoIncrementalRefresh() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false)"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false)"); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("checkpoint_location", "s3://checkpoint/location"); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithRedundantOperation() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=false) "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public String startJobRun(StartJobRequest startJobRequest) { + return "jobId"; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + super.cancelJobRun(applicationId, jobId, allowExceptionPropagation); + throw new ValidationException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessCientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessCientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "false"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
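fetch result: expect SUCCESS even though cancelJobRun reports the job is not in a cancellable state, i.e. it had already stopped + //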
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryConvertingToAutoRefresh() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=true," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=true," + + " incremental_refresh=false)"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=true," + + " incremental_refresh=false) "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory clientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(clientFactory); + + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "false"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
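fetch result: expect RUNNING because enabling auto_refresh starts a new streaming job + //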
fetch result + assertEquals( + "RUNNING", + asyncQueryExecutorService + .getAsyncQueryResults(response.getQueryId()) + .getStatus()); + + flintIndexJob.assertState(FlintIndexState.ACTIVE); + emrsClient.startJobRunCalled(1); + emrsClient.getJobRunResultCalled(1); + emrsClient.cancelJobRunCalled(0); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithOutAnyAutoRefresh() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (" + + " incremental_refresh=false)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (" + + " incremental_refresh=false)"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (" + " incremental_refresh=false) "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = new LocalEMRSClient(); + EMRServerlessClientFactory clientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(clientFactory); + + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "false"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + assertEquals( + "RUNNING", + asyncQueryExecutorService + .getAsyncQueryResults(response.getQueryId()) + .getStatus()); + + flintIndexJob.assertState(FlintIndexState.ACTIVE); + emrsClient.startJobRunCalled(1); + emrsClient.getJobRunResultCalled(1); + emrsClient.cancelJobRunCalled(0); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfFullRefreshWithInvalidOptions() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false, checkpoint_location=\"s3://ckp/skp\")"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false, checkpoint_location=\"s3://ckp/skp\")"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=false, checkpoint_location=\"s3://ckp/skp\") "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
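fetch result: expect FAILED since checkpoint_location is not among the options allowed when converting to full refresh + //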
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Altering to full refresh only allows: [auto_refresh, incremental_refresh]" + + " options", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfIncrementalRefreshWithInvalidOptions() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\")"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\")"); + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\") "); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING, ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Altering to incremental refresh only allows: [auto_refresh, incremental_refresh," + + " watermark_delay, checkpoint_location] options", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfIncrementalRefreshWithInsufficientOptions() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true)"); + MockFlintIndex ALTER_COVERING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true)"); + ImmutableList.of(ALTER_SKIPPING, ALTER_COVERING) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("incremental_refresh", "false"); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
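fetch result: expect FAILED because converting to incremental refresh requires a checkpoint_location + //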
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Conversion to incremental refresh index cannot proceed due to missing" + + " attributes: checkpoint_location.", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfIncrementalRefreshWithInsufficientOptionsForMV() { + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=true) "); + ImmutableList.of(ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("incremental_refresh", "false"); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Conversion to incremental refresh index cannot proceed due to missing" + + " attributes: checkpoint_location, watermark_delay.", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfIncrementalRefreshWithEmptyExistingOptionsForMV() { + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=true) "); + ImmutableList.of(ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("incremental_refresh", "false"); + existingOptions.put("watermark_delay", ""); + existingOptions.put("checkpoint_location", ""); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Conversion to incremental refresh index cannot proceed due to missing" + + " attributes: checkpoint_location, watermark_delay.", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryOfIncrementalRefresh() { + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=true) "); + ImmutableList.of(ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("incremental_refresh", "false"); + existingOptions.put("watermark_delay", "watermark_delay"); + existingOptions.put("checkpoint_location", "s3://checkpoint/location"); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
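fetch result: expect SUCCESS with the refreshing job cancelled and both refresh options rewritten + //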
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.getJobRunResultCalled(1); + emrsClient.cancelJobRunCalled(1); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + Assertions.assertEquals("true", options.get("incremental_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithIncrementalRefreshAlreadyExisting() { + MockFlintIndex ALTER_MV = + new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false) "); + ImmutableList.of(ALTER_MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + existingOptions.put("incremental_refresh", "true"); + existingOptions.put("watermark_delay", "watermark_delay"); + existingOptions.put("checkpoint_location", "s3://checkpoint/location"); + mockDS.updateIndexOptions(existingOptions, true); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.getJobRunResultCalled(1); + emrsClient.cancelJobRunCalled(1); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + Assertions.assertEquals("true", options.get("incremental_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithInvalidInitialState() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + ImmutableList.of(ALTER_SKIPPING) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.updating(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
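fetch result: expect FAILED because the index is already UPDATING, so the transaction is rejected + //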
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Transaction failed as flint index is not in a valid state.", + asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + flintIndexJob.assertState(FlintIndexState.UPDATING); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithValidationExceptionWithSuccess() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + ImmutableList.of(ALTER_SKIPPING) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + super.cancelJobRun(applicationId, jobId, allowExceptionPropagation); + throw new ValidationException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithResourceNotFoundExceptionWithSuccess() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + ImmutableList.of(ALTER_SKIPPING) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + super.cancelJobRun(applicationId, jobId, allowExceptionPropagation); + throw new ValidationException("Random validation exception"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
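fetch result: expect FAILED with Internal Server Error since the unexpected validation message is not treated as a benign cancellation failure + //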
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals("Internal Server Error.", asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } + + @Test + public void testAlterIndexQueryWithUnknownError() { + MockFlintIndex ALTER_SKIPPING = + new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=false)"); + ImmutableList.of(ALTER_SKIPPING) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + super.cancelJobRun(applicationId, jobId, allowExceptionPropagation); + throw new IllegalArgumentException("Unknown Error"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + // Mock flint index + mockDS.createIndex(); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + mockDS.updateIndexOptions(existingOptions, false); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.getLatestId(), MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. alter index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.getQuery(), MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals("Internal Server Error.", asyncQueryExecutionResponse.getError()); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(0); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = mockDS.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java new file mode 100644 index 0000000000..2eed7b13a0 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java @@ -0,0 +1,1043 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.amazonaws.services.emrserverless.model.ValidationException; +import com.google.common.collect.ImmutableList; +import org.junit.Assert; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.asyncquery.model.MockFlintIndex; +import org.opensearch.sql.spark.asyncquery.model.MockFlintSparkJob; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.leasemanager.ConcurrencyLimitExceededException; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; + +public class IndexQuerySpecTest extends AsyncQueryExecutorServiceSpec { + private final String specialName = "`test ,:\"+/\\|?#><`"; + private final String encodedName = "test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c"; + + public final String REFRESH_SI = "REFRESH SKIPPING INDEX on mys3.default.http_logs"; + public final String REFRESH_CI = "REFRESH INDEX covering ON mys3.default.http_logs"; + public final String REFRESH_MV = "REFRESH MATERIALIZED VIEW mys3.default.http_logs_metrics"; + public final String REFRESH_SCI = "REFRESH SKIPPING INDEX on mys3.default." 
+ specialName; + + public final FlintDatasetMock LEGACY_SKIPPING = + new FlintDatasetMock( + "DROP SKIPPING INDEX ON mys3.default.http_logs", + REFRESH_SI, + FlintIndexType.SKIPPING, + "flint_mys3_default_http_logs_skipping_index") + .isLegacy(true); + public final FlintDatasetMock LEGACY_COVERING = + new FlintDatasetMock( + "DROP INDEX covering ON mys3.default.http_logs", + REFRESH_CI, + FlintIndexType.COVERING, + "flint_mys3_default_http_logs_covering_index") + .isLegacy(true); + public final FlintDatasetMock LEGACY_MV = + new FlintDatasetMock( + "DROP MATERIALIZED VIEW mys3.default.http_logs_metrics", + REFRESH_MV, + FlintIndexType.MATERIALIZED_VIEW, + "flint_mys3_default_http_logs_metrics") + .isLegacy(true); + + public final FlintDatasetMock LEGACY_SPECIAL_CHARACTERS = + new FlintDatasetMock( + "DROP SKIPPING INDEX ON mys3.default." + specialName, + REFRESH_SCI, + FlintIndexType.SKIPPING, + "flint_mys3_default_" + encodedName + "_skipping_index") + .isLegacy(true) + .isSpecialCharacter(true); + + public final FlintDatasetMock SKIPPING = + new FlintDatasetMock( + "DROP SKIPPING INDEX ON mys3.default.http_logs", + REFRESH_SI, + FlintIndexType.SKIPPING, + "flint_mys3_default_http_logs_skipping_index") + .latestId("ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19za2lwcGluZ19pbmRleA=="); + public final FlintDatasetMock COVERING = + new FlintDatasetMock( + "DROP INDEX covering ON mys3.default.http_logs", + REFRESH_CI, + FlintIndexType.COVERING, + "flint_mys3_default_http_logs_covering_index") + .latestId("ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19jb3ZlcmluZ19pbmRleA=="); + public final FlintDatasetMock MV = + new FlintDatasetMock( + "DROP MATERIALIZED VIEW mys3.default.http_logs_metrics", + REFRESH_MV, + FlintIndexType.MATERIALIZED_VIEW, + "flint_mys3_default_http_logs_metrics") + .latestId("ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19tZXRyaWNz"); + public final FlintDatasetMock SPECIAL_CHARACTERS = + new FlintDatasetMock( + "DROP SKIPPING INDEX ON mys3.default." + specialName, + REFRESH_SCI, + FlintIndexType.SKIPPING, + "flint_mys3_default_" + encodedName + "_skipping_index") + .isSpecialCharacter(true) + .latestId( + "ZmxpbnRfbXlzM19kZWZhdWx0X3Rlc3QlMjAlMmMlM2ElMjIlMmIlMmYlNWMlN2MlM2YlMjMlM2UlM2Nfc2tpcHBpbmdfaW5kZXg="); + + public final String CREATE_SI_AUTO = + "CREATE SKIPPING INDEX ON mys3.default.http_logs" + + "(l_orderkey VALUE_SET) WITH (auto_refresh = true)"; + + public final String CREATE_CI_AUTO = + "CREATE INDEX covering ON mys3.default.http_logs " + + "(l_orderkey, l_quantity) WITH (auto_refresh = true)"; + + public final String CREATE_MV_AUTO = + "CREATE MATERIALIZED VIEW mys3.default.http_logs_metrics AS select * " + + "from mys3.default.https WITH (auto_refresh = true)"; + + /** + * Happy case. expectation is + * + *
<p>
    (1) Drop Index response is SUCCESS + */ + @Test + public void legacyBasicDropAndFetchAndCancel() { + ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING, LEGACY_SPECIAL_CHARACTERS) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + assertNotNull(response.getQueryId()); + assertTrue(clusterService.state().routingTable().hasIndex(mockDS.indexName)); + + // 2.fetch result + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryResults.getStatus()); + assertNull(asyncQueryResults.getError()); + emrsClient.cancelJobRunCalled(1); + + // 3.cancel + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); + assertEquals("can't cancel index DML query", exception.getMessage()); + }); + } + + /** + * Legacy Test, without state index support. Not EMR-S job running. expectation is + * + *
<p>
    (1) Drop Index response is SUCCESS + */ + @Test + public void legacyDropIndexNoJobRunning() { + ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING, LEGACY_MV, LEGACY_SPECIAL_CHARACTERS) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + throw new ValidationException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2.fetch result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryResults.getStatus()); + assertNull(asyncQueryResults.getError()); + }); + } + + /** + * Legacy Test, without state index support. Cancel EMR-S job call timeout. expectation is + * + *
<p>
    (1) Drop Index response is FAILED + */ + @Test + public void legacyDropIndexCancelJobTimeout() { + ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING, LEGACY_MV, LEGACY_SPECIAL_CHARACTERS) + .forEach( + mockDS -> { + // Mock EMR-S always return running. + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return new GetJobRunResult().withJobRun(new JobRun().withState("Running")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryResults.getStatus()); + assertEquals("Cancel job operation timed out.", asyncQueryResults.getError()); + }); + } + + /** + * Legacy Test, without state index support. Not EMR-S job running. expectation is + * + *
<p>
    (1) Drop Index response is SUCCESS + */ + @Test + public void legacyDropIndexSpecialCharacter() { + FlintDatasetMock mockDS = LEGACY_SPECIAL_CHARACTERS; + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + throw new ValidationException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(mockDS.query, MYGLUE_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2.fetch result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryResults.getStatus()); + assertNull(asyncQueryResults.getError()); + } + + /** + * Happy case. expectation is + * + *
    (1) Drop Index response is SUCCESS (2) change index state to: DELETED + */ + @Test + public void dropAndFetchAndCancel() { + ImmutableList.of(SKIPPING, COVERING, MV, SPECIAL_CHARACTERS) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + assertNotNull(response.getQueryId()); + assertTrue(clusterService.state().routingTable().hasIndex(mockDS.indexName)); + + // assert state is DELETED + flintIndexJob.assertState(FlintIndexState.DELETED); + + // 2.fetch result + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryResults.getStatus()); + assertNull(asyncQueryResults.getError()); + emrsClient.cancelJobRunCalled(1); + + // 3.cancel + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); + assertEquals("can't cancel index DML query", exception.getMessage()); + }); + } + + /** + * Cancel EMR-S job, but not job running. expectation is + * + *
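
Assertions such as emrsClient.cancelJobRunCalled(1) in the test above read as a hand-rolled spy: LocalEMRSClient evidently counts invocations and exposes a per-method verification hook. A compact, self-contained sketch of that counting-spy idea (all names below are illustrative, not the project's):

    import java.util.concurrent.atomic.AtomicInteger;

    public class CountingSpySketch {
      static class SpyClient {
        private final AtomicInteger cancelCalls = new AtomicInteger();

        String cancelJobRun(String jobId) {
          cancelCalls.incrementAndGet(); // record the call before answering
          return "cancelling";
        }

        void cancelJobRunCalled(int expected) {
          if (cancelCalls.get() != expected) {
            throw new AssertionError("expected " + expected + " calls, got " + cancelCalls.get());
          }
        }
      }

      public static void main(String[] args) {
        SpyClient spy = new SpyClient();
        spy.cancelJobRun("job-1");
        spy.cancelJobRunCalled(1); // passes; cancelJobRunCalled(2) would throw
      }
    }
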
    (1) Drop Index response is SUCCESS (2) change index state to: DELETED + */ + @Test + public void dropIndexNoJobRunning() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + // Mock EMR-S job is not running + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + throw new ValidationException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state in refresh state. + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2.fetch result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryResults.getStatus()); + assertNull(asyncQueryResults.getError()); + + flintIndexJob.assertState(FlintIndexState.DELETED); + }); + } + + /** + * Cancel EMR-S job call timeout, expectation is + * + *
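
Contrasting this test with dropIndexSpecialCharacter later in the file suggests how the drop path classifies cancel failures: a ValidationException saying the job is not cancellable means the job already stopped, so the drop proceeds as SUCCESS, while an unexpected exception type surfaces as FAILED with "Internal Server Error.". A sketch of that rule as inferred from the assertions alone, not from the implementation:

    public class CancelOutcomeSketch {
      // Stand-in for the AWS SDK ValidationException thrown by the stubs.
      static class ValidationException extends RuntimeException {
        ValidationException(String message) {
          super(message);
        }
      }

      static String dropOutcome(RuntimeException thrownByCancel) {
        if (thrownByCancel instanceof ValidationException) {
          return "SUCCESS"; // job was already finished; nothing left to cancel
        }
        return "FAILED: Internal Server Error."; // anything else is unexpected
      }

      public static void main(String[] args) {
        System.out.println(dropOutcome(new ValidationException("Job run is not in a cancellable state")));
        System.out.println(dropOutcome(new IllegalArgumentException("Job run is not in a cancellable state")));
      }
    }
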
    (1) Drop Index response is FAILED (2) index state stays REFRESHING + */ + @Test + public void dropIndexCancelJobTimeout() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + // Mock EMR-S always returns running. + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return new GetJobRunResult().withJobRun(new JobRun().withState("Running")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryResults.getStatus()); + assertEquals("Cancel job operation timed out.", asyncQueryResults.getError()); + flintIndexJob.assertState(FlintIndexState.REFRESHING); + }); + } + + /** + * Drop Index operation is retryable, expectation is + + *
    (1) call EMR-S (2) change index state to: DELETED + */ + @Test + public void dropIndexWithIndexInRefreshingState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + assertEquals( + "SUCCESS", + asyncQueryExecutorService + .getAsyncQueryResults(response.getQueryId()) + .getStatus()); + + flintIndexJob.assertState(FlintIndexState.DELETED); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(1); + }); + } + + /** + * Index state is stable, Drop Index operation is retryable, expectation is + * + *
    (1) call EMR-S (2) change index state to: DELETED + */ + @Test + public void dropIndexWithIndexInActiveState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.active(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("SUCCESS", asyncQueryExecutionResponse.getStatus()); + flintIndexJob.assertState(FlintIndexState.DELETED); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(1); + emrsClient.getJobRunResultCalled(1); + }); + } + + /** + * Index state is stable, expectation is + * + *
    (1) call EMR-S (2) change index state to: DELETED + */ + @Test + public void dropIndexWithIndexInCreatingState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.creating(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + assertEquals( + "SUCCESS", + asyncQueryExecutorService + .getAsyncQueryResults(response.getQueryId()) + .getStatus()); + + flintIndexJob.assertState(FlintIndexState.DELETED); + }); + } + + /** + * Index state is stable, Drop Index operation is retryable, expectation is + * + *
    (1) call EMR-S (2) change index state to: DELETED + */ + @Test + public void dropIndexWithIndexInEmptyState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + assertEquals( + "SUCCESS", + asyncQueryExecutorService + .getAsyncQueryResults(response.getQueryId()) + .getStatus()); + + flintIndexJob.assertState(FlintIndexState.DELETED); + }); + } + + /** + * Couldn't acquire lock as the index is in transitioning state. Will result in error. + * + *
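
Taken together, the drop tests above trace a state machine: indexes in EMPTY, CREATING, ACTIVE, or REFRESHING can be dropped and land on DELETED, while an index already mid-transition (DELETING, in the next test) is rejected. A table-driven sketch of that rule set, inferred from the assertions rather than copied from the implementation:

    import java.util.EnumSet;
    import java.util.Set;

    public class DropEligibilitySketch {
      enum IndexState { EMPTY, CREATING, ACTIVE, REFRESHING, DELETING, DELETED }

      // States from which the tests show DROP succeeding.
      static final Set<IndexState> DROPPABLE =
          EnumSet.of(IndexState.EMPTY, IndexState.CREATING, IndexState.ACTIVE, IndexState.REFRESHING);

      static IndexState drop(IndexState current) {
        if (!DROPPABLE.contains(current)) {
          throw new IllegalStateException("Transaction failed as flint index is not in a valid state.");
        }
        return IndexState.DELETED;
      }

      public static void main(String[] args) {
        System.out.println(drop(IndexState.REFRESHING)); // DELETED
        System.out.println(drop(IndexState.DELETING));   // throws, as the DELETING-state test expects
      }
    }
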
    (1) not call EMR-S (2) index state stays DELETING + */ + @Test + public void dropIndexWithIndexInDeletedState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + Assert.fail("should not call cancelJobRun"); + return null; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + Assert.fail("should not call getJobRunResult"); + return null; + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + flintIndexJob.deleting(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + // 2. fetch result + assertEquals("FAILED", asyncQueryExecutionResponse.getStatus()); + assertEquals( + "Transaction failed as flint index is not in a valid state.", + asyncQueryExecutionResponse.getError()); + flintIndexJob.assertState(FlintIndexState.DELETING); + }); + } + + /** + * Cancel EMR-S job throws an unexpected exception type. expectation is + + *
    (1) Drop Index response is FAILED (2) index state stays REFRESHING + */ + @Test + public void dropIndexSpecialCharacter() { + FlintDatasetMock mockDS = SPECIAL_CHARACTERS; + // Mock EMR-S job is not running + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + throw new IllegalArgumentException("Job run is not in a cancellable state"); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + // Mock index state in refresh state. + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, mockDS.latestId, MYGLUE_DATASOURCE); + flintIndexJob.refreshing(); + + // 1.drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(mockDS.query, MYGLUE_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2.fetch result. + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryResults.getStatus()); + assertEquals("Internal Server Error.", asyncQueryResults.getError()); + + flintIndexJob.assertState(FlintIndexState.REFRESHING); + } + + /** + * No Job running, expectation is + + *
    (1) not call EMR-S (2) change index state to: DELETED + */ + @Test + public void edgeCaseNoIndexStateDoc() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + Assert.fail("should not call cancelJobRun"); + return null; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + Assert.fail("should not call getJobRunResult"); + return null; + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + mockDS.createIndex(); + + // 1. drop index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. fetch result + AsyncQueryExecutionResponse asyncQueryResults = + asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + assertEquals("FAILED", asyncQueryResults.getStatus()); + assertTrue(asyncQueryResults.getError().contains("no state found")); + }); + } + + @Test + public void concurrentRefreshJobLimitNotApplied() { + EMRServerlessClientFactory emrServerlessClientFactory = new LocalEMRServerlessClientFactory(); + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock flint index + COVERING.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, COVERING.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // query with auto refresh + String query = + "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, " + + "l_quantity) WITH (auto_refresh = true)"; + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + assertNull(response.getSessionId()); + } + + @Test + public void concurrentRefreshJobLimitAppliedToDDLWithAuthRefresh() { + EMRServerlessClientFactory emrServerlessClientFactory = new LocalEMRServerlessClientFactory(); + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + setConcurrentRefreshJob(1); + + // Mock flint index + COVERING.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, COVERING.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // query with auto_refresh = true. 
+ String query = + "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, " + + "l_quantity) WITH (auto_refresh = true)"; + ConcurrencyLimitExceededException exception = + assertThrows( + ConcurrencyLimitExceededException.class, + () -> + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext)); + assertEquals("domain concurrent refresh job can not exceed 1", exception.getMessage()); + } + + @Test + public void concurrentRefreshJobLimitAppliedToRefresh() { + EMRServerlessClientFactory emrServerlessClientFactory = new LocalEMRServerlessClientFactory(); + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + setConcurrentRefreshJob(1); + + // Mock flint index + COVERING.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, COVERING.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + // query with auto_refresh = true. + String query = "REFRESH INDEX covering ON mys3.default.http_logs"; + ConcurrencyLimitExceededException exception = + assertThrows( + ConcurrencyLimitExceededException.class, + () -> + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext)); + assertEquals("domain concurrent refresh job can not exceed 1", exception.getMessage()); + } + + @Test + public void concurrentRefreshJobLimitNotAppliedToDDL() { + String query = "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, l_quantity)"; + EMRServerlessClientFactory emrServerlessClientFactory = new LocalEMRServerlessClientFactory(); + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + setConcurrentRefreshJob(1); + + // Mock flint index + COVERING.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, COVERING.latestId, MYS3_DATASOURCE); + flintIndexJob.refreshing(); + + CreateAsyncQueryResponse asyncQueryResponse = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + } + + /** Cancel create flint index statement with auto_refresh=true, should throw exception. */ + @Test + public void cancelAutoRefreshCreateFlintIndexShouldThrowException() { + ImmutableList.of(CREATE_SI_AUTO, CREATE_CI_AUTO, CREATE_MV_AUTO) + .forEach( + query -> { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + Assert.fail("should not call cancelJobRun"); + return null; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + Assert.fail("should not call getJobRunResult"); + return null; + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // 1. submit create / refresh index query + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + // 2. 
cancel query + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); + assertEquals( + "can't cancel index DML query, using ALTER auto_refresh=off statement to stop" + + " job, using VACUUM statement to stop job and delete data", + exception.getMessage()); + }); + } + + /** Cancel REFRESH statement should success */ + @Test + public void cancelRefreshStatement() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService( + (accountId) -> + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult( + String applicationId, String jobId) { + return new GetJobRunResult() + .withJobRun(new JobRun().withState("Cancelled")); + } + }); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + + // 1. Submit REFRESH statement + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.refreshQuery, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + // mock index state. + flintIndexJob.refreshing(); + + // 2. Cancel query + String cancelResponse = asyncQueryExecutorService.cancelQuery(response.getQueryId()); + + assertNotNull(cancelResponse); + assertTrue(clusterService.state().routingTable().hasIndex(mockDS.indexName)); + + // assert state is active + flintIndexJob.assertState(FlintIndexState.ACTIVE); + }); + } + + /** Cancel REFRESH statement should success */ + @Test + public void cancelRefreshStatementWithActiveState() { + ImmutableList.of(SKIPPING, COVERING, MV) + .forEach( + mockDS -> { + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService( + (accountId) -> + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult( + String applicationId, String jobId) { + return new GetJobRunResult() + .withJobRun(new JobRun().withState("Cancelled")); + } + }); + + // Mock flint index + mockDS.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, mockDS.latestId, MYS3_DATASOURCE); + + // 1. Submit REFRESH statement + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + mockDS.refreshQuery, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + // mock index state. + flintIndexJob.active(); + + // 2. 
Cancel query + IllegalStateException illegalStateException = + Assertions.assertThrows( + IllegalStateException.class, + () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); + Assertions.assertEquals( + "Transaction failed as flint index is not in a valid state.", + illegalStateException.getMessage()); + + // assert state is active + flintIndexJob.assertState(FlintIndexState.ACTIVE); + }); + } + + @Test + public void cancelRefreshStatementWithFailureInFetchingIndexMetadata() { + String indexName = "flint_my_glue_mydb_http_logs_covering_corrupted_index"; + MockFlintIndex mockFlintIndex = + new MockFlintIndex(client(), indexName, FlintIndexType.COVERING, null); + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService( + (accountId) -> + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); + } + }); + + mockFlintIndex.createIndex(); + // Mock index state + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, indexName + "_latest_id", MYS3_DATASOURCE); + + // 1. Submit REFRESH statement + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest( + "REFRESH INDEX covering_corrupted ON my_glue.mydb.http_logs", + MYS3_DATASOURCE, + LangType.SQL, + null), + asyncQueryRequestContext); + // mock index state. + flintIndexJob.refreshing(); + + // 2. Cancel query + Assertions.assertThrows( + IllegalStateException.class, + () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecVacuumTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecVacuumTest.java new file mode 100644 index 0000000000..439b2ed2d6 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecVacuumTest.java @@ -0,0 +1,217 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.opensearch.sql.spark.flint.FlintIndexState.ACTIVE; +import static org.opensearch.sql.spark.flint.FlintIndexState.CREATING; +import static org.opensearch.sql.spark.flint.FlintIndexState.DELETED; +import static org.opensearch.sql.spark.flint.FlintIndexState.EMPTY; +import static org.opensearch.sql.spark.flint.FlintIndexState.REFRESHING; +import static org.opensearch.sql.spark.flint.FlintIndexState.VACUUMING; +import static org.opensearch.sql.spark.flint.FlintIndexType.COVERING; +import static org.opensearch.sql.spark.flint.FlintIndexType.MATERIALIZED_VIEW; +import static org.opensearch.sql.spark.flint.FlintIndexType.SKIPPING; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.google.common.collect.Lists; +import java.util.Base64; +import java.util.List; +import java.util.function.BiConsumer; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.Test; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.get.GetRequest; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import 
org.opensearch.sql.spark.asyncquery.model.MockFlintSparkJob; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; + +@SuppressWarnings({"unchecked", "rawtypes"}) +public class IndexQuerySpecVacuumTest extends AsyncQueryExecutorServiceSpec { + + private static final EMRApiCall DEFAULT_OP = () -> null; + + private final List FLINT_TEST_DATASETS = + List.of( + mockDataset( + "VACUUM SKIPPING INDEX ON mys3.default.http_logs", + SKIPPING, + "flint_mys3_default_http_logs_skipping_index"), + mockDataset( + "VACUUM INDEX covering ON mys3.default.http_logs", + COVERING, + "flint_mys3_default_http_logs_covering_index"), + mockDataset( + "VACUUM MATERIALIZED VIEW mys3.default.http_logs_metrics", + MATERIALIZED_VIEW, + "flint_mys3_default_http_logs_metrics"), + mockDataset( + "VACUUM SKIPPING INDEX ON mys3.default.`test ,:\"+/\\|?#><`", + SKIPPING, + "flint_mys3_default_test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c_skipping_index") + .isSpecialCharacter(true)); + + @Test + public void shouldVacuumIndexInDeletedState() { + List> testCases = + Lists.cartesianProduct( + FLINT_TEST_DATASETS, + List.of(DELETED), + List.of( + Pair.of( + DEFAULT_OP, + () -> new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled"))))); + + runVacuumTestSuite( + testCases, + (mockDS, response) -> { + assertEquals("SUCCESS", response.getStatus()); + assertFalse(flintIndexExists(mockDS.indexName)); + assertFalse(indexDocExists(mockDS.latestId)); + }); + } + + @Test + public void shouldNotVacuumIndexInOtherStates() { + List> testCases = + Lists.cartesianProduct( + FLINT_TEST_DATASETS, + List.of(EMPTY, CREATING, ACTIVE, REFRESHING, VACUUMING), + List.of( + Pair.of( + () -> { + throw new AssertionError("should not call cancelJobRun"); + }, + () -> { + throw new AssertionError("should not call getJobRunResult"); + }))); + + runVacuumTestSuite( + testCases, + (mockDS, response) -> { + assertEquals("FAILED", response.getStatus()); + assertTrue(flintIndexExists(mockDS.indexName)); + assertTrue(indexDocExists(mockDS.latestId)); + }); + } + + private void runVacuumTestSuite( + List> testCases, + BiConsumer assertion) { + testCases.forEach( + params -> { + FlintDatasetMock mockDS = (FlintDatasetMock) params.get(0); + try { + FlintIndexState state = (FlintIndexState) params.get(1); + EMRApiCall cancelJobRun = ((Pair) params.get(2)).getLeft(); + EMRApiCall getJobRunResult = ((Pair) params.get(2)).getRight(); + + AsyncQueryExecutionResponse response = + runVacuumTest(mockDS, state, cancelJobRun, getJobRunResult); + assertion.accept(mockDS, response); + } finally { + // Clean up because we simulate parameterized test in single unit test method + if (flintIndexExists(mockDS.indexName)) { + mockDS.deleteIndex(); + } + if (indexDocExists(mockDS.latestId)) { + deleteIndexDoc(mockDS.latestId); + } + } + }); + } + + private AsyncQueryExecutionResponse runVacuumTest( + FlintDatasetMock mockDS, + FlintIndexState state, + EMRApiCall cancelJobRun, + EMRApiCall getJobRunResult) { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean 
allowExceptionPropagation) { + if (cancelJobRun == DEFAULT_OP) { + return super.cancelJobRun(applicationId, jobId, allowExceptionPropagation); + } + return cancelJobRun.call(); + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + if (getJobRunResult == DEFAULT_OP) { + return super.getJobRunResult(applicationId, jobId); + } + return getJobRunResult.call(); + } + }; + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + AsyncQueryExecutorService asyncQueryExecutorService = + createAsyncQueryExecutorService(emrServerlessClientFactory); + + // Mock Flint index + mockDS.createIndex(); + + // Mock index state doc + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob(flintIndexStateModelService, mockDS.latestId, "mys3"); + flintIndexJob.transition(state); + + // Vacuum index + CreateAsyncQueryResponse response = + asyncQueryExecutorService.createAsyncQuery( + new CreateAsyncQueryRequest(mockDS.query, MYS3_DATASOURCE, LangType.SQL, null), + asyncQueryRequestContext); + + return asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); + } + + private boolean flintIndexExists(String flintIndexName) { + return client + .admin() + .indices() + .exists(new IndicesExistsRequest(flintIndexName)) + .actionGet() + .isExists(); + } + + private boolean indexDocExists(String docId) { + return client + .get(new GetRequest(OpenSearchStateStoreUtil.getIndexName("mys3"), docId)) + .actionGet() + .isExists(); + } + + private void deleteIndexDoc(String docId) { + client + .delete(new DeleteRequest(OpenSearchStateStoreUtil.getIndexName("mys3"), docId)) + .actionGet(); + } + + private FlintDatasetMock mockDataset(String query, FlintIndexType indexType, String indexName) { + FlintDatasetMock dataset = new FlintDatasetMock(query, "", indexType, indexName); + dataset.latestId(Base64.getEncoder().encodeToString(indexName.getBytes())); + return dataset; + } + + /** + * EMR API call mock interface. 
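
Three pieces of this vacuum suite's plumbing can be reproduced with Guava and the JDK alone: the (dataset, state, EMR-S behavior) matrix built by Lists.cartesianProduct, the Base64 latestId computed in mockDataset, and, approximately, the percent-encoded special-character index name. In the demo below the URL-encoding step is an approximation of whatever Flint actually uses, since java.net.URLEncoder emits '+' for spaces and uppercase hex and has to be post-processed:

    import com.google.common.collect.Lists;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import java.util.List;

    public class VacuumPlumbingDemo {
      public static void main(String[] args) {
        // 1. Every (dataset, state, behavior) combination, as in runVacuumTestSuite.
        List<List<String>> cases =
            Lists.cartesianProduct(
                List.of("SKIPPING", "COVERING"), // stand-ins for the dataset mocks
                List.of("DELETED"),              // index states under test
                List.of("cancel-ok"));           // stand-ins for the EMR-S call pairs
        cases.forEach(System.out::println);

        // 2. latestId, exactly as mockDataset computes it.
        String indexName = "flint_mys3_default_http_logs_skipping_index";
        System.out.println(Base64.getEncoder().encodeToString(indexName.getBytes()));

        // 3. Approximation of the special-character index name in FLINT_TEST_DATASETS.
        String table = "test ,:\"+/\\|?#><";
        String encoded =
            URLEncoder.encode(table, StandardCharsets.UTF_8)
                .replace("+", "%20") // spaces are the only '+' left after encoding
                .toLowerCase();
        System.out.println("flint_mys3_default_" + encoded + "_skipping_index");
        // prints flint_mys3_default_test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c_skipping_index
      }
    }
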
+ * + * @param API call response type + */ + @FunctionalInterface + public interface EMRApiCall { + V call(); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageServiceTest.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageServiceTest.java new file mode 100644 index 0000000000..c84d68421d --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/OpenSearchAsyncQueryJobMetadataStorageServiceTest.java @@ -0,0 +1,103 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery; + +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; + +import java.util.Optional; +import org.junit.Before; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.xcontent.AsyncQueryJobMetadataXContentSerializer; +import org.opensearch.sql.spark.utils.IDUtils; +import org.opensearch.test.OpenSearchIntegTestCase; + +public class OpenSearchAsyncQueryJobMetadataStorageServiceTest extends OpenSearchIntegTestCase { + + public static final String DS_NAME = "mys3"; + private static final String MOCK_SESSION_ID = "sessionId"; + private static final String MOCK_RESULT_INDEX = "resultIndex"; + private static final String MOCK_QUERY_ID = "00fdo6u94n7abo0q"; + private OpenSearchAsyncQueryJobMetadataStorageService openSearchJobMetadataStorageService; + private AsyncQueryRequestContext asyncQueryRequestContext = new NullAsyncQueryRequestContext(); + + @Before + public void setup() { + openSearchJobMetadataStorageService = + new OpenSearchAsyncQueryJobMetadataStorageService( + new StateStore(client(), clusterService()), + new AsyncQueryJobMetadataXContentSerializer()); + } + + @Test + public void testStoreJobMetadata() { + AsyncQueryJobMetadata expected = + AsyncQueryJobMetadata.builder() + .queryId(IDUtils.encode(DS_NAME)) + .jobId(EMR_JOB_ID) + .applicationId(EMRS_APPLICATION_ID) + .resultIndex(MOCK_RESULT_INDEX) + .datasourceName(DS_NAME) + .build(); + + openSearchJobMetadataStorageService.storeJobMetadata(expected, asyncQueryRequestContext); + Optional actual = + openSearchJobMetadataStorageService.getJobMetadata(expected.getQueryId()); + + assertTrue(actual.isPresent()); + assertEquals(expected, actual.get()); + assertEquals(expected, actual.get()); + assertNull(actual.get().getSessionId()); + } + + @Test + public void testStoreJobMetadataWithResultExtraData() { + AsyncQueryJobMetadata expected = + AsyncQueryJobMetadata.builder() + .queryId(IDUtils.encode(DS_NAME)) + .jobId(EMR_JOB_ID) + .applicationId(EMRS_APPLICATION_ID) + .resultIndex(MOCK_RESULT_INDEX) + .sessionId(MOCK_SESSION_ID) + .datasourceName(DS_NAME) + .build(); + + openSearchJobMetadataStorageService.storeJobMetadata(expected, asyncQueryRequestContext); + Optional actual = + openSearchJobMetadataStorageService.getJobMetadata(expected.getQueryId()); + + assertTrue(actual.isPresent()); + assertEquals(expected, 
actual.get()); + assertEquals(MOCK_RESULT_INDEX, actual.get().getResultIndex()); + assertEquals(MOCK_SESSION_ID, actual.get().getSessionId()); + } + + @Test + public void testGetJobMetadataWithMalformedQueryId() { + AsyncQueryNotFoundException asyncQueryNotFoundException = + Assertions.assertThrows( + AsyncQueryNotFoundException.class, + () -> openSearchJobMetadataStorageService.getJobMetadata(MOCK_QUERY_ID)); + Assertions.assertEquals( + String.format("Invalid QueryId: %s", MOCK_QUERY_ID), + asyncQueryNotFoundException.getMessage()); + } + + @Test + public void testGetJobMetadataWithEmptyQueryId() { + AsyncQueryNotFoundException asyncQueryNotFoundException = + Assertions.assertThrows( + AsyncQueryNotFoundException.class, + () -> openSearchJobMetadataStorageService.getJobMetadata("")); + Assertions.assertEquals("Invalid QueryId: ", asyncQueryNotFoundException.getMessage()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintIndex.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintIndex.java new file mode 100644 index 0000000000..e25250fd09 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintIndex.java @@ -0,0 +1,71 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery.model; + +import java.util.Map; +import lombok.Getter; +import lombok.SneakyThrows; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.opensearch.client.Client; +import org.opensearch.sql.spark.flint.FlintIndexType; +import org.opensearch.sql.spark.utils.TestUtils; + +@Getter +public class MockFlintIndex { + private final String indexName; + private final Client client; + private final FlintIndexType flintIndexType; + private final String query; + + public MockFlintIndex( + Client client, String indexName, FlintIndexType flintIndexType, String query) { + this.client = client; + this.indexName = indexName; + this.flintIndexType = flintIndexType; + this.query = query; + } + + public void createIndex() { + String mappingFile = String.format("flint-index-mappings/%s_mapping.json", indexName); + TestUtils.createIndexWithMappings(client, indexName, mappingFile); + } + + public String getLatestId() { + return this.indexName + "_latest_id"; + } + + @SneakyThrows + public void deleteIndex() { + client.admin().indices().delete(new DeleteIndexRequest().indices(indexName)).get(); + } + + public Map getIndexMappings() { + return client + .admin() + .indices() + .prepareGetMappings(indexName) + .get() + .getMappings() + .get(indexName) + .getSourceAsMap(); + } + + public void updateIndexOptions(Map newOptions, Boolean replaceCompletely) { + GetMappingsResponse mappingsResponse = + client.admin().indices().prepareGetMappings().setIndices(indexName).get(); + Map flintMetadataMap = + mappingsResponse.getMappings().get(indexName).getSourceAsMap(); + Map meta = (Map) flintMetadataMap.get("_meta"); + Map options = (Map) meta.get("options"); + if (replaceCompletely) { + meta.put("options", newOptions); + } else { + options.putAll(newOptions); + } + client.admin().indices().preparePutMapping(indexName).setSource(flintMetadataMap).get(); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintSparkJob.java b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintSparkJob.java 
new file mode 100644 index 0000000000..6c82188ee6 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/asyncquery/model/MockFlintSparkJob.java @@ -0,0 +1,75 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.asyncquery.model; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Optional; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; +import org.opensearch.sql.spark.flint.FlintIndexStateModelService; + +public class MockFlintSparkJob { + private FlintIndexStateModel stateModel; + private FlintIndexStateModelService flintIndexStateModelService; + private String datasource; + + public MockFlintSparkJob( + FlintIndexStateModelService flintIndexStateModelService, String latestId, String datasource) { + assertNotNull(latestId); + this.flintIndexStateModelService = flintIndexStateModelService; + this.datasource = datasource; + stateModel = + FlintIndexStateModel.builder() + .indexState(FlintIndexState.EMPTY) + .applicationId("mockAppId") + .jobId("mockJobId") + .latestId(latestId) + .datasourceName(datasource) + .lastUpdateTime(System.currentTimeMillis()) + .error("") + .build(); + stateModel = flintIndexStateModelService.createFlintIndexStateModel(stateModel); + } + + public void transition(FlintIndexState newState) { + stateModel = + flintIndexStateModelService.updateFlintIndexState(stateModel, newState, datasource); + } + + public void refreshing() { + transition(FlintIndexState.REFRESHING); + } + + public void active() { + transition(FlintIndexState.ACTIVE); + } + + public void creating() { + transition(FlintIndexState.CREATING); + } + + public void updating() { + transition(FlintIndexState.UPDATING); + } + + public void deleting() { + transition(FlintIndexState.DELETING); + } + + public void deleted() { + transition(FlintIndexState.DELETED); + } + + public void assertState(FlintIndexState expected) { + Optional stateModelOpt = + flintIndexStateModelService.getFlintIndexStateModel(stateModel.getId(), datasource); + assertTrue(stateModelOpt.isPresent()); + assertEquals(expected, stateModelOpt.get().getIndexState()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTaskTest.java b/async-query/src/test/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTaskTest.java new file mode 100644 index 0000000000..c5964a61e3 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/cluster/FlintStreamingJobHouseKeeperTaskTest.java @@ -0,0 +1,578 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.cluster; + +import static org.opensearch.sql.datasource.model.DataSourceStatus.DISABLED; +import static org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceSpec.MYGLUE_DATASOURCE; + +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRun; +import com.google.common.collect.ImmutableList; +import java.util.HashMap; +import java.util.Map; +import lombok.SneakyThrows; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.opensearch.sql.datasource.model.DataSourceStatus; +import org.opensearch.sql.legacy.metrics.MetricName; +import 
org.opensearch.sql.legacy.metrics.Metrics; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceSpec; +import org.opensearch.sql.spark.asyncquery.model.MockFlintIndex; +import org.opensearch.sql.spark.asyncquery.model.MockFlintSparkJob; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; +import org.opensearch.sql.spark.flint.FlintIndexMetadataService; +import org.opensearch.sql.spark.flint.FlintIndexMetadataServiceImpl; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexType; + +public class FlintStreamingJobHouseKeeperTaskTest extends AsyncQueryExecutorServiceSpec { + + @Test + @SneakyThrows + public void testStreamingJobHouseKeeperWhenDataSourceDisabled() { + ImmutableList mockFlintIndices = getMockFlintIndices(); + Map indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map mappings = INDEX.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + private ImmutableList getMockFlintIndices() { + return ImmutableList.of(getSkipping(), getCovering(), getMv()); + } + + private MockFlintIndex getMv() { + return new MockFlintIndex( + client, + "flint_my_glue_mydb_mv", + FlintIndexType.MATERIALIZED_VIEW, + "ALTER MATERIALIZED VIEW my_glue.mydb.mv WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\") "); + } + + private MockFlintIndex getCovering() { + return new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_covering_index", + FlintIndexType.COVERING, + "ALTER INDEX covering ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\")"); + } + + private MockFlintIndex getSkipping() { + return new MockFlintIndex( + client, + "flint_my_glue_mydb_http_logs_skipping_index", + FlintIndexType.SKIPPING, + "ALTER SKIPPING INDEX ON my_glue.mydb.http_logs WITH (auto_refresh=false," + + " incremental_refresh=true, output_mode=\"complete\")"); + } + + @Test + @SneakyThrows + 
public void testStreamingJobHouseKeeperWhenCancelJobGivesTimeout() { + ImmutableList mockFlintIndices = getMockFlintIndices(); + Map indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = new LocalEMRSClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.REFRESHING); + Map mappings = INDEX.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(9); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 3L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @Test + @SneakyThrows + public void testSimulateConcurrentJobHouseKeeperExecution() { + ImmutableList mockFlintIndices = getMockFlintIndices(); + Map indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = new LocalEMRSClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + FlintStreamingJobHouseKeeperTask.isRunning.compareAndSet(false, true); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.REFRESHING); + Map mappings = INDEX.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(0); + emrsClient.getJobRunResultCalled(0); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + 
.getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + FlintStreamingJobHouseKeeperTask.isRunning.compareAndSet(true, false); + } + + @SneakyThrows + @Test + public void testStreamingJobClearnerWhenDataSourceIsDeleted() { + ImmutableList mockFlintIndices = getMockFlintIndices(); + Map indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + this.dataSourceService.deleteDataSource(MYGLUE_DATASOURCE); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.DELETED); + Map mappings = INDEX.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @Test + @SneakyThrows + public void testStreamingJobHouseKeeperWhenDataSourceIsNeitherDisabledNorDeleted() { + ImmutableList mockFlintIndices = getMockFlintIndices(); + Map indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.REFRESHING); + Map mappings = INDEX.getIndexMappings(); + Map meta = (HashMap) mappings.get("_meta"); + Map options = (Map) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(0); + 
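
The isRunning flag that testSimulateConcurrentJobHouseKeeperExecution pre-sets (and then observes as a complete no-op: zero EMR-S calls, options untouched) is the classic AtomicBoolean single-flight guard. A minimal sketch of that pattern, with names assumed for illustration:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class SingleFlightSketch {
      static final AtomicBoolean isRunning = new AtomicBoolean(false);

      static void runGuarded(Runnable task) {
        if (!isRunning.compareAndSet(false, true)) {
          System.out.println("skipped: another run is in flight");
          return;
        }
        try {
          task.run();
        } finally {
          isRunning.set(false); // always release, even if the task fails
        }
      }

      public static void main(String[] args) {
        isRunning.set(true); // simulate the flag being pre-set, as the test does
        runGuarded(() -> System.out.println("never printed"));
        isRunning.set(false);
        runGuarded(() -> System.out.println("runs normally"));
      }
    }
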
emrsClient.getJobRunResultCalled(0); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @Test + public void testStreamingJobHouseKeeperWhenS3GlueIsDisabledButNotStreamingJobQueries() + throws InterruptedException { + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + emrsClient.getJobRunResultCalled(0); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @Test + public void testStreamingJobHouseKeeperWhenFlintIndexIsCorrupted() throws InterruptedException { + String indexName = "flint_my_glue_mydb_http_logs_covering_error_index"; + MockFlintIndex mockFlintIndex = + new MockFlintIndex(client(), indexName, FlintIndexType.COVERING, null); + mockFlintIndex.createIndex(); + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + emrsClient.getJobRunResultCalled(0); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + Assertions.assertEquals( + 1L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @SneakyThrows + @Test + public void testErrorScenario() { + LocalEMRSClient emrsClient = + new LocalEMRSClient() { + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + super.getJobRunResult(applicationId, jobId); + JobRun jobRun = new JobRun(); + jobRun.setState("cancelled"); + return new GetJobRunResult().withJobRun(jobRun); + } + }; + FlintIndexMetadataService flintIndexMetadataService = + new FlintIndexMetadataService() { + @Override + public Map getFlintIndexMetadata(String indexPattern) { + throw new RuntimeException("Couldn't fetch details from ElasticSearch"); + } + + @Override + public void updateIndexToManualRefresh( + String indexName, FlintIndexOptions flintIndexOptions) {} + }; + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new 
Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + Assertions.assertFalse(FlintStreamingJobHouseKeeperTask.isRunning.get()); + emrsClient.getJobRunResultCalled(0); + emrsClient.startJobRunCalled(0); + emrsClient.cancelJobRunCalled(0); + Assertions.assertEquals( + 1L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @Test + @SneakyThrows + public void testStreamingJobHouseKeeperMultipleTimesWhenDataSourceDisabled() { + ImmutableList<MockFlintIndex> mockFlintIndices = getMockFlintIndices(); + Map<MockFlintIndex, MockFlintSparkJob> indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap<String, String> existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + changeDataSourceStatus(MYGLUE_DATASOURCE, DISABLED); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map<String, Object> mappings = INDEX.getIndexMappings(); + Map<String, Object> meta = (HashMap<String, Object>) mappings.get("_meta"); + Map<String, Object> options = (Map<String, Object>) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + + // Second Run + Thread thread2 = new Thread(flintStreamingJobHouseKeeperTask); + thread2.start(); + thread2.join(); + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.ACTIVE); + Map<String, Object> mappings = INDEX.getIndexMappings(); + Map<String, Object> meta = (HashMap<String, Object>) mappings.get("_meta"); + Map<String, Object> options = (Map<String, Object>) meta.get("options"); + Assertions.assertEquals("false", options.get("auto_refresh")); + }); + + // No New Calls and Errors + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + @SneakyThrows + @Test + public void testRunStreamingJobHouseKeeperWhenDataSourceIsDeleted() { + ImmutableList<MockFlintIndex> mockFlintIndices = getMockFlintIndices(); + Map<MockFlintIndex, MockFlintSparkJob> indexJobMapping = new HashMap<>(); + mockFlintIndices.forEach( + INDEX -> { + INDEX.createIndex(); + MockFlintSparkJob flintIndexJob = + new MockFlintSparkJob( + flintIndexStateModelService, INDEX.getLatestId(), MYGLUE_DATASOURCE); + indexJobMapping.put(INDEX, flintIndexJob); + HashMap<String, String> existingOptions = new HashMap<>(); + existingOptions.put("auto_refresh", "true"); + // Making Index Auto Refresh + INDEX.updateIndexOptions(existingOptions, false); + flintIndexJob.refreshing(); + }); + this.dataSourceService.deleteDataSource(MYGLUE_DATASOURCE); + LocalEMRSClient emrsClient = getCancelledLocalEmrsClient(); + FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client); + FlintStreamingJobHouseKeeperTask flintStreamingJobHouseKeeperTask = + new FlintStreamingJobHouseKeeperTask( + dataSourceService, + flintIndexMetadataService, + getFlintIndexOpFactory((accountId) -> emrsClient)); + + Thread thread = new Thread(flintStreamingJobHouseKeeperTask); + thread.start(); + thread.join(); + + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.DELETED); + Map<String, Object> mappings = INDEX.getIndexMappings(); + Map<String, Object> meta = (HashMap<String, Object>) mappings.get("_meta"); + Map<String, Object> options = (Map<String, Object>) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + + // Second Run + Thread thread2 = new Thread(flintStreamingJobHouseKeeperTask); + thread2.start(); + thread2.join(); + mockFlintIndices.forEach( + INDEX -> { + MockFlintSparkJob flintIndexJob = indexJobMapping.get(INDEX); + flintIndexJob.assertState(FlintIndexState.DELETED); + Map<String, Object> mappings = INDEX.getIndexMappings(); + Map<String, Object> meta = (HashMap<String, Object>) mappings.get("_meta"); + Map<String, Object> options = (Map<String, Object>) meta.get("options"); + Assertions.assertEquals("true", options.get("auto_refresh")); + }); + // No New Calls and Errors + emrsClient.cancelJobRunCalled(3); + emrsClient.getJobRunResultCalled(3); + emrsClient.startJobRunCalled(0); + Assertions.assertEquals( + 0L, + Metrics.getInstance() + .getNumericalMetric(MetricName.STREAMING_JOB_HOUSEKEEPER_TASK_FAILURE_COUNT) + .getValue()); + } + + private void changeDataSourceStatus(String dataSourceName, DataSourceStatus dataSourceStatus) { + HashMap<String, Object> datasourceMap = new HashMap<>(); + datasourceMap.put("name", dataSourceName); + datasourceMap.put("status", dataSourceStatus); + this.dataSourceService.patchDataSource(datasourceMap); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposerTest.java new file mode 100644 index 0000000000..d3b0b2727a --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/config/OpenSearchExtraParameterComposerTest.java @@ -0,0 +1,52 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +import java.util.Optional; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; +import org.opensearch.sql.spark.parameter.SparkSubmitParameters; + +@ExtendWith(MockitoExtension.class) +class 
OpenSearchExtraParameterComposerTest { + + public static final String PARAMS = "PARAMS"; + @Mock SparkExecutionEngineConfigClusterSettingLoader settingsLoader; + @Mock SparkSubmitParameters sparkSubmitParameters; + @Mock DispatchQueryRequest dispatchQueryRequest; + @Mock AsyncQueryRequestContext context; + + @InjectMocks OpenSearchExtraParameterComposer openSearchExtraParameterComposer; + + @Test + public void paramExists_compose() { + SparkExecutionEngineConfigClusterSetting setting = + SparkExecutionEngineConfigClusterSetting.builder().sparkSubmitParameters(PARAMS).build(); + when(settingsLoader.load()).thenReturn(Optional.of(setting)); + + openSearchExtraParameterComposer.compose(sparkSubmitParameters, dispatchQueryRequest, context); + + verify(sparkSubmitParameters).setExtraParameters(PARAMS); + } + + @Test + public void paramNotExist_compose() { + when(settingsLoader.load()).thenReturn(Optional.empty()); + + openSearchExtraParameterComposer.compose(sparkSubmitParameters, dispatchQueryRequest, context); + + verifyNoInteractions(sparkSubmitParameters); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoaderTest.java b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoaderTest.java new file mode 100644 index 0000000000..f9ccd93b00 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingLoaderTest.java @@ -0,0 +1,67 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_ENGINE_CONFIG; +import static org.opensearch.sql.spark.constants.TestConstants.ACCOUNT_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.SPARK_SUBMIT_PARAMETERS; +import static org.opensearch.sql.spark.constants.TestConstants.US_WEST_REGION; + +import java.util.Optional; +import org.json.JSONObject; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.sql.common.setting.Settings; + +@ExtendWith(MockitoExtension.class) +class SparkExecutionEngineConfigClusterSettingLoaderTest { + @Mock Settings settings; + + @InjectMocks + SparkExecutionEngineConfigClusterSettingLoader sparkExecutionEngineConfigClusterSettingLoader; + + @Test + public void blankConfig() { + when(settings.getSettingValue(SPARK_EXECUTION_ENGINE_CONFIG)).thenReturn(""); + + Optional<SparkExecutionEngineConfigClusterSetting> result = + sparkExecutionEngineConfigClusterSettingLoader.load(); + + assertTrue(result.isEmpty()); + } + + @Test + public void validConfig() { + when(settings.getSettingValue(SPARK_EXECUTION_ENGINE_CONFIG)).thenReturn(getConfigJson()); + + SparkExecutionEngineConfigClusterSetting result = + sparkExecutionEngineConfigClusterSettingLoader.load().get(); + + Assertions.assertEquals(ACCOUNT_ID, result.getAccountId()); + Assertions.assertEquals(EMRS_APPLICATION_ID, result.getApplicationId()); + Assertions.assertEquals(EMRS_EXECUTION_ROLE, 
result.getExecutionRoleARN()); + Assertions.assertEquals(US_WEST_REGION, result.getRegion()); + Assertions.assertEquals(SPARK_SUBMIT_PARAMETERS, result.getSparkSubmitParameters()); + } + + String getConfigJson() { + return new JSONObject() + .put("accountId", ACCOUNT_ID) + .put("applicationId", EMRS_APPLICATION_ID) + .put("executionRoleARN", EMRS_EXECUTION_ROLE) + .put("region", US_WEST_REGION) + .put("sparkSubmitParameters", SPARK_SUBMIT_PARAMETERS) + .toString(); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingTest.java b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingTest.java new file mode 100644 index 0000000000..c6be37567d --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigClusterSettingTest.java @@ -0,0 +1,49 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +import org.junit.jupiter.api.Test; + +public class SparkExecutionEngineConfigClusterSettingTest { + + @Test + public void testToSparkExecutionEngineConfigWithoutAllFields() { + String json = + "{" + + "\"applicationId\": \"app-1\"," + + "\"executionRoleARN\": \"role-1\"," + + "\"region\": \"us-west-1\"" + + "}"; + SparkExecutionEngineConfigClusterSetting config = + SparkExecutionEngineConfigClusterSetting.toSparkExecutionEngineConfig(json); + + assertEquals("app-1", config.getApplicationId()); + assertEquals("role-1", config.getExecutionRoleARN()); + assertEquals("us-west-1", config.getRegion()); + assertNull(config.getSparkSubmitParameters()); + } + + @Test + public void testToSparkExecutionEngineConfigWithAllFields() { + String json = + "{" + + "\"applicationId\": \"app-1\"," + + "\"executionRoleARN\": \"role-1\"," + + "\"region\": \"us-west-1\"," + + "\"sparkSubmitParameters\": \"--conf A=1\"" + + "}"; + SparkExecutionEngineConfigClusterSetting config = + SparkExecutionEngineConfigClusterSetting.toSparkExecutionEngineConfig(json); + + assertEquals("app-1", config.getApplicationId()); + assertEquals("role-1", config.getExecutionRoleARN()); + assertEquals("us-west-1", config.getRegion()); + assertEquals("--conf A=1", config.getSparkSubmitParameters()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImplTest.java b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImplTest.java new file mode 100644 index 0000000000..124d8d0b6e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/config/SparkExecutionEngineConfigSupplierImplTest.java @@ -0,0 +1,81 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.config; + +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; +import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE; +import static org.opensearch.sql.spark.constants.TestConstants.SPARK_SUBMIT_PARAMETERS; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.constants.TestConstants.US_WEST_REGION; + +import java.util.Optional; +import org.junit.jupiter.api.Assertions; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.cluster.ClusterName; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; + +@ExtendWith(MockitoExtension.class) +public class SparkExecutionEngineConfigSupplierImplTest { + + @Mock private Settings settings; + @Mock private AsyncQueryRequestContext asyncQueryRequestContext; + + @Mock + private SparkExecutionEngineConfigClusterSettingLoader + sparkExecutionEngineConfigClusterSettingLoader; + + @Test + void testGetSparkExecutionEngineConfig() { + SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier = + new SparkExecutionEngineConfigSupplierImpl( + settings, sparkExecutionEngineConfigClusterSettingLoader); + when(settings.getSettingValue(Settings.Key.CLUSTER_NAME)) + .thenReturn(new ClusterName(TEST_CLUSTER_NAME)); + when(sparkExecutionEngineConfigClusterSettingLoader.load()) + .thenReturn(Optional.of(getClusterSetting())); + + SparkExecutionEngineConfig sparkExecutionEngineConfig = + sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(asyncQueryRequestContext); + + Assertions.assertEquals(EMRS_APPLICATION_ID, sparkExecutionEngineConfig.getApplicationId()); + Assertions.assertEquals(EMRS_EXECUTION_ROLE, sparkExecutionEngineConfig.getExecutionRoleARN()); + Assertions.assertEquals(US_WEST_REGION, sparkExecutionEngineConfig.getRegion()); + Assertions.assertEquals(TEST_CLUSTER_NAME, sparkExecutionEngineConfig.getClusterName()); + } + + SparkExecutionEngineConfigClusterSetting getClusterSetting() { + return SparkExecutionEngineConfigClusterSetting.builder() + .applicationId(EMRS_APPLICATION_ID) + .executionRoleARN(EMRS_EXECUTION_ROLE) + .region(US_WEST_REGION) + .sparkSubmitParameters(SPARK_SUBMIT_PARAMETERS) + .build(); + } + + @Test + void testGetSparkExecutionEngineConfigWithNullSetting() { + SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier = + new SparkExecutionEngineConfigSupplierImpl( + settings, sparkExecutionEngineConfigClusterSettingLoader); + when(settings.getSettingValue(Settings.Key.CLUSTER_NAME)) + .thenReturn(new ClusterName(TEST_CLUSTER_NAME)); + when(sparkExecutionEngineConfigClusterSettingLoader.load()).thenReturn(Optional.empty()); + + SparkExecutionEngineConfig sparkExecutionEngineConfig = + sparkExecutionEngineConfigSupplier.getSparkExecutionEngineConfig(asyncQueryRequestContext); + + Assertions.assertNull(sparkExecutionEngineConfig.getApplicationId()); + Assertions.assertNull(sparkExecutionEngineConfig.getExecutionRoleARN()); + Assertions.assertNull(sparkExecutionEngineConfig.getRegion()); + Assertions.assertNull(sparkExecutionEngineConfig.getSparkSubmitParameterModifier()); + Assertions.assertEquals(TEST_CLUSTER_NAME, sparkExecutionEngineConfig.getClusterName()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java b/async-query/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java new file mode 100644 index 0000000000..15871bf6b2 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.constants; + +public class TestConstants { + public static final String TEST_DATASOURCE_NAME = "test_datasource_name"; + public static final String 
EMR_JOB_ID = "job-123xxx"; + public static final String ACCOUNT_ID = "TEST_ACCOUNT_ID"; + public static final String EMRS_APPLICATION_ID = "app-xxxxx"; + public static final String EMRS_EXECUTION_ROLE = "execution_role"; + public static final String SPARK_SUBMIT_PARAMETERS = "--conf org.flint.sql.SQLJob"; + public static final String TEST_CLUSTER_NAME = "TEST_CLUSTER"; + public static final String MOCK_SESSION_ID = "s-0123456"; + public static final String US_WEST_REGION = "us-west-1"; +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java new file mode 100644 index 0000000000..d0bfed94c0 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java @@ -0,0 +1,215 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.session.SessionState.NOT_STARTED; +import static org.opensearch.sql.spark.execution.session.SessionTestUtil.createSessionRequest; + +import java.util.HashMap; +import java.util.Optional; +import lombok.RequiredArgsConstructor; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.client.StartJobRequest; +import org.opensearch.sql.spark.dispatcher.model.JobType; +import org.opensearch.sql.spark.execution.statestore.OpenSearchSessionStorageService; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStatementStorageService; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer; +import org.opensearch.sql.spark.utils.IDUtils; +import org.opensearch.test.OpenSearchIntegTestCase; + +/** mock-maker-inline does not work with OpenSearchTestCase. 
*/ +public class InteractiveSessionTest extends OpenSearchIntegTestCase { + + private static final String indexName = + OpenSearchStateStoreUtil.getIndexName(TEST_DATASOURCE_NAME); + + private TestEMRServerlessClient emrsClient; + private StartJobRequest startJobRequest; + private SessionStorageService sessionStorageService; + private StatementStorageService statementStorageService; + private SessionConfigSupplier sessionConfigSupplier = () -> 600000L; + private SessionManager sessionManager; + private AsyncQueryRequestContext asyncQueryRequestContext = new NullAsyncQueryRequestContext(); + private SessionIdProvider sessionIdProvider = new DatasourceEmbeddedSessionIdProvider(); + + @Before + public void setup() { + emrsClient = new TestEMRServerlessClient(); + startJobRequest = new StartJobRequest("", null, "appId", "", "", new HashMap<>(), false, ""); + StateStore stateStore = new StateStore(client(), clusterService()); + sessionStorageService = + new OpenSearchSessionStorageService(stateStore, new SessionModelXContentSerializer()); + statementStorageService = + new OpenSearchStatementStorageService(stateStore, new StatementModelXContentSerializer()); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + + sessionManager = + new SessionManager( + sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + sessionIdProvider); + } + + @After + public void clean() { + if (clusterService().state().routingTable().hasIndex(indexName)) { + client().admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet(); + } + } + + @Test + public void openCloseSession() { + String sessionId = IDUtils.encode(TEST_DATASOURCE_NAME); + InteractiveSession session = + InteractiveSession.builder() + .sessionId(sessionId) + .statementStorageService(statementStorageService) + .sessionStorageService(sessionStorageService) + .serverlessClient(emrsClient) + .build(); + + SessionAssertions assertions = new SessionAssertions(session); + assertions + .open(createSessionRequest()) + .assertSessionState(NOT_STARTED) + .assertAppId("appId") + .assertJobId("jobId"); + emrsClient.startJobRunCalled(1); + emrsClient.assertJobNameOfLastRequest( + TEST_CLUSTER_NAME + ":" + JobType.INTERACTIVE.getText() + ":" + sessionId); + + // close session + assertions.close(); + emrsClient.cancelJobRunCalled(1); + } + + @Test + public void openSessionFailedConflict() { + String sessionId = IDUtils.encode(TEST_DATASOURCE_NAME); + InteractiveSession session = + InteractiveSession.builder() + .sessionId(sessionId) + .sessionStorageService(sessionStorageService) + .statementStorageService(statementStorageService) + .serverlessClient(emrsClient) + .build(); + session.open(createSessionRequest(), asyncQueryRequestContext); + + InteractiveSession duplicateSession = + InteractiveSession.builder() + .sessionId(sessionId) + .sessionStorageService(sessionStorageService) + .statementStorageService(statementStorageService) + .serverlessClient(emrsClient) + .build(); + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> duplicateSession.open(createSessionRequest(), asyncQueryRequestContext)); + assertEquals("session already exist. 
" + sessionId, exception.getMessage()); + } + + @Test + public void closeNotExistSession() { + String sessionId = IDUtils.encode(TEST_DATASOURCE_NAME); + InteractiveSession session = + InteractiveSession.builder() + .sessionId(sessionId) + .sessionStorageService(sessionStorageService) + .statementStorageService(statementStorageService) + .serverlessClient(emrsClient) + .build(); + session.open(createSessionRequest(), asyncQueryRequestContext); + + client().delete(new DeleteRequest(indexName, sessionId)).actionGet(); + + IllegalStateException exception = assertThrows(IllegalStateException.class, session::close); + assertEquals("session does not exist. " + sessionId, exception.getMessage()); + emrsClient.cancelJobRunCalled(0); + } + + @Test + public void sessionManagerCreateSession() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + new SessionAssertions(session) + .assertSessionState(NOT_STARTED) + .assertAppId("appId") + .assertJobId("jobId"); + } + + @Test + public void sessionManagerGetSession() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + Optional managerSession = + sessionManager.getSession(session.getSessionId(), TEST_DATASOURCE_NAME); + assertTrue(managerSession.isPresent()); + assertEquals(session.getSessionId(), managerSession.get().getSessionId()); + } + + @Test + public void getSessionWithNonExistingId() { + Optional session = + sessionManager.getSession("non-existing-id", "non-existing-datasource"); + + assertTrue(session.isEmpty()); + } + + @RequiredArgsConstructor + class SessionAssertions { + private final Session session; + + public SessionAssertions assertSessionState(SessionState expected) { + assertEquals(expected, session.getSessionModel().getSessionState()); + + Optional sessionStoreState = + sessionStorageService.getSession(session.getSessionModel().getId(), TEST_DATASOURCE_NAME); + assertTrue(sessionStoreState.isPresent()); + assertEquals(expected, sessionStoreState.get().getSessionState()); + + return this; + } + + public SessionAssertions assertAppId(String expected) { + assertEquals(expected, session.getSessionModel().getApplicationId()); + return this; + } + + public SessionAssertions assertJobId(String expected) { + assertEquals(expected, session.getSessionModel().getJobId()); + return this; + } + + public SessionAssertions open(CreateSessionRequest req) { + session.open(req, asyncQueryRequestContext); + return this; + } + + public SessionAssertions close() { + session.close(); + return this; + } + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/session/SessionTestUtil.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/SessionTestUtil.java new file mode 100644 index 0000000000..e5ca93e96e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/SessionTestUtil.java @@ -0,0 +1,28 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; +import static org.opensearch.sql.spark.constants.TestConstants.TEST_DATASOURCE_NAME; + +import java.util.HashMap; +import org.opensearch.sql.spark.parameter.SparkParameterComposerCollection; +import org.opensearch.sql.spark.parameter.SparkSubmitParametersBuilder; + +public class SessionTestUtil { + + public static CreateSessionRequest createSessionRequest() 
{ + return new CreateSessionRequest( + TEST_CLUSTER_NAME, + null, + "appId", + "arn", + new SparkSubmitParametersBuilder(new SparkParameterComposerCollection()), + new HashMap<>(), + "resultIndex", + TEST_DATASOURCE_NAME); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/session/TestEMRServerlessClient.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/TestEMRServerlessClient.java new file mode 100644 index 0000000000..a6b0e6038e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/session/TestEMRServerlessClient.java @@ -0,0 +1,51 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.session; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import org.junit.Assert; +import org.opensearch.sql.spark.client.EMRServerlessClient; +import org.opensearch.sql.spark.client.StartJobRequest; + +public class TestEMRServerlessClient implements EMRServerlessClient { + + private int startJobRunCalled = 0; + private int cancelJobRunCalled = 0; + + private StartJobRequest startJobRequest; + + @Override + public String startJobRun(StartJobRequest startJobRequest) { + this.startJobRequest = startJobRequest; + startJobRunCalled++; + return "jobId"; + } + + @Override + public GetJobRunResult getJobRunResult(String applicationId, String jobId) { + return null; + } + + @Override + public CancelJobRunResult cancelJobRun( + String applicationId, String jobId, boolean allowExceptionPropagation) { + cancelJobRunCalled++; + return null; + } + + public void startJobRunCalled(int expectedTimes) { + Assert.assertEquals(expectedTimes, startJobRunCalled); + } + + public void cancelJobRunCalled(int expectedTimes) { + Assert.assertEquals(expectedTimes, cancelJobRunCalled); + } + + public void assertJobNameOfLastRequest(String expectedJobName) { + Assert.assertEquals(expectedJobName, startJobRequest.getJobName()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java new file mode 100644 index 0000000000..e76776e2fc --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java @@ -0,0 +1,379 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.statement; + +import static org.opensearch.sql.spark.constants.TestConstants.TEST_DATASOURCE_NAME; +import static org.opensearch.sql.spark.execution.session.SessionTestUtil.createSessionRequest; +import static org.opensearch.sql.spark.execution.statement.StatementState.CANCELLED; +import static org.opensearch.sql.spark.execution.statement.StatementState.RUNNING; +import static org.opensearch.sql.spark.execution.statement.StatementState.WAITING; +import static org.opensearch.sql.spark.execution.statement.StatementTest.TestStatement.testStatement; + +import java.util.Optional; +import lombok.RequiredArgsConstructor; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryRequestContext; +import 
org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext; +import org.opensearch.sql.spark.client.EMRServerlessClientFactory; +import org.opensearch.sql.spark.execution.session.DatasourceEmbeddedSessionIdProvider; +import org.opensearch.sql.spark.execution.session.Session; +import org.opensearch.sql.spark.execution.session.SessionConfigSupplier; +import org.opensearch.sql.spark.execution.session.SessionIdProvider; +import org.opensearch.sql.spark.execution.session.SessionManager; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.session.TestEMRServerlessClient; +import org.opensearch.sql.spark.execution.statestore.OpenSearchSessionStorageService; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStateStoreUtil; +import org.opensearch.sql.spark.execution.statestore.OpenSearchStatementStorageService; +import org.opensearch.sql.spark.execution.statestore.SessionStorageService; +import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.execution.statestore.StatementStorageService; +import org.opensearch.sql.spark.execution.xcontent.SessionModelXContentSerializer; +import org.opensearch.sql.spark.execution.xcontent.StatementModelXContentSerializer; +import org.opensearch.sql.spark.rest.model.LangType; +import org.opensearch.sql.spark.utils.IDUtils; +import org.opensearch.test.OpenSearchIntegTestCase; + +public class StatementTest extends OpenSearchIntegTestCase { + private static final String indexName = + OpenSearchStateStoreUtil.getIndexName(TEST_DATASOURCE_NAME); + + private StatementStorageService statementStorageService; + private SessionStorageService sessionStorageService; + private TestEMRServerlessClient emrsClient = new TestEMRServerlessClient(); + private SessionConfigSupplier sessionConfigSupplier = () -> 600000L; + private SessionIdProvider sessionIdProvider = new DatasourceEmbeddedSessionIdProvider(); + + private SessionManager sessionManager; + private AsyncQueryRequestContext asyncQueryRequestContext = new NullAsyncQueryRequestContext(); + + @Before + public void setup() { + StateStore stateStore = new StateStore(client(), clusterService()); + statementStorageService = + new OpenSearchStatementStorageService(stateStore, new StatementModelXContentSerializer()); + sessionStorageService = + new OpenSearchSessionStorageService(stateStore, new SessionModelXContentSerializer()); + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + + sessionManager = + new SessionManager( + sessionStorageService, + statementStorageService, + emrServerlessClientFactory, + sessionConfigSupplier, + sessionIdProvider); + } + + @After + public void clean() { + if (clusterService().state().routingTable().hasIndex(indexName)) { + client().admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet(); + } + } + + @Test + public void openThenCancelStatement() { + Statement st = buildStatement(); + + // submit statement + TestStatement testStatement = testStatement(st, statementStorageService); + testStatement + .open() + .assertSessionState(WAITING) + .assertStatementId(new StatementId("statementId")); + + // close statement + testStatement.cancel().assertSessionState(CANCELLED); + } + + private Statement buildStatement() { + return buildStatement(new StatementId("statementId")); + } + + private Statement buildStatement(StatementId stId) { + return Statement.builder() + .sessionId("sessionId") + .applicationId("appId") + .jobId("jobId") + 
.statementId(stId) + .langType(LangType.SQL) + .datasourceName(TEST_DATASOURCE_NAME) + .query("query") + .queryId("statementId") + .statementStorageService(statementStorageService) + .build(); + } + + @Test + public void openFailedBecauseConflict() { + Statement st = buildStatement(); + st.open(); + + // open statement with same statement id + Statement dupSt = buildStatement(); + IllegalStateException exception = assertThrows(IllegalStateException.class, dupSt::open); + assertEquals("statement already exist. statementId=statementId", exception.getMessage()); + } + + @Test + public void cancelNotExistStatement_throwsException() { + StatementId stId = new StatementId("statementId"); + Statement st = buildStatement(stId); + st.open(); + + client().delete(new DeleteRequest(indexName, stId.getId())).actionGet(); + + IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); + assertEquals( + String.format("cancel statement failed. no statement found. statement: %s.", stId), + exception.getMessage()); + } + + @Test + public void cancelFailedBecauseOfConflict() { + StatementId stId = new StatementId("statementId"); + Statement st = buildStatement(stId); + st.open(); + + StatementModel running = + statementStorageService.updateStatementState(st.getStatementModel(), CANCELLED); + + assertEquals(StatementState.CANCELLED, running.getStatementState()); + IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); + assertEquals( + String.format( + "cancel statement failed. current statementState: CANCELLED " + "statement: %s.", stId), + exception.getMessage()); + } + + @Test + public void cancelCancelledStatement_throwsException() { + testCancelThrowsExceptionGivenStatementState(StatementState.CANCELLED); + } + + @Test + public void cancelSuccessStatement_throwsException() { + testCancelThrowsExceptionGivenStatementState(StatementState.SUCCESS); + } + + @Test + public void cancelFailedStatement_throwsException() { + testCancelThrowsExceptionGivenStatementState(StatementState.FAILED); + } + + @Test + public void cancelTimeoutStatement_throwsException() { + testCancelThrowsExceptionGivenStatementState(StatementState.TIMEOUT); + } + + private void testCancelThrowsExceptionGivenStatementState(StatementState state) { + StatementId stId = new StatementId("statementId"); + Statement st = createStatement(stId); + + StatementModel model = st.getStatementModel(); + st.setStatementModel( + StatementModel.copyWithState(st.getStatementModel(), state, model.getMetadata())); + + IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); + assertEquals( + String.format("can't cancel statement in %s state. 
statement: %s.", state.getState(), stId), + exception.getMessage()); + } + + @Test + public void cancelRunningStatementSuccess() { + Statement st = buildStatement(); + + // submit statement + TestStatement testStatement = testStatement(st, statementStorageService); + testStatement + .open() + .assertSessionState(WAITING) + .assertStatementId(new StatementId("statementId")); + + testStatement.run(); + + // close statement + testStatement.cancel().assertSessionState(CANCELLED); + } + + @Test + public void submitStatementInRunningSession() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + // App changes state to running + sessionStorageService.updateSessionState(session.getSessionModel(), SessionState.RUNNING); + + StatementId statementId = session.submit(queryRequest(), asyncQueryRequestContext); + assertFalse(statementId.getId().isEmpty()); + } + + @Test + public void submitStatementInNotStartedState() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + StatementId statementId = session.submit(queryRequest(), asyncQueryRequestContext); + assertFalse(statementId.getId().isEmpty()); + } + + @Test + public void failToSubmitStatementInDeadState() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + sessionStorageService.updateSessionState(session.getSessionModel(), SessionState.DEAD); + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> session.submit(queryRequest(), asyncQueryRequestContext)); + assertEquals( + "can't submit statement, session should not be in end state, current session state is:" + + " dead", + exception.getMessage()); + } + + @Test + public void failToSubmitStatementInFailState() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + sessionStorageService.updateSessionState(session.getSessionModel(), SessionState.FAIL); + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> session.submit(queryRequest(), asyncQueryRequestContext)); + assertEquals( + "can't submit statement, session should not be in end state, current session state is:" + + " fail", + exception.getMessage()); + } + + @Test + public void newStatementFieldAssert() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + StatementId statementId = session.submit(queryRequest(), asyncQueryRequestContext); + Optional<Statement> statement = session.get(statementId); + + assertTrue(statement.isPresent()); + assertEquals(session.getSessionId(), statement.get().getSessionId()); + assertEquals("appId", statement.get().getApplicationId()); + assertEquals("jobId", statement.get().getJobId()); + assertEquals(statementId, statement.get().getStatementId()); + assertEquals(WAITING, statement.get().getStatementState()); + assertEquals(LangType.SQL, statement.get().getLangType()); + assertEquals("select 1", statement.get().getQuery()); + } + + @Test + public void failToSubmitStatementInDeletedSession() { + EMRServerlessClientFactory emrServerlessClientFactory = (accountId) -> emrsClient; + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + + // simulate another client deleting the session document + client().delete(new DeleteRequest(indexName, session.getSessionId())).actionGet(); + +
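// With the backing session document gone, submit should fail fast rather than + // queue a statement against a session that no longer exists. +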
IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> session.submit(queryRequest(), asyncQueryRequestContext)); + assertEquals("session does not exist. " + session.getSessionId(), exception.getMessage()); + } + + @Test + public void getStatementSuccess() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + // App changes state to running + sessionStorageService.updateSessionState(session.getSessionModel(), SessionState.RUNNING); + StatementId statementId = session.submit(queryRequest(), asyncQueryRequestContext); + + Optional<Statement> statement = session.get(statementId); + assertTrue(statement.isPresent()); + assertEquals(WAITING, statement.get().getStatementState()); + assertEquals(statementId, statement.get().getStatementId()); + } + + @Test + public void getStatementNotExist() { + Session session = + sessionManager.createSession(createSessionRequest(), asyncQueryRequestContext); + // App changes state to running + sessionStorageService.updateSessionState(session.getSessionModel(), SessionState.RUNNING); + + Optional<Statement> statement = session.get(StatementId.newStatementId("not-exist-id")); + assertFalse(statement.isPresent()); + } + + @RequiredArgsConstructor + static class TestStatement { + private final Statement st; + private final StatementStorageService statementStorageService; + + public static TestStatement testStatement( + Statement st, StatementStorageService statementStorageService) { + return new TestStatement(st, statementStorageService); + } + + public TestStatement assertSessionState(StatementState expected) { + assertEquals(expected, st.getStatementModel().getStatementState()); + + Optional<StatementModel> model = + statementStorageService.getStatement(st.getStatementId().getId(), TEST_DATASOURCE_NAME); + assertTrue(model.isPresent()); + assertEquals(expected, model.get().getStatementState()); + + return this; + } + + public TestStatement assertStatementId(StatementId expected) { + assertEquals(expected, st.getStatementModel().getStatementId()); + + Optional<StatementModel> model = + statementStorageService.getStatement(st.getStatementId().getId(), TEST_DATASOURCE_NAME); + assertTrue(model.isPresent()); + assertEquals(expected, model.get().getStatementId()); + return this; + } + + public TestStatement open() { + st.open(); + return this; + } + + public TestStatement cancel() { + st.cancel(); + return this; + } + + public TestStatement run() { + StatementModel model = + statementStorageService.updateStatementState(st.getStatementModel(), RUNNING); + st.setStatementModel(model); + return this; + } + } + + private QueryRequest queryRequest() { + return new QueryRequest(IDUtils.encode(TEST_DATASOURCE_NAME), LangType.SQL, "select 1"); + } + + private Statement createStatement(StatementId stId) { + Statement st = buildStatement(stId); + st.open(); + return st; + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializerTest.java new file mode 100644 index 0000000000..c43a6f936e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/AsyncQueryJobMetadataXContentSerializerTest.java @@ -0,0 +1,162 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.junit.jupiter.api.Assertions.assertThrows; + +import org.json.JSONObject; +import org.junit.jupiter.api.Test; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.dispatcher.model.JobType; + +class AsyncQueryJobMetadataXContentSerializerTest { + + private final AsyncQueryJobMetadataXContentSerializer serializer = + new AsyncQueryJobMetadataXContentSerializer(); + + @Test + void toXContentShouldSerializeAsyncQueryJobMetadata() throws Exception { + AsyncQueryJobMetadata jobMetadata = + AsyncQueryJobMetadata.builder() + .queryId("query1") + .accountId("account1") + .applicationId("app1") + .jobId("job1") + .resultIndex("result1") + .sessionId("session1") + .datasourceName("datasource1") + .jobType(JobType.INTERACTIVE) + .indexName("index1") + .metadata(XContentSerializerUtil.buildMetadata(1L, 1L)) + .build(); + + XContentBuilder xContentBuilder = serializer.toXContent(jobMetadata, ToXContent.EMPTY_PARAMS); + String json = xContentBuilder.toString(); + + assertEquals(true, json.contains("\"queryId\":\"query1\"")); + assertEquals(true, json.contains("\"type\":\"jobmeta\"")); + assertEquals(true, json.contains("\"jobId\":\"job1\"")); + assertEquals(true, json.contains("\"accountId\":\"account1\"")); + assertEquals(true, json.contains("\"applicationId\":\"app1\"")); + assertEquals(true, json.contains("\"resultIndex\":\"result1\"")); + assertEquals(true, json.contains("\"sessionId\":\"session1\"")); + assertEquals(true, json.contains("\"dataSourceName\":\"datasource1\"")); + assertEquals(true, json.contains("\"jobType\":\"interactive\"")); + assertEquals(true, json.contains("\"indexName\":\"index1\"")); + } + + @Test + void fromXContentShouldDeserializeAsyncQueryJobMetadata() throws Exception { + String json = getBaseJson().toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + AsyncQueryJobMetadata jobMetadata = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("query1", jobMetadata.getQueryId()); + assertEquals("job1", jobMetadata.getJobId()); + assertEquals("account1", jobMetadata.getAccountId()); + assertEquals("app1", jobMetadata.getApplicationId()); + assertEquals("result1", jobMetadata.getResultIndex()); + assertEquals("session1", jobMetadata.getSessionId()); + assertEquals("datasource1", jobMetadata.getDatasourceName()); + assertEquals(JobType.INTERACTIVE, jobMetadata.getJobType()); + assertEquals("index1", jobMetadata.getIndexName()); + } + + @Test + void fromXContentShouldThrowExceptionWhenMissingJobId() throws Exception { + String json = getJsonWithout("jobId").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + assertThrows(IllegalArgumentException.class, () -> serializer.fromXContent(parser, 1L, 1L)); + } + + @Test + void fromXContentShouldThrowExceptionWhenMissingApplicationId() throws Exception { + String json = getJsonWithout("applicationId").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + assertThrows(IllegalArgumentException.class, () -> serializer.fromXContent(parser, 1L, 1L)); + } + + @Test + void fromXContentShouldThrowExceptionWhenUnknownFields() throws Exception { + String json = getBaseJson().put("unknownAttr", "index1").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + 
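// Parsing is strict: an attribute the serializer does not recognize should fail + // deserialization outright rather than being silently dropped. +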
assertThrows(IllegalArgumentException.class, () -> serializer.fromXContent(parser, 1L, 1L)); + } + + @Test + void fromXContentShouldDeserializeAsyncQueryWithJobTypeNull() throws Exception { + String json = getBaseJson().put("jobType", "").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + AsyncQueryJobMetadata jobMetadata = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("query1", jobMetadata.getQueryId()); + assertEquals("job1", jobMetadata.getJobId()); + assertEquals("account1", jobMetadata.getAccountId()); + assertEquals("app1", jobMetadata.getApplicationId()); + assertEquals("result1", jobMetadata.getResultIndex()); + assertEquals("session1", jobMetadata.getSessionId()); + assertEquals("datasource1", jobMetadata.getDatasourceName()); + assertNull(jobMetadata.getJobType()); + assertEquals("index1", jobMetadata.getIndexName()); + } + + @Test + void fromXContentShouldDeserializeAsyncQueryWithAccountIdNull() throws Exception { + String json = getJsonWithout("accountId").put("jobType", "").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + AsyncQueryJobMetadata jobMetadata = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("query1", jobMetadata.getQueryId()); + assertEquals("job1", jobMetadata.getJobId()); + assertEquals("app1", jobMetadata.getApplicationId()); + assertEquals("result1", jobMetadata.getResultIndex()); + assertEquals("session1", jobMetadata.getSessionId()); + assertEquals("datasource1", jobMetadata.getDatasourceName()); + assertNull(jobMetadata.getJobType()); + assertEquals("index1", jobMetadata.getIndexName()); + } + + @Test + void fromXContentShouldThrowExceptionWithoutJobId() throws Exception { + String json = getJsonWithout("jobId").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + assertThrows(IllegalArgumentException.class, () -> serializer.fromXContent(parser, 1L, 1L)); + } + + private JSONObject getJsonWithout(String... 
attrs) { + JSONObject result = getBaseJson(); + for (String attr : attrs) { + result.remove(attr); + } + return result; + } + + private JSONObject getBaseJson() { + return new JSONObject() + .put("queryId", "query1") + .put("type", "jobmeta") + .put("jobId", "job1") + .put("accountId", "account1") + .put("applicationId", "app1") + .put("resultIndex", "result1") + .put("sessionId", "session1") + .put("dataSourceName", "datasource1") + .put("jobType", "interactive") + .put("indexName", "index1"); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializerTest.java new file mode 100644 index 0000000000..0d6d5f3119 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/FlintIndexStateModelXContentSerializerTest.java @@ -0,0 +1,115 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; + +import org.json.JSONObject; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.flint.FlintIndexState; +import org.opensearch.sql.spark.flint.FlintIndexStateModel; + +@ExtendWith(MockitoExtension.class) +class FlintIndexStateModelXContentSerializerTest { + + private FlintIndexStateModelXContentSerializer serializer = + new FlintIndexStateModelXContentSerializer(); + + @Test + void toXContentShouldSerializeFlintIndexStateModel() throws Exception { + FlintIndexStateModel flintIndexStateModel = + FlintIndexStateModel.builder() + .indexState(FlintIndexState.ACTIVE) + .accountId("account1") + .applicationId("app1") + .jobId("job1") + .latestId("latest1") + .datasourceName("datasource1") + .lastUpdateTime(System.currentTimeMillis()) + .error(null) + .build(); + + XContentBuilder xContentBuilder = + serializer.toXContent(flintIndexStateModel, ToXContent.EMPTY_PARAMS); + String json = xContentBuilder.toString(); + + assertEquals(true, json.contains("\"version\":\"1.0\"")); + assertEquals(true, json.contains("\"type\":\"flintindexstate\"")); + assertEquals(true, json.contains("\"state\":\"active\"")); + assertEquals(true, json.contains("\"accountId\":\"account1\"")); + assertEquals(true, json.contains("\"applicationId\":\"app1\"")); + assertEquals(true, json.contains("\"jobId\":\"job1\"")); + assertEquals(true, json.contains("\"latestId\":\"latest1\"")); + assertEquals(true, json.contains("\"dataSourceName\":\"datasource1\"")); + } + + @Test + void fromXContentShouldDeserializeFlintIndexStateModel() throws Exception { + String json = getBaseJson().toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + FlintIndexStateModel flintIndexStateModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals(FlintIndexState.ACTIVE, flintIndexStateModel.getIndexState()); + assertEquals("account1", flintIndexStateModel.getAccountId()); + assertEquals("app1", 
flintIndexStateModel.getApplicationId()); + assertEquals("job1", flintIndexStateModel.getJobId()); + assertEquals("latest1", flintIndexStateModel.getLatestId()); + assertEquals("datasource1", flintIndexStateModel.getDatasourceName()); + } + + @Test + void fromXContentShouldDeserializeFlintIndexStateModelWithoutAccountId() throws Exception { + String json = getJsonWithout("accountId").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + FlintIndexStateModel flintIndexStateModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals(FlintIndexState.ACTIVE, flintIndexStateModel.getIndexState()); + assertNull(flintIndexStateModel.getAccountId()); + assertEquals("app1", flintIndexStateModel.getApplicationId()); + assertEquals("job1", flintIndexStateModel.getJobId()); + assertEquals("latest1", flintIndexStateModel.getLatestId()); + assertEquals("datasource1", flintIndexStateModel.getDatasourceName()); + } + + private JSONObject getJsonWithout(String attr) { + JSONObject result = getBaseJson(); + result.remove(attr); + return result; + } + + private JSONObject getBaseJson() { + return new JSONObject() + .put("version", "1.0") + .put("type", "flintindexstate") + .put("state", "active") + .put("statementId", "statement1") + .put("sessionId", "session1") + .put("accountId", "account1") + .put("applicationId", "app1") + .put("jobId", "job1") + .put("latestId", "latest1") + .put("dataSourceName", "datasource1") + .put("lastUpdateTime", 1623456789) + .put("error", ""); + } + + @Test + void fromXContentThrowsExceptionWhenParsingInvalidContent() { + XContentParser parser = mock(XContentParser.class); + + assertThrows(RuntimeException.class, () -> serializer.fromXContent(parser, 0, 0)); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializerTest.java new file mode 100644 index 0000000000..edf88bad42 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/IndexDMLResultXContentSerializerTest.java @@ -0,0 +1,75 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.IOException; +import org.junit.jupiter.api.Test; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; + +class IndexDMLResultXContentSerializerTest { + + private final IndexDMLResultXContentSerializer serializer = + new IndexDMLResultXContentSerializer(); + + @Test + void toXContentShouldSerializeIndexDMLResult() throws IOException { + IndexDMLResult dmlResult = + IndexDMLResult.builder() + .queryId("query1") + .status("SUCCESS") + .error(null) + .datasourceName("datasource1") + .queryRunTime(1000L) + .updateTime(2000L) + .build(); + + XContentBuilder xContentBuilder = serializer.toXContent(dmlResult, ToXContent.EMPTY_PARAMS); + String json = xContentBuilder.toString(); + + assertTrue(json.contains("\"queryId\":\"query1\"")); + assertTrue(json.contains("\"status\":\"SUCCESS\"")); + assertTrue(json.contains("\"error\":null")); + assertTrue(json.contains("\"dataSourceName\":\"datasource1\"")); + assertTrue(json.contains("\"queryRunTime\":1000")); + assertTrue(json.contains("\"updateTime\":2000")); + 
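// An IndexDMLResult carries no rows or schema, so both are expected to + // serialize as empty arrays. +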
assertTrue(json.contains("\"result\":[]")); + assertTrue(json.contains("\"schema\":[]")); + } + + @Test + void toXContentShouldHandleErrorInIndexDMLResult() throws IOException { + IndexDMLResult dmlResult = + IndexDMLResult.builder() + .queryId("query1") + .status("FAILURE") + .error("An error occurred") + .datasourceName("datasource1") + .queryRunTime(1000L) + .updateTime(2000L) + .build(); + + XContentBuilder xContentBuilder = serializer.toXContent(dmlResult, ToXContent.EMPTY_PARAMS); + + String json = xContentBuilder.toString(); + assertTrue(json.contains("\"queryId\":\"query1\"")); + assertTrue(json.contains("\"status\":\"FAILURE\"")); + assertTrue(json.contains("\"error\":\"An error occurred\"")); + assertTrue(json.contains("\"dataSourceName\":\"datasource1\"")); + assertTrue(json.contains("\"queryRunTime\":1000")); + assertTrue(json.contains("\"updateTime\":2000")); + assertTrue(json.contains("\"result\":[]")); + assertTrue(json.contains("\"schema\":[]")); + } + + @Test + void fromXContentShouldThrowUnsupportedOperationException() { + assertThrows(UnsupportedOperationException.class, () -> serializer.fromXContent(null, 0L, 0L)); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializerTest.java new file mode 100644 index 0000000000..0b32bbf020 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/SessionModelXContentSerializerTest.java @@ -0,0 +1,117 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; + +import org.json.JSONObject; +import org.junit.jupiter.api.Test; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.execution.session.SessionModel; +import org.opensearch.sql.spark.execution.session.SessionState; +import org.opensearch.sql.spark.execution.session.SessionType; + +class SessionModelXContentSerializerTest { + + private final SessionModelXContentSerializer serializer = new SessionModelXContentSerializer(); + + @Test + void toXContentShouldSerializeSessionModel() throws Exception { + SessionModel sessionModel = + SessionModel.builder() + .version("1.0") + .sessionType(SessionType.INTERACTIVE) + .sessionId("session1") + .sessionState(SessionState.FAIL) + .datasourceName("datasource1") + .accountId("account1") + .applicationId("app1") + .jobId("job1") + .lastUpdateTime(System.currentTimeMillis()) + .error(null) + .build(); + + XContentBuilder xContentBuilder = serializer.toXContent(sessionModel, ToXContent.EMPTY_PARAMS); + + String json = xContentBuilder.toString(); + assertEquals(true, json.contains("\"version\":\"1.0\"")); + assertEquals(true, json.contains("\"type\":\"session\"")); + assertEquals(true, json.contains("\"sessionType\":\"interactive\"")); + assertEquals(true, json.contains("\"sessionId\":\"session1\"")); + assertEquals(true, json.contains("\"state\":\"fail\"")); + assertEquals(true, json.contains("\"dataSourceName\":\"datasource1\"")); + assertEquals(true, json.contains("\"accountId\":\"account1\"")); + 
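// accountId is optional in the serialized form; the dedicated test below covers + // deserialization when it is absent. +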
assertEquals(true, json.contains("\"applicationId\":\"app1\"")); + assertEquals(true, json.contains("\"jobId\":\"job1\"")); + } + + @Test + void fromXContentShouldDeserializeSessionModel() throws Exception { + String json = getBaseJson().toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + SessionModel sessionModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("1.0", sessionModel.getVersion()); + assertEquals(SessionType.INTERACTIVE, sessionModel.getSessionType()); + assertEquals("session1", sessionModel.getSessionId()); + assertEquals(SessionState.FAIL, sessionModel.getSessionState()); + assertEquals("datasource1", sessionModel.getDatasourceName()); + assertEquals("account1", sessionModel.getAccountId()); + assertEquals("app1", sessionModel.getApplicationId()); + assertEquals("job1", sessionModel.getJobId()); + } + + @Test + void fromXContentShouldDeserializeSessionModelWithoutAccountId() throws Exception { + String json = getJsonWithout("accountId").toString(); + XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + SessionModel sessionModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("1.0", sessionModel.getVersion()); + assertEquals(SessionType.INTERACTIVE, sessionModel.getSessionType()); + assertEquals("session1", sessionModel.getSessionId()); + assertEquals(SessionState.FAIL, sessionModel.getSessionState()); + assertEquals("datasource1", sessionModel.getDatasourceName()); + assertNull(sessionModel.getAccountId()); + assertEquals("app1", sessionModel.getApplicationId()); + assertEquals("job1", sessionModel.getJobId()); + } + + private JSONObject getJsonWithout(String attr) { + JSONObject result = getBaseJson(); + result.remove(attr); + return result; + } + + private JSONObject getBaseJson() { + return new JSONObject() + .put("version", "1.0") + .put("type", "session") + .put("sessionType", "interactive") + .put("sessionId", "session1") + .put("state", "fail") + .put("dataSourceName", "datasource1") + .put("accountId", "account1") + .put("applicationId", "app1") + .put("jobId", "job1") + .put("lastUpdateTime", 1623456789) + .put("error", ""); + } + + @Test + void fromXContentThrowsExceptionWhenParsingInvalidContent() { + XContentParser parser = mock(XContentParser.class); + + assertThrows(RuntimeException.class, () -> serializer.fromXContent(parser, 0, 0)); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializerTest.java new file mode 100644 index 0000000000..f85667930e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/StatementModelXContentSerializerTest.java @@ -0,0 +1,133 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; + +import org.json.JSONObject; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import 
org.opensearch.sql.spark.execution.statement.StatementId; +import org.opensearch.sql.spark.execution.statement.StatementModel; +import org.opensearch.sql.spark.execution.statement.StatementState; +import org.opensearch.sql.spark.rest.model.LangType; + +@ExtendWith(MockitoExtension.class) +class StatementModelXContentSerializerTest { + + private StatementModelXContentSerializer serializer; + + @Test + void toXContentShouldSerializeStatementModel() throws Exception { + serializer = new StatementModelXContentSerializer(); + StatementModel statementModel = + StatementModel.builder() + .version("1.0") + .statementState(StatementState.RUNNING) + .statementId(new StatementId("statement1")) + .sessionId("session1") + .accountId("account1") + .applicationId("app1") + .jobId("job1") + .langType(LangType.SQL) + .datasourceName("datasource1") + .query("SELECT * FROM table") + .queryId("query1") + .submitTime(System.currentTimeMillis()) + .error(null) + .build(); + + XContentBuilder xContentBuilder = + serializer.toXContent(statementModel, ToXContent.EMPTY_PARAMS); + + String json = xContentBuilder.toString(); + assertEquals(true, json.contains("\"version\":\"1.0\"")); + assertEquals(true, json.contains("\"state\":\"running\"")); + assertEquals(true, json.contains("\"statementId\":\"statement1\"")); + assertEquals(true, json.contains("\"accountId\":\"account1\"")); + assertEquals(true, json.contains("\"applicationId\":\"app1\"")); + assertEquals(true, json.contains("\"jobId\":\"job1\"")); + } + + @Test + void fromXContentShouldDeserializeStatementModel() throws Exception { + StatementModelXContentSerializer serializer = new StatementModelXContentSerializer(); + String json = getBaseJson().toString(); + final XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + StatementModel statementModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("1.0", statementModel.getVersion()); + assertEquals(StatementState.RUNNING, statementModel.getStatementState()); + assertEquals("statement1", statementModel.getStatementId().getId()); + assertEquals("session1", statementModel.getSessionId()); + assertEquals("account1", statementModel.getAccountId()); + } + + @Test + void fromXContentShouldDeserializeStatementModelWithoutAccountId() throws Exception { + StatementModelXContentSerializer serializer = new StatementModelXContentSerializer(); + String json = getJsonWithout("accountId").toString(); + final XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + StatementModel statementModel = serializer.fromXContent(parser, 1L, 1L); + + assertEquals("1.0", statementModel.getVersion()); + assertEquals(StatementState.RUNNING, statementModel.getStatementState()); + assertEquals("statement1", statementModel.getStatementId().getId()); + assertEquals("session1", statementModel.getSessionId()); + assertNull(statementModel.getAccountId()); + } + + @Test + void fromXContentThrowsExceptionWhenParsingInvalidContent() { + XContentParser parser = mock(XContentParser.class); + + assertThrows(RuntimeException.class, () -> serializer.fromXContent(parser, 0, 0)); + } + + @Test + void fromXContentShouldThrowExceptionForUnexpectedField() throws Exception { + StatementModelXContentSerializer serializer = new StatementModelXContentSerializer(); + String json = getBaseJson().put("unexpectedField", "someValue").toString(); + final XContentParser parser = XContentSerializerTestUtil.prepareParser(json); + + IllegalArgumentException exception = + assertThrows(IllegalArgumentException.class, () -> 
serializer.fromXContent(parser, 1L, 1L)); + assertEquals("Unexpected field: unexpectedField", exception.getMessage()); + } + + private JSONObject getJsonWithout(String attr) { + JSONObject result = getBaseJson(); + result.remove(attr); + return result; + } + + private JSONObject getBaseJson() { + return new JSONObject() + .put("version", "1.0") + .put("type", "statement") + .put("state", "running") + .put("statementId", "statement1") + .put("sessionId", "session1") + .put("accountId", "account1") + .put("applicationId", "app1") + .put("jobId", "job1") + .put("lang", "SQL") + .put("dataSourceName", "datasource1") + .put("query", "SELECT * FROM table") + .put("queryId", "query1") + .put("submitTime", 1623456789) + .put("error", ""); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerTestUtil.java b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerTestUtil.java new file mode 100644 index 0000000000..a9356b6908 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/execution/xcontent/XContentSerializerTestUtil.java @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.execution.xcontent; + +import java.io.IOException; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; + +public class XContentSerializerTestUtil { + public static XContentParser prepareParser(String json) throws IOException { + XContentParser parser = + XContentType.JSON + .xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json); + parser.nextToken(); + return parser; + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImplTest.java b/async-query/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImplTest.java new file mode 100644 index 0000000000..f6baa82dd2 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataServiceImplTest.java @@ -0,0 +1,190 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.flint; + +import static org.mockito.Answers.RETURNS_DEEP_STUBS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.base.Charsets; +import com.google.common.io.Resources; +import java.io.IOException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; +import lombok.SneakyThrows; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.dispatcher.model.FlintIndexOptions; +import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; 
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType;
+import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
+
+@ExtendWith(MockitoExtension.class)
+public class FlintIndexMetadataServiceImplTest {
+  @Mock(answer = RETURNS_DEEP_STUBS)
+  private Client client;
+
+  @SneakyThrows
+  @Test
+  void testGetJobIdFromFlintSkippingIndexMetadata() {
+    URL url =
+        Resources.getResource(
+            "flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json");
+    String mappings = Resources.toString(url, Charsets.UTF_8);
+    String indexName = "flint_mys3_default_http_logs_skipping_index";
+    mockNodeClientIndicesMappings(indexName, mappings);
+    FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client);
+    IndexQueryDetails indexQueryDetails =
+        IndexQueryDetails.builder()
+            .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs"))
+            .indexOptions(new FlintIndexOptions())
+            .indexQueryActionType(IndexQueryActionType.DROP)
+            .indexType(FlintIndexType.SKIPPING)
+            .build();
+    Map<String, FlintIndexMetadata> indexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(indexQueryDetails.openSearchIndexName());
+    Assertions.assertEquals(
+        "00fhelvq7peuao0",
+        indexMetadataMap.get(indexQueryDetails.openSearchIndexName()).getJobId());
+  }
+
+  @SneakyThrows
+  @Test
+  void testGetJobIdFromFlintSkippingIndexMetadataWithIndexState() {
+    URL url =
+        Resources.getResource(
+            "flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json");
+    String mappings = Resources.toString(url, Charsets.UTF_8);
+    String indexName = "flint_mys3_default_http_logs_skipping_index";
+    mockNodeClientIndicesMappings(indexName, mappings);
+    FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client);
+    IndexQueryDetails indexQueryDetails =
+        IndexQueryDetails.builder()
+            .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs"))
+            .indexOptions(new FlintIndexOptions())
+            .indexQueryActionType(IndexQueryActionType.DROP)
+            .indexType(FlintIndexType.SKIPPING)
+            .build();
+    Map<String, FlintIndexMetadata> indexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(indexQueryDetails.openSearchIndexName());
+    FlintIndexMetadata metadata = indexMetadataMap.get(indexQueryDetails.openSearchIndexName());
+    Assertions.assertEquals("00fhelvq7peuao0", metadata.getJobId());
+  }
+
+  @SneakyThrows
+  @Test
+  void testGetJobIdFromFlintCoveringIndexMetadata() {
+    URL url =
+        Resources.getResource("flint-index-mappings/flint_mys3_default_http_logs_cv1_index.json");
+    String mappings = Resources.toString(url, Charsets.UTF_8);
+    String indexName = "flint_mys3_default_http_logs_cv1_index";
+    mockNodeClientIndicesMappings(indexName, mappings);
+    IndexQueryDetails indexQueryDetails =
+        IndexQueryDetails.builder()
+            .indexName("cv1")
+            .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs"))
+            .indexOptions(new FlintIndexOptions())
+            .indexQueryActionType(IndexQueryActionType.DROP)
+            .indexType(FlintIndexType.COVERING)
+            .build();
+    FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client);
+    Map<String, FlintIndexMetadata> indexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(indexQueryDetails.openSearchIndexName());
+    Assertions.assertEquals(
+        "00fdmvv9hp8u0o0q",
+        indexMetadataMap.get(indexQueryDetails.openSearchIndexName()).getJobId());
+  }
+
+  @SneakyThrows
+  @Test
+  void testGetJobIDWithNPEException() {
+    URL url = Resources.getResource("flint-index-mappings/npe_mapping.json");
+    String mappings = Resources.toString(url, Charsets.UTF_8);
+    String indexName = "flint_mys3_default_http_logs_cv1_index";
+    mockNodeClientIndicesMappings(indexName, mappings);
+    FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client);
+    IndexQueryDetails indexQueryDetails =
+        IndexQueryDetails.builder()
+            .indexName("cv1")
+            .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs"))
+            .indexOptions(new FlintIndexOptions())
+            .indexQueryActionType(IndexQueryActionType.DROP)
+            .indexType(FlintIndexType.COVERING)
+            .build();
+    Map<String, FlintIndexMetadata> flintIndexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata(indexQueryDetails.openSearchIndexName());
+    Assertions.assertFalse(
+        flintIndexMetadataMap.containsKey("flint_mys3_default_http_logs_cv1_index"));
+  }
+
+  @SneakyThrows
+  @Test
+  void testGetJobIDWithNPEExceptionForMultipleIndices() {
+    HashMap<String, String> indexMappingsMap = new HashMap<>();
+    URL url = Resources.getResource("flint-index-mappings/npe_mapping.json");
+    String mappings = Resources.toString(url, Charsets.UTF_8);
+    String indexName = "flint_mys3_default_http_logs_cv1_index";
+    indexMappingsMap.put(indexName, mappings);
+    url =
+        Resources.getResource(
+            "flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json");
+    mappings = Resources.toString(url, Charsets.UTF_8);
+    indexName = "flint_mys3_default_http_logs_skipping_index";
+    indexMappingsMap.put(indexName, mappings);
+    mockNodeClientIndicesMappings("flint_mys3*", indexMappingsMap);
+    FlintIndexMetadataService flintIndexMetadataService = new FlintIndexMetadataServiceImpl(client);
+    Map<String, FlintIndexMetadata> flintIndexMetadataMap =
+        flintIndexMetadataService.getFlintIndexMetadata("flint_mys3*");
+    Assertions.assertFalse(
+        flintIndexMetadataMap.containsKey("flint_mys3_default_http_logs_cv1_index"));
+    Assertions.assertTrue(
+        flintIndexMetadataMap.containsKey("flint_mys3_default_http_logs_skipping_index"));
+  }
+
+  @SneakyThrows
+  public void mockNodeClientIndicesMappings(String indexName, String mappings) {
+    GetMappingsResponse mockResponse = mock(GetMappingsResponse.class);
+    when(client.admin().indices().prepareGetMappings().setIndices(indexName).get())
+        .thenReturn(mockResponse);
+    Map<String, MappingMetadata> metadata;
+    metadata = Map.of(indexName, IndexMetadata.fromXContent(createParser(mappings)).mapping());
+    when(mockResponse.getMappings()).thenReturn(metadata);
+  }
+
+  @SneakyThrows
+  public void mockNodeClientIndicesMappings(
+      String indexPattern, HashMap<String, String> indexMappingsMap) {
+    GetMappingsResponse mockResponse = mock(GetMappingsResponse.class);
+    when(client.admin().indices().prepareGetMappings().setIndices(indexPattern).get())
+        .thenReturn(mockResponse);
+    Map<String, MappingMetadata> metadataMap = new HashMap<>();
+    for (String indexName : indexMappingsMap.keySet()) {
+      metadataMap.put(
+          indexName,
+          IndexMetadata.fromXContent(createParser(indexMappingsMap.get(indexName))).mapping());
+    }
+    when(mockResponse.getMappings()).thenReturn(metadataMap);
+  }
+
+  private XContentParser createParser(String mappings) throws IOException {
+    return XContentType.JSON
+        .xContent()
+        .createParser(
+            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mappings);
+  }
+}
diff --git a/async-query/src/test/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelServiceTest.java b/async-query/src/test/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelServiceTest.java
new file mode 100644
index 0000000000..977f77b397
--- /dev/null
+++
b/async-query/src/test/java/org/opensearch/sql/spark/flint/OpenSearchFlintIndexStateModelServiceTest.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.flint;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+import java.util.Optional;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opensearch.sql.spark.execution.statestore.StateStore;
+import org.opensearch.sql.spark.execution.xcontent.FlintIndexStateModelXContentSerializer;
+
+@ExtendWith(MockitoExtension.class)
+public class OpenSearchFlintIndexStateModelServiceTest {
+
+  public static final String DATASOURCE = "DATASOURCE";
+  public static final String ID = "ID";
+
+  @Mock StateStore mockStateStore;
+  @Mock FlintIndexStateModel flintIndexStateModel;
+  @Mock FlintIndexState flintIndexState;
+  @Mock FlintIndexStateModel responseFlintIndexStateModel;
+  @Mock FlintIndexStateModelXContentSerializer flintIndexStateModelXContentSerializer;
+
+  @InjectMocks OpenSearchFlintIndexStateModelService openSearchFlintIndexStateModelService;
+
+  @Test
+  void updateFlintIndexState() {
+    when(mockStateStore.updateState(any(), any(), any(), any()))
+        .thenReturn(responseFlintIndexStateModel);
+
+    FlintIndexStateModel result =
+        openSearchFlintIndexStateModelService.updateFlintIndexState(
+            flintIndexStateModel, flintIndexState, DATASOURCE);
+
+    assertEquals(responseFlintIndexStateModel, result);
+  }
+
+  @Test
+  void getFlintIndexStateModel() {
+    when(mockStateStore.get(any(), any(), any()))
+        .thenReturn(Optional.of(responseFlintIndexStateModel));
+
+    Optional<FlintIndexStateModel> result =
+        openSearchFlintIndexStateModelService.getFlintIndexStateModel(ID, DATASOURCE);
+
+    assertEquals(responseFlintIndexStateModel, result.get());
+  }
+
+  @Test
+  void createFlintIndexStateModel() {
+    when(mockStateStore.create(any(), any(), any(), any()))
+        .thenReturn(responseFlintIndexStateModel);
+    when(flintIndexStateModel.getDatasourceName()).thenReturn(DATASOURCE);
+
+    FlintIndexStateModel result =
+        openSearchFlintIndexStateModelService.createFlintIndexStateModel(flintIndexStateModel);
+
+    assertEquals(responseFlintIndexStateModel, result);
+  }
+
+  @Test
+  void deleteFlintIndexStateModel() {
+    when(mockStateStore.delete(any(), any())).thenReturn(true);
+
+    boolean result =
+        openSearchFlintIndexStateModelService.deleteFlintIndexStateModel(ID, DATASOURCE);
+
+    assertTrue(result);
+  }
+}
diff --git a/async-query/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java
new file mode 100644
index 0000000000..558f7f7b3a
--- /dev/null
+++ b/async-query/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.leasemanager;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opensearch.sql.common.setting.Settings;
+import org.opensearch.sql.spark.dispatcher.model.JobType;
+import org.opensearch.sql.spark.execution.statestore.StateStore;
+import org.opensearch.sql.spark.leasemanager.model.LeaseRequest;
+
+@ExtendWith(MockitoExtension.class)
+class DefaultLeaseManagerTest {
+  @Mock private Settings settings;
+
+  @Mock private StateStore stateStore;
+
+  @Test
+  public void concurrentSessionRuleOnlyAppliesToInteractiveQuery() {
+    assertTrue(
+        new DefaultLeaseManager.ConcurrentSessionRule(settings, stateStore)
+            .test(new LeaseRequest(JobType.BATCH, "mys3")));
+    assertTrue(
+        new DefaultLeaseManager.ConcurrentSessionRule(settings, stateStore)
+            .test(new LeaseRequest(JobType.STREAMING, "mys3")));
+  }
+
+  @Test
+  public void concurrentRefreshRuleDoesNotApplyToInteractiveQuery() {
+    assertTrue(
+        new DefaultLeaseManager.ConcurrentRefreshJobRule(settings, stateStore)
+            .test(new LeaseRequest(JobType.INTERACTIVE, "mys3")));
+  }
+}
diff --git a/async-query/src/test/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposerTest.java
new file mode 100644
index 0000000000..55e62d52f0
--- /dev/null
+++ b/async-query/src/test/java/org/opensearch/sql/spark/parameter/S3GlueDataSourceSparkParameterComposerTest.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.sql.spark.parameter;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+import com.google.common.collect.ImmutableMap;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.opensearch.sql.datasource.model.DataSourceMetadata;
+import org.opensearch.sql.datasource.model.DataSourceStatus;
+import org.opensearch.sql.datasource.model.DataSourceType;
+import org.opensearch.sql.datasources.auth.AuthenticationType;
+import org.opensearch.sql.datasources.glue.GlueDataSourceFactory;
+import org.opensearch.sql.spark.asyncquery.model.NullAsyncQueryRequestContext;
+import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
+
+@ExtendWith(MockitoExtension.class)
+class S3GlueDataSourceSparkParameterComposerTest {
+
+  public static final String VALID_URI = "https://test.host.com:9200";
+  public static final String INVALID_URI = "http://test/\r\n";
+  public static final String USERNAME = "USERNAME";
+  public static final String PASSWORD = "PASSWORD";
+  public static final String REGION = "REGION";
+  public static final String TRUE = "true";
+  public static final String ROLE_ARN = "ROLE_ARN";
+
+  private static final String COMMON_EXPECTED_PARAMS =
+      " --class org.apache.spark.sql.FlintJob "
+          + getConfList(
+              "spark.emr-serverless.driverEnv.ASSUME_ROLE_CREDENTIALS_ROLE_ARN=ROLE_ARN",
+              "spark.executorEnv.ASSUME_ROLE_CREDENTIALS_ROLE_ARN=ROLE_ARN",
+              "spark.hive.metastore.glue.role.arn=ROLE_ARN",
+              "spark.sql.catalog.DATASOURCE_NAME=org.opensearch.sql.FlintDelegatingSessionCatalog",
+              "spark.flint.datasource.name=DATASOURCE_NAME",
+              "spark.emr-serverless.lakeformation.enabled=true",
+              "spark.flint.optimizer.covering.enabled=false",
+              "spark.datasource.flint.host=test.host.com",
+              "spark.datasource.flint.port=9200",
+              "spark.datasource.flint.scheme=https");
+
+  @Mock DispatchQueryRequest dispatchQueryRequest;
+
+  @Test
+  public void testBasicAuth() {
+    DataSourceMetadata dataSourceMetadata =
+        getDataSourceMetadata(AuthenticationType.BASICAUTH, VALID_URI);
+    SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters();
+
+    new S3GlueDataSourceSparkParameterComposer()
+        .compose(
+            dataSourceMetadata,
+            sparkSubmitParameters,
+            dispatchQueryRequest,
+            new NullAsyncQueryRequestContext());
+
+    assertEquals(
+        COMMON_EXPECTED_PARAMS
+            + getConfList(
+                "spark.datasource.flint.auth=basic",
+                "spark.datasource.flint.auth.username=USERNAME",
+                "spark.datasource.flint.auth.password=PASSWORD"),
+        sparkSubmitParameters.toString());
+  }
+
+  @Test
+  public void testComposeWithSigV4Auth() {
+    DataSourceMetadata dataSourceMetadata =
+        getDataSourceMetadata(AuthenticationType.AWSSIGV4AUTH, VALID_URI);
+    SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters();
+
+    new S3GlueDataSourceSparkParameterComposer()
+        .compose(
+            dataSourceMetadata,
+            sparkSubmitParameters,
+            dispatchQueryRequest,
+            new NullAsyncQueryRequestContext());
+
+    assertEquals(
+        COMMON_EXPECTED_PARAMS
+            + getConfList(
+                "spark.datasource.flint.auth=sigv4", "spark.datasource.flint.region=REGION"),
+        sparkSubmitParameters.toString());
+  }
+
+  @Test
+  public void testComposeWithNoAuth() {
+    DataSourceMetadata dataSourceMetadata =
+        getDataSourceMetadata(AuthenticationType.NOAUTH, VALID_URI);
+    SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters();
+
+    new S3GlueDataSourceSparkParameterComposer()
+        .compose(
+            dataSourceMetadata,
+            sparkSubmitParameters,
+            dispatchQueryRequest,
+            new NullAsyncQueryRequestContext());
+
+    assertEquals(
+        COMMON_EXPECTED_PARAMS + getConfList("spark.datasource.flint.auth=noauth"),
+        sparkSubmitParameters.toString());
+  }
+
+  @Test
+  public void testComposeWithBadUri() {
+    DataSourceMetadata dataSourceMetadata =
+        getDataSourceMetadata(AuthenticationType.NOAUTH, INVALID_URI);
+    SparkSubmitParameters sparkSubmitParameters = new SparkSubmitParameters();
+
+    assertThrows(
+        IllegalArgumentException.class,
+        () ->
+            new S3GlueDataSourceSparkParameterComposer()
+                .compose(
+                    dataSourceMetadata,
+                    sparkSubmitParameters,
+                    dispatchQueryRequest,
+                    new NullAsyncQueryRequestContext()));
+  }
+
+  private DataSourceMetadata getDataSourceMetadata(
+      AuthenticationType authenticationType, String uri) {
+    return new DataSourceMetadata.Builder()
+        .setConnector(DataSourceType.S3GLUE)
+        .setName("DATASOURCE_NAME")
+        .setDescription("DESCRIPTION")
+        .setResultIndex("RESULT_INDEX")
+        .setDataSourceStatus(DataSourceStatus.ACTIVE)
+        .setProperties(getProperties(authenticationType, uri))
+        .build();
+  }
+
+  private Map<String, String> getProperties(AuthenticationType authType, String uri) {
+    return ImmutableMap.<String, String>builder()
+        .put(GlueDataSourceFactory.GLUE_ROLE_ARN, ROLE_ARN)
+        .put(GlueDataSourceFactory.GLUE_LAKEFORMATION_ENABLED, TRUE)
+        .put(GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_URI, uri)
+        .put(GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH, authType.getName())
+        .put(GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH_USERNAME, USERNAME)
+        .put(GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_AUTH_PASSWORD, PASSWORD)
+        .put(GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_REGION, REGION)
+        .build();
+  }
+
+  private static String getConfList(String...
params) { + return Arrays.stream(params) + .map(param -> String.format(" --conf %s ", param)) + .collect(Collectors.joining()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReaderTest.java b/async-query/src/test/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReaderTest.java new file mode 100644 index 0000000000..66230464e5 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/response/OpenSearchJobExecutionResponseReaderTest.java @@ -0,0 +1,104 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.response; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; + +import java.util.Map; +import org.apache.lucene.search.TotalHits; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; + +@ExtendWith(MockitoExtension.class) +public class OpenSearchJobExecutionResponseReaderTest { + @Mock private Client client; + @Mock private SearchResponse searchResponse; + @Mock private SearchHit searchHit; + @Mock private ActionFuture searchResponseActionFuture; + + @InjectMocks OpenSearchJobExecutionResponseReader jobExecutionResponseReader; + + @Test + public void testGetResultFromOpensearchIndex() { + when(client.search(any())).thenReturn(searchResponseActionFuture); + when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse); + when(searchResponse.status()).thenReturn(RestStatus.OK); + when(searchResponse.getHits()) + .thenReturn( + new SearchHits( + new SearchHit[] {searchHit}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F)); + Mockito.when(searchHit.getSourceAsMap()).thenReturn(Map.of("stepId", EMR_JOB_ID)); + + assertFalse(jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, null).isEmpty()); + } + + @Test + public void testGetResultFromCustomIndex() { + when(client.search(any())).thenReturn(searchResponseActionFuture); + when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse); + when(searchResponse.status()).thenReturn(RestStatus.OK); + when(searchResponse.getHits()) + .thenReturn( + new SearchHits( + new SearchHit[] {searchHit}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F)); + Mockito.when(searchHit.getSourceAsMap()).thenReturn(Map.of("stepId", EMR_JOB_ID)); + + assertFalse(jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, "foo").isEmpty()); + } + + @Test + public void testInvalidSearchResponse() { + when(client.search(any())).thenReturn(searchResponseActionFuture); + when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse); + 
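// A response status other than OK is surfaced to the caller as a RuntimeException. +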
when(searchResponse.status()).thenReturn(RestStatus.NO_CONTENT); + + RuntimeException exception = + assertThrows( + RuntimeException.class, + () -> jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, null)); + + Assertions.assertEquals( + "Fetching result from " + + DEFAULT_RESULT_INDEX + + " index failed with status : " + + RestStatus.NO_CONTENT, + exception.getMessage()); + } + + @Test + public void testSearchFailure() { + when(client.search(any())).thenThrow(RuntimeException.class); + + assertThrows( + RuntimeException.class, + () -> jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, null)); + } + + @Test + public void testIndexNotFoundException() { + when(client.search(any())).thenThrow(IndexNotFoundException.class); + + assertTrue(jobExecutionResponseReader.getResultWithJobId(EMR_JOB_ID, "foo").isEmpty()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementActionTest.java b/async-query/src/test/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementActionTest.java new file mode 100644 index 0000000000..ccee3eb642 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/rest/RestAsyncQueryManagementActionTest.java @@ -0,0 +1,83 @@ +package org.opensearch.sql.spark.rest; + +import com.google.gson.Gson; +import com.google.gson.JsonObject; +import lombok.SneakyThrows; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.opensearch.setting.OpenSearchSettings; +import org.opensearch.threadpool.ThreadPool; + +public class RestAsyncQueryManagementActionTest { + + private OpenSearchSettings settings; + private RestRequest request; + private RestChannel channel; + private NodeClient nodeClient; + private ThreadPool threadPool; + private RestAsyncQueryManagementAction unit; + + @BeforeEach + public void setup() { + settings = Mockito.mock(OpenSearchSettings.class); + request = Mockito.mock(RestRequest.class); + channel = Mockito.mock(RestChannel.class); + nodeClient = Mockito.mock(NodeClient.class); + threadPool = Mockito.mock(ThreadPool.class); + + Mockito.when(nodeClient.threadPool()).thenReturn(threadPool); + + unit = new RestAsyncQueryManagementAction(settings); + } + + @Test + @SneakyThrows + public void testWhenDataSourcesAreDisabled() { + setDataSourcesEnabled(false); + unit.handleRequest(request, channel, nodeClient); + Mockito.verifyNoInteractions(nodeClient); + ArgumentCaptor response = ArgumentCaptor.forClass(RestResponse.class); + Mockito.verify(channel, Mockito.times(1)).sendResponse(response.capture()); + Assertions.assertEquals(400, response.getValue().status().getStatus()); + JsonObject actualResponseJson = + new Gson().fromJson(response.getValue().content().utf8ToString(), JsonObject.class); + JsonObject expectedResponseJson = new JsonObject(); + expectedResponseJson.addProperty("status", 400); + expectedResponseJson.add("error", new JsonObject()); + expectedResponseJson.getAsJsonObject("error").addProperty("type", "IllegalAccessException"); + expectedResponseJson.getAsJsonObject("error").addProperty("reason", "Invalid Request"); + expectedResponseJson + .getAsJsonObject("error") + 
.addProperty("details", "plugins.query.datasources.enabled setting is false"); + Assertions.assertEquals(expectedResponseJson, actualResponseJson); + } + + @Test + @SneakyThrows + public void testWhenDataSourcesAreEnabled() { + setDataSourcesEnabled(true); + Mockito.when(request.method()).thenReturn(RestRequest.Method.GET); + unit.handleRequest(request, channel, nodeClient); + Mockito.verify(threadPool, Mockito.times(1)) + .schedule(ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any()); + Mockito.verifyNoInteractions(channel); + } + + @Test + public void testGetName() { + Assertions.assertEquals("async_query_actions", unit.getName()); + } + + private void setDataSourcesEnabled(boolean value) { + Mockito.when(settings.getSettingValue(Settings.Key.DATASOURCES_ENABLED)).thenReturn(value); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQuerySchedulerTest.java b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQuerySchedulerTest.java new file mode 100644 index 0000000000..de86f111f3 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/OpenSearchAsyncQuerySchedulerTest.java @@ -0,0 +1,434 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.scheduler.OpenSearchAsyncQueryScheduler.SCHEDULER_INDEX_NAME; + +import java.io.IOException; +import java.time.Instant; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Answers; +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.index.engine.DocumentMissingException; +import org.opensearch.index.engine.VersionConflictEngineException; +import org.opensearch.jobscheduler.spi.ScheduledJobRunner; +import org.opensearch.sql.spark.scheduler.model.OpenSearchRefreshIndexJobRequest; +import org.opensearch.threadpool.ThreadPool; + +public class OpenSearchAsyncQuerySchedulerTest { + + private static final String TEST_SCHEDULER_INDEX_NAME = "testQS"; + + private static final String TEST_JOB_ID = "testJob"; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private Client client; + + @Mock(answer = 
Answers.RETURNS_DEEP_STUBS) + private ClusterService clusterService; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ThreadPool threadPool; + + @Mock private ActionFuture indexResponseActionFuture; + + @Mock private ActionFuture updateResponseActionFuture; + + @Mock private ActionFuture deleteResponseActionFuture; + + @Mock private ActionFuture createIndexResponseActionFuture; + + @Mock private IndexResponse indexResponse; + + @Mock private UpdateResponse updateResponse; + + private OpenSearchAsyncQueryScheduler scheduler; + + @BeforeEach + public void setup() { + MockitoAnnotations.openMocks(this); + scheduler = new OpenSearchAsyncQueryScheduler(); + scheduler.loadJobResource(client, clusterService, threadPool); + } + + @Test + public void testScheduleJob() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)) + .thenReturn(Boolean.FALSE); + when(client.admin().indices().create(any(CreateIndexRequest.class))) + .thenReturn(createIndexResponseActionFuture); + when(createIndexResponseActionFuture.actionGet()) + .thenReturn(new CreateIndexResponse(true, true, TEST_SCHEDULER_INDEX_NAME)); + when(client.index(any(IndexRequest.class))).thenReturn(indexResponseActionFuture); + + // Test the if case + when(indexResponseActionFuture.actionGet()).thenReturn(indexResponse); + when(indexResponse.getResult()).thenReturn(DocWriteResponse.Result.CREATED); + + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + scheduler.scheduleJob(request); + + // Verify index created + verify(client.admin().indices(), times(1)).create(ArgumentMatchers.any()); + + // Verify doc indexed + ArgumentCaptor captor = ArgumentCaptor.forClass(IndexRequest.class); + verify(client, times(1)).index(captor.capture()); + IndexRequest capturedRequest = captor.getValue(); + assertEquals(request.getName(), capturedRequest.id()); + assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, capturedRequest.getRefreshPolicy()); + } + + @Test + public void testScheduleJobWithExistingJob() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)) + .thenReturn(Boolean.TRUE); + + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + when(client.index(any(IndexRequest.class))).thenThrow(VersionConflictEngineException.class); + + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> { + scheduler.scheduleJob(request); + }); + + verify(client, times(1)).index(ArgumentCaptor.forClass(IndexRequest.class).capture()); + assertEquals("A job already exists with name: testJob", exception.getMessage()); + } + + @Test + public void testScheduleJobWithExceptions() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)) + .thenReturn(Boolean.FALSE); + when(client.admin().indices().create(any(CreateIndexRequest.class))) + .thenReturn(createIndexResponseActionFuture); + when(createIndexResponseActionFuture.actionGet()) + .thenReturn(new CreateIndexResponse(true, true, TEST_SCHEDULER_INDEX_NAME)); + when(client.index(any(IndexRequest.class))).thenThrow(new RuntimeException("Test exception")); + + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + assertThrows(RuntimeException.class, () -> scheduler.scheduleJob(request)); + + 
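// Indexing can also succeed at the client level yet report an unexpected result; scheduling should fail then too. +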
when(client.index(any(IndexRequest.class))).thenReturn(indexResponseActionFuture); + when(indexResponseActionFuture.actionGet()).thenReturn(indexResponse); + when(indexResponse.getResult()).thenReturn(DocWriteResponse.Result.NOT_FOUND); + + RuntimeException exception = + assertThrows(RuntimeException.class, () -> scheduler.scheduleJob(request)); + assertEquals("Schedule job failed with result : not_found", exception.getMessage()); + } + + @Test + public void testUnscheduleJob() throws IOException { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + when(updateResponseActionFuture.actionGet()).thenReturn(updateResponse); + when(updateResponse.getResult()).thenReturn(DocWriteResponse.Result.UPDATED); + + when(client.update(any(UpdateRequest.class))).thenReturn(updateResponseActionFuture); + + scheduler.unscheduleJob(TEST_JOB_ID); + + ArgumentCaptor captor = ArgumentCaptor.forClass(UpdateRequest.class); + verify(client).update(captor.capture()); + + UpdateRequest capturedRequest = captor.getValue(); + assertEquals(TEST_JOB_ID, capturedRequest.id()); + assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, capturedRequest.getRefreshPolicy()); + + // Reset the captor for the next verification + captor = ArgumentCaptor.forClass(UpdateRequest.class); + + when(updateResponse.getResult()).thenReturn(DocWriteResponse.Result.NOOP); + scheduler.unscheduleJob(TEST_JOB_ID); + + verify(client, times(2)).update(captor.capture()); + capturedRequest = captor.getValue(); + assertEquals(TEST_JOB_ID, capturedRequest.id()); + assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, capturedRequest.getRefreshPolicy()); + } + + @Test + public void testUnscheduleJobWithIndexNotFound() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(false); + + assertThrows(IllegalStateException.class, () -> scheduler.unscheduleJob(TEST_JOB_ID)); + } + + @Test + public void testUpdateJob() throws IOException { + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + when(updateResponseActionFuture.actionGet()).thenReturn(updateResponse); + when(updateResponse.getResult()).thenReturn(DocWriteResponse.Result.UPDATED); + + when(client.update(any(UpdateRequest.class))).thenReturn(updateResponseActionFuture); + + scheduler.updateJob(request); + + ArgumentCaptor captor = ArgumentCaptor.forClass(UpdateRequest.class); + verify(client).update(captor.capture()); + + UpdateRequest capturedRequest = captor.getValue(); + assertEquals(request.getName(), capturedRequest.id()); + assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, capturedRequest.getRefreshPolicy()); + } + + @Test + public void testUpdateJobWithIndexNotFound() { + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(false); + + assertThrows(IllegalStateException.class, () -> scheduler.updateJob(request)); + } + + @Test + public void testUpdateJobWithExceptions() { + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + 
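// Exercise three failure modes in turn: missing document, client-side exception, and a not-found update result. +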
when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + when(client.update(any(UpdateRequest.class))) + .thenThrow(new DocumentMissingException(null, null)); + + IllegalArgumentException exception1 = + assertThrows( + IllegalArgumentException.class, + () -> { + scheduler.updateJob(request); + }); + + assertEquals("Job: testJob doesn't exist", exception1.getMessage()); + + when(client.update(any(UpdateRequest.class))).thenThrow(new RuntimeException("Test exception")); + + RuntimeException exception2 = + assertThrows( + RuntimeException.class, + () -> { + scheduler.updateJob(request); + }); + + assertEquals("java.lang.RuntimeException: Test exception", exception2.getMessage()); + + when(client.update(any(UpdateRequest.class))).thenReturn(updateResponseActionFuture); + when(updateResponseActionFuture.actionGet()).thenReturn(updateResponse); + when(updateResponse.getResult()).thenReturn(DocWriteResponse.Result.NOT_FOUND); + + RuntimeException exception = + assertThrows(RuntimeException.class, () -> scheduler.updateJob(request)); + assertEquals("Update job failed with result : not_found", exception.getMessage()); + } + + @Test + public void testRemoveJob() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + DeleteResponse deleteResponse = mock(DeleteResponse.class); + when(deleteResponseActionFuture.actionGet()).thenReturn(deleteResponse); + when(deleteResponse.getResult()).thenReturn(DocWriteResponse.Result.DELETED); + + when(client.delete(any(DeleteRequest.class))).thenReturn(deleteResponseActionFuture); + + scheduler.removeJob(TEST_JOB_ID); + + ArgumentCaptor captor = ArgumentCaptor.forClass(DeleteRequest.class); + verify(client).delete(captor.capture()); + + DeleteRequest capturedRequest = captor.getValue(); + assertEquals(TEST_JOB_ID, capturedRequest.id()); + assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, capturedRequest.getRefreshPolicy()); + } + + @Test + public void testRemoveJobWithIndexNotFound() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(false); + + assertThrows(IllegalStateException.class, () -> scheduler.removeJob(TEST_JOB_ID)); + } + + @Test + public void testCreateAsyncQuerySchedulerIndex() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(false); + + CreateIndexResponse createIndexResponse = mock(CreateIndexResponse.class); + when(createIndexResponseActionFuture.actionGet()).thenReturn(createIndexResponse); + when(createIndexResponse.isAcknowledged()).thenReturn(true); + + when(client.admin().indices().create(any(CreateIndexRequest.class))) + .thenReturn(createIndexResponseActionFuture); + + scheduler.createAsyncQuerySchedulerIndex(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(CreateIndexRequest.class); + verify(client.admin().indices()).create(captor.capture()); + + CreateIndexRequest capturedRequest = captor.getValue(); + assertEquals(SCHEDULER_INDEX_NAME, capturedRequest.index()); + } + + @Test + public void testCreateAsyncQuerySchedulerIndexFailure() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(false); + + when(client.admin().indices().create(any(CreateIndexRequest.class))) + .thenThrow(new RuntimeException("Error creating index")); + + RuntimeException exception = + assertThrows( + RuntimeException.class, + () -> { + scheduler.createAsyncQuerySchedulerIndex(); + }); + + assertEquals( + "Internal server error while creating .async-query-scheduler 
index: Error creating index", + exception.getMessage()); + + when(client.admin().indices().create(any(CreateIndexRequest.class))) + .thenReturn(createIndexResponseActionFuture); + Mockito.when(createIndexResponseActionFuture.actionGet()) + .thenReturn(new CreateIndexResponse(false, false, SCHEDULER_INDEX_NAME)); + + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + RuntimeException runtimeException = + Assertions.assertThrows(RuntimeException.class, () -> scheduler.scheduleJob(request)); + Assertions.assertEquals( + "Internal server error while creating .async-query-scheduler index: Index creation is not" + + " acknowledged.", + runtimeException.getMessage()); + } + + @Test + public void testUpdateJobNotFound() { + OpenSearchRefreshIndexJobRequest request = + OpenSearchRefreshIndexJobRequest.builder() + .jobName(TEST_JOB_ID) + .lastUpdateTime(Instant.now()) + .build(); + + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + when(client.update(any(UpdateRequest.class))) + .thenThrow(new DocumentMissingException(null, null)); + + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> { + scheduler.updateJob(request); + }); + + assertEquals("Job: testJob doesn't exist", exception.getMessage()); + } + + @Test + public void testRemoveJobNotFound() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + DeleteResponse deleteResponse = mock(DeleteResponse.class); + when(deleteResponseActionFuture.actionGet()).thenReturn(deleteResponse); + when(deleteResponse.getResult()).thenReturn(DocWriteResponse.Result.NOT_FOUND); + + when(client.delete(any(DeleteRequest.class))).thenReturn(deleteResponseActionFuture); + + IllegalArgumentException exception = + assertThrows( + IllegalArgumentException.class, + () -> { + scheduler.removeJob(TEST_JOB_ID); + }); + + assertEquals("Job : testJob doesn't exist", exception.getMessage()); + } + + @Test + public void testRemoveJobWithExceptions() { + when(clusterService.state().routingTable().hasIndex(SCHEDULER_INDEX_NAME)).thenReturn(true); + + when(client.delete(any(DeleteRequest.class))).thenThrow(new RuntimeException("Test exception")); + + assertThrows(RuntimeException.class, () -> scheduler.removeJob(TEST_JOB_ID)); + + DeleteResponse deleteResponse = mock(DeleteResponse.class); + when(client.delete(any(DeleteRequest.class))).thenReturn(deleteResponseActionFuture); + when(deleteResponseActionFuture.actionGet()).thenReturn(deleteResponse); + when(deleteResponse.getResult()).thenReturn(DocWriteResponse.Result.NOOP); + + RuntimeException runtimeException = + Assertions.assertThrows(RuntimeException.class, () -> scheduler.removeJob(TEST_JOB_ID)); + Assertions.assertEquals("Remove job failed with result : noop", runtimeException.getMessage()); + } + + @Test + public void testGetJobRunner() { + ScheduledJobRunner jobRunner = OpenSearchAsyncQueryScheduler.getJobRunner(); + assertNotNull(jobRunner); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJobTest.java b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJobTest.java new file mode 100644 index 0000000000..cbf137997e --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/job/OpenSearchRefreshIndexJobTest.java @@ -0,0 +1,145 @@ +/* + * Copyright OpenSearch 
Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler.job; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +import java.time.Instant; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Answers; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.jobscheduler.spi.JobExecutionContext; +import org.opensearch.jobscheduler.spi.ScheduledJobParameter; +import org.opensearch.sql.spark.scheduler.model.OpenSearchRefreshIndexJobRequest; +import org.opensearch.threadpool.ThreadPool; + +public class OpenSearchRefreshIndexJobTest { + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ClusterService clusterService; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ThreadPool threadPool; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private Client client; + + @Mock private JobExecutionContext context; + + private OpenSearchRefreshIndexJob jobRunner; + + private OpenSearchRefreshIndexJob spyJobRunner; + + @BeforeEach + public void setup() { + MockitoAnnotations.openMocks(this); + jobRunner = OpenSearchRefreshIndexJob.getJobRunnerInstance(); + jobRunner.setClient(null); + jobRunner.setClusterService(null); + jobRunner.setThreadPool(null); + } + + @Test + public void testRunJobWithCorrectParameter() { + spyJobRunner = spy(jobRunner); + spyJobRunner.setClusterService(clusterService); + spyJobRunner.setThreadPool(threadPool); + spyJobRunner.setClient(client); + + OpenSearchRefreshIndexJobRequest jobParameter = + OpenSearchRefreshIndexJobRequest.builder() + .jobName("testJob") + .lastUpdateTime(Instant.now()) + .lockDurationSeconds(10L) + .build(); + + spyJobRunner.runJob(jobParameter, context); + + ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); + verify(threadPool.generic()).submit(captor.capture()); + + Runnable runnable = captor.getValue(); + runnable.run(); + + verify(spyJobRunner).doRefresh(eq(jobParameter.getName())); + } + + @Test + public void testRunJobWithIncorrectParameter() { + jobRunner = OpenSearchRefreshIndexJob.getJobRunnerInstance(); + jobRunner.setClusterService(clusterService); + jobRunner.setThreadPool(threadPool); + jobRunner.setClient(client); + + ScheduledJobParameter wrongParameter = mock(ScheduledJobParameter.class); + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> jobRunner.runJob(wrongParameter, context), + "Expected IllegalStateException but no exception was thrown"); + + assertEquals( + "Job parameter is not instance of OpenSearchRefreshIndexJobRequest, type: " + + wrongParameter.getClass().getCanonicalName(), + exception.getMessage()); + } + + @Test + public void testRunJobWithUninitializedServices() { + OpenSearchRefreshIndexJobRequest jobParameter = + OpenSearchRefreshIndexJobRequest.builder() + .jobName("testJob") + .lastUpdateTime(Instant.now()) + .build(); + + IllegalStateException exception = + assertThrows( + IllegalStateException.class, + () -> jobRunner.runJob(jobParameter, context), + "Expected IllegalStateException but no exception 
was thrown"); + assertEquals("ClusterService is not initialized.", exception.getMessage()); + + jobRunner.setClusterService(clusterService); + + exception = + assertThrows( + IllegalStateException.class, + () -> jobRunner.runJob(jobParameter, context), + "Expected IllegalStateException but no exception was thrown"); + assertEquals("ThreadPool is not initialized.", exception.getMessage()); + + jobRunner.setThreadPool(threadPool); + + exception = + assertThrows( + IllegalStateException.class, + () -> jobRunner.runJob(jobParameter, context), + "Expected IllegalStateException but no exception was thrown"); + assertEquals("Client is not initialized.", exception.getMessage()); + } + + @Test + public void testGetJobRunnerInstanceMultipleCalls() { + OpenSearchRefreshIndexJob instance1 = OpenSearchRefreshIndexJob.getJobRunnerInstance(); + OpenSearchRefreshIndexJob instance2 = OpenSearchRefreshIndexJob.getJobRunnerInstance(); + OpenSearchRefreshIndexJob instance3 = OpenSearchRefreshIndexJob.getJobRunnerInstance(); + + assertSame(instance1, instance2); + assertSame(instance2, instance3); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequestTest.java b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequestTest.java new file mode 100644 index 0000000000..108f1acfd5 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/scheduler/model/OpenSearchRefreshIndexJobRequestTest.java @@ -0,0 +1,81 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.scheduler.model; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; + +import java.io.IOException; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import org.junit.jupiter.api.Test; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.jobscheduler.spi.schedule.IntervalSchedule; + +public class OpenSearchRefreshIndexJobRequestTest { + + @Test + public void testBuilderAndGetterMethods() { + Instant now = Instant.now(); + IntervalSchedule schedule = new IntervalSchedule(now, 1, ChronoUnit.MINUTES); + + OpenSearchRefreshIndexJobRequest jobRequest = + OpenSearchRefreshIndexJobRequest.builder() + .jobName("testJob") + .jobType("testType") + .schedule(schedule) + .enabled(true) + .lastUpdateTime(now) + .enabledTime(now) + .lockDurationSeconds(60L) + .jitter(0.1) + .build(); + + assertEquals("testJob", jobRequest.getName()); + assertEquals("testType", jobRequest.getJobType()); + assertEquals(schedule, jobRequest.getSchedule()); + assertTrue(jobRequest.isEnabled()); + assertEquals(now, jobRequest.getLastUpdateTime()); + assertEquals(now, jobRequest.getEnabledTime()); + assertEquals(60L, jobRequest.getLockDurationSeconds()); + assertEquals(0.1, jobRequest.getJitter()); + } + + @Test + public void testToXContent() throws IOException { + Instant now = Instant.now(); + IntervalSchedule schedule = new IntervalSchedule(now, 1, ChronoUnit.MINUTES); + + OpenSearchRefreshIndexJobRequest jobRequest = + OpenSearchRefreshIndexJobRequest.builder() + .jobName("testJob") + .jobType("testType") + .schedule(schedule) + .enabled(true) + .lastUpdateTime(now) + .enabledTime(now) + .lockDurationSeconds(60L) + .jitter(0.1) + .build(); + + XContentBuilder builder 
= XContentFactory.jsonBuilder().prettyPrint(); + jobRequest.toXContent(builder, EMPTY_PARAMS); + String jsonString = builder.toString(); + + assertTrue(jsonString.contains("\"jobName\" : \"testJob\"")); + assertTrue(jsonString.contains("\"jobType\" : \"testType\"")); + assertTrue(jsonString.contains("\"start_time\" : " + now.toEpochMilli())); + assertTrue(jsonString.contains("\"period\" : 1")); + assertTrue(jsonString.contains("\"unit\" : \"Minutes\"")); + assertTrue(jsonString.contains("\"enabled\" : true")); + assertTrue(jsonString.contains("\"lastUpdateTime\" : " + now.toEpochMilli())); + assertTrue(jsonString.contains("\"enabledTime\" : " + now.toEpochMilli())); + assertTrue(jsonString.contains("\"lockDurationSeconds\" : 60")); + assertTrue(jsonString.contains("\"jitter\" : 0.1")); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestActionTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestActionTest.java new file mode 100644 index 0000000000..2ff76b9b57 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCancelAsyncQueryRequestActionTest.java @@ -0,0 +1,76 @@ +/* + * + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.opensearch.sql.spark.transport; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; + +import java.util.HashSet; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.core.action.ActionListener; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl; +import org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionRequest; +import org.opensearch.sql.spark.transport.model.CancelAsyncQueryActionResponse; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +@ExtendWith(MockitoExtension.class) +public class TransportCancelAsyncQueryRequestActionTest { + + @Mock private TransportService transportService; + @Mock private TransportCancelAsyncQueryRequestAction action; + @Mock private Task task; + @Mock private ActionListener<CancelAsyncQueryActionResponse> actionListener; + + @Mock private AsyncQueryExecutorServiceImpl asyncQueryExecutorService; + + @Captor + private ArgumentCaptor<CancelAsyncQueryActionResponse> deleteJobActionResponseArgumentCaptor; + + @Captor private ArgumentCaptor<Exception> exceptionArgumentCaptor; + + @BeforeEach + public void setUp() { + action = + new TransportCancelAsyncQueryRequestAction( + transportService, new ActionFilters(new HashSet<>()), asyncQueryExecutorService); + } + + @Test + public void testDoExecute() { + CancelAsyncQueryActionRequest request = new CancelAsyncQueryActionRequest(EMR_JOB_ID); + when(asyncQueryExecutorService.cancelQuery(EMR_JOB_ID)).thenReturn(EMR_JOB_ID); + action.doExecute(task, request, actionListener); + Mockito.verify(actionListener).onResponse(deleteJobActionResponseArgumentCaptor.capture()); + CancelAsyncQueryActionResponse cancelAsyncQueryActionResponse = + deleteJobActionResponseArgumentCaptor.getValue(); + Assertions.assertEquals( + "Deleted async query with
id: " + EMR_JOB_ID, cancelAsyncQueryActionResponse.getResult()); + } + + @Test + public void testDoExecuteWithException() { + CancelAsyncQueryActionRequest request = new CancelAsyncQueryActionRequest(EMR_JOB_ID); + doThrow(new RuntimeException("Error")).when(asyncQueryExecutorService).cancelQuery(EMR_JOB_ID); + action.doExecute(task, request, actionListener); + Mockito.verify(actionListener).onFailure(exceptionArgumentCaptor.capture()); + Exception exception = exceptionArgumentCaptor.getValue(); + Assertions.assertTrue(exception instanceof RuntimeException); + Assertions.assertEquals("Error", exception.getMessage()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java new file mode 100644 index 0000000000..2a4d33726b --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java @@ -0,0 +1,145 @@ +/* + * + * * Copyright OpenSearch Contributors + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.opensearch.sql.spark.transport; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.spark.constants.TestConstants.MOCK_SESSION_ID; + +import java.util.HashSet; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.core.action.ActionListener; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.opensearch.setting.OpenSearchSettings; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; +import org.opensearch.sql.spark.rest.model.LangType; +import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionRequest; +import org.opensearch.sql.spark.transport.model.CreateAsyncQueryActionResponse; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +@ExtendWith(MockitoExtension.class) +public class TransportCreateAsyncQueryRequestActionTest { + + @Mock private TransportService transportService; + @Mock private TransportCreateAsyncQueryRequestAction action; + @Mock private AsyncQueryExecutorServiceImpl jobExecutorService; + @Mock private Task task; + @Mock private ActionListener actionListener; + @Mock private OpenSearchSettings pluginSettings; + + @Captor + private ArgumentCaptor createJobActionResponseArgumentCaptor; + + @Captor private ArgumentCaptor exceptionArgumentCaptor; + + @BeforeEach + public void setUp() { + action = + new TransportCreateAsyncQueryRequestAction( + transportService, + new ActionFilters(new HashSet<>()), + jobExecutorService, + pluginSettings); + } + + @Test + public void testDoExecute() { + CreateAsyncQueryRequest createAsyncQueryRequest = + new 
CreateAsyncQueryRequest("source = my_glue.default.alb_logs", "my_glue", LangType.SQL); + CreateAsyncQueryActionRequest request = + new CreateAsyncQueryActionRequest(createAsyncQueryRequest); + when(pluginSettings.getSettingValue(Settings.Key.ASYNC_QUERY_ENABLED)).thenReturn(true); + when(jobExecutorService.createAsyncQuery(eq(createAsyncQueryRequest), any())) + .thenReturn(new CreateAsyncQueryResponse("123", null)); + + action.doExecute(task, request, actionListener); + + Mockito.verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); + CreateAsyncQueryActionResponse createAsyncQueryActionResponse = + createJobActionResponseArgumentCaptor.getValue(); + Assertions.assertEquals( + "{\n" + " \"queryId\": \"123\"\n" + "}", createAsyncQueryActionResponse.getResult()); + } + + @Test + public void testDoExecuteWithSessionId() { + CreateAsyncQueryRequest createAsyncQueryRequest = + new CreateAsyncQueryRequest( + "source = my_glue.default.alb_logs", "my_glue", LangType.SQL, MOCK_SESSION_ID); + CreateAsyncQueryActionRequest request = + new CreateAsyncQueryActionRequest(createAsyncQueryRequest); + when(pluginSettings.getSettingValue(Settings.Key.ASYNC_QUERY_ENABLED)).thenReturn(true); + when(jobExecutorService.createAsyncQuery(eq(createAsyncQueryRequest), any())) + .thenReturn(new CreateAsyncQueryResponse("123", MOCK_SESSION_ID)); + + action.doExecute(task, request, actionListener); + + Mockito.verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); + CreateAsyncQueryActionResponse createAsyncQueryActionResponse = + createJobActionResponseArgumentCaptor.getValue(); + Assertions.assertEquals( + "{\n" + " \"queryId\": \"123\",\n" + " \"sessionId\": \"s-0123456\"\n" + "}", + createAsyncQueryActionResponse.getResult()); + } + + @Test + public void testDoExecuteWithException() { + CreateAsyncQueryRequest createAsyncQueryRequest = + new CreateAsyncQueryRequest("source = my_glue.default.alb_logs", "my_glue", LangType.SQL); + CreateAsyncQueryActionRequest request = + new CreateAsyncQueryActionRequest(createAsyncQueryRequest); + when(pluginSettings.getSettingValue(Settings.Key.ASYNC_QUERY_ENABLED)).thenReturn(true); + doThrow(new RuntimeException("Error")) + .when(jobExecutorService) + .createAsyncQuery(eq(createAsyncQueryRequest), any()); + + action.doExecute(task, request, actionListener); + + verify(jobExecutorService, times(1)).createAsyncQuery(eq(createAsyncQueryRequest), any()); + Mockito.verify(actionListener).onFailure(exceptionArgumentCaptor.capture()); + Exception exception = exceptionArgumentCaptor.getValue(); + Assertions.assertTrue(exception instanceof RuntimeException); + Assertions.assertEquals("Error", exception.getMessage()); + } + + @Test + public void asyncQueryDisabled() { + CreateAsyncQueryRequest createAsyncQueryRequest = + new CreateAsyncQueryRequest("source = my_glue.default.alb_logs", "my_glue", LangType.SQL); + CreateAsyncQueryActionRequest request = + new CreateAsyncQueryActionRequest(createAsyncQueryRequest); + when(pluginSettings.getSettingValue(Settings.Key.ASYNC_QUERY_ENABLED)).thenReturn(false); + + action.doExecute(task, request, actionListener); + + verify(jobExecutorService, never()).createAsyncQuery(eq(createAsyncQueryRequest), any()); + Mockito.verify(actionListener).onFailure(exceptionArgumentCaptor.capture()); + Exception exception = exceptionArgumentCaptor.getValue(); + Assertions.assertTrue(exception instanceof IllegalAccessException); + Assertions.assertEquals( + "plugins.query.executionengine.async_query.enabled 
" + "setting is false", + exception.getMessage()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java new file mode 100644 index 0000000000..34f10b0083 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java @@ -0,0 +1,141 @@ +/* + * + * * Copyright OpenSearch Contributors + * * SPDX-License-Identifier: Apache-2.0 + * + */ + +package org.opensearch.sql.spark.transport; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.opensearch.sql.data.model.ExprValueUtils.tupleValue; +import static org.opensearch.sql.data.type.ExprCoreType.INTEGER; +import static org.opensearch.sql.data.type.ExprCoreType.STRING; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.util.Arrays; +import java.util.HashSet; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.core.action.ActionListener; +import org.opensearch.sql.executor.ExecutionEngine; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceImpl; +import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; +import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; +import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionRequest; +import org.opensearch.sql.spark.transport.model.GetAsyncQueryResultActionResponse; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +@ExtendWith(MockitoExtension.class) +public class TransportGetAsyncQueryResultActionTest { + + @Mock private TransportService transportService; + @Mock private TransportGetAsyncQueryResultAction action; + @Mock private Task task; + @Mock private ActionListener actionListener; + @Mock private AsyncQueryExecutorServiceImpl jobExecutorService; + + @Captor + private ArgumentCaptor createJobActionResponseArgumentCaptor; + + @Captor private ArgumentCaptor exceptionArgumentCaptor; + + @BeforeEach + public void setUp() { + action = + new TransportGetAsyncQueryResultAction( + transportService, new ActionFilters(new HashSet<>()), jobExecutorService); + } + + @Test + public void testDoExecute() { + GetAsyncQueryResultActionRequest request = new GetAsyncQueryResultActionRequest("jobId"); + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + new AsyncQueryExecutionResponse("IN_PROGRESS", null, null, null, null); + when(jobExecutorService.getAsyncQueryResults("jobId")).thenReturn(asyncQueryExecutionResponse); + action.doExecute(task, request, actionListener); + verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); + GetAsyncQueryResultActionResponse getAsyncQueryResultActionResponse = + createJobActionResponseArgumentCaptor.getValue(); + Assertions.assertEquals( + "{\n" + " \"status\": \"IN_PROGRESS\"\n" + "}", + getAsyncQueryResultActionResponse.getResult()); + } + + @Test + public void 
testDoExecuteWithSuccessResponse() { + GetAsyncQueryResultActionRequest request = new GetAsyncQueryResultActionRequest("jobId"); + ExecutionEngine.Schema schema = + new ExecutionEngine.Schema( + ImmutableList.of( + new ExecutionEngine.Schema.Column("name", "name", STRING), + new ExecutionEngine.Schema.Column("age", "age", INTEGER))); + AsyncQueryExecutionResponse asyncQueryExecutionResponse = + new AsyncQueryExecutionResponse( + "SUCCESS", + schema, + Arrays.asList( + tupleValue(ImmutableMap.of("name", "John", "age", 20)), + tupleValue(ImmutableMap.of("name", "Smith", "age", 30))), + null, + null); + when(jobExecutorService.getAsyncQueryResults("jobId")).thenReturn(asyncQueryExecutionResponse); + action.doExecute(task, request, actionListener); + verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); + GetAsyncQueryResultActionResponse getAsyncQueryResultActionResponse = + createJobActionResponseArgumentCaptor.getValue(); + Assertions.assertEquals( + "{\n" + + " \"status\": \"SUCCESS\",\n" + + " \"schema\": [\n" + + " {\n" + + " \"name\": \"name\",\n" + + " \"type\": \"string\"\n" + + " },\n" + + " {\n" + + " \"name\": \"age\",\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " ],\n" + + " \"datarows\": [\n" + + " [\n" + + " \"John\",\n" + + " 20\n" + + " ],\n" + + " [\n" + + " \"Smith\",\n" + + " 30\n" + + " ]\n" + + " ],\n" + + " \"total\": 2,\n" + + " \"size\": 2\n" + + "}", + getAsyncQueryResultActionResponse.getResult()); + } + + @Test + public void testDoExecuteWithException() { + GetAsyncQueryResultActionRequest request = new GetAsyncQueryResultActionRequest("123"); + doThrow(new AsyncQueryNotFoundException("JobId 123 not found")) + .when(jobExecutorService) + .getAsyncQueryResults("123"); + action.doExecute(task, request, actionListener); + verify(jobExecutorService, times(1)).getAsyncQueryResults("123"); + verify(actionListener).onFailure(exceptionArgumentCaptor.capture()); + Exception exception = exceptionArgumentCaptor.getValue(); + Assertions.assertTrue(exception instanceof RuntimeException); + Assertions.assertEquals("JobId 123 not found", exception.getMessage()); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModuleTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModuleTest.java new file mode 100644 index 0000000000..d45950852f --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/config/AsyncExecutorServiceModuleTest.java @@ -0,0 +1,50 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.transport.config; + +import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Injector; +import org.opensearch.common.inject.ModulesBuilder; +import org.opensearch.sql.common.setting.Settings; +import org.opensearch.sql.datasource.DataSourceService; +import org.opensearch.sql.legacy.metrics.Metrics; +import org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorService; + +@ExtendWith(MockitoExtension.class) +public class AsyncExecutorServiceModuleTest { + + @Mock private NodeClient nodeClient; + + @Mock private ClusterService clusterService; + + 
@Mock private Settings settings; + + @Mock private DataSourceService dataSourceService; + + @Test + public void testAsyncQueryExecutorService() { + ModulesBuilder modulesBuilder = new ModulesBuilder(); + modulesBuilder.add(new AsyncExecutorServiceModule()); + modulesBuilder.add( + b -> { + b.bind(NodeClient.class).toInstance(nodeClient); + b.bind(org.opensearch.sql.common.setting.Settings.class).toInstance(settings); + b.bind(DataSourceService.class).toInstance(dataSourceService); + b.bind(ClusterService.class).toInstance(clusterService); + }); + Injector injector = modulesBuilder.createInjector(); + assertNotNull(injector.getInstance(AsyncQueryExecutorService.class)); + assertNotNull(Metrics.getInstance().getMetric("active_async_query_sessions_count")); + assertNotNull(Metrics.getInstance().getMetric("active_async_query_statements_count")); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatterTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatterTest.java new file mode 100644 index 0000000000..bb7d5f7893 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/format/AsyncQueryResultResponseFormatterTest.java @@ -0,0 +1,53 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.transport.format; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.opensearch.sql.data.model.ExprValueUtils.tupleValue; +import static org.opensearch.sql.data.type.ExprCoreType.INTEGER; +import static org.opensearch.sql.data.type.ExprCoreType.STRING; +import static org.opensearch.sql.protocol.response.format.JsonResponseFormatter.Style.COMPACT; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.util.Arrays; +import org.junit.jupiter.api.Test; +import org.opensearch.sql.executor.ExecutionEngine; +import org.opensearch.sql.spark.transport.model.AsyncQueryResult; + +public class AsyncQueryResultResponseFormatterTest { + + private final ExecutionEngine.Schema schema = + new ExecutionEngine.Schema( + ImmutableList.of( + new ExecutionEngine.Schema.Column("firstname", null, STRING), + new ExecutionEngine.Schema.Column("age", null, INTEGER))); + + @Test + void formatAsyncQueryResponse() { + AsyncQueryResult response = + new AsyncQueryResult( + "success", + schema, + Arrays.asList( + tupleValue(ImmutableMap.of("firstname", "John", "age", 20)), + tupleValue(ImmutableMap.of("firstname", "Smith", "age", 30))), + null); + AsyncQueryResultResponseFormatter formatter = new AsyncQueryResultResponseFormatter(COMPACT); + assertEquals( + "{\"status\":\"success\",\"schema\":[{\"name\":\"firstname\",\"type\":\"string\"}," + + "{\"name\":\"age\",\"type\":\"integer\"}],\"datarows\":" + + "[[\"John\",20],[\"Smith\",30]],\"total\":2,\"size\":2}", + formatter.format(response)); + } + + @Test + void formatAsyncQueryError() { + AsyncQueryResult response = new AsyncQueryResult("FAILED", null, null, "foo"); + AsyncQueryResultResponseFormatter formatter = new AsyncQueryResultResponseFormatter(COMPACT); + assertEquals("{\"status\":\"FAILED\",\"error\":\"foo\"}", formatter.format(response)); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverterTest.java b/async-query/src/test/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverterTest.java new file 
mode 100644 index 0000000000..d7f8046a1b --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/transport/format/CreateAsyncQueryRequestConverterTest.java @@ -0,0 +1,111 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.transport.format; + +import java.io.IOException; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; +import org.opensearch.sql.spark.rest.model.LangType; + +public class CreateAsyncQueryRequestConverterTest { + + @Test + public void fromXContent() throws IOException { + String request = + "{\n" + + " \"datasource\": \"my_glue\",\n" + + " \"lang\": \"sql\",\n" + + " \"query\": \"select 1\"\n" + + "}"; + CreateAsyncQueryRequest queryRequest = + CreateAsyncQueryRequestConverter.fromXContentParser(xContentParser(request)); + Assertions.assertEquals("my_glue", queryRequest.getDatasource()); + Assertions.assertEquals(LangType.SQL, queryRequest.getLang()); + Assertions.assertEquals("select 1", queryRequest.getQuery()); + } + + @Test + public void testConstructor() { + Assertions.assertDoesNotThrow( + () -> new CreateAsyncQueryRequest("select * from apple", "my_glue", LangType.SQL)); + } + + @Test + public void fromXContentWithDuplicateFields() throws IOException { + String request = + "{\n" + + " \"datasource\": \"my_glue\",\n" + + " \"datasource\": \"my_glue_1\",\n" + + " \"lang\": \"sql\",\n" + + " \"query\": \"select 1\"\n" + + "}"; + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> CreateAsyncQueryRequestConverter.fromXContentParser(xContentParser(request))); + Assertions.assertTrue( + illegalArgumentException + .getMessage() + .contains("Error while parsing the request body: Duplicate field 'datasource'")); + } + + @Test + public void fromXContentWithUnknownField() throws IOException { + String request = + "{\n" + + " \"datasource\": \"my_glue\",\n" + + " \"random\": \"my_gue_1\",\n" + + " \"lang\": \"sql\",\n" + + " \"query\": \"select 1\"\n" + + "}"; + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> CreateAsyncQueryRequestConverter.fromXContentParser(xContentParser(request))); + Assertions.assertEquals( + "Error while parsing the request body: Unknown field: random", + illegalArgumentException.getMessage()); + } + + @Test + public void fromXContentWithWrongDatatype() throws IOException { + String request = + "{\"datasource\": [\"my_glue\", \"my_glue_1\"], \"lang\": \"sql\", \"query\": \"select" + + " 1\"}"; + IllegalArgumentException illegalArgumentException = + Assertions.assertThrows( + IllegalArgumentException.class, + () -> CreateAsyncQueryRequestConverter.fromXContentParser(xContentParser(request))); + Assertions.assertEquals( + "Error while parsing the request body: Can't get text on a START_ARRAY at 1:16", + illegalArgumentException.getMessage()); + } + + @Test + public void fromXContentWithSessionId() throws IOException { + String request = + "{\n" + + " \"datasource\": \"my_glue\",\n" + + " \"lang\": \"sql\",\n" + + " \"query\": \"select 1\",\n" + + " \"sessionId\": \"00fdjevgkf12s00q\"\n" + + "}"; + 
CreateAsyncQueryRequest queryRequest = + CreateAsyncQueryRequestConverter.fromXContentParser(xContentParser(request)); + Assertions.assertEquals("00fdjevgkf12s00q", queryRequest.getSessionId()); + } + + private XContentParser xContentParser(String request) throws IOException { + return XContentType.JSON + .xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, request); + } +} diff --git a/async-query/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java b/async-query/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java new file mode 100644 index 0000000000..24c10ebea9 --- /dev/null +++ b/async-query/src/test/java/org/opensearch/sql/spark/utils/TestUtils.java @@ -0,0 +1,29 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.spark.utils; + +import com.google.common.base.Charsets; +import com.google.common.io.Resources; +import java.net.URL; +import lombok.SneakyThrows; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.client.Client; +import org.opensearch.common.xcontent.XContentType; + +public class TestUtils { + @SneakyThrows + public static String loadMappings(String path) { + URL url = Resources.getResource(path); + return Resources.toString(url, Charsets.UTF_8); + } + + public static void createIndexWithMappings( + Client client, String indexName, String metadataFileLocation) { + CreateIndexRequest request = new CreateIndexRequest(indexName); + request.mapping(loadMappings(metadataFileLocation), XContentType.JSON); + client.admin().indices().create(request).actionGet(); + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json new file mode 100644 index 0000000000..811204847c --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json @@ -0,0 +1,37 @@ +{ + "_meta": { + "kind": "covering", + "indexedColumns": [ + { + "columnType": "timestamp", + "columnName": "time" + }, + { + "columnType": "string", + "columnName": "client_ip" + }, + { + "columnType": "int", + "columnName": "client_port" + }, + { + "columnType": "string", + "columnName": "request_url" + } + ], + "name": "covering", + "options": { + "auto_refresh": "true", + "index_settings": "{\"number_of_shards\":1,\"number_of_replicas\":1}" + }, + "source": "mys3.default.http_logs", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fe3gu2tgad000q" + } + }, + "latestId": "ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19jb3ZlcmluZ19pbmRleA==" + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json new file mode 100644 index 0000000000..1369f9c721 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json @@ -0,0 +1,30 @@ +{ + "_meta": { + "kind": "mv", + "indexedColumns": [ + { + "columnType": "timestamp", + "columnName": "start.time" + }, + { + "columnType": "long", + "columnName": "count" + } + ], + "name": "mys3.default.http_logs_metrics", + "options": { + "auto_refresh": "true", + "checkpoint_location": "s3://flint-data-dp-eu-west-1-beta/data/checkpoint/chen-job-1", + "watermark_delay": "30 Minutes" + }, + "source": "SELECT window.start AS `start.time`, COUNT(*) AS count FROM 
mys3.default.http_logs WHERE status != 200 GROUP BY TUMBLE(`@timestamp`, '6 Hours')", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fe86mkk5q3u00q" + } + }, + "latestId": "ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19tZXRyaWNz" + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json new file mode 100644 index 0000000000..2f65b1d8ee --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json @@ -0,0 +1,23 @@ +{ + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_http_logs_skipping_index", + "options": {}, + "source": "mys3.default.http_logs", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" + } + }, + "latestId": "ZmxpbnRfbXlzM19kZWZhdWx0X2h0dHBfbG9nc19za2lwcGluZ19pbmRleA==" + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_special_character_index.json b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_special_character_index.json new file mode 100644 index 0000000000..72c83c59fa --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/0.1.1/flint_special_character_index.json @@ -0,0 +1,23 @@ +{ + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c_skipping_index", + "options": {}, + "source": "mys3.default.`test ,:\"+/\\|?#><`", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" + } + }, + "latestId": "ZmxpbnRfbXlzM19kZWZhdWx0X3Rlc3QlMjAlMmMlM2ElMjIlMmIlMmYlNWMlN2MlM2YlMjMlM2UlM2Nfc2tpcHBpbmdfaW5kZXg=" + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/flint_covering_index.json b/async-query/src/test/resources/flint-index-mappings/flint_covering_index.json new file mode 100644 index 0000000000..f68a1627ab --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_covering_index.json @@ -0,0 +1,36 @@ +{ + "_meta": { + "kind": "covering", + "indexedColumns": [ + { + "columnType": "timestamp", + "columnName": "time" + }, + { + "columnType": "string", + "columnName": "client_ip" + }, + { + "columnType": "int", + "columnName": "client_port" + }, + { + "columnType": "string", + "columnName": "request_url" + } + ], + "name": "test", + "options": { + "auto_refresh": "true", + "index_settings": "{\"number_of_shards\":1,\"number_of_replicas\":1}" + }, + "source": "mys3.default.http_logs", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fe3gu2tgad000q" + } + } + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/flint_mv.json b/async-query/src/test/resources/flint-index-mappings/flint_mv.json new file mode 100644 index 0000000000..3d130832b8 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_mv.json @@ -0,0 +1,42 @@ +{ + "_meta": { + "kind": "mv", + "indexedColumns": [ + { + "columnType": "timestamp", + "columnName": 
"start.time" + }, + { + "columnType": "long", + "columnName": "count" + } + ], + "name": "spark_catalog.default.http_logs_metrics_chen", + "options": { + "auto_refresh": "true", + "checkpoint_location": "s3://flint-data-dp-eu-west-1-beta/data/checkpoint/chen-job-1", + "watermark_delay": "30 Minutes" + }, + "source": "SELECT window.start AS `start.time`, COUNT(*) AS count FROM mys3.default.http_logs WHERE status != 200 GROUP BY TUMBLE(`@timestamp`, '6 Hours')", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fe86mkk5q3u00q" + } + } + }, + "properties": { + "count": { + "type": "long" + }, + "start": { + "properties": { + "time": { + "type": "date", + "format": "strict_date_optional_time_nanos" + } + } + } + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_corrupted_index_mapping.json b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_corrupted_index_mapping.json new file mode 100644 index 0000000000..90d37c3e79 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_corrupted_index_mapping.json @@ -0,0 +1,33 @@ +{ + "_meta": { + "latestId": "flint_my_glue_mydb_http_logs_covering_corrupted_index_latest_id", + "kind": "covering", + "indexedColumns": [ + { + "columnType": "string", + "columnName": "clientip" + }, + { + "columnType": "int", + "columnName": "status" + } + ], + "name": "covering", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5,\"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/cv/" + }, + "source": "my_glue.mydb.http_logs", + "version": "0.2.0" + }, + "properties": { + "clientip": { + "type": "keyword" + }, + "status": { + "type": "integer" + } + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_error_index_mapping.json b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_error_index_mapping.json new file mode 100644 index 0000000000..edd71b41db --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_error_index_mapping.json @@ -0,0 +1,39 @@ +{ + "_meta": { + "latestId": "flint_my_glue_mydb_http_logs_covering_error_index_latest_id", + "kind": "random", + "indexedColumns": [ + { + "columnType": "string", + "columnName": "clientip" + }, + { + "columnType": "int", + "columnName": "status" + } + ], + "name": "covering", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5,\"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/cv/" + }, + "source": "my_glue.mydb.http_logs", + "version": "0.2.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fhh7frokkf0k0l", + "SERVERLESS_EMR_JOB_ID": "00fhoag6i0671o0m" + } + } + }, + "properties": { + "clientip": { + "type": "keyword" + }, + "status": { + "type": "integer" + } + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_index_mapping.json b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_index_mapping.json new file mode 100644 index 0000000000..cb4a6b5366 --- /dev/null +++ 
b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_covering_index_mapping.json @@ -0,0 +1,39 @@ +{ + "_meta": { + "latestId": "flint_my_glue_mydb_http_logs_covering_index_latest_id", + "kind": "covering", + "indexedColumns": [ + { + "columnType": "string", + "columnName": "clientip" + }, + { + "columnType": "int", + "columnName": "status" + } + ], + "name": "covering", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5,\"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/cv/" + }, + "source": "my_glue.mydb.http_logs", + "version": "0.2.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fhh7frokkf0k0l", + "SERVERLESS_EMR_JOB_ID": "00fhoag6i0671o0m" + } + } + }, + "properties": { + "clientip": { + "type": "keyword" + }, + "status": { + "type": "integer" + } + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_skipping_index_mapping.json b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_skipping_index_mapping.json new file mode 100644 index 0000000000..4ffd73bf9c --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_http_logs_skipping_index_mapping.json @@ -0,0 +1,39 @@ +{ + "_meta": { + "latestId": "flint_my_glue_mydb_http_logs_skipping_index_latest_id", + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "parameters": { + "max_size": "100" + }, + "columnName": "status" + } + ], + "name": "flint_my_glue_mydb_http_logs_skipping_index", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5, \"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/skp/" + }, + "source": "my_glue.mydb.http_logs", + "version": "0.3.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fhe6d5jpah090l", + "SERVERLESS_EMR_JOB_ID": "00fhelvq7peuao0m" + } + } + }, + "properties": { + "file_path": { + "type": "keyword" + }, + "status": { + "type": "integer" + } + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_mv_mapping.json b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_mv_mapping.json new file mode 100644 index 0000000000..0fcbf299ec --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_my_glue_mydb_mv_mapping.json @@ -0,0 +1,33 @@ +{ + "_meta": { + "latestId": "flint_my_glue_mydb_mv_latest_id", + "kind": "mv", + "indexedColumns": [ + { + "columnType": "bigint", + "columnName": "counter1" + } + ], + "name": "my_glue.mydb.mv", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5,\"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/mv/", + "watermark_delay": "10 seconds" + }, + "source": "SELECT count(`@timestamp`) AS `counter1` FROM my_glue.mydb.http_logs GROUP BY TUMBLE (`@timestamp`, '1 second')", + "version": "0.2.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fhh7frokkf0k0l", + "SERVERLESS_EMR_JOB_ID": "00fhob01oa7fu00m" + } + } + }, + "properties": { + "counter1": { + "type": "long" + } + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_cv1_index.json 
b/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_cv1_index.json new file mode 100644 index 0000000000..e7ca1ff440 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_cv1_index.json @@ -0,0 +1,41 @@ +{ + "flint_mys3_default_http_logs_cv1_index": { + "mappings": { + "_doc": { + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_http_logs_cv1_index", + "options": {}, + "source": "mys3.default.http_logs", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" + } + } + } + } + }, + "settings": { + "index": { + "number_of_shards": 5, + "number_of_replicas": 0, + "max_result_window": 100, + "version": { + "created": "6050399" + } + } + }, + "mapping_version": "1", + "settings_version": "1", + "aliases_version": "1" + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json b/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json new file mode 100644 index 0000000000..1438b257d1 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_mys3_default_http_logs_skipping_index.json @@ -0,0 +1,50 @@ +{ + "flint_mys3_default_http_logs_skipping_index": { + "mappings": { + "_doc": { + "_meta": { + "latestId": "ZmxpbnRfdmFtc2lfZ2x1ZV92YW1zaWRiX2h0dHBfbG9nc19za2lwcGluZ19pbmRleA==", + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "parameters": { + "max_size": "100" + }, + "columnName": "status" + } + ], + "name": "flint_vamsi_glue_vamsidb_http_logs_skipping_index", + "options": { + "auto_refresh": "true", + "incremental_refresh": "false", + "index_settings": "{\"number_of_shards\":5,\"number_of_replicas\":1}", + "checkpoint_location": "s3://vamsicheckpoint/skp/" + }, + "source": "vamsi_glue.vamsidb.http_logs", + "version": "0.3.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fhe6d5jpah090l", + "SERVERLESS_EMR_JOB_ID": "00fhelvq7peuao0" + } + } + } + } + }, + "settings": { + "index": { + "number_of_shards": 5, + "number_of_replicas": 0, + "max_result_window": 100, + "version": { + "created": "6050399" + } + } + }, + "mapping_version": "1", + "settings_version": "1", + "aliases_version": "1" + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/flint-index-mappings/flint_skipping_index.json b/async-query/src/test/resources/flint-index-mappings/flint_skipping_index.json new file mode 100644 index 0000000000..edb8a97790 --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_skipping_index.json @@ -0,0 +1,25 @@ +{ + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_http_logs_skipping_index", + "options": { + "auto_refresh" : "true", + "index_settings": "{\"number_of_shards\":1,\"number_of_replicas\":1}" + }, + "source": "mys3.default.http_logs", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" + } + } + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/flint_special_character_index.json 
b/async-query/src/test/resources/flint-index-mappings/flint_special_character_index.json new file mode 100644 index 0000000000..95ae75545f --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/flint_special_character_index.json @@ -0,0 +1,22 @@ +{ + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_test%20%2c%3a%22%2b%2f%5c%7c%3f%23%3e%3c_skipping_index", + "options": {}, + "source": "mys3.default.`test ,:\"+/\\|?#><`", + "version": "0.1.0", + "properties": { + "env": { + "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", + "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" + } + } + } +} diff --git a/async-query/src/test/resources/flint-index-mappings/npe_mapping.json b/async-query/src/test/resources/flint-index-mappings/npe_mapping.json new file mode 100644 index 0000000000..ff1d19f99f --- /dev/null +++ b/async-query/src/test/resources/flint-index-mappings/npe_mapping.json @@ -0,0 +1,35 @@ +{ + "flint_mys3_default_http_logs_cv1_index": { + "mappings": { + "_doc": { + "_meta": { + "kind": "skipping", + "indexedColumns": [ + { + "columnType": "int", + "kind": "VALUE_SET", + "columnName": "status" + } + ], + "name": "flint_mys3_default_http_logs_cv1_index", + "options": {}, + "source": "mys3.default.http_logs", + "version": "0.1.0" + } + } + }, + "settings": { + "index": { + "number_of_shards": 5, + "number_of_replicas": 0, + "max_result_window": 100, + "version": { + "created": "6050399" + } + } + }, + "mapping_version": "1", + "settings_version": "1", + "aliases_version": "1" + } +} \ No newline at end of file diff --git a/async-query/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/async-query/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker new file mode 100644 index 0000000000..ca6ee9cea8 --- /dev/null +++ b/async-query/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -0,0 +1 @@ +mock-maker-inline \ No newline at end of file diff --git a/async-query/src/test/resources/query_execution_result_mapping.json b/async-query/src/test/resources/query_execution_result_mapping.json new file mode 100644 index 0000000000..a76ef77383 --- /dev/null +++ b/async-query/src/test/resources/query_execution_result_mapping.json @@ -0,0 +1,44 @@ +{ + "dynamic": "false", + "properties": { + "applicationId": { + "type": "keyword" + }, + "dataSourceName": { + "type": "keyword" + }, + "error": { + "type": "text" + }, + "jobRunId": { + "type": "keyword" + }, + "queryId": { + "type": "keyword" + }, + "queryRunTime": { + "type": "long" + }, + "queryText": { + "type": "text" + }, + "result": { + "type": "object", + "enabled": false + }, + "schema": { + "type": "object", + "enabled": false + }, + "sessionId": { + "type": "keyword" + }, + "status": { + "type": "keyword" + }, + "updateTime": { + "type": "date", + "format": "strict_date_time||epoch_millis" + } + } +} diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000000..d90eb850db --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,19 @@ +# OpenSearch SQL/PPL Microbenchmark Suite + +This directory contains the microbenchmark suite of OpenSearch SQL/PPL. It relies on [JMH](http://openjdk.java.net/projects/code-tools/jmh/). + +## Purpose + +Microbenchmarks are intended to spot performance regressions in performance-critical components. 
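
For orientation, a minimal benchmark in this suite has the shape sketched below. The class name, package, and measured operation are illustrative placeholders only; the actual conventions are spelled out under "Adding Microbenchmarks" further down, and a real example lives at `benchmarks/src/jmh/java/.../ComparisonOperatorBenchmark.java`.

```java
package org.opensearch.sql.benchmark; // hypothetical package, for illustration only

import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;

@Warmup(iterations = 1)
@Measurement(iterations = 3)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Thread)
@Fork(value = 1)
public class ExampleBenchmark {

  // Benchmark state lives in instance fields of a @State class.
  private long operand = 42L;

  @Benchmark
  public long measureMultiply() {
    // Return the result so JMH treats it as live and cannot dead-code-eliminate it.
    return operand * 31L;
  }
}
```

Dropping a class like this under `benchmarks/src/jmh/java` is enough for the `jmh` Gradle task to compile and run it.
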
+ +The microbenchmark suite is also handy for ad-hoc microbenchmarks, but please remove them before merging your PR. + +## Getting Started + +Just run `./gradlew :benchmarks:jmh` from the project root directory, or run a specific benchmark from your IDE. This will build all microbenchmarks, execute them, and print the results. + +## Adding Microbenchmarks + +Before adding a new microbenchmark, familiarize yourself with the JMH API. You can check our existing microbenchmarks and also the [JMH samples](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/). + +In contrast to tests, the actual name of the benchmark class is not relevant to JMH. However, stick to the naming convention and end the class name of a benchmark with `Benchmark`. To have JMH execute a benchmark, annotate the respective methods with `@Benchmark`. \ No newline at end of file diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle new file mode 100644 index 0000000000..f01819be14 --- /dev/null +++ b/benchmarks/build.gradle @@ -0,0 +1,23 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +plugins { + id 'java-library' + id "me.champeau.jmh" version "0.6.8" +} + +repositories { + mavenCentral() +} + +dependencies { + implementation project(':core') + + // Dependencies required by JMH microbenchmarks + api group: 'org.openjdk.jmh', name: 'jmh-core', version: '1.36' + annotationProcessor group: 'org.openjdk.jmh', name: 'jmh-generator-annprocess', version: '1.36' +} + +compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) \ No newline at end of file diff --git a/benchmarks/src/jmh/java/org/opensearch/sql/expression/operator/predicate/ComparisonOperatorBenchmark.java b/benchmarks/src/jmh/java/org/opensearch/sql/expression/operator/predicate/ComparisonOperatorBenchmark.java new file mode 100644 index 0000000000..01b2068694 --- /dev/null +++ b/benchmarks/src/jmh/java/org/opensearch/sql/expression/operator/predicate/ComparisonOperatorBenchmark.java @@ -0,0 +1,71 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.expression.operator.predicate; + +import static org.opensearch.sql.data.model.ExprValueUtils.fromObjectValue; +import static org.opensearch.sql.data.model.ExprValueUtils.integerValue; +import static org.opensearch.sql.data.model.ExprValueUtils.stringValue; +import static org.opensearch.sql.data.type.ExprCoreType.DATE; +import static org.opensearch.sql.expression.DSL.literal; + +import com.google.common.collect.ImmutableMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.opensearch.sql.data.model.ExprValue; +import org.opensearch.sql.expression.DSL; +import org.opensearch.sql.expression.Expression; +import org.opensearch.sql.expression.FunctionExpression; + +@Warmup(iterations = 1) +@Measurement(iterations = 3) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@State(Scope.Thread) +@Fork(value = 1) +public class ComparisonOperatorBenchmark { + + @Param(value = {"int", "string", "date"}) + private String testDataType; + + private final Map<String, ExprValue> params = + ImmutableMap.<String, ExprValue>builder() + .put("int", integerValue(1)) + .put("string", stringValue("hello")) + .put("date", fromObjectValue("2022-01-12", DATE)) + .build(); + + @Benchmark + public void testEqualOperator() { + run(DSL::equal); + } + + @Benchmark + public void testLessOperator() { + run(DSL::less); + } + + @Benchmark + public void testGreaterOperator() { + run(DSL::greater); + } + + private void run(Function<Expression[], FunctionExpression> dsl) { + ExprValue param = params.get(testDataType); + FunctionExpression func = dsl.apply(new Expression[] {literal(param), literal(param)}); + func.valueOf(); + } +} diff --git a/bi-connectors/PowerBIConnector/AmazonOpenSearchService.md b/bi-connectors/PowerBIConnector/AmazonOpenSearchService.md deleted file mode 100644 index d6498a683c..0000000000 --- a/bi-connectors/PowerBIConnector/AmazonOpenSearchService.md +++ /dev/null @@ -1,79 +0,0 @@ -# Connecting Amazon OpenSearch Service to Microsoft Power BI Desktop - ->[!Note] - ->The following connector article is provided by Amazon, the owner of this connector and a member of the Microsoft Power Query Connector Certification Program. If you have questions regarding the content of this article or have changes you would like to see made to this article, visit the OpenSearch website and use the support channels there. - -## Summary -| Item | Description | -| ---- | ----------- | -| Release State | General Availability | -| Products | Power BI Desktop | -| Authentication Types Supported | Basic | - -## Prerequisites -* Microsoft Power BI Desktop -* [OpenSearch](https://opensearch.org/docs/latest/opensearch/install/index/) -* [OpenSearch SQL ODBC driver](https://opensearch.org/docs/latest/search-plugins/sql/odbc/) - -## Capabilities supported -* Import -* DirectQuery - -## Connect to Amazon OpenSearch Service -1. Open Power BI Desktop. - -2. Click on **Home** > **Get Data** > **More** > **Other**. Select **Amazon OpenSearch Service**. Click on **Connect**. - - - -3. You will get a warning for using a third-party service. Click on **Continue**. - - - -4. Enter host and port values and select your preferred SSL and Certificate validation options. Click on **OK**. - - - -5. Select authentication option. Enter credentials if required and click on **Connect**. - - - -6. Select required table. Data preview will be loaded. - - - -7. Click on **Load**. - -8. Select required columns for creating a graph. - - - - -## Troubleshooting - -* If you get the following error, please install the [OpenSearch SQL ODBC Driver](https://docs-beta.opensearch.org/search-plugins/sql/odbc/). - - - -* If you get the following error, - - - -1. Check if host and port values are correct. -2. Check if auth credentials are correct. -3. Check if server is running. - -## Limitations and Known issues - -There are known limitations and issues that are tracked by OpenSearch including the items listed below. - -| Issue | Description | -| ---- | ----------- | -| [Visualizations without numerical columns](https://github.com/opensearch-project/sql/issues/347) | Visualizations is only supported if there is a numerical column included due to a known limitation in OpenSearch regarding subqueries. An exception to this would be visualizations with columns only of type string is supported as well.
| -| [Visualizations with First or Last options selected are not yet supported](https://github.com/opensearch-project/sql/issues/279) | First and Last options generate an sql query that uses MAX and MIN for strings which is not yet supported in OpenSearch | -| [Visualizations with aggregate functions selected are not yet supported](https://github.com/opensearch-project/sql/issues/363) | Specifically Minimum, Maximum, Standard deviation, Variance, or Median options are not yet supported. | -| [Basic filtering limitations](https://github.com/opensearch-project/sql/issues/347) | Selecting more than one value is not yet supported when using basic filtering. Due to the issue in the first row of this table, selecting one value for basic filtering when there is no numerical column is not yet supported. | -| [Top N filtering limitations](https://opensearch.org/docs/latest/search-plugins/sql/limitation/) | OpenSearch has limitations on subquery which does not yet support the Top N filtering functionality. | -| [Advanced filtering limitations](https://github.com/opensearch-project/sql/issues/308) | `does not contain` and `does not start with` filters for string columns are not yet supported. All advanced filtering for numerical columns are not yet supported except for `is` and `is blank`. All advanced filtering for date and time columns are not yet supported except for `is blank` and `is not blank`. | -| [Relative Date filtering limitations](https://github.com/opensearch-project/sql/issues/364) | Due to a known timestamp issue in OpenSearch, all relative date filtering is not yet supported. | diff --git a/bi-connectors/PowerBIConnector/AmazonOpenSearchService.mez b/bi-connectors/PowerBIConnector/AmazonOpenSearchService.mez deleted file mode 100644 index 0f18dfe060..0000000000 Binary files a/bi-connectors/PowerBIConnector/AmazonOpenSearchService.mez and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/CHANGELOG.md b/bi-connectors/PowerBIConnector/CHANGELOG.md deleted file mode 100644 index 080208c4e7..0000000000 --- a/bi-connectors/PowerBIConnector/CHANGELOG.md +++ /dev/null @@ -1,11 +0,0 @@ -# Changelog -All notable changes to the connector will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.0.1] 2022-02-28 -### Added -- Allow users to enable or disable validation for SSL certificate validation. -### Changed -- Changed beta to false diff --git a/bi-connectors/PowerBIConnector/OpenSearchProject.md b/bi-connectors/PowerBIConnector/OpenSearchProject.md deleted file mode 100644 index 966c55a7e4..0000000000 --- a/bi-connectors/PowerBIConnector/OpenSearchProject.md +++ /dev/null @@ -1,79 +0,0 @@ -# Connecting OpenSearch Project to Microsoft Power BI Desktop - ->[!Note] - ->The following connector article is provided by OpenSearch, the owner of this connector and a member of the Microsoft Power Query Connector Certification Program. If you have questions regarding the content of this article or have changes you would like to see made to this article, visit the OpenSearch website and use the support channels there. 
- -## Summary -| Item | Description | -| ---- | ----------- | -| Release State | General Availability | -| Products | Power BI Desktop | -| Authentication Types Supported | Basic | - -## Prerequisites -* Microsoft Power BI Desktop -* [OpenSearch](https://opensearch.org/docs/latest/opensearch/install/index/) -* [OpenSearch SQL ODBC driver](https://opensearch.org/docs/latest/search-plugins/sql/odbc/) - -## Capabilities supported -* Import -* DirectQuery - -## Connect to OpenSearch Project -1. Open Power BI Desktop. - -2. Click on **Home** > **Get Data** > **More** > **Other**. Select **OpenSearch Project**. Click on **Connect**. - - - -3. You will get a warning for using a third-party service. Click on **Continue**. - - - -4. Enter host and port values and select your preferred SSL and Certificate validation options. Click on **OK**. - - - -5. Select authentication option. Enter credentials if required and click on **Connect**. - - - -6. Select required table. Data preview will be loaded. - - - -7. Click on **Load**. - -8. Select required columns for creating a graph. - - - - -## Troubleshooting - -* If you get the following error, please install the [OpenSearch SQL ODBC Driver](https://docs-beta.opensearch.org/search-plugins/sql/odbc/). - - - -* If you get the following error, - - - -1. Check if host and port values are correct. -2. Check if auth credentials are correct. -3. Check if server is running. - -## Limitations and Known issues - -There are known limitations and issues that are tracked by OpenSearch including the items listed below. - -| Issue | Description | -| ---- | ----------- | -| [Visualizations without numerical columns](https://github.com/opensearch-project/sql/issues/347) | Visualizations is only supported if there is a numerical column included due to a known limitation in OpenSearch regarding subqueries. An exception to this would be visualizations with columns only of type string is supported as well. | -| [Visualizations with First or Last options selected are not yet supported](https://github.com/opensearch-project/sql/issues/279) | First and Last options generate an sql query that uses MAX and MIN for strings which is not yet supported in OpenSearch | -| [Visualizations with aggregate functions selected are not yet supported](https://github.com/opensearch-project/sql/issues/363) | Specifically Minimum, Maximum, Standard deviation, Variance, or Median options are not yet supported. | -| [Basic filtering limitations](https://github.com/opensearch-project/sql/issues/347) | Selecting more than one value is not yet supported when using basic filtering. Due to the issue in the first row of this table, selecting one value for basic filtering when there is no numerical column is not yet supported. | -| [Top N filtering limitations](https://opensearch.org/docs/latest/search-plugins/sql/limitation/) | OpenSearch has limitations on subquery which does not yet support the Top N filtering functionality. | -| [Advanced filtering limitations](https://github.com/opensearch-project/sql/issues/308) | `does not contain` and `does not start with` filters for string columns are not yet supported. All advanced filtering for numerical columns are not yet supported except for `is` and `is blank`. All advanced filtering for date and time columns are not yet supported except for `is blank` and `is not blank`. 
| -| [Relative Date filtering limitations](https://github.com/opensearch-project/sql/issues/364) | Due to a known timestamp issue in OpenSearch, all relative date filtering is not yet supported. | diff --git a/bi-connectors/PowerBIConnector/OpenSearchProject.mez b/bi-connectors/PowerBIConnector/OpenSearchProject.mez deleted file mode 100644 index efe442b6ba..0000000000 Binary files a/bi-connectors/PowerBIConnector/OpenSearchProject.mez and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/PBIDSExamples/sqlodbc_import.pbids b/bi-connectors/PowerBIConnector/PBIDSExamples/sqlodbc_import.pbids deleted file mode 100644 index 1b10587631..0000000000 --- a/bi-connectors/PowerBIConnector/PBIDSExamples/sqlodbc_import.pbids +++ /dev/null @@ -1,14 +0,0 @@ -{ - "version": "0.1", - "connections": [ - { - "details": { - "protocol": "sqlodbc", - "address": { - "server": "localhost:9200" - } - }, - "mode": "Import" - } - ] -} \ No newline at end of file diff --git a/bi-connectors/PowerBIConnector/README.md b/bi-connectors/PowerBIConnector/README.md deleted file mode 100644 index 8632dc961c..0000000000 --- a/bi-connectors/PowerBIConnector/README.md +++ /dev/null @@ -1,23 +0,0 @@ -## Connector Download - -The Power BI connector is available to download from the automated CI workflow: [link](https://github.com/opensearch-project/sql/actions/workflows/bi-connectors.yml). -The release snapshots are also available here: [OpenSearch Project](OpenSearchProject.mez) and [Amazon OpenSearch Service](AmazonOpenSearchService.mez). - -## Connector Install - -1. Put the connector `mez` file into `C:\Users\%USERNAME%\Documents\Power BI Desktop\Custom Connectors`. -2. Install the OpenSearch `ODBC` [driver](../../sql-odbc/README.md). -3. Run `Power BI Desktop`. -4. Under `File | Options and settings | Options | Security | Data Extensions`, check the option `(Not Recommended) Allow any extension to load without validation or warning in Power BI Desktop`: -5. Restart `Power BI Desktop`.
- -## See also - -* [Changelog](CHANGELOG.md) -* Installation instructions for - * [OpenSearch Project connector](OpenSearchProject.md) - * [Amazon OpenSearch Service connector](AmazonOpenSearchService.md) -* Connector user manuals for - * [Power BI Service](power_bi_service_support.md) - * [Power BI Desktop](power_bi_support.md) \ No newline at end of file diff --git a/bi-connectors/PowerBIConnector/img/pbi_auth.png b/bi-connectors/PowerBIConnector/img/pbi_auth.png deleted file mode 100644 index 5af0eda576..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_auth.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_auth_amz.png b/bi-connectors/PowerBIConnector/img/pbi_auth_amz.png deleted file mode 100644 index 3c63ed27a2..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_auth_amz.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_connection_string_options.png b/bi-connectors/PowerBIConnector/img/pbi_connection_string_options.png deleted file mode 100644 index b7bf7f3ce1..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_connection_string_options.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_connection_string_options_amz.png b/bi-connectors/PowerBIConnector/img/pbi_connection_string_options_amz.png deleted file mode 100644 index aff8ddc984..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_connection_string_options_amz.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_data_preview.png b/bi-connectors/PowerBIConnector/img/pbi_data_preview.png deleted file mode 100644 index b26bcee6be..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_data_preview.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_disable_parallel_loading_tables.png b/bi-connectors/PowerBIConnector/img/pbi_disable_parallel_loading_tables.png deleted file mode 100644 index 4e12022e9e..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_disable_parallel_loading_tables.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_error_conn.png b/bi-connectors/PowerBIConnector/img/pbi_error_conn.png deleted file mode 100644 index 261999ab80..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_error_conn.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_error_driver_not_installed.png b/bi-connectors/PowerBIConnector/img/pbi_error_driver_not_installed.png deleted file mode 100644 index 76f5da35fb..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_error_driver_not_installed.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_gateway_connector_path.png b/bi-connectors/PowerBIConnector/img/pbi_gateway_connector_path.png deleted file mode 100644 index 057882bc93..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_gateway_connector_path.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_gateway_status.png b/bi-connectors/PowerBIConnector/img/pbi_gateway_status.png deleted file mode 100644 index c7f4272ad6..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_gateway_status.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_publish_report.png b/bi-connectors/PowerBIConnector/img/pbi_publish_report.png deleted file mode 100644 index a04e66aeb1..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_publish_report.png and /dev/null differ
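As an aside to the troubleshooting checklists in the connector manuals removed in this change (check the host and port values, the auth credentials, and whether the server is running), those checks can be scripted before any ODBC or Power BI configuration is attempted. The following is a minimal sketch, not part of the connector sources, assuming Python 3 with the `requests` package and a cluster at `https://localhost:9200` with hypothetical basic-auth credentials; adjust all three to your environment:

```python
# Sketch only: scripted version of the manuals' troubleshooting checklist.
# Assumes `pip install requests`; HOST and AUTH below are placeholders.
import requests

HOST = "https://localhost:9200"  # adjust host and port to your cluster
AUTH = ("admin", "admin")        # adjust, or set to None for auth NONE
VERIFY_TLS = False               # mirrors the connector's certificate-validation toggle

# 1. Is the server running and reachable at this host and port?
info = requests.get(HOST, auth=AUTH, verify=VERIFY_TLS, timeout=5)
info.raise_for_status()  # a 401 here means the credentials are wrong
print("Cluster:", info.json().get("cluster_name"))

# 2. Does the SQL plugin, which the ODBC driver talks to, answer queries?
sql = requests.post(
    f"{HOST}/_plugins/_sql",
    json={"query": "SHOW TABLES LIKE %"},
    auth=AUTH,
    verify=VERIFY_TLS,
    timeout=5,
)
sql.raise_for_status()
print("SQL plugin answered with", len(sql.json().get("datarows", [])), "tables")
```

If both calls succeed but Power BI still cannot connect, the problem is more likely the driver installation or the connector's SSL settings than the cluster itself.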
diff --git a/bi-connectors/PowerBIConnector/img/pbi_publish_status.png b/bi-connectors/PowerBIConnector/img/pbi_publish_status.png deleted file mode 100644 index 8978c94860..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_publish_status.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_select_connector.png b/bi-connectors/PowerBIConnector/img/pbi_select_connector.png deleted file mode 100644 index 33188b97a3..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_select_connector.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_select_connector_amz.png b/bi-connectors/PowerBIConnector/img/pbi_select_connector_amz.png deleted file mode 100644 index 25677e12c7..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_select_connector_amz.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_select_workspace.png b/bi-connectors/PowerBIConnector/img/pbi_select_workspace.png deleted file mode 100644 index 8d89fe3073..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_select_workspace.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_cluster_setting.png b/bi-connectors/PowerBIConnector/img/pbi_service_cluster_setting.png deleted file mode 100644 index 8c6f46f5b8..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_cluster_setting.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_data_source.png b/bi-connectors/PowerBIConnector/img/pbi_service_data_source.png deleted file mode 100644 index 95700aab8b..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_data_source.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_data_source_success.png b/bi-connectors/PowerBIConnector/img/pbi_service_data_source_success.png deleted file mode 100644 index aa09fdb45f..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_data_source_success.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_gateway.png b/bi-connectors/PowerBIConnector/img/pbi_service_dataset_gateway.png deleted file mode 100644 index e73eae7084..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_gateway.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh.png b/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh.png deleted file mode 100644 index 4170494d8a..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh_history.png b/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh_history.png deleted file mode 100644 index 3810003117..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_dataset_refresh_history.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_edit_reoprt.png b/bi-connectors/PowerBIConnector/img/pbi_service_edit_reoprt.png deleted file mode 100644 index 0e8aff6a63..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_edit_reoprt.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_modified_report.png b/bi-connectors/PowerBIConnector/img/pbi_service_modified_report.png deleted file mode 100644 index 098485fba7..0000000000 Binary files 
a/bi-connectors/PowerBIConnector/img/pbi_service_modified_report.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_service_setting.png b/bi-connectors/PowerBIConnector/img/pbi_service_setting.png deleted file mode 100644 index 574aba671d..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_service_setting.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_settings.png b/bi-connectors/PowerBIConnector/img/pbi_settings.png deleted file mode 100644 index ebd9ce0284..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_settings.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_simple_graph.png b/bi-connectors/PowerBIConnector/img/pbi_simple_graph.png deleted file mode 100644 index 9174de438b..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_simple_graph.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_third_party_warning.png b/bi-connectors/PowerBIConnector/img/pbi_third_party_warning.png deleted file mode 100644 index f05502ad57..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_third_party_warning.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/img/pbi_third_party_warning_amz.png b/bi-connectors/PowerBIConnector/img/pbi_third_party_warning_amz.png deleted file mode 100644 index 48a11a86f0..0000000000 Binary files a/bi-connectors/PowerBIConnector/img/pbi_third_party_warning_amz.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/power_bi_service_support.md b/bi-connectors/PowerBIConnector/power_bi_service_support.md deleted file mode 100644 index 1ccb20ccea..0000000000 --- a/bi-connectors/PowerBIConnector/power_bi_service_support.md +++ /dev/null @@ -1,82 +0,0 @@ -# Connecting OpenSearch to Microsoft Power BI Service - -## Setup -* Download and install the [On-premises data gateway](https://docs.microsoft.com/en-us/data-integration/gateway/service-gateway-install). -* Change the path for the custom data connector folder in the On-premises data gateway so that the gateway can find the custom connector. -> NOTE: Ensure the gateway service account (**PBIEgwService**) has permissions to access the custom connector folder. Alternatively, you can copy the connector file to `C:\Windows\ServiceProfiles\PBIEgwService\Documents\Power BI Desktop\Custom Connectors\`. - - - -* Verify that the data gateway is ready to be used. - - - -* Log in to Power BI Service. -* Click on **Settings** > **Manage Gateway**. - - - -* Select **Allow user's custom data connectors to refresh through this gateway cluster (preview)**. Click on **Apply**. - - - -* Click on **Add data sources to use the gateway**. -* Select **OpenSearch** as the Data Source Type. -* Enter the Data Source Name and Server values. -* Select the required **Authentication Method**. Select **Anonymous** for auth **NONE**. -For **AWS_SIGV4**, select **Key** and set AWS access credentials for the user **PBIEgwService** at the path `C:\Windows\ServiceProfiles\PBIEgwService\.aws\`. - -* Select the encryption mode for the connection. -* Click on **Add**. - - - -* You will get a **Connection Successful** message. - - - - -## Publish Report - -* Follow the [instructions](./power_bi_support.md) to create a graph using the OpenSearch data connector. -* Click on **Publish** to publish the report on Power BI Service. - - - -* Select a destination and click on **Select**. - - - -* You will get a success message when the report is published.
- - -* Click on **Open '%report name%' in Power BI** to open the published report in Power BI Service. - -## Modify report using Power BI Service - -* Click on **Edit report** to modify the report. - - - -* Use **Filters**, **Visualizations**, and **Fields** to modify the report. - - - -## Dataset Scheduled Refresh - -* Click on **Settings** > **Datasets**. -* Select the required gateway and click on **Apply**. - - - -* Turn on the **Keep your data up to date** option. -* Select the refresh frequency and time zone. -* Add an email address for failure notifications if required. -* Click on **Apply**. - - - -* You can also check the refresh history by clicking on **Refresh history**. - - \ No newline at end of file diff --git a/bi-connectors/PowerBIConnector/power_bi_support.md b/bi-connectors/PowerBIConnector/power_bi_support.md deleted file mode 100644 index 2c5451a684..0000000000 --- a/bi-connectors/PowerBIConnector/power_bi_support.md +++ /dev/null @@ -1,100 +0,0 @@ -# Connecting OpenSearch to Microsoft Power BI Desktop - -## Prerequisites -* Microsoft Power BI Desktop -* [OpenSearch](https://docs-beta.opensearch.org/opensearch/install/index/) -* [OpenSearch SQL ODBC driver](https://docs-beta.opensearch.org/search-plugins/sql/odbc/) -* [OpenSearchProject.mez](OpenSearchProject.mez) or [AmazonOpenSearchService.mez](AmazonOpenSearchService.mez) -* Optional: [sqlodbc_import.pbids](PBIDSExamples/sqlodbc_import.pbids) to help with repeated connections to the same server - -## Setup -* Copy the `mez` file into the `\Documents\Power BI Desktop\Custom Connectors\` folder. This lets Power BI access the custom connector. -* Open Power BI Desktop. -* Change the security settings. Click on **File** > **Options and settings** > **Options** > **Security** > Select **Allow any extension to load without validation or warning** for Data Extensions. This allows the custom connector to load data into Power BI. - - - -* Restart Power BI Desktop. - -## Load Data - -> **NOTE**: Currently only Import mode is supported. DirectQuery support will be added soon. - -* Open Power BI Desktop. - -* Disable parallel loading of tables. Click on **File** > **Options and settings** > **Options** > **CURRENT FILE** > **Data Load** > Deselect **Enable parallel loading of tables** and click **OK**. - - - -* Click on **Home** > **Get Data** > **More** > **Other**. Select **OpenSearch Project**. Click on **Connect**. - - - -* You will get a warning for using a third-party service. Click on **Continue**. - - - -* Enter the host and port values and select your preferred SSL option. Click on **OK**. - - - -* Select an authentication option. Enter credentials if required and click on **Connect**. - - - -* Select the required table. A data preview will be loaded. - - - -* Click on **Load**. - -* Select the required columns for creating a graph. - - - -## Using .PBIDS Files - -More info: https://docs.microsoft.com/en-us/power-bi/connect-data/desktop-data-sources#using-pbids-files-to-get-data - -Example PBIDS file for OpenSearch (available here: [sqlodbc_import.pbids](PBIDSExamples/sqlodbc_import.pbids)): -```json -{ - "version": "0.1", - "connections": [ - { - "details": { - "protocol": "sqlodbc", - "address": { - "server": "localhost:9200" - } - }, - "mode": "Import" - } - ] -} -``` - -The only part you should change is the `server` attribute, to point to the location of your OpenSearch server. -* For AWS connections, this will be the full path of your OpenSearch instance (ex: `https://aws-opensearch-instance.us-west-1.com`).
-* Otherwise, this will be the `host:port` combination for your instance (ex: `localhost:9200`). - -Save this as a `.pbids` file. Double-click on it to open up your connection in Power BI Desktop. -It will take you straight to the **Navigator** window for selecting the tables from the OpenSearch server. -* If this is the first time you are connecting to this instance, you will be prompted for your credentials. - - - -## Troubleshooting - -* If you get the following error, please install the [OpenSearch SQL ODBC Driver](https://docs-beta.opensearch.org/search-plugins/sql/odbc/). - - - -* If you get the following error: - - - -1. Check if the host and port values are correct. -2. Check if the auth credentials are correct. -3. Check if the server is running. - diff --git a/bi-connectors/PowerBIConnector/src/Diagnostics.pqm b/bi-connectors/PowerBIConnector/src/Diagnostics.pqm deleted file mode 100644 index ca10c3cf98..0000000000 --- a/bi-connectors/PowerBIConnector/src/Diagnostics.pqm +++ /dev/null @@ -1,275 +0,0 @@ -let - Diagnostics.LogValue = (prefix, value) => Diagnostics.Trace(TraceLevel.Information, prefix & ": " & (try Diagnostics.ValueToText(value) otherwise ""), value), - Diagnostics.LogValue2 = (prefix, value, result, optional delayed) => Diagnostics.Trace(TraceLevel.Information, prefix & ": " & Diagnostics.ValueToText(value), result, delayed), - Diagnostics.LogFailure = (text, function) => - let - result = try function() - in - if result[HasError] then Diagnostics.LogValue2(text, result[Error], () => error result[Error], true) else result[Value], - - Diagnostics.WrapFunctionResult = (innerFunction as function, outerFunction as function) as function => - Function.From(Value.Type(innerFunction), (list) => outerFunction(() => Function.Invoke(innerFunction, list))), - - Diagnostics.WrapHandlers = (handlers as record) as record => - Record.FromList( - List.Transform( - Record.FieldNames(handlers), - (h) => Diagnostics.WrapFunctionResult(Record.Field(handlers, h), (fn) => Diagnostics.LogFailure(h, fn))), - Record.FieldNames(handlers)), - - Diagnostics.ValueToText = (value) => - let - _canBeIdentifier = (x) => - let - keywords = {"and", "as", "each", "else", "error", "false", "if", "in", "is", "let", "meta", "not", "otherwise", "or", "section", "shared", "then", "true", "try", "type" }, - charAlpha = (c as number) => (c>= 65 and c <= 90) or (c>= 97 and c <= 122) or c=95, - charDigit = (c as number) => c>= 48 and c <= 57 - in - try - charAlpha(Character.ToNumber(Text.At(x,0))) - and - List.MatchesAll( - Text.ToList(x), - (c)=> let num = Character.ToNumber(c) in charAlpha(num) or charDigit(num) - ) - and not - List.MatchesAny( keywords, (li)=> li=x ) - otherwise - false, - - Serialize.Binary = (x) => "#binary(" & Serialize(Binary.ToList(x)) & ") ", - - Serialize.Date = (x) => "#date(" & - Text.From(Date.Year(x)) & ", " & - Text.From(Date.Month(x)) & ", " & - Text.From(Date.Day(x)) & ") ", - - Serialize.Datetime = (x) => "#datetime(" & - Text.From(Date.Year(DateTime.Date(x))) & ", " & - Text.From(Date.Month(DateTime.Date(x))) & ", " & - Text.From(Date.Day(DateTime.Date(x))) & ", " & - Text.From(Time.Hour(DateTime.Time(x))) & ", " & - Text.From(Time.Minute(DateTime.Time(x))) & ", " & - Text.From(Time.Second(DateTime.Time(x))) & ") ", - - Serialize.Datetimezone =(x) => let - dtz = DateTimeZone.ToRecord(x) - in - "#datetimezone(" & - Text.From(dtz[Year]) & ", " & - Text.From(dtz[Month]) & ", " & - Text.From(dtz[Day]) & ", " & - Text.From(dtz[Hour]) & ", " & - Text.From(dtz[Minute]) & ", " & -
Text.From(dtz[Second]) & ", " & - Text.From(dtz[ZoneHours]) & ", " & - Text.From(dtz[ZoneMinutes]) & ") ", - - Serialize.Duration = (x) => let - dur = Duration.ToRecord(x) - in - "#duration(" & - Text.From(dur[Days]) & ", " & - Text.From(dur[Hours]) & ", " & - Text.From(dur[Minutes]) & ", " & - Text.From(dur[Seconds]) & ") ", - - Serialize.Function = (x) => _serialize_function_param_type( - Type.FunctionParameters(Value.Type(x)), - Type.FunctionRequiredParameters(Value.Type(x)) ) & - " as " & - _serialize_function_return_type(Value.Type(x)) & - " => (...) ", - - Serialize.List = (x) => "{" & - List.Accumulate(x, "", (seed,item) => if seed="" then Serialize(item) else seed & ", " & Serialize(item)) & - "} ", - - Serialize.Logical = (x) => Text.From(x), - - Serialize.Null = (x) => "null", - - Serialize.Number = (x) => - let Text.From = (i as number) as text => - if Number.IsNaN(i) then "#nan" else - if i=Number.PositiveInfinity then "#infinity" else - if i=Number.NegativeInfinity then "-#infinity" else - Text.From(i) - in - Text.From(x), - - Serialize.Record = (x) => "[ " & - List.Accumulate( - Record.FieldNames(x), - "", - (seed,item) => - (if seed="" then Serialize.Identifier(item) else seed & ", " & Serialize.Identifier(item)) & " = " & Serialize(Record.Field(x, item)) - ) & - " ] ", - - Serialize.Table = (x) => "#table( type " & - _serialize_table_type(Value.Type(x)) & - ", " & - Serialize(Table.ToRows(x)) & - ") ", - - Serialize.Text = (x) => """" & - _serialize_text_content(x) & - """", - - _serialize_text_content = (x) => let - escapeText = (n as number) as text => "#(#)(" & Text.PadStart(Number.ToText(n, "X", "en-US"), 4, "0") & ")" - in - List.Accumulate( - List.Transform( - Text.ToList(x), - (c) => let n=Character.ToNumber(c) in - if n = 9 then "#(#)(tab)" else - if n = 10 then "#(#)(lf)" else - if n = 13 then "#(#)(cr)" else - if n = 34 then """""" else - if n = 35 then "#(#)(#)" else - if n < 32 then escapeText(n) else - if n < 127 then Character.FromNumber(n) else - escapeText(n) - ), - "", - (s,i)=>s&i - ), - - Serialize.Identifier = (x) => - if _canBeIdentifier(x) then - x - else - "#""" & - _serialize_text_content(x) & - """", - - Serialize.Time = (x) => "#time(" & - Text.From(Time.Hour(x)) & ", " & - Text.From(Time.Minute(x)) & ", " & - Text.From(Time.Second(x)) & ") ", - - Serialize.Type = (x) => "type " & _serialize_typename(x), - - - _serialize_typename = (x, optional funtype as logical) => /* Optional parameter: Is this being used as part of a function signature? 
*/ - let - isFunctionType = (x as type) => try if Type.FunctionReturn(x) is type then true else false otherwise false, - isTableType = (x as type) => try if Type.TableSchema(x) is table then true else false otherwise false, - isRecordType = (x as type) => try if Type.ClosedRecord(x) is type then true else false otherwise false, - isListType = (x as type) => try if Type.ListItem(x) is type then true else false otherwise false - in - - if funtype=null and isTableType(x) then _serialize_table_type(x) else - if funtype=null and isListType(x) then "{ " & @_serialize_typename( Type.ListItem(x) ) & " }" else - if funtype=null and isFunctionType(x) then "function " & _serialize_function_type(x) else - if funtype=null and isRecordType(x) then _serialize_record_type(x) else - - if x = type any then "any" else - let base = Type.NonNullable(x) in - (if Type.IsNullable(x) then "nullable " else "") & - (if base = type anynonnull then "anynonnull" else - if base = type binary then "binary" else - if base = type date then "date" else - if base = type datetime then "datetime" else - if base = type datetimezone then "datetimezone" else - if base = type duration then "duration" else - if base = type logical then "logical" else - if base = type none then "none" else - if base = type null then "null" else - if base = type number then "number" else - if base = type text then "text" else - if base = type time then "time" else - if base = type type then "type" else - - /* Abstract types: */ - if base = type function then "function" else - if base = type table then "table" else - if base = type record then "record" else - if base = type list then "list" else - - "any /*Actually unknown type*/"), - - _serialize_table_type = (x) => - let - schema = Type.TableSchema(x) - in - "table " & - (if Table.IsEmpty(schema) then "" else - "[" & List.Accumulate( - List.Transform( - Table.ToRecords(Table.Sort(schema,"Position")), - each Serialize.Identifier(_[Name]) & " = " & _[Kind]), - "", - (seed,item) => (if seed="" then item else seed & ", " & item ) - ) & "] " ), - - _serialize_record_type = (x) => - let flds = Type.RecordFields(x) - in - if Record.FieldCount(flds)=0 then "record" else - "[" & List.Accumulate( - Record.FieldNames(flds), - "", - (seed,item) => - seed & - (if seed<>"" then ", " else "") & - (Serialize.Identifier(item) & "=" & _serialize_typename(Record.Field(flds,item)[Type]) ) - ) & - (if Type.IsOpenRecord(x) then ",..." 
else "") & - "]", - - _serialize_function_type = (x) => _serialize_function_param_type( - Type.FunctionParameters(x), - Type.FunctionRequiredParameters(x) ) & - " as " & - _serialize_function_return_type(x), - - _serialize_function_param_type = (t,n) => - let - funsig = Table.ToRecords( - Table.TransformColumns( - Table.AddIndexColumn( Record.ToTable( t ), "isOptional", 1 ), - { "isOptional", (x)=> x>n } ) ) - in - "(" & - List.Accumulate( - funsig, - "", - (seed,item)=> - (if seed="" then "" else seed & ", ") & - (if item[isOptional] then "optional " else "") & - Serialize.Identifier(item[Name]) & " as " & _serialize_typename(item[Value], true) ) - & ")", - - _serialize_function_return_type = (x) => _serialize_typename(Type.FunctionReturn(x), true), - - Serialize = (x) as text => - if x is binary then try Serialize.Binary(x) otherwise "null /*serialize failed*/" else - if x is date then try Serialize.Date(x) otherwise "null /*serialize failed*/" else - if x is datetime then try Serialize.Datetime(x) otherwise "null /*serialize failed*/" else - if x is datetimezone then try Serialize.Datetimezone(x) otherwise "null /*serialize failed*/" else - if x is duration then try Serialize.Duration(x) otherwise "null /*serialize failed*/" else - if x is function then try Serialize.Function(x) otherwise "null /*serialize failed*/" else - if x is list then try Serialize.List(x) otherwise "null /*serialize failed*/" else - if x is logical then try Serialize.Logical(x) otherwise "null /*serialize failed*/" else - if x is null then try Serialize.Null(x) otherwise "null /*serialize failed*/" else - if x is number then try Serialize.Number(x) otherwise "null /*serialize failed*/" else - if x is record then try Serialize.Record(x) otherwise "null /*serialize failed*/" else - if x is table then try Serialize.Table(x) otherwise "null /*serialize failed*/" else - if x is text then try Serialize.Text(x) otherwise "null /*serialize failed*/" else - if x is time then try Serialize.Time(x) otherwise "null /*serialize failed*/" else - if x is type then try Serialize.Type(x) otherwise "null /*serialize failed*/" else - "[#_unable_to_serialize_#]" - in - try Serialize(value) otherwise "" -in - [ - LogValue = Diagnostics.LogValue, - LogValue2 = Diagnostics.LogValue2, - LogFailure = Diagnostics.LogFailure, - WrapFunctionResult = Diagnostics.WrapFunctionResult, - WrapHandlers = Diagnostics.WrapHandlers, - ValueToText = Diagnostics.ValueToText - ] \ No newline at end of file diff --git a/bi-connectors/PowerBIConnector/src/OdbcConstants.pqm b/bi-connectors/PowerBIConnector/src/OdbcConstants.pqm deleted file mode 100644 index 144e525413..0000000000 --- a/bi-connectors/PowerBIConnector/src/OdbcConstants.pqm +++ /dev/null @@ -1,1253 +0,0 @@ -// values from https://github.com/Microsoft/ODBC-Specification/blob/master/Windows/inc/sqlext.h -[ - Flags = (flags as list) => - if (List.IsEmpty(flags)) then 0 else - let - Loop = List.Generate(()=> [i = 0, Combined = flags{0}], - each [i] < List.Count(flags), - each [Combined = Number.BitwiseOr([Combined], flags{i}), i = [i]+1], - each [Combined]), - Result = List.Last(Loop) - in - Result, - - SQL_HANDLE = - [ - ENV = 1, - DBC = 2, - STMT = 3, - DESC = 4 - ], - - RetCode = - [ - SUCCESS = 0, - SUCCESS_WITH_INFO = 1, - ERROR = -1, - INVALID_HANDLE = -2, - NO_DATA = 100 - ], - - SQL_CONVERT = - [ - BIGINT = 53, - BINARY = 54, - BIT = 55, - CHAR = 56, - DATE = 57, - DECIMAL = 58, - DOUBLE = 59, - FLOAT = 60, - INTEGER = 61, - LONGVARCHAR = 62, - NUMERIC = 63, - REAL = 64, - SMALLINT = 65, 
- TIME = 66, - TIMESTAMP = 67, - TINYINT = 68, - VARBINARY = 69, - VARCHAR = 70, - LONGVARBINARY = 71 - ], - - SQL_ROW = - [ - PROCEED = 0, - IGNORE = 1, - SUCCESS = 0, - DELETED = 1, - UPDATED = 2, - NOROW = 3, - ADDED = 4, - ERROR = 5, - SUCCESS_WITH_INFO = 6 - ], - -SQL_CVT = -[ - //None = 0, - - CHAR = 0x00000001, - NUMERIC = 0x00000002, - DECIMAL = 0x00000004, - INTEGER = 0x00000008, - SMALLINT = 0x00000010, - FLOAT = 0x00000020, - REAL = 0x00000040, - DOUBLE = 0x00000080, - VARCHAR = 0x00000100, - LONGVARCHAR = 0x00000200, - BINARY = 0x00000400, - VARBINARY = 0x00000800, - BIT = 0x00001000, - TINYINT = 0x00002000, - BIGINT = 0x00004000, - DATE = 0x00008000, - TIME = 0x00010000, - TIMESTAMP = 0x00020000, - LONGVARBINARY = 0x00040000, - INTERVAL_YEAR_MONTH = 0x00080000, - INTERVAL_DAY_TIME = 0x00100000, - WCHAR = 0x00200000, - WLONGVARCHAR = 0x00400000, - WVARCHAR = 0x00800000, - GUID = 0x01000000 -], - - STMT = - [ - CLOSE = 0, - DROP = 1, - UNBIND = 2, - RESET_PARAMS = 3 - ], - - SQL_MAX = - [ - NUMERIC_LEN = 16 - ], - - SQL_IS = - [ - POINTER = -4, - INTEGER = -6, - UINTEGER = -5, - SMALLINT = -8 - ], - - //SQL Server specific defines - // - SQL_HC = // from Odbcss.h - [ - OFF = 0, // FOR BROWSE columns are hidden - ON = 1 // FOR BROWSE columns are exposed - ], - - SQL_NB = // from Odbcss.h - [ - OFF = 0, // NO_BROWSETABLE is off - ON = 1 // NO_BROWSETABLE is on - ], - - // SQLColAttributes driver specific defines. - // SQLSet/GetDescField driver specific defines. - // Microsoft has 1200 thru 1249 reserved for Microsoft SQL Server driver usage. - // - SQL_CA_SS = // from Odbcss.h - [ - BASE = 1200, // SQL_CA_SS_BASE - - COLUMN_HIDDEN = 1200 + 11, // Column is hidden (FOR BROWSE) - COLUMN_KEY = 1200 + 12, // Column is key column (FOR BROWSE) - VARIANT_TYPE = 1200 + 15, - VARIANT_SQL_TYPE = 1200 + 16, - VARIANT_SERVER_TYPE = 1200 + 17 - - ], - - SQL_SOPT_SS = // from Odbcss.h - [ - BASE = 1225, // SQL_SOPT_SS_BASE - HIDDEN_COLUMNS = 1225 + 2, // Expose FOR BROWSE hidden columns - NOBROWSETABLE = 1225 + 3 // Set NOBROWSETABLE option - ], - - SQL_COMMIT = 0, //Commit - SQL_ROLLBACK = 1, //Abort - - //static public readonly IntPtr SQL_AUTOCOMMIT_OFF = IntPtr.Zero; - //static public readonly IntPtr SQL_AUTOCOMMIT_ON = new IntPtr(1); - - SQL_TRANSACTION = - [ - READ_UNCOMMITTED = 0x00000001, - READ_COMMITTED = 0x00000002, - REPEATABLE_READ = 0x00000004, - SERIALIZABLE = 0x00000008, - SNAPSHOT = 0x00000020 // VSDD 414121: SQL_TXN_SS_SNAPSHOT == 0x20 (sqlncli.h) - ], - - SQL_PARAM = - [ - TYPE_UNKNOWN = 0, // SQL_PARAM_TYPE_UNKNOWN - INPUT = 1, // SQL_PARAM_INPUT - INPUT_OUTPUT = 2, // SQL_PARAM_INPUT_OUTPUT - RESULT_COL = 3, // SQL_RESULT_COL - OUTPUT = 4, // SQL_PARAM_OUTPUT - RETURN_VALUE = 5 // SQL_RETURN_VALUE - ], - - SQL_DESC = - [ - // from sql.h (ODBCVER >= 3.0) - // - COUNT = 1001, - TYPE = 1002, - LENGTH = 1003, - OCTET_LENGTH_PTR = 1004, - PRECISION = 1005, - SCALE = 1006, - DATETIME_INTERVAL_CODE = 1007, - NULLABLE = 1008, - INDICATOR_PTR = 1009, - DATA_PTR = 1010, - NAME = 1011, - UNNAMED = 1012, - OCTET_LENGTH = 1013, - ALLOC_TYPE = 1099, - - // from sqlext.h (ODBCVER >= 3.0) - // - CONCISE_TYPE = SQL_COLUMN[TYPE], - DISPLAY_SIZE = SQL_COLUMN[DISPLAY_SIZE], - UNSIGNED = SQL_COLUMN[UNSIGNED], - UPDATABLE = SQL_COLUMN[UPDATABLE], - AUTO_UNIQUE_VALUE = SQL_COLUMN[AUTO_INCREMENT], - - TYPE_NAME = SQL_COLUMN[TYPE_NAME], - TABLE_NAME = SQL_COLUMN[TABLE_NAME], - SCHEMA_NAME = SQL_COLUMN[OWNER_NAME], - CATALOG_NAME = SQL_COLUMN[QUALIFIER_NAME], - - BASE_COLUMN_NAME = 22, - 
BASE_TABLE_NAME = 23, - - NUM_PREC_RADIX = 32 - ], - - // ODBC version 2.0 style attributes - // All IdentifierValues are ODBC 1.0 unless marked differently - // - SQL_COLUMN = - [ - COUNT = 0, - NAME = 1, - TYPE = 2, - LENGTH = 3, - PRECISION = 4, - SCALE = 5, - DISPLAY_SIZE = 6, - NULLABLE = 7, - UNSIGNED = 8, - MONEY = 9, - UPDATABLE = 10, - AUTO_INCREMENT = 11, - CASE_SENSITIVE = 12, - SEARCHABLE = 13, - TYPE_NAME = 14, - TABLE_NAME = 15, // (ODBC 2.0) - OWNER_NAME = 16, // (ODBC 2.0) - QUALIFIER_NAME = 17, // (ODBC 2.0) - LABEL = 18 - ], - - // values from sqlext.h - SQL_SQL92_RELATIONAL_JOIN_OPERATORS = - [ - CORRESPONDING_CLAUSE = 0x00000001, // SQL_SRJO_CORRESPONDING_CLAUSE - CROSS_JOIN = 0x00000002, // SQL_SRJO_CROSS_JOIN - EXCEPT_JOIN = 0x00000004, // SQL_SRJO_EXCEPT_JOIN - FULL_OUTER_JOIN = 0x00000008, // SQL_SRJO_FULL_OUTER_JOIN - INNER_JOIN = 0x00000010, // SQL_SRJO_INNER_JOIN - INTERSECT_JOIN = 0x00000020, // SQL_SRJO_INTERSECT_JOIN - LEFT_OUTER_JOIN = 0x00000040, // SQL_SRJO_LEFT_OUTER_JOIN - NATURAL_JOIN = 0x00000080, // SQL_SRJO_NATURAL_JOIN - RIGHT_OUTER_JOIN = 0x00000100, // SQL_SRJO_RIGHT_OUTER_JOIN - UNION_JOIN = 0x00000200 // SQL_SRJO_UNION_JOIN - ], - - // values from sqlext.h - SQL_QU = - [ - SQL_QU_DML_STATEMENTS = 0x00000001, - SQL_QU_PROCEDURE_INVOCATION = 0x00000002, - SQL_QU_TABLE_DEFINITION = 0x00000004, - SQL_QU_INDEX_DEFINITION = 0x00000008, - SQL_QU_PRIVILEGE_DEFINITION = 0x00000010 - ], - - // values from sql.h - SQL_OJ_CAPABILITIES = - [ - LEFT = 0x00000001, // SQL_OJ_LEFT - RIGHT = 0x00000002, // SQL_OJ_RIGHT - FULL = 0x00000004, // SQL_OJ_FULL - NESTED = 0x00000008, // SQL_OJ_NESTED - NOT_ORDERED = 0x00000010, // SQL_OJ_NOT_ORDERED - INNER = 0x00000020, // SQL_OJ_INNER - ALL_COMPARISON_OPS = 0x00000040 //SQL_OJ_ALLCOMPARISION+OPS - ], - - SQL_UPDATABLE = - [ - READONLY = 0, // SQL_ATTR_READ_ONLY - WRITE = 1, // SQL_ATTR_WRITE - READWRITE_UNKNOWN = 2 // SQL_ATTR_READWRITE_UNKNOWN - ], - - SQL_IDENTIFIER_CASE = - [ - UPPER = 1, // SQL_IC_UPPER - LOWER = 2, // SQL_IC_LOWER - SENSITIVE = 3, // SQL_IC_SENSITIVE - MIXED = 4 // SQL_IC_MIXED - ], - - // Uniqueness parameter in the SQLStatistics function - SQL_INDEX = - [ - UNIQUE = 0, - ALL = 1 - ], - - // Reserved parameter in the SQLStatistics function - SQL_STATISTICS_RESERVED = - [ - QUICK = 0, // SQL_QUICK - ENSURE = 1 // SQL_ENSURE - ], - - // Identifier type parameter in the SQLSpecialColumns function - SQL_SPECIALCOLS = - [ - BEST_ROWID = 1, // SQL_BEST_ROWID - ROWVER = 2 // SQL_ROWVER - ], - - // Scope parameter in the SQLSpecialColumns function - SQL_SCOPE = - [ - CURROW = 0, // SQL_SCOPE_CURROW - TRANSACTION = 1, // SQL_SCOPE_TRANSACTION - SESSION = 2 // SQL_SCOPE_SESSION - ], - - SQL_NULLABILITY = - [ - NO_NULLS = 0, // SQL_NO_NULLS - NULLABLE = 1, // SQL_NULLABLE - UNKNOWN = 2 // SQL_NULLABLE_UNKNOWN - ], - - SQL_SEARCHABLE = - [ - UNSEARCHABLE = 0, // SQL_UNSEARCHABLE - LIKE_ONLY = 1, // SQL_LIKE_ONLY - ALL_EXCEPT_LIKE = 2, // SQL_ALL_EXCEPT_LIKE - SEARCHABLE = 3 // SQL_SEARCHABLE - ], - - SQL_UNNAMED = - [ - NAMED = 0, // SQL_NAMED - UNNAMED = 1 // SQL_UNNAMED - ], - // todo:move - // internal constants - // not odbc specific - // - HANDLER = - [ - IGNORE = 0x00000000, - THROW = 0x00000001 - ], - - // values for SQLStatistics TYPE column - SQL_STATISTICSTYPE = - [ - TABLE_STAT = 0, // TABLE Statistics - INDEX_CLUSTERED = 1, // CLUSTERED index statistics - INDEX_HASHED = 2, // HASHED index statistics - INDEX_OTHER = 3 // OTHER index statistics - ], - - // values for SQLProcedures PROCEDURE_TYPE 
column - SQL_PROCEDURETYPE = - [ - UNKNOWN = 0, // procedure is of unknow type - PROCEDURE = 1, // procedure is a procedure - FUNCTION = 2 // procedure is a function - ], - - // private constants - // to define data types (see below) - // - SIGNED_OFFSET = -20, // SQL_SIGNED_OFFSET - UNSIGNED_OFFSET = -22, // SQL_UNSIGNED_OFFSET - - // C Data Types - SQL_C = - [ - CHAR = 1, - WCHAR = -8, - SLONG = 4 + SIGNED_OFFSET, - ULONG = 4 + UNSIGNED_OFFSET, - SSHORT = 5 + SIGNED_OFFSET, - USHORT = 5 + UNSIGNED_OFFSET, - FLOAT = 7, - DOUBLE = 8, - BIT = -7, - STINYINT = -6 + SIGNED_OFFSET, - UTINYINT = -6 + UNSIGNED_OFFSET, - SBIGINT = -5 + SIGNED_OFFSET, - UBIGINT = -5 + UNSIGNED_OFFSET, - BINARY = -2, - TIMESTAMP = 11, - - TYPE_DATE = 91, - TYPE_TIME = 92, - TYPE_TIMESTAMP = 93, - - NUMERIC = 2, - GUID = -11, - DEFAULT = 99, - ARD_TYPE = -99 - ], - - // SQL Data Types - SQL_TYPE = - [ - // Base data types (sql.h) - UNKNOWN = 0, - NULL = 0, - CHAR = 1, - NUMERIC = 2, - DECIMAL = 3, - INTEGER = 4, - SMALLINT = 5, - FLOAT = 6, - REAL = 7, - DOUBLE = 8, - DATETIME = 9, // V3 Only - VARCHAR = 12, - - // Unicode types (sqlucode.h) - WCHAR = -8, - WVARCHAR = -9, - WLONGVARCHAR = -10, - - // Extended data types (sqlext.h) - INTERVAL = 10, // V3 Only - TIME = 10, - TIMESTAMP = 11, - LONGVARCHAR = -1, - BINARY = -2, - VARBINARY = -3, - LONGVARBINARY = -4, - BIGINT = -5, - TINYINT = -6, - BIT = -7, - GUID = -11, // V3 Only - - // One-parameter shortcuts for date/time data types. - TYPE_DATE = 91, - TYPE_TIME = 92, - TYPE_TIMESTAMP = 93, - - // SQL Server Types -150 to -159 (sqlncli.h) - SS_VARIANT = -150, - SS_UDT = -151, - SS_XML = -152, - SS_TABLE = -153, - SS_TIME2 = -154, - SS_TIMESTAMPOFFSET = -155 - ], - - //SQL_ALL_TYPES = 0, - //static public readonly IntPtr SQL_HANDLE_NULL = IntPtr.Zero; - - SQL_LENGTH = - [ - SQL_IGNORE = -6, - SQL_DEFAULT_PARAM = -5, - SQL_NO_TOTAL = -4, - SQL_NTS = -3, - SQL_DATA_AT_EXEC = -2, - SQL_NULL_DATA = -1 - ], - - SQL_DEFAULT_PARAM = -5, - - // column ordinals for SQLProcedureColumns result set - // this column ordinals are not defined in any c/c++ header but in the ODBC Programmer's Reference under SQLProcedureColumns - // - COLUMN_NAME = 4, - COLUMN_TYPE = 5, - DATA_TYPE = 6, - COLUMN_SIZE = 8, - DECIMAL_DIGITS = 10, - NUM_PREC_RADIX = 11, - - SQL_ATTR = - [ - ODBC_VERSION = 200, - CONNECTION_POOLING = 201, - AUTOCOMMIT = 102, - TXN_ISOLATION = 108, - CURRENT_CATALOG = 109, - LOGIN_TIMEOUT = 103, - QUERY_TIMEOUT = 0, - CONNECTION_DEAD = 1209, - - SQL_COPT_SS_BASE = 1200, - SQL_COPT_SS_ENLIST_IN_DTC = (1200 + 7), - SQL_COPT_SS_TXN_ISOLATION = (1200 + 27), - - MAX_LENGTH = 3, - ROW_BIND_TYPE = 5, - CURSOR_TYPE = 6, - RETRIEVE_DATA = 11, - ROW_STATUS_PTR = 25, - ROWS_FETCHED_PTR = 26, - ROW_ARRAY_SIZE = 27, - - // ODBC 3.0 - APP_ROW_DESC = 10010, - APP_PARAM_DESC = 10011, - IMP_ROW_DESC = 10012, - IMP_PARAM_DESC = 10013, - METADATA_ID = 10014, - - // ODBC 4.0 - PRIVATE_DRIVER_LOCATION = 204 - ], - - SQL_RD = - [ - OFF = 0, - ON = 1 - ], - - SQL_GD = - [ - //None = 0, - ANY_COLUMN = 1, - ANY_ORDER = 2, - BLOCK = 4, - BOUND = 8, - OUTPUT_PARAMS = 16 - ], - - //SQLGetInfo -/* - SQL_INFO = - [ - SQL_ACTIVE_CONNECTIONS = 0, - SQL_MAX_DRIVER_CONNECTIONS = 0, - SQL_MAX_CONCURRENT_ACTIVITIES = 1, - SQL_ACTIVE_STATEMENTS = 1, - SQL_DATA_SOURCE_NAME = 2, - SQL_DRIVER_HDBC, - SQL_DRIVER_HENV, - SQL_DRIVER_HSTMT, - SQL_DRIVER_NAME, - SQL_DRIVER_VER, - SQL_FETCH_DIRECTION, - SQL_ODBC_API_CONFORMANCE, - SQL_ODBC_VER, - SQL_ROW_UPDATES, - SQL_ODBC_SAG_CLI_CONFORMANCE, - SQL_SERVER_NAME, - 
SQL_SEARCH_PATTERN_ESCAPE, - SQL_ODBC_SQL_CONFORMANCE, - - SQL_DATABASE_NAME, - SQL_DBMS_NAME, - SQL_DBMS_VER, - - SQL_ACCESSIBLE_TABLES, - SQL_ACCESSIBLE_PROCEDURES, - SQL_PROCEDURES, - SQL_CONCAT_NULL_BEHAVIOR, - SQL_CURSOR_COMMIT_BEHAVIOR, - SQL_CURSOR_ROLLBACK_BEHAVIOR, - SQL_DATA_SOURCE_READ_ONLY, - SQL_DEFAULT_TXN_ISOLATION, - SQL_EXPRESSIONS_IN_ORDERBY, - SQL_IDENTIFIER_CASE, - SQL_IDENTIFIER_QUOTE_CHAR, - SQL_MAX_COLUMN_NAME_LEN, - SQL_MAX_CURSOR_NAME_LEN, - SQL_MAX_OWNER_NAME_LEN, - SQL_MAX_SCHEMA_NAME_LEN = 32, - SQL_MAX_PROCEDURE_NAME_LEN, - SQL_MAX_QUALIFIER_NAME_LEN, - SQL_MAX_CATALOG_NAME_LEN = 34, - SQL_MAX_TABLE_NAME_LEN, - SQL_MULT_RESULT_SETS, - SQL_MULTIPLE_ACTIVE_TXN, - SQL_OUTER_JOINS, - SQL_SCHEMA_TERM, - SQL_PROCEDURE_TERM, - SQL_CATALOG_NAME_SEPARATOR, - SQL_CATALOG_TERM, - SQL_SCROLL_CONCURRENCY, - SQL_SCROLL_OPTIONS, - SQL_TABLE_TERM, - SQL_TXN_CAPABLE, - SQL_USER_NAME, - - SQL_CONVERT_FUNCTIONS, - SQL_NUMERIC_FUNCTIONS, - SQL_STRING_FUNCTIONS, - SQL_SYSTEM_FUNCTIONS, - SQL_TIMEDATE_FUNCTIONS, - - SQL_CONVERT_BIGINT, - SQL_CONVERT_BINARY, - SQL_CONVERT_BIT, - SQL_CONVERT_CHAR, - SQL_CONVERT_DATE, - SQL_CONVERT_DECIMAL, - SQL_CONVERT_DOUBLE, - SQL_CONVERT_FLOAT, - SQL_CONVERT_INTEGER, - SQL_CONVERT_LONGVARCHAR, - SQL_CONVERT_NUMERIC, - SQL_CONVERT_REAL, - SQL_CONVERT_SMALLINT, - SQL_CONVERT_TIME, - SQL_CONVERT_TIMESTAMP, - SQL_CONVERT_TINYINT, - SQL_CONVERT_VARBINARY, - SQL_CONVERT_VARCHAR, - SQL_CONVERT_LONGVARBINARY, - - SQL_TXN_ISOLATION_OPTION, - SQL_ODBC_SQL_OPT_IEF, - SQL_INTEGRITY = 73, - SQL_CORRELATION_NAME, - SQL_NON_NULLABLE_COLUMNS, - SQL_DRIVER_HLIB, - SQL_DRIVER_ODBC_VER, - SQL_LOCK_TYPES, - SQL_POS_OPERATIONS, - SQL_POSITIONED_STATEMENTS, - SQL_GETDATA_EXTENSIONS, - SQL_BOOKMARK_PERSISTENCE, - SQL_STATIC_SENSITIVITY, - SQL_FILE_USAGE, - SQL_NULL_COLLATION, - SQL_ALTER_TABLE, - SQL_COLUMN_ALIAS, - SQL_GROUP_BY, - SQL_KEYWORDS, - SQL_ORDER_BY_COLUMNS_IN_SELECT, - SQL_SCHEMA_USAGE, - SQL_CATALOG_USAGE, - SQL_QUOTED_IDENTIFIER_CASE, - SQL_SPECIAL_CHARACTERS, - SQL_SUBQUERIES, - SQL_UNION_STATEMENT, - SQL_MAX_COLUMNS_IN_GROUP_BY, - SQL_MAX_COLUMNS_IN_INDEX, - SQL_MAX_COLUMNS_IN_ORDER_BY, - SQL_MAX_COLUMNS_IN_SELECT, - SQL_MAX_COLUMNS_IN_TABLE, - SQL_MAX_INDEX_SIZE, - SQL_MAX_ROW_SIZE_INCLUDES_LONG, - SQL_MAX_ROW_SIZE, - SQL_MAX_STATEMENT_LEN, - SQL_MAX_TABLES_IN_SELECT, - SQL_MAX_USER_NAME_LEN, - SQL_MAX_CHAR_LITERAL_LEN, - SQL_TIMEDATE_ADD_INTERVALS, - SQL_TIMEDATE_DIFF_INTERVALS, - SQL_NEED_LONG_DATA_LEN, - SQL_MAX_BINARY_LITERAL_LEN, - SQL_LIKE_ESCAPE_CLAUSE, - SQL_CATALOG_LOCATION, - SQL_OJ_CAPABILITIES, - - SQL_ACTIVE_ENVIRONMENTS, - SQL_ALTER_DOMAIN, - SQL_SQL_CONFORMANCE, - SQL_DATETIME_LITERALS, - SQL_BATCH_ROW_COUNT, - SQL_BATCH_SUPPORT, - SQL_CONVERT_WCHAR, - SQL_CONVERT_INTERVAL_DAY_TIME, - SQL_CONVERT_INTERVAL_YEAR_MONTH, - SQL_CONVERT_WLONGVARCHAR, - SQL_CONVERT_WVARCHAR, - SQL_CREATE_ASSERTION, - SQL_CREATE_CHARACTER_SET, - SQL_CREATE_COLLATION, - SQL_CREATE_DOMAIN, - SQL_CREATE_SCHEMA, - SQL_CREATE_TABLE, - SQL_CREATE_TRANSLATION, - SQL_CREATE_VIEW, - SQL_DRIVER_HDESC, - SQL_DROP_ASSERTION, - SQL_DROP_CHARACTER_SET, - SQL_DROP_COLLATION, - SQL_DROP_DOMAIN, - SQL_DROP_SCHEMA, - SQL_DROP_TABLE, - SQL_DROP_TRANSLATION, - SQL_DROP_VIEW, - SQL_DYNAMIC_CURSOR_ATTRIBUTES1, - SQL_DYNAMIC_CURSOR_ATTRIBUTES2, - SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1, - SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2, - SQL_INDEX_KEYWORDS, - SQL_INFO_SCHEMA_VIEWS, - SQL_KEYSET_CURSOR_ATTRIBUTES1, - SQL_KEYSET_CURSOR_ATTRIBUTES2, - SQL_ODBC_INTERFACE_CONFORMANCE, - 
SQL_PARAM_ARRAY_ROW_COUNTS, - SQL_PARAM_ARRAY_SELECTS, - SQL_SQL92_DATETIME_FUNCTIONS, - SQL_SQL92_FOREIGN_KEY_DELETE_RULE, - SQL_SQL92_FOREIGN_KEY_UPDATE_RULE, - SQL_SQL92_GRANT, - SQL_SQL92_NUMERIC_VALUE_FUNCTIONS, - SQL_SQL92_PREDICATES, - SQL_SQL92_RELATIONAL_JOIN_OPERATORS, - SQL_SQL92_REVOKE, - SQL_SQL92_ROW_VALUE_CONSTRUCTOR, - SQL_SQL92_STRING_FUNCTIONS, - SQL_SQL92_VALUE_EXPRESSIONS, - SQL_STANDARD_CLI_CONFORMANCE, - SQL_STATIC_CURSOR_ATTRIBUTES1, - SQL_STATIC_CURSOR_ATTRIBUTES2, - SQL_AGGREGATE_FUNCTIONS, - SQL_DDL_INDEX, - SQL_DM_VER, - SQL_INSERT_STATEMENT, - SQL_CONVERT_GUID, - - SQL_XOPEN_CLI_YEAR = 10000, - SQL_CURSOR_SENSITIVITY, - SQL_DESCRIBE_PARAMETER, - SQL_CATALOG_NAME, - SQL_COLLATION_SEQ, - SQL_MAX_IDENTIFIER_LEN, - SQL_ASYNC_MODE = 10021, - SQL_MAX_ASYNC_CONCURRENT_STATEMENTS, - - SQL_DTC_TRANSITION_COST = 1750, - ], -*/ - SQL_OAC = - [ - SQL_OAC_None = 0x0000, - SQL_OAC_LEVEL1 = 0x0001, - SQL_OAC_LEVEL2 = 0x0002 - ], - - SQL_OSC = - [ - SQL_OSC_MINIMUM = 0x0000, - SQL_OSC_CORE = 0x0001, - SQL_OSC_EXTENDED = 0x0002 - ], - - SQL_SCC = - [ - SQL_SCC_XOPEN_CLI_VERSION1 = 0x00000001, - SQL_SCC_ISO92_CLI = 0x00000002 - ], - - SQL_SVE = - [ - SQL_SVE_CASE = 0x00000001, - SQL_SVE_CAST = 0x00000002, - SQL_SVE_COALESCE = 0x00000004, - SQL_SVE_NULLIF = 0x00000008 - ], - - SQL_SSF = - [ - SQL_SSF_CONVERT = 0x00000001, - SQL_SSF_LOWER = 0x00000002, - SQL_SSF_UPPER = 0x00000004, - SQL_SSF_SUBSTRING = 0x00000008, - SQL_SSF_TRANSLATE = 0x00000010, - SQL_SSF_TRIM_BOTH = 0x00000020, - SQL_SSF_TRIM_LEADING = 0x00000040, - SQL_SSF_TRIM_TRAILING = 0x00000080 - ], - - SQL_SP = - [ - //None = 0, - - SQL_SP_EXISTS = 0x00000001, - SQL_SP_ISNOTNULL = 0x00000002, - SQL_SP_ISNULL = 0x00000004, - SQL_SP_MATCH_FULL = 0x00000008, - SQL_SP_MATCH_PARTIAL = 0x00000010, - SQL_SP_MATCH_UNIQUE_FULL = 0x00000020, - SQL_SP_MATCH_UNIQUE_PARTIAL = 0x00000040, - SQL_SP_OVERLAPS = 0x00000080, - SQL_SP_UNIQUE = 0x00000100, - SQL_SP_LIKE = 0x00000200, - SQL_SP_IN = 0x00000400, - SQL_SP_BETWEEN = 0x00000800, - SQL_SP_COMPARISON = 0x00001000, - SQL_SP_QUANTIFIED_COMPARISON = 0x00002000, - - All = 0x0000FFFF - ], - - SQL_OIC = - [ - SQL_OIC_CORE = 1, - SQL_OIC_LEVEL1 = 2, - SQL_OIC_LEVEL2 = 3 - ], - - SQL_USAGE = - [ - SQL_U_DML_STATEMENTS = 0x00000001, - SQL_U_PROCEDURE_INVOCATION = 0x00000002, - SQL_U_TABLE_DEFINITION = 0x00000004, - SQL_U_INDEX_DEFINITION = 0x00000008, - SQL_U_PRIVILEGE_DEFINITION = 0x00000010 - ], - - SQL_GB = - [ - - SQL_GB_NOT_SUPPORTED = 0, - SQL_GB_GROUP_BY_EQUALS_SELECT = 1, - SQL_GB_GROUP_BY_CONTAINS_SELECT = 2, - SQL_GB_NO_RELATION = 3, - SQL_GB_COLLATE = 4 - ], - - SQL_NC = - [ - SQL_NC_END = 0, - SQL_NC_HIGH = 1, - SQL_NC_LOW = 2, - SQL_NC_START = 3 - ], - - SQL_CN = - [ - SQL_CN_None = 0, - SQL_CN_DIFFERENT = 1, - SQL_CN_ANY = 2 - ], - - SQL_NNC = - [ - SQL_NNC_NULL = 0, - SQL_NNC_NON_NULL = 1 - ], - - SQL_CB = - [ - SQL_CB_NULL = 0, - SQL_CB_NON_NULL = 1 - ], - - SQL_FD_FETCH = - [ - SQL_FD_FETCH_NEXT = 0x00000001, - SQL_FD_FETCH_FIRST = 0x00000002, - SQL_FD_FETCH_LAST = 0x00000004, - SQL_FD_FETCH_PRIOR = 0x00000008, - SQL_FD_FETCH_ABSOLUTE = 0x00000010, - SQL_FD_FETCH_RELATIVE = 0x00000020, - SQL_FD_FETCH_BOOKMARK = 0x00000080 - ], - - SQL_SQ = - [ - SQL_SQ_COMPARISON = 0x00000001, - SQL_SQ_EXISTS = 0x00000002, - SQL_SQ_IN = 0x00000004, - SQL_SQ_QUANTIFIED = 0x00000008, - SQL_SQ_CORRELATED_SUBQUERIES = 0x00000010 - ], - - SQL_U = - [ - SQL_U_UNION = 0x00000001, - SQL_U_UNION_ALL = 0x00000002 - ], - - SQL_BP = - [ - SQL_BP_CLOSE = 0x00000001, - SQL_BP_DELETE = 0x00000002, - 
SQL_BP_DROP = 0x00000004, - SQL_BP_TRANSACTION = 0x00000008, - SQL_BP_UPDATE = 0x00000010, - SQL_BP_OTHER_HSTMT = 0x00000020, - SQL_BP_SCROLL = 0x00000040 - ], - - SQL_QL = - [ - SQL_QL_START = 0x0001, - SQL_QL_END = 0x0002 - ], - - SQL_OJ = - [ - SQL_OJ_LEFT = 0x00000001, - SQL_OJ_RIGHT = 0x00000002, - SQL_OJ_FULL = 0x00000004, - SQL_OJ_NESTED = 0x00000008, - SQL_OJ_NOT_ORDERED = 0x00000010, - SQL_OJ_INNER = 0x00000020, - SQL_OJ_ALL_COMPARISON_OPS = 0x00000040 - ], - - SQL_FN_CVT = - [ - //None = 0, - - SQL_FN_CVT_CONVERT = 0x00000001, - SQL_FN_CVT_CAST = 0x00000002 - ], - - SQL_FN_NUM = - [ - //None = 0, - - SQL_FN_NUM_ABS = 0x00000001, - SQL_FN_NUM_ACOS = 0x00000002, - SQL_FN_NUM_ASIN = 0x00000004, - SQL_FN_NUM_ATAN = 0x00000008, - SQL_FN_NUM_ATAN2 = 0x00000010, - SQL_FN_NUM_CEILING = 0x00000020, - SQL_FN_NUM_COS = 0x00000040, - SQL_FN_NUM_COT = 0x00000080, - SQL_FN_NUM_EXP = 0x00000100, - SQL_FN_NUM_FLOOR = 0x00000200, - SQL_FN_NUM_LOG = 0x00000400, - SQL_FN_NUM_MOD = 0x00000800, - SQL_FN_NUM_SIGN = 0x00001000, - SQL_FN_NUM_SIN = 0x00002000, - SQL_FN_NUM_SQRT = 0x00004000, - SQL_FN_NUM_TAN = 0x00008000, - SQL_FN_NUM_PI = 0x00010000, - SQL_FN_NUM_RAND = 0x00020000, - SQL_FN_NUM_DEGREES = 0x00040000, - SQL_FN_NUM_LOG10 = 0x00080000, - SQL_FN_NUM_POWER = 0x00100000, - SQL_FN_NUM_RADIANS = 0x00200000, - SQL_FN_NUM_ROUND = 0x00400000, - SQL_FN_NUM_TRUNCATE = 0x00800000 - ], - - SQL_SNVF = - [ - SQL_SNVF_BIT_LENGTH = 0x00000001, - SQL_SNVF_CHAR_LENGTH = 0x00000002, - SQL_SNVF_CHARACTER_LENGTH = 0x00000004, - SQL_SNVF_EXTRACT = 0x00000008, - SQL_SNVF_OCTET_LENGTH = 0x00000010, - SQL_SNVF_POSITION = 0x00000020 - ], - - SQL_FN_STR = - [ - //None = 0, - - SQL_FN_STR_CONCAT = 0x00000001, - SQL_FN_STR_INSERT = 0x00000002, - SQL_FN_STR_LEFT = 0x00000004, - SQL_FN_STR_LTRIM = 0x00000008, - SQL_FN_STR_LENGTH = 0x00000010, - SQL_FN_STR_LOCATE = 0x00000020, - SQL_FN_STR_LCASE = 0x00000040, - SQL_FN_STR_REPEAT = 0x00000080, - SQL_FN_STR_REPLACE = 0x00000100, - SQL_FN_STR_RIGHT = 0x00000200, - SQL_FN_STR_RTRIM = 0x00000400, - SQL_FN_STR_SUBSTRING = 0x00000800, - SQL_FN_STR_UCASE = 0x00001000, - SQL_FN_STR_ASCII = 0x00002000, - SQL_FN_STR_CHAR = 0x00004000, - SQL_FN_STR_DIFFERENCE = 0x00008000, - SQL_FN_STR_LOCATE_2 = 0x00010000, - SQL_FN_STR_SOUNDEX = 0x00020000, - SQL_FN_STR_SPACE = 0x00040000, - SQL_FN_STR_BIT_LENGTH = 0x00080000, - SQL_FN_STR_CHAR_LENGTH = 0x00100000, - SQL_FN_STR_CHARACTER_LENGTH = 0x00200000, - SQL_FN_STR_OCTET_LENGTH = 0x00400000, - SQL_FN_STR_POSITION = 0x00800000 - ], - - SQL_FN_SYSTEM = - [ - //None = 0, - - SQL_FN_SYS_USERNAME = 0x00000001, - SQL_FN_SYS_DBNAME = 0x00000002, - SQL_FN_SYS_IFNULL = 0x00000004 - ], - - SQL_FN_TD = - [ - //None = 0, - - SQL_FN_TD_NOW = 0x00000001, - SQL_FN_TD_CURDATE = 0x00000002, - SQL_FN_TD_DAYOFMONTH = 0x00000004, - SQL_FN_TD_DAYOFWEEK = 0x00000008, - SQL_FN_TD_DAYOFYEAR = 0x00000010, - SQL_FN_TD_MONTH = 0x00000020, - SQL_FN_TD_QUARTER = 0x00000040, - SQL_FN_TD_WEEK = 0x00000080, - SQL_FN_TD_YEAR = 0x00000100, - SQL_FN_TD_CURTIME = 0x00000200, - SQL_FN_TD_HOUR = 0x00000400, - SQL_FN_TD_MINUTE = 0x00000800, - SQL_FN_TD_SECOND = 0x00001000, - SQL_FN_TD_TIMESTAMPADD = 0x00002000, - SQL_FN_TD_TIMESTAMPDIFF = 0x00004000, - SQL_FN_TD_DAYNAME = 0x00008000, - SQL_FN_TD_MONTHNAME = 0x00010000, - SQL_FN_TD_CURRENT_DATE = 0x00020000, - SQL_FN_TD_CURRENT_TIME = 0x00040000, - SQL_FN_TD_CURRENT_TIMESTAMP = 0x00080000, - SQL_FN_TD_EXTRACT = 0x00100000 - ], - - SQL_SDF = - [ - SQL_SDF_CURRENT_DATE = 0x00000001, - SQL_SDF_CURRENT_TIME = 0x00000002, - 
SQL_SDF_CURRENT_TIMESTAMP = 0x00000004 - ], - - SQL_TSI = - [ - //None = 0, - - SQL_TSI_FRAC_SECOND = 0x00000001, - SQL_TSI_SECOND = 0x00000002, - SQL_TSI_MINUTE = 0x00000004, - SQL_TSI_HOUR = 0x00000008, - SQL_TSI_DAY = 0x00000010, - SQL_TSI_WEEK = 0x00000020, - SQL_TSI_MONTH = 0x00000040, - SQL_TSI_QUARTER = 0x00000080, - SQL_TSI_YEAR = 0x00000100 - ], - - SQL_AF = - [ - //None = 0, - - SQL_AF_AVG = 0x00000001, - SQL_AF_COUNT = 0x00000002, - SQL_AF_MAX = 0x00000004, - SQL_AF_MIN = 0x00000008, - SQL_AF_SUM = 0x00000010, - SQL_AF_DISTINCT = 0x00000020, - SQL_AF_ALL = 0x00000040, - - All = 0xFF - ], - - SQL_SC = - [ - //None = 0, - - SQL_SC_SQL92_ENTRY = 0x00000001, - SQL_SC_FIPS127_2_TRANSITIONAL = 0x00000002, - SQL_SC_SQL92_INTERMEDIATE = 0x00000004, - SQL_SC_SQL92_FULL = 0x00000008 - ], - - SQL_DL_SQL92 = - [ - SQL_DL_SQL92_DATE = 0x00000001, - SQL_DL_SQL92_TIME = 0x00000002, - SQL_DL_SQL92_TIMESTAMP = 0x00000004, - SQL_DL_SQL92_INTERVAL_YEAR = 0x00000008, - SQL_DL_SQL92_INTERVAL_MONTH = 0x00000010, - SQL_DL_SQL92_INTERVAL_DAY = 0x00000020, - SQL_DL_SQL92_INTERVAL_HOUR = 0x00000040, - SQL_DL_SQL92_INTERVAL_MINUTE = 0x00000080, - SQL_DL_SQL92_INTERVAL_SECOND = 0x00000100, - SQL_DL_SQL92_INTERVAL_YEAR_TO_MONTH = 0x00000200, - SQL_DL_SQL92_INTERVAL_DAY_TO_HOUR = 0x00000400, - SQL_DL_SQL92_INTERVAL_DAY_TO_MINUTE = 0x00000800, - SQL_DL_SQL92_INTERVAL_DAY_TO_SECOND = 0x00001000, - SQL_DL_SQL92_INTERVAL_HOUR_TO_MINUTE = 0x00002000, - SQL_DL_SQL92_INTERVAL_HOUR_TO_SECOND = 0x00004000, - SQL_DL_SQL92_INTERVAL_MINUTE_TO_SECOND = 0x00008000 - ], - - SQL_IK = - [ - SQL_IK_NONE = 0x00000000, - SQL_IK_ASC = 0x00000001, - SQL_IK_DESC = 0x00000002, - SQL_IK_ALL = 0x00000003 //SQL_IK_ASC | SQL_IK_DESC - ], - - SQL_ISV = - [ - SQL_ISV_ASSERTIONS = 0x00000001, - SQL_ISV_CHARACTER_SETS = 0x00000002, - SQL_ISV_CHECK_CONSTRAINTS = 0x00000004, - SQL_ISV_COLLATIONS = 0x00000008, - SQL_ISV_COLUMN_DOMAIN_USAGE = 0x00000010, - SQL_ISV_COLUMN_PRIVILEGES = 0x00000020, - SQL_ISV_COLUMNS = 0x00000040, - SQL_ISV_CONSTRAINT_COLUMN_USAGE = 0x00000080, - SQL_ISV_CONSTRAINT_TABLE_USAGE = 0x00000100, - SQL_ISV_DOMAIN_CONSTRAINTS = 0x00000200, - SQL_ISV_DOMAINS = 0x00000400, - SQL_ISV_KEY_COLUMN_USAGE = 0x00000800, - SQL_ISV_REFERENTIAL_CONSTRAINTS = 0x00001000, - SQL_ISV_SCHEMATA = 0x00002000, - SQL_ISV_SQL_LANGUAGES = 0x00004000, - SQL_ISV_TABLE_CONSTRAINTS = 0x00008000, - SQL_ISV_TABLE_PRIVILEGES = 0x00010000, - SQL_ISV_TABLES = 0x00020000, - SQL_ISV_TRANSLATIONS = 0x00040000, - SQL_ISV_USAGE_PRIVILEGES = 0x00080000, - SQL_ISV_VIEW_COLUMN_USAGE = 0x00100000, - SQL_ISV_VIEW_TABLE_USAGE = 0x00200000, - SQL_ISV_VIEWS = 0x00400000 - ], - - SQL_SRJO = - [ - //None = 0, - - SQL_SRJO_CORRESPONDING_CLAUSE = 0x00000001, - SQL_SRJO_CROSS_JOIN = 0x00000002, - SQL_SRJO_EXCEPT_JOIN = 0x00000004, - SQL_SRJO_FULL_OUTER_JOIN = 0x00000008, - SQL_SRJO_INNER_JOIN = 0x00000010, - SQL_SRJO_INTERSECT_JOIN = 0x00000020, - SQL_SRJO_LEFT_OUTER_JOIN = 0x00000040, - SQL_SRJO_NATURAL_JOIN = 0x00000080, - SQL_SRJO_RIGHT_OUTER_JOIN = 0x00000100, - SQL_SRJO_UNION_JOIN = 0x00000200 - ], - - SQL_SRVC = - [ - SQL_SRVC_VALUE_EXPRESSION = 0x00000001, - SQL_SRVC_NULL = 0x00000002, - SQL_SRVC_DEFAULT = 0x00000004, - SQL_SRVC_ROW_SUBQUERY = 0x00000008 - ], - - //public static readonly int SQL_OV_ODBC3 = 3; - //public const Int32 SQL_NTS = -3; //flags for null-terminated string - - //Pooling - SQL_CP = - [ - OFF = 0, - ONE_PER_DRIVER = 1, - ONE_PER_HENV = 2 - ], - -/* - public const Int32 SQL_CD_TRUE = 1; - public const Int32 SQL_CD_FALSE = 0; - - public const 
Int32 SQL_DTC_DONE = 0; - public const Int32 SQL_IS_POINTER = -4; - public const Int32 SQL_IS_PTR = 1; -*/ - SQL_DRIVER = - [ - NOPROMPT = 0, - COMPLETE = 1, - PROMPT = 2, - COMPLETE_REQUIRED = 3 - ], - - // Column set for SQLPrimaryKeys - SQL_PRIMARYKEYS = - [ - /* - CATALOGNAME = 1, // TABLE_CAT - SCHEMANAME = 2, // TABLE_SCHEM - TABLENAME = 3, // TABLE_NAME - */ - COLUMNNAME = 4 // COLUMN_NAME - /* - KEY_SEQ = 5, // KEY_SEQ - PKNAME = 6, // PK_NAME - */ - ], - - // Column set for SQLStatistics - SQL_STATISTICS = - [ - /* - CATALOGNAME = 1, // TABLE_CAT - SCHEMANAME = 2, // TABLE_SCHEM - TABLENAME = 3, // TABLE_NAME - NONUNIQUE = 4, // NON_UNIQUE - INDEXQUALIFIER = 5, // INDEX_QUALIFIER - */ - INDEXNAME = 6, // INDEX_NAME - /* - TYPE = 7, // TYPE - */ - ORDINAL_POSITION = 8, // ORDINAL_POSITION - COLUMN_NAME = 9 // COLUMN_NAME - /* - ASC_OR_DESC = 10, // ASC_OR_DESC - CARDINALITY = 11, // CARDINALITY - PAGES = 12, // PAGES - FILTER_CONDITION = 13, // FILTER_CONDITION - */ - ], - - // Column set for SQLSpecialColumns - SQL_SPECIALCOLUMNSET = - [ - /* - SCOPE = 1, // SCOPE - */ - COLUMN_NAME = 2 // COLUMN_NAME - /* - DATA_TYPE = 3, // DATA_TYPE - TYPE_NAME = 4, // TYPE_NAME - COLUMN_SIZE = 5, // COLUMN_SIZE - BUFFER_LENGTH = 6, // BUFFER_LENGTH - DECIMAL_DIGITS = 7, // DECIMAL_DIGITS - PSEUDO_COLUMN = 8, // PSEUDO_COLUMN - */ - ], - - SQL_DIAG = - [ - CURSOR_ROW_COUNT= -1249, - ROW_NUMBER = -1248, - COLUMN_NUMBER = -1247, - RETURNCODE = 1, - NUMBER = 2, - ROW_COUNT = 3, - SQLSTATE = 4, - NATIVE = 5, - MESSAGE_TEXT = 6, - DYNAMIC_FUNCTION = 7, - CLASS_ORIGIN = 8, - SUBCLASS_ORIGIN = 9, - CONNECTION_NAME = 10, - SERVER_NAME = 11, - DYNAMIC_FUNCTION_CODE = 12 - ], - - SQL_SU = - [ - SQL_SU_DML_STATEMENTS = 0x00000001, - SQL_SU_PROCEDURE_INVOCATION = 0x00000002, - SQL_SU_TABLE_DEFINITION = 0x00000004, - SQL_SU_INDEX_DEFINITION = 0x00000008, - SQL_SU_PRIVILEGE_DEFINITION = 0x00000010 - ] -] diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch16.png b/bi-connectors/PowerBIConnector/src/OpenSearch16.png deleted file mode 100644 index 67c00e9609..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch16.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch20.png b/bi-connectors/PowerBIConnector/src/OpenSearch20.png deleted file mode 100644 index bd0a960a1c..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch20.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch24.png b/bi-connectors/PowerBIConnector/src/OpenSearch24.png deleted file mode 100644 index dbca9acdf4..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch24.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch32.png b/bi-connectors/PowerBIConnector/src/OpenSearch32.png deleted file mode 100644 index 0698737da4..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch32.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch40.png b/bi-connectors/PowerBIConnector/src/OpenSearch40.png deleted file mode 100644 index 201319e6e0..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch40.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch48.png b/bi-connectors/PowerBIConnector/src/OpenSearch48.png deleted file mode 100644 index 5aed8ebf93..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch48.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch64.png 
b/bi-connectors/PowerBIConnector/src/OpenSearch64.png deleted file mode 100644 index a42f13babf..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch64.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearch80.png b/bi-connectors/PowerBIConnector/src/OpenSearch80.png deleted file mode 100644 index 606f76205a..0000000000 Binary files a/bi-connectors/PowerBIConnector/src/OpenSearch80.png and /dev/null differ diff --git a/bi-connectors/PowerBIConnector/src/OpenSearchProject.mproj b/bi-connectors/PowerBIConnector/src/OpenSearchProject.mproj deleted file mode 100644 index 938648e40f..0000000000 --- a/bi-connectors/PowerBIConnector/src/OpenSearchProject.mproj +++ /dev/null @@ -1,125 +0,0 @@ [OpenSearchProject.mproj: a 125-line MSBuild project file for the connector (project GUID {75a2dabe-6c5b-498e-8df1-e85d4483a7dc}, Debug/Release outputs under bin\, and the .pq/.pqm/.resx/.png sources listed as Code/Content items); its XML markup was lost in extraction, so the element-by-element diff is omitted here.] \ No newline at end of file diff --git a/bi-connectors/PowerBIConnector/src/OpenSearchProject.pq b/bi-connectors/PowerBIConnector/src/OpenSearchProject.pq deleted file mode 100644 index b3f65ddc99..0000000000 --- a/bi-connectors/PowerBIConnector/src/OpenSearchProject.pq +++ /dev/null @@ -1,263 +0,0 @@ -// This file contains Data Connector logic -[Version = "1.0.1"] -section OpenSearchProject; - -// When set to true, additional trace information will be written out to the User log. -// This should be set to false before release. Tracing is done through a call to -// Diagnostics.LogValue(). When EnableTraceOutput is set to false, the call becomes a -// no-op and simply returns the original value. -EnableTraceOutput = false; - -[DataSource.Kind="OpenSearchProject", Publish="OpenSearchProject.Publish"] -shared OpenSearchProject.Contents = Value.ReplaceType(OpenSearchProjectImpl, OpenSearchProjectType); - -// Wrapper function to provide additional UI customization. -OpenSearchProjectType = type function ( - Server as (type text meta [ - Documentation.FieldCaption = "Server", - Documentation.FieldDescription = "The hostname of the OpenSearch server.", - Documentation.SampleValues = { "localhost" } - ]), - Port as (type number meta [ - Documentation.FieldCaption = "Port", - Documentation.FieldDescription = "The port on which the OpenSearch server listens.", - Documentation.SampleValues = { 9200 } - ]), - UseSSL as (type logical meta [ - Documentation.FieldCaption = "Use SSL", - Documentation.FieldDescription = "Use SSL", - Documentation.AllowedValues = { true, false } - ]), - HostnameVerification as (type logical meta [ - Documentation.FieldCaption = "Certificate validation", - Documentation.FieldDescription = "Certificate validation", - Documentation.AllowedValues = { true, false } - ]) - ) - as table meta [ - Documentation.Name = "OpenSearch Project" - ]; - -OpenSearchProjectImpl = (Server as text, Port as number, UseSSL as logical, HostnameVerification as logical) as table => - let - Credential = Extension.CurrentCredential(), - AuthenticationMode = Credential[AuthenticationKind], - - // Sets connection string properties for authentication.
- CredentialConnectionString = - if AuthenticationMode = "UsernamePassword" then - [ - Auth = "BASIC", - UID = Credential[Username], - PWD = Credential[Password] - ] - else if AuthenticationMode = "Key" then - [ - Auth = "AWS_SIGV4", - Region = Credential[Key] - ] - else - [ - Auth = "NONE" - ], - - // Sets connection string properties for encrypted connections. - EncryptedConnectionString = - if Credential[EncryptConnection] = null or Credential[EncryptConnection] = true then - [ - UseSSL = 1 - ] - else - [ - UseSSL = 0 - ], - - // Strip the scheme and port from the user input in case it's entered like 'http://localhost' or 'https://srv.com:100500' or 'localhost:0', - // and rebuild the proper string on our own - FinalServerString = if UseSSL then - "https://" & Uri.Parts(Server)[Host] & ":" & Text.From(Port) - else - "http://" & Uri.Parts(Server)[Host] & ":" & Text.From(Port), - - ConnectionString = [ - Driver = "OpenSearch SQL ODBC Driver", - Host = FinalServerString, - HostnameVerification = if HostnameVerification then 1 else 0 - ], - - SQLGetInfo = Diagnostics.LogValue("SQLGetInfo_Options", [ - SQL_AGGREGATE_FUNCTIONS = ODBC[SQL_AF][All], - SQL_SQL_CONFORMANCE = ODBC[SQL_SC][SQL_SC_SQL92_INTERMEDIATE] - ]), - - SQLGetTypeInfo = (types) => - if (EnableTraceOutput <> true) then types else - let - // Outputting the entire table might be too large, and result in the value being truncated. - // We can output a row at a time instead with Table.TransformRows() - rows = Table.TransformRows(types, each Diagnostics.LogValue("SQLGetTypeInfo " & _[TYPE_NAME], _)), - toTable = Table.FromRecords(rows) - in - Value.ReplaceType(toTable, Value.Type(types)), - - // SQLColumns is a function handler that receives the results of an ODBC call to SQLColumns(). - SQLColumns = (catalogName, schemaName, tableName, columnName, source) => - if (EnableTraceOutput <> true) then source else - // the if statement conditions will force the values to be evaluated/written to diagnostics - if (Diagnostics.LogValue("SQLColumns.TableName", tableName) <> "***" and Diagnostics.LogValue("SQLColumns.ColumnName", columnName) <> "***") then - let - // Outputting the entire table might be too large, and result in the value being truncated. - // We can output a row at a time instead with Table.TransformRows() - rows = Table.TransformRows(source, each Diagnostics.LogValue("SQLColumns", _)), - toTable = Table.FromRecords(rows) - in - Value.ReplaceType(toTable, Value.Type(source)) - else - source, - - SQLGetFunctions = Diagnostics.LogValue("SQLGetFunctions_Options", [ - SQL_API_SQLBINDPARAMETER = false - ]), - - SqlCapabilities = Diagnostics.LogValue("SqlCapabilities_Options", [ - SupportsTop = false, - LimitClauseKind = LimitClauseKind.LimitOffset, - Sql92Conformance = ODBC[SQL_SC][SQL_SC_SQL92_FULL], - SupportsNumericLiterals = true, - SupportsStringLiterals = true, - SupportsOdbcDateLiterals = true, - SupportsOdbcTimeLiterals = true, - SupportsOdbcTimestampLiterals = true - ]), - - OdbcOptions = [ - // Do not display the tables grouped by their schema names. - HierarchicalNavigation = false, - // Prevents execution of native SQL statements. Extensions should set this to true.
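The FinalServerString step above deserves a note: users may enter the server as 'http://localhost', 'https://srv.com:100500', or 'localhost:0', so the connector keeps only the host component (Uri.Parts(Server)[Host]) and rebuilds the URL from its own UseSSL and Port inputs, ignoring whatever scheme or port the user typed. A small Python sketch of the same normalization (a hypothetical helper, for illustration only):

    from urllib.parse import urlparse

    def normalize_server(server: str, port: int, use_ssl: bool) -> str:
        # Keep only the hostname from whatever the user typed, then
        # rebuild the URL from the explicit UseSSL and Port settings.
        if "://" not in server:
            server = "//" + server  # let urlparse treat the input as a netloc
        host = urlparse(server).hostname or server
        scheme = "https" if use_ssl else "http"
        return f"{scheme}://{host}:{port}"

    assert normalize_server("https://srv.com:100500", 9200, False) == "http://srv.com:9200"
    assert normalize_server("localhost:0", 9200, True) == "https://localhost:9200"

The OdbcOptions record continues below with the remaining flags the M engine consults before delegating the query to the driver.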
- HideNativeQuery = true, - // Allows upconversion of numeric types - SoftNumbers = true, - // Allow upconversion / resizing of numeric and string types - TolerateConcatOverflow = true, - // Enables connection pooling via the system ODBC manager - ClientConnectionPooling = true, - - // These values should be set by previous steps - SQLColumns = SQLColumns, - SQLGetTypeInfo = SQLGetTypeInfo, - SQLGetInfo = SQLGetInfo, - SQLGetFunctions = SQLGetFunctions, - SqlCapabilities = SqlCapabilities, - - OnError = OnOdbcError, - - // Connection string properties used for encrypted connections. - CredentialConnectionString = EncryptedConnectionString - ], - - FullConnectionString = (ConnectionString & CredentialConnectionString & EncryptedConnectionString), - - OdbcDatasource = Odbc.DataSource(FullConnectionString, OdbcOptions) - in - OdbcDatasource; - -// Handles ODBC errors. -OnOdbcError = (errorRecord as record) => - let - ErrorMessage = errorRecord[Message], - ConnectionServer = errorRecord[Detail][DataSourcePath], - - IsDriverNotInstalled = Text.Contains(ErrorMessage, "doesn't correspond to an installed ODBC driver"), - - OdbcError = errorRecord[Detail][OdbcErrors]{0}, - OdbcErrorCode = OdbcError[NativeError], - - // Failed to connect to given host - IsHostUnreachable = - OdbcErrorCode = 202 - in - if IsDriverNotInstalled then - error Error.Record("DataSource.Error", "The OpenSearch SQL ODBC driver is not installed. Please install the driver") - else if IsHostUnreachable then - error Error.Record("DataSource.Error", "Couldn't reach server. Please double-check the server and auth. [" & ConnectionServer & "]") - else - error errorRecord; - -// Data Source Kind description -OpenSearchProject = [ - // Required for use with Power BI Service. - TestConnection = (dataSourcePath) => - let - json = Json.Document(dataSourcePath), - Server = json[Server], - Port = json[Port], - UseSSL = json[UseSSL], - HostnameVerification = json[HostnameVerification] - in - { "OpenSearchProject.Contents", Server, Port, UseSSL, HostnameVerification }, - - // Authentication modes - Authentication = [ - Implicit = [ - Label = "NONE" - ], - UsernamePassword = [ - Label = "BASIC" - ], - Key = [ - Label = "AWS_SIGV4", - KeyLabel = "Region" - ] - ], - - // PBIDS Handler - DSRHandlers = [ - opensearchproject = [ - GetDSR = (Server, Port, UseSSL, HostnameVerification, optional Options) => [ protocol = "opensearchproject-odbc", address = [ server = Server, port = Port, useSSL = UseSSL, hostnameVerification = HostnameVerification ] ], - GetFormula = (dsr, optional options) => () => - let - db = OpenSearchProject.Contents(dsr[address][server], dsr[address][port], dsr[address][useSSL], dsr[address][hostnameVerification]) - in - db, - GetFriendlyName = (dsr) => "OpenSearch Project" - ] - ], - - // Enable Encryption - SupportsEncryption = true, - - Label = Extension.LoadString("DataSourceLabel") -]; - -// Data Source UI publishing description -OpenSearchProject.Publish = [ - Beta = false, - Category = "Other", - ButtonText = { Extension.LoadString("ButtonTitle"), Extension.LoadString("ButtonHelp") }, - LearnMoreUrl = "https://github.com/opensearch-project/sql/blob/main/bi-connectors/PowerBIConnector/docs/OpenSearchProject.md", - - SupportsDirectQuery = true, - - SourceImage = OpenSearch.Icons, - SourceTypeImage = OpenSearch.Icons -]; - -OpenSearch.Icons = [ - Icon16 = { Extension.Contents("OpenSearch16.png"), Extension.Contents("OpenSearch20.png"), Extension.Contents("OpenSearch24.png"), Extension.Contents("OpenSearch32.png") }, - Icon32 
= { Extension.Contents("OpenSearch32.png"), Extension.Contents("OpenSearch40.png"), Extension.Contents("OpenSearch48.png"), Extension.Contents("OpenSearch64.png") } -]; - -// Load common library functions -Extension.LoadFunction = (name as text) => - let - binary = Extension.Contents(name), - asText = Text.FromBinary(binary) - in - Expression.Evaluate(asText, #shared); - -// Diagnostics module contains multiple functions. . -Diagnostics = Extension.LoadFunction("Diagnostics.pqm"); -Diagnostics.LogValue = if (EnableTraceOutput) then Diagnostics[LogValue] else (prefix, value) => value; - -// OdbcConstants contains numeric constants from the ODBC header files, and helper function to create bitfield values. -ODBC = Extension.LoadFunction("OdbcConstants.pqm"); diff --git a/bi-connectors/PowerBIConnector/src/OpenSearchProject.query.pq b/bi-connectors/PowerBIConnector/src/OpenSearchProject.query.pq deleted file mode 100644 index 45026f96a3..0000000000 --- a/bi-connectors/PowerBIConnector/src/OpenSearchProject.query.pq +++ /dev/null @@ -1,324 +0,0 @@ -// This file contains queries to test your data connector -section OpenSearch.UnitTests; - -shared MyExtension.UnitTest = -[ - // Common variables for all tests - Host = "localhost", - Port = 9200, - UseSSL = false, - HostnameVerification = false, - - facts = - { - Fact("Connection Test", - 7, - let - Source = OpenSearchProject.Contents(Host, Port, UseSSL, HostnameVerification), - no_of_columns = Table.ColumnCount(Source) - in - no_of_columns - ), - Fact("calcs_data:bool0", - #table(type table [bool0 = logical], - { {null}, {false}, {true} }), - let - Source = OpenSearchProject.Contents(Host, Port, UseSSL, HostnameVerification), - calcs_null_null = Source{[Item="calcs",Schema=null,Catalog=null]}[Data], - grouped = Table.Group(calcs_null_null, {"bool0"}, {}) - in - grouped - ) - }, - - report = Facts.Summarize(facts) -][report]; - -/// COMMON UNIT TESTING CODE -Fact = (_subject as text, _expected, _actual) as record => -[ expected = try _expected, - safeExpected = if expected[HasError] then "Expected : "& @ValueToText(expected[Error]) else expected[Value], - actual = try _actual, - safeActual = if actual[HasError] then "Actual : "& @ValueToText(actual[Error]) else actual[Value], - attempt = try safeExpected = safeActual, - result = if attempt[HasError] or not attempt[Value] then "Failure ⛔" else "Success ✓", - resultOp = if result = "Success ✓" then " = " else " <> ", - addendumEvalAttempt = if attempt[HasError] then @ValueToText(attempt[Error]) else "", - addendumEvalExpected = try @ValueToText(safeExpected) otherwise "...", - addendumEvalActual = try @ValueToText(safeActual) otherwise "...", - ShortenedAddendumEvalExpected = if Text.Length(addendumEvalExpected) > 20 then Text.Range(addendumEvalExpected, 0, 20) & "..." else addendumEvalExpected, - ShortenedAddendumEvalActual = if Text.Length(addendumEvalActual) > 20 then Text.Range(addendumEvalActual, 0, 20) & "..." 
else addendumEvalActual, - fact = - [ Result = result & " " & addendumEvalAttempt, - Notes =_subject, - Details = ShortenedAddendumEvalExpected & resultOp & ShortenedAddendumEvalActual - ] -][fact]; - -Facts = (_subject as text, _predicates as list) => List.Transform(_predicates, each Fact(_subject,_{0},_{1})); - -Facts.Summarize = (_facts as list) as table => -[ Fact.CountSuccesses = (count, i) => - [ result = try i[Result], - sum = if result[HasError] or not Text.StartsWith(result[Value], "Success") then count else count + 1 - ][sum], - passed = List.Accumulate(_facts, 0, Fact.CountSuccesses), - total = List.Count(_facts), - format = if passed = total then "All #{0} Passed !!! ✓" else "#{0} Passed ☺ #{1} Failed ☹", - result = if passed = total then "Success" else "⛔", - rate = Number.IntegerDivide(100*passed, total), - header = - [ Result = result, - Notes = Text.Format(format, {passed, total-passed}), - Details = Text.Format("#{0}% success rate", {rate}) - ], - report = Table.FromRecords(List.Combine({{header},_facts})) -][report]; - -ValueToText = (value, optional depth) => - let - _canBeIdentifier = (x) => - let - keywords = {"and", "as", "each", "else", "error", "false", "if", "in", "is", "let", "meta", "not", "otherwise", "or", "section", "shared", "then", "true", "try", "type" }, - charAlpha = (c as number) => (c>= 65 and c <= 90) or (c>= 97 and c <= 122) or c=95, - charDigit = (c as number) => c>= 48 and c <= 57 - in - try - charAlpha(Character.ToNumber(Text.At(x,0))) - and - List.MatchesAll( - Text.ToList(x), - (c)=> let num = Character.ToNumber(c) in charAlpha(num) or charDigit(num) - ) - and not - List.MatchesAny( keywords, (li)=> li=x ) - otherwise - false, - - Serialize.Binary = (x) => "#binary(" & Serialize(Binary.ToList(x)) & ") ", - - Serialize.Date = (x) => "#date(" & - Text.From(Date.Year(x)) & ", " & - Text.From(Date.Month(x)) & ", " & - Text.From(Date.Day(x)) & ") ", - - Serialize.Datetime = (x) => "#datetime(" & - Text.From(Date.Year(DateTime.Date(x))) & ", " & - Text.From(Date.Month(DateTime.Date(x))) & ", " & - Text.From(Date.Day(DateTime.Date(x))) & ", " & - Text.From(Time.Hour(DateTime.Time(x))) & ", " & - Text.From(Time.Minute(DateTime.Time(x))) & ", " & - Text.From(Time.Second(DateTime.Time(x))) & ") ", - - Serialize.Datetimezone =(x) => let - dtz = DateTimeZone.ToRecord(x) - in - "#datetimezone(" & - Text.From(dtz[Year]) & ", " & - Text.From(dtz[Month]) & ", " & - Text.From(dtz[Day]) & ", " & - Text.From(dtz[Hour]) & ", " & - Text.From(dtz[Minute]) & ", " & - Text.From(dtz[Second]) & ", " & - Text.From(dtz[ZoneHours]) & ", " & - Text.From(dtz[ZoneMinutes]) & ") ", - - Serialize.Duration = (x) => let - dur = Duration.ToRecord(x) - in - "#duration(" & - Text.From(dur[Days]) & ", " & - Text.From(dur[Hours]) & ", " & - Text.From(dur[Minutes]) & ", " & - Text.From(dur[Seconds]) & ") ", - - Serialize.Function = (x) => _serialize_function_param_type( - Type.FunctionParameters(Value.Type(x)), - Type.FunctionRequiredParameters(Value.Type(x)) ) & - " as " & - _serialize_function_return_type(Value.Type(x)) & - " => (...) 
", - - Serialize.List = (x) => "{" & - List.Accumulate(x, "", (seed,item) => if seed="" then Serialize(item) else seed & ", " & Serialize(item)) & - "} ", - - Serialize.Logical = (x) => Text.From(x), - - Serialize.Null = (x) => "null", - - Serialize.Number = (x) => - let Text.From = (i as number) as text => - if Number.IsNaN(i) then "#nan" else - if i=Number.PositiveInfinity then "#infinity" else - if i=Number.NegativeInfinity then "-#infinity" else - Text.From(i) - in - Text.From(x), - - Serialize.Record = (x) => "[ " & - List.Accumulate( - Record.FieldNames(x), - "", - (seed,item) => - (if seed="" then Serialize.Identifier(item) else seed & ", " & Serialize.Identifier(item)) & " = " & Serialize(Record.Field(x, item)) - ) & - " ] ", - - Serialize.Table = (x) => "#table( type " & - _serialize_table_type(Value.Type(x)) & - ", " & - Serialize(Table.ToRows(x)) & - ") ", - - Serialize.Text = (x) => """" & - _serialize_text_content(x) & - """", - - _serialize_text_content = (x) => let - escapeText = (n as number) as text => "#(#)(" & Text.PadStart(Number.ToText(n, "X", "en-US"), 4, "0") & ")" - in - List.Accumulate( - List.Transform( - Text.ToList(x), - (c) => let n=Character.ToNumber(c) in - if n = 9 then "#(#)(tab)" else - if n = 10 then "#(#)(lf)" else - if n = 13 then "#(#)(cr)" else - if n = 34 then """""" else - if n = 35 then "#(#)(#)" else - if n < 32 then escapeText(n) else - if n < 127 then Character.FromNumber(n) else - escapeText(n) - ), - "", - (s,i)=>s&i - ), - - Serialize.Identifier = (x) => - if _canBeIdentifier(x) then - x - else - "#""" & - _serialize_text_content(x) & - """", - - Serialize.Time = (x) => "#time(" & - Text.From(Time.Hour(x)) & ", " & - Text.From(Time.Minute(x)) & ", " & - Text.From(Time.Second(x)) & ") ", - - Serialize.Type = (x) => "type " & _serialize_typename(x), - - - _serialize_typename = (x, optional funtype as logical) => /* Optional parameter: Is this being used as part of a function signature? 
*/ - let - isFunctionType = (x as type) => try if Type.FunctionReturn(x) is type then true else false otherwise false, - isTableType = (x as type) => try if Type.TableSchema(x) is table then true else false otherwise false, - isRecordType = (x as type) => try if Type.ClosedRecord(x) is type then true else false otherwise false, - isListType = (x as type) => try if Type.ListItem(x) is type then true else false otherwise false - in - - if funtype=null and isTableType(x) then _serialize_table_type(x) else - if funtype=null and isListType(x) then "{ " & @_serialize_typename( Type.ListItem(x) ) & " }" else - if funtype=null and isFunctionType(x) then "function " & _serialize_function_type(x) else - if funtype=null and isRecordType(x) then _serialize_record_type(x) else - - if x = type any then "any" else - let base = Type.NonNullable(x) in - (if Type.IsNullable(x) then "nullable " else "") & - (if base = type anynonnull then "anynonnull" else - if base = type binary then "binary" else - if base = type date then "date" else - if base = type datetime then "datetime" else - if base = type datetimezone then "datetimezone" else - if base = type duration then "duration" else - if base = type logical then "logical" else - if base = type none then "none" else - if base = type null then "null" else - if base = type number then "number" else - if base = type text then "text" else - if base = type time then "time" else - if base = type type then "type" else - - /* Abstract types: */ - if base = type function then "function" else - if base = type table then "table" else - if base = type record then "record" else - if base = type list then "list" else - - "any /*Actually unknown type*/"), - - _serialize_table_type = (x) => - let - schema = Type.TableSchema(x) - in - "table " & - (if Table.IsEmpty(schema) then "" else - "[" & List.Accumulate( - List.Transform( - Table.ToRecords(Table.Sort(schema,"Position")), - each Serialize.Identifier(_[Name]) & " = " & _[Kind]), - "", - (seed,item) => (if seed="" then item else seed & ", " & item ) - ) & "] " ), - - _serialize_record_type = (x) => - let flds = Type.RecordFields(x) - in - if Record.FieldCount(flds)=0 then "record" else - "[" & List.Accumulate( - Record.FieldNames(flds), - "", - (seed,item) => - seed & - (if seed<>"" then ", " else "") & - (Serialize.Identifier(item) & "=" & _serialize_typename(Record.Field(flds,item)[Type]) ) - ) & - (if Type.IsOpenRecord(x) then ",..." 
else "") & - "]", - - _serialize_function_type = (x) => _serialize_function_param_type( - Type.FunctionParameters(x), - Type.FunctionRequiredParameters(x) ) & - " as " & - _serialize_function_return_type(x), - - _serialize_function_param_type = (t,n) => - let - funsig = Table.ToRecords( - Table.TransformColumns( - Table.AddIndexColumn( Record.ToTable( t ), "isOptional", 1 ), - { "isOptional", (x)=> x>n } ) ) - in - "(" & - List.Accumulate( - funsig, - "", - (seed,item)=> - (if seed="" then "" else seed & ", ") & - (if item[isOptional] then "optional " else "") & - Serialize.Identifier(item[Name]) & " as " & _serialize_typename(item[Value], true) ) - & ")", - - _serialize_function_return_type = (x) => _serialize_typename(Type.FunctionReturn(x), true), - - Serialize = (x) as text => - if x is binary then try Serialize.Binary(x) otherwise "null /*serialize failed*/" else - if x is date then try Serialize.Date(x) otherwise "null /*serialize failed*/" else - if x is datetime then try Serialize.Datetime(x) otherwise "null /*serialize failed*/" else - if x is datetimezone then try Serialize.Datetimezone(x) otherwise "null /*serialize failed*/" else - if x is duration then try Serialize.Duration(x) otherwise "null /*serialize failed*/" else - if x is function then try Serialize.Function(x) otherwise "null /*serialize failed*/" else - if x is list then try Serialize.List(x) otherwise "null /*serialize failed*/" else - if x is logical then try Serialize.Logical(x) otherwise "null /*serialize failed*/" else - if x is null then try Serialize.Null(x) otherwise "null /*serialize failed*/" else - if x is number then try Serialize.Number(x) otherwise "null /*serialize failed*/" else - if x is record then try Serialize.Record(x) otherwise "null /*serialize failed*/" else - if x is table then try Serialize.Table(x) otherwise "null /*serialize failed*/" else - if x is text then try Serialize.Text(x) otherwise "null /*serialize failed*/" else - if x is time then try Serialize.Time(x) otherwise "null /*serialize failed*/" else - if x is type then try Serialize.Type(x) otherwise "null /*serialize failed*/" else - "[#_unable_to_serialize_#]" - in - try Serialize(value) otherwise ""; diff --git a/bi-connectors/PowerBIConnector/src/resources.resx b/bi-connectors/PowerBIConnector/src/resources.resx deleted file mode 100644 index 3db608e458..0000000000 --- a/bi-connectors/PowerBIConnector/src/resources.resx +++ /dev/null @@ -1,129 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - text/microsoft-resx - - - 2.0 - - - System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - - System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - - Connect to OpenSearch - - - OpenSearch Project - - - OpenSearch Project - - \ No newline at end of file diff --git a/bi-connectors/TableauConnector/README.md b/bi-connectors/TableauConnector/README.md deleted file mode 100644 index 71c3d206d1..0000000000 --- a/bi-connectors/TableauConnector/README.md +++ /dev/null @@ -1,32 +0,0 @@ -## Connector Download - -The Tableau connector is available to download from the automated CI workflow: [link](https://github.com/opensearch-project/sql/actions/workflows/bi-connectors.yml). -The release snapshot is also available [here](opensearch_sql_jdbc.taco). - -## Connector Install - -1. 
Put the connector `taco` file into - * Windows: `C:\Users\%USERNAME%\Documents\My Tableau Repository\Connectors`; - * MacOS: `~/Documents/My Tableau Repository/Connectors`. -2. Put the OpenSearch `JDBC` [driver](../../sql-jdbc/README.md) (`jar` file) into - * Windows: `C:\Program Files\Tableau\Drivers`; - * MacOS: `~/Library/Tableau/Drivers`. -3. Run `Tableau Desktop` with the command line flag `-DDisableVerifyConnectorPluginSignature=true`: - * Windows: `"C:\Program Files\Tableau\Tableau 2022.1\bin\tableau.exe" -DDisableVerifyConnectorPluginSignature=true`; - * MacOS: `open -n /Applications/Tableau\ Desktop\ 2022.1.app --args -DDisableVerifyConnectorPluginSignature=true`. -Adjust the command line according to the Tableau version you have. You can create a shortcut or a script to simplify this step. - -## TDVT report for OpenSearch JDBC Tableau connector - -Each Tableau connector has to be tested and verified using [TDVT](https://tableau.github.io/connector-plugin-sdk/docs/tdvt). - -The most recent tests of the connector were performed on OpenSearch v.1.2.0 with SQL plugin v.1.2.0. - -TDVT test results are available in [tdvt_test_results.csv](tdvt_test_results.csv). - -The test pass rate is 669/837 (80%). - -## See also - -* [Connector user manual for Tableau Desktop](tableau_support.md) -* JDBC Driver user manual [describes](../../sql-jdbc/docs/tableau.md) how to use the `JDBC` driver without the connector \ No newline at end of file diff --git a/bi-connectors/TableauConnector/img/tableau_columns_list.png b/bi-connectors/TableauConnector/img/tableau_columns_list.png deleted file mode 100644 index 967ba224ca..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_columns_list.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_data_preview.png b/bi-connectors/TableauConnector/img/tableau_data_preview.png deleted file mode 100644 index 6c03338f2d..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_data_preview.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_dialog.png b/bi-connectors/TableauConnector/img/tableau_dialog.png deleted file mode 100644 index 2a3017586a..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_dialog.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_download_taco.png b/bi-connectors/TableauConnector/img/tableau_download_taco.png deleted file mode 100644 index ec84d503b4..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_download_taco.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_graph.png b/bi-connectors/TableauConnector/img/tableau_graph.png deleted file mode 100644 index 3d0cdce994..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_graph.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_select_connector.png b/bi-connectors/TableauConnector/img/tableau_select_connector.png deleted file mode 100644 index d380ecc958..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_select_connector.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_select_table.png b/bi-connectors/TableauConnector/img/tableau_select_table.png deleted file mode 100644 index 0e79347304..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_select_table.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/img/tableau_table_list.png b/bi-connectors/TableauConnector/img/tableau_table_list.png deleted file
mode 100644 index 104b4becdd..0000000000 Binary files a/bi-connectors/TableauConnector/img/tableau_table_list.png and /dev/null differ diff --git a/bi-connectors/TableauConnector/opensearch_sql_jdbc.taco b/bi-connectors/TableauConnector/opensearch_sql_jdbc.taco deleted file mode 100644 index 44dea102d6..0000000000 Binary files a/bi-connectors/TableauConnector/opensearch_sql_jdbc.taco and /dev/null differ diff --git a/bi-connectors/TableauConnector/src/META-INF/MANIFEST.MF b/bi-connectors/TableauConnector/src/META-INF/MANIFEST.MF deleted file mode 100644 index 97a662d62c..0000000000 --- a/bi-connectors/TableauConnector/src/META-INF/MANIFEST.MF +++ /dev/null @@ -1,3 +0,0 @@ -Manifest-Version: 1.0 -Created-By: 13 (Oracle Corporation) - diff --git a/bi-connectors/TableauConnector/src/connection-fields.xml b/bi-connectors/TableauConnector/src/connection-fields.xml deleted file mode 100644 index 54f5296f7e..0000000000 --- a/bi-connectors/TableauConnector/src/connection-fields.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/bi-connectors/TableauConnector/src/connection-metadata.xml b/bi-connectors/TableauConnector/src/connection-metadata.xml deleted file mode 100644 index 1b3432c317..0000000000 --- a/bi-connectors/TableauConnector/src/connection-metadata.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/bi-connectors/TableauConnector/src/connectionBuilder.js b/bi-connectors/TableauConnector/src/connectionBuilder.js deleted file mode 100644 index 1a16fddaec..0000000000 --- a/bi-connectors/TableauConnector/src/connectionBuilder.js +++ /dev/null @@ -1,28 +0,0 @@ -(function dsbuilder(attr){ - var connStr = "jdbc:opensearch://"; - // Set SSL value in connection string - if (attr[connectionHelper.attributeSSLMode] == "require"){ - connStr += "https://"; - } else { - connStr += "http://"; - } - - // Set host information in connection string - connStr += attr[connectionHelper.attributeServer] + ":" + attr[connectionHelper.attributePort] + "?"; - - // Set authentication values in connection string - var authAttrValue = attr[connectionHelper.attributeAuthentication]; - if (authAttrValue == "auth-none"){ - connStr += "auth=NONE&trustSelfSigned=" + attr["v-trustSelfSigned"]; - } else if (authAttrValue == "auth-integrated"){ - connStr += "auth=AWS_SIGV4"; - var region = attr["v-region"]; - if (region){ - connStr += "&Region=" + region; - } - } else { //if (authAttrValue == "auth-user-pass"){ - connStr += "auth=BASIC&user=" + attr[connectionHelper.attributeUsername] + "&password=" + attr[connectionHelper.attributePassword] + "&trustSelfSigned=" + attr["v-trustSelfSigned"]; - } - - return [connStr]; -}) diff --git a/bi-connectors/TableauConnector/src/connectionResolver.tdr b/bi-connectors/TableauConnector/src/connectionResolver.tdr deleted file mode 100644 index c51adc002e..0000000000 --- a/bi-connectors/TableauConnector/src/connectionResolver.tdr +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - diff --git a/sql-odbc/src/TestRunner/test_exclude_list.txt b/sql-odbc/src/TestRunner/test_exclude_list.txt deleted file mode 100644 index 32a0ca2634..0000000000 --- a/sql-odbc/src/TestRunner/test_exclude_list.txt +++ /dev/null @@ -1,2 +0,0 @@ -ut_aws_sdk_cpp -itodbc_aws_auth \ No newline at end of file diff --git a/sql-odbc/src/TestRunner/test_runner.py b/sql-odbc/src/TestRunner/test_runner.py deleted file mode 100644 index ad2c4d9014..0000000000 --- a/sql-odbc/src/TestRunner/test_runner.py +++ /dev/null @@ -1,304 +0,0 @@ -import os 
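A note on the connectionBuilder.js removed above: it is the piece that turns the Tableau dialog attributes into a JDBC URL of the form jdbc:opensearch://http(s)://host:port?auth=..., appending BASIC, AWS_SIGV4, or NONE parameters according to the chosen authentication mode. A rough Python rendering of that assembly (an illustrative sketch only; the shipped logic is the JavaScript shown above):

    def build_jdbc_url(server, port, use_ssl, auth,
                       user=None, password=None,
                       trust_self_signed=False, region=None):
        # Mirror the deleted connectionBuilder.js: scheme first, then
        # host:port, then the auth parameters for the chosen mode.
        url = "jdbc:opensearch://" + ("https://" if use_ssl else "http://")
        url += f"{server}:{port}?"
        if auth == "auth-none":
            url += f"auth=NONE&trustSelfSigned={str(trust_self_signed).lower()}"
        elif auth == "auth-integrated":
            url += "auth=AWS_SIGV4"
            if region:
                url += f"&Region={region}"
        else:  # auth-user-pass
            url += (f"auth=BASIC&user={user}&password={password}"
                    f"&trustSelfSigned={str(trust_self_signed).lower()}")
        return url

    print(build_jdbc_url("localhost", 9200, False, "auth-none"))
    # jdbc:opensearch://http://localhost:9200?auth=NONE&trustSelfSigned=false

The body of the deleted test_runner.py continues below.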
-import subprocess -import json -import re -import traceback -import sys -import getopt -import shutil -from mako.template import Template -from string import capwords - -UT_TYPE = "UT" -IT_TYPE = "IT" -PERFORMANCE_TYPE = "performance" -PERFORMANCE_INFO = "performance_info" -PERFORMANCE_RESULTS = "performance_results" -EXCLUDE_EXTENSION_LIST = ( - ".py", ".c", ".cmake", ".log", - ".pdb", ".dll", ".sln", ".vcxproj", ".user", - ".tlog", ".lastbuildstate", ".filters", - ".obj", ".exp", ".lib", ".h", ".cpp", ".ilk") -total_failures = 0 -SYNC_START = "%%__PARSE__SYNC__START__%%" -SYNC_SEP = "%%__SEP__%%" -SYNC_END = "%%__PARSE__SYNC__END__%%" -SYNC_QUERY = "%%__QUERY__%%"; -SYNC_CASE = "%%__CASE__%%"; -SYNC_MIN = "%%__MIN__%%"; -SYNC_MAX = "%%__MAX__%%"; -SYNC_MEAN = "%%__MEAN__%%"; -SYNC_MEDIAN = "%%__MEDIAN__%%"; - -def GetTestSuiteExes(test_type, test_suites, exclude_tests_list): - test_exes = [] - for root, dirs, files in os.walk(os.getcwd()): - for name in dirs: - if name.startswith("bin"): - dirs = name - for file_name in files: - if file_name.endswith(EXCLUDE_EXTENSION_LIST): - continue - if file_name.startswith(tuple(exclude_tests_list)): - continue - if test_suites is None and file_name.startswith(test_type.lower()): - print(f"Found {test_type} file: {file_name}") - test_exes.append(os.path.join(root, file_name)) - elif test_suites is not None and file_name.startswith(test_type.lower()) and (file_name in test_suites.split(sep=",")): - print(f"Found {test_type} file: {file_name}") - test_exes.append(os.path.join(root, file_name)) - return test_exes - -def RunTests(tests, test_type): - output = [] - global total_failures - for test in tests: - print("Running " + test) - output_path = test.replace(".exe", "") + ".log" - total_failures += subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) - if test_type == UT_TYPE: - with open(output_path, "r+") as f: - output.append({"UnitTest" : test.split(os.path.sep)[-1].replace(".exe",""), "Log": f.read()}) - elif test_type == IT_TYPE: - with open(output_path, "r+") as f: - output.append({"IntegrationTest" : test.split(os.path.sep)[-1].replace(".exe",""), "Log": f.read()}) - print("Total Failures :", total_failures) - return output - -def FindBetween(s, f, l): - try: - start = s.index(f) + len(f) - end = s.index(l,start) - return s[start:end] - except ValueError: - return "" - -def GetAndTranslatePerformanceInfo(test): - global total_failures - output_path = test.replace(".exe", "") + ".log" - total_failures += subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) - output = None - with open(output_path, "r+") as f: - log = f.readlines() - if log == None: - raise Exception("Failed to read in performance info test results") - reading = False - output = {} - for line in log: - if SYNC_START in line: - reading = True - continue - if SYNC_END in line: - reading = False - continue - if reading: - data = line.split(SYNC_SEP) - if len(data) != 2: - raise Exception(f"Unknown log line format: {line}") - if data[0].rstrip() == "number": - data[0] = "Version Number" - else: - data[0] = capwords(data[0].rstrip().replace("_", " ")) - data[0].replace("Uuid", "UUID") - output[data[0]] = data[1] - if "Not all tests passed" in line: - raise Exception("Performance info test failed") - if output == {}: - raise Exception("Failed to get any information out of performance info log") - return output - -def GetAndTranslatePerformanceResults(test): - global total_failures - output_path = test.replace(".exe", "") + ".log" - total_failures += 
subprocess.call([test, "-fout", output_path, "--gtest_color=no"]) - output = None - with open(output_path, "r+") as f: - log = f.readlines() - if log == None: - raise Exception("Failed to read in performance info test results") - reading = False - output = [] - single_case = {} - sync_items_line = [SYNC_QUERY, SYNC_CASE, SYNC_MIN, SYNC_MAX, SYNC_MEAN, SYNC_MEDIAN] - sync_items_readable = [item.replace("%%","").replace("__","").capitalize() for item in sync_items_line] - for line in log: - if SYNC_START in line: - single_case = {} - reading = True - continue - if SYNC_END in line: - if set(sync_items_readable) != set(single_case.keys()): - info = f'Missing data in test case: {single_case}. Items {sync_items_readable}. Keys {single_case.keys()}' - raise Exception(info) - output.append(single_case) - reading = False - continue - if reading: - for sync_item in sync_items_line: - if sync_item in line: - single_case[sync_item.replace("%%","").replace("__","").capitalize()] = line.replace(sync_item,"").rstrip() - return output - -def ParseUnitTestCase(log_lines, test_case): - start_tag = test_case + "." - test_case_info = { "TestCase" : test_case } - tests = [] - for log_line in log_lines: - if start_tag in log_line and "RUN" in log_line: - test = log_line.split(start_tag)[1] - tests.append(test) - if "[----------] " in log_line and (test_case + " ") in log_line and log_line.endswith(" ms total)"): - test_case_info["TotalTime"] = FindBetween(log_line, "(", ")").replace(" total","") - - test_infos = [] - for test in tests: - test_tag = start_tag + test - test_info = { "TestName" : test } - for log_line in log_lines: - if test_tag in log_line and log_line.endswith(")"): - test_info["TestTime"] = FindBetween(log_line, "(", ")") - test_info["TestResult"] = FindBetween(log_line, "[", "]").replace(" ", "") - - if test_info["TestResult"] != "OK": - start_error_grab = False - error_info = "" - for log_line in log_lines: - if test_tag in log_line and not log_line.endswith(")"): - start_error_grab = True - elif test_tag in log_line and log_line.endswith(")"): - break - elif start_error_grab: - if error_info != "": - error_info += os.linesep + log_line - else: - error_info += log_line - test_info["Error"] = error_info - test_infos.append(test_info) - test_case_info["TestCount"] = str(len(test_infos)) - test_case_info["TestResults"] = test_infos - pass_count = 0 - for test_info in test_infos: - if test_info["TestResult"] == "OK": - pass_count = pass_count + 1 - test_case_info["PassCount"] = str(pass_count) - return test_case_info - -def ParseUnitTestLog(unit_test, log): - log_json = { "UnitTest" : unit_test } - log_split = log.splitlines() - if len(log) < 8: - return {} - - tmp = "" - for log in log_split: - if log.startswith("[==========] Running"): - tmp = log.replace("[==========] Running ", "").replace(" test suites.", "").replace( - " test suite.", "").replace("tests from", "").replace("test from", "") - if tmp == "": - print('!!! FAILED TO FIND LOG WITH RUNNING !!!') - log_json["TotalTestCount"] = "0" - log_json["TotalTestCases"] = "0" - else: - log_json["TotalTestCount"] = tmp.split(" ")[0] - log_json["TotalTestCases"] = tmp.split(" ")[1] - log_json["TestCases"] = [] - test_cases = [] - for _line in log_split: - tag = { } - if re.match(r".*tests? from.*", _line) and "[----------]" in _line and "total" not in _line: - test_cases.append(re.split(" tests? 
from ", _line)[1]) - case_pass_count = 0 - test_pass_count = 0 - for test_case in test_cases: - log_json["TestCases"].append(ParseUnitTestCase(log_split, test_case)) - for test_case in log_json["TestCases"]: - if test_case["PassCount"] == test_case["TestCount"]: - case_pass_count += 1 - test_pass_count += int(test_case["PassCount"]) - log_json["CasePassCount"] = str(case_pass_count) - log_json["TestPassCount"] = str(test_pass_count) - return log_json - -def TranslateTestOutput(test_type, outputs): - log_jsons = [] - if test_type == UT_TYPE: - for output in outputs: - log_jsons.append(ParseUnitTestLog(output["UnitTest"], output["Log"])) - elif test_type == IT_TYPE: - for output in outputs: - log_jsons.append(ParseUnitTestLog(output["IntegrationTest"], output["Log"])) - return log_jsons - -def RunAllTests(test_types, test_suites, exclude_test_list): - final_output = {} - - for _type in test_types: - tests = GetTestSuiteExes(_type, test_suites, exclude_test_list) - print("!! Found tests:", *tests, sep="\n") - if PERFORMANCE_TYPE == _type: - final_output[PERFORMANCE_TYPE] = {} - for test in tests: - if test.replace(".exe", "").endswith(PERFORMANCE_INFO): - final_output[PERFORMANCE_TYPE]["Info"] = GetAndTranslatePerformanceInfo(test) - elif test.replace(".exe", "").endswith(PERFORMANCE_RESULTS): - final_output[PERFORMANCE_TYPE]["Results"] = GetAndTranslatePerformanceResults(test) - else: - test_outputs = RunTests(tests, _type) - final_output[_type] = TranslateTestOutput(_type, test_outputs) - return final_output - -def ParseCommandLineArguments(): - infile = None - outfile = None - suites = None - efile = None - opts, args = getopt.getopt(sys.argv[1:],"i:o:s:e:",["ifile=","ofile=","suites=","efile="]) - for opt,arg in opts: - if opt in ('-i', '--ifile'): - infile = arg - elif opt in ('-s', '--suites'): - suites = arg - elif opt in ('-o', '--ofile'): - outfile = arg - elif opt in ('-e', '--efile'): - efile = arg - return (infile, outfile, suites, efile) - -def main(): - try: - (infile, outfile, suites, efile) = ParseCommandLineArguments() - if infile is None or outfile is None: - print("Usage: -i -o [-s -e ]") - sys.exit(1) - exclude_test_list = [] - global total_failures - total_failures = 0 - if efile is not None: - with open(efile) as ef: - exclude_test_list = ef.readlines() - exclude_test_list = [l.strip() for l in exclude_test_list if l.strip() != ""] - if len(exclude_test_list) == 0: - print('== Exclude list empty. Running all available tests ==') - else: - print(f'== Excluding tests {exclude_test_list} ==') - else: - print('== No exclude list. 
Running all available tests ==') - print(f'== Using template file {infile} ==') - template = Template(filename=infile) - - if suites is not None: - print(f'== Using suites {suites} ==') - with open(os.path.join(os.getcwd(), outfile), 'w+') as results_file: - data = RunAllTests([UT_TYPE, IT_TYPE, PERFORMANCE_TYPE], suites, exclude_test_list) - os.chmod(outfile, 0o744) - results_file.write(template.render(data = data)) - - print(f"== Finished generating results file {outfile} ==") - os._exit(total_failures) - - except: - print(traceback.format_exc()) - os._exit(255) - -if __name__ == "__main__": - main() diff --git a/sql-odbc/src/UnitTests/CMakeLists.txt b/sql-odbc/src/UnitTests/CMakeLists.txt deleted file mode 100644 index 825c0c1770..0000000000 --- a/sql-odbc/src/UnitTests/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -project(unit_tests) - -set(HELPER_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTHelper") -set(CONN_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTConn") -set(RABBIT_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTRabbit") -set(CRITICALSECTION_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTCriticalSection") -set(AWSSDKCPP_UTEST "${CMAKE_CURRENT_SOURCE_DIR}/UTAwsSdkCpp") - -# Projects to build -add_subdirectory(${HELPER_UTEST}) -add_subdirectory(${CONN_UTEST}) -add_subdirectory(${RABBIT_UTEST}) -add_subdirectory(${CRITICALSECTION_UTEST}) -add_subdirectory(${AWSSDKCPP_UTEST}) diff --git a/sql-odbc/src/UnitTests/GoogleTest.LICENSE b/sql-odbc/src/UnitTests/GoogleTest.LICENSE deleted file mode 100644 index 65c76c50ce..0000000000 --- a/sql-odbc/src/UnitTests/GoogleTest.LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
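Returning to the deleted test_runner.py for a moment: the performance binaries hand results to the runner through sentinel strings embedded in their gtest logs. A block opens with %%__PARSE__SYNC__START__%%, each data line carries a key and a value separated by %%__SEP__%%, and %%__PARSE__SYNC__END__%% closes the block. A condensed Python sketch of that reader, using the same markers but with simplified error handling:

    SYNC_START = "%%__PARSE__SYNC__START__%%"
    SYNC_SEP = "%%__SEP__%%"
    SYNC_END = "%%__PARSE__SYNC__END__%%"

    def parse_sync_block(log_lines):
        # Collect the key/value pairs emitted between the SYNC markers.
        reading, output = False, {}
        for line in log_lines:
            if SYNC_START in line:
                reading = True
            elif SYNC_END in line:
                reading = False
            elif reading:
                key, _, value = line.partition(SYNC_SEP)
                output[key.strip()] = value.strip()
        return output

    demo = [SYNC_START, f"version{SYNC_SEP}1.0", f"driver{SYNC_SEP}sqlodbc", SYNC_END]
    print(parse_sync_block(demo))  # {'version': '1.0', 'driver': 'sqlodbc'}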
\ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt b/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt deleted file mode 100644 index 05f665aabf..0000000000 --- a/sql-odbc/src/UnitTests/UTAwsSdkCpp/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -project(ut_aws_sdk_cpp) - -# Source, headers, and include dirs -set(SOURCE_FILES test_aws_sdk_cpp.cpp) -include_directories( ${UT_HELPER} - ${AWSSDK_INCLUDE_DIR} - ${VLD_SRC}) - -# Generate executable -add_executable(ut_aws_sdk_cpp ${SOURCE_FILES}) - -# Find packages from vcpkg -find_package(GTest CONFIG REQUIRED) - -# Library dependencies -target_link_libraries(ut_aws_sdk_cpp ut_helper GTest::gtest_main aws-cpp-sdk-core ${VLD}) -target_compile_definitions(ut_aws_sdk_cpp PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config b/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config deleted file mode 100644 index 3c6fe17f54..0000000000 --- a/sql-odbc/src/UnitTests/UTAwsSdkCpp/packages.config +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp deleted file mode 100644 index 97b544ec11..0000000000 --- a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// -// pch.cpp -// Include the standard header and generate the precompiled header. -// - -#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h b/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h deleted file mode 100644 index 29c81fffa1..0000000000 --- a/sql-odbc/src/UnitTests/UTAwsSdkCpp/pch.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// pch.h -// Header for standard system include files. -// - -#pragma once - -#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp b/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp deleted file mode 100644 index 4952c70fc2..0000000000 --- a/sql-odbc/src/UnitTests/UTAwsSdkCpp/test_aws_sdk_cpp.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// clang-format off -#include "pch.h" -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -#include -#include -#include -#include -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -#include "unit_test_helper.h" - -using namespace Aws::Auth; -using namespace Aws::Client; -using namespace Aws::Http; - -static const char service_name[] = "es"; -static const char allocation_tag[] = "AWS_SIGV4_Test"; -static const char host[] = "https://search-bit-quill-cx3hpfoxvasohujxkllmgjwqde.us-west-2.es.amazonaws.com"; -static const char region[] = "us-west-2"; - -TEST(AWS_SIGV4, EnvironmentAWSCredentials) { - Aws::SDKOptions options; - EXPECT_NO_THROW(Aws::InitAPI(options)); - - auto request = CreateHttpRequest(Aws::String(host), HttpMethod::HTTP_GET, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod); - - std::shared_ptr credential_provider = Aws::MakeShared(allocation_tag); - - AWSAuthV4Signer signer(credential_provider, service_name, region); - ASSERT_TRUE(signer.SignRequest(*request)); - - auto http_client = CreateHttpClient(Aws::Client::ClientConfiguration()); - - auto response = http_client->MakeRequest(request); - ASSERT_NE(response, nullptr); - EXPECT_EQ(Aws::Http::HttpResponseCode::OK, response->GetResponseCode()); - - EXPECT_NO_THROW(Aws::ShutdownAPI(options)); -} - -TEST(SettingSDKOptions, TurnLoggingOn) { - Aws::SDKOptions options; - options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Info; 
- EXPECT_NO_THROW(Aws::InitAPI(options)); - EXPECT_NO_THROW(Aws::ShutdownAPI(options)); -} - -int main(int argc, char** argv) { - testing::internal::CaptureStdout(); - ::testing::InitGoogleTest(&argc, argv); - int failures = RUN_ALL_TESTS(); - std::string output = testing::internal::GetCapturedStdout(); - std::cout << output << std::endl; - std::cout << (failures ? "Not all tests passed." : "All tests passed") << std::endl; - WriteFileIfSpecified(argv, argv + argc, "-fout", output); - return failures; -} diff --git a/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt b/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt deleted file mode 100644 index 0e90d8f4d6..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -project(ut_conn) - -# Source, headers, and include dirs -set(SOURCE_FILES test_conn.cpp test_query_execution.cpp) -include_directories( ${UT_HELPER} - ${OPENSEARCHODBC_SRC} - ${RAPIDJSON_SRC} - ${RABBIT_SRC} - ${LIBCURL_SRC} - ${VLD_SRC} ) - -# Generate executable -add_executable(ut_conn ${SOURCE_FILES}) - -# Find packages from vcpkg -find_package(GTest CONFIG REQUIRED) - -# Library dependencies -target_link_libraries(ut_conn sqlodbc ut_helper GTest::gtest_main) -target_compile_definitions(ut_conn PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTConn/packages.config b/sql-odbc/src/UnitTests/UTConn/packages.config deleted file mode 100644 index 3c6fe17f54..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/packages.config +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTConn/pch.cpp b/sql-odbc/src/UnitTests/UTConn/pch.cpp deleted file mode 100644 index 97b544ec11..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/pch.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// -// pch.cpp -// Include the standard header and generate the precompiled header. -// - -#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTConn/pch.h b/sql-odbc/src/UnitTests/UTConn/pch.h deleted file mode 100644 index 29c81fffa1..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/pch.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// pch.h -// Header for standard system include files. -// - -#pragma once - -#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTConn/test_conn.cpp b/sql-odbc/src/UnitTests/UTConn/test_conn.cpp deleted file mode 100644 index a93962a0f7..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/test_conn.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// clang-format off -#include "pch.h" -#include "unit_test_helper.h" -#include "opensearch_communication.h" -// clang-format on - -const size_t valid_option_count = 4; -const size_t invalid_option_count = 4; -const size_t missing_option_count = 3; -const std::string valid_host = (use_ssl ? 
"https://localhost" : "localhost"); -const std::string valid_port = "9200"; -const std::string valid_user = "admin"; -const std::string valid_pw = "admin"; -const std::string valid_region = "us-west-3"; -const std::string invalid_host = "10.1.1.189"; -const std::string invalid_port = "920"; -const std::string invalid_user = "amin"; -const std::string invalid_pw = "amin"; -const std::string invalid_region = "bad-region"; -runtime_options valid_opt_val = {{valid_host, valid_port, "1", "0"}, - {"BASIC", valid_user, valid_pw, valid_region}, - {use_ssl, false, "", "", "", ""}}; -runtime_options invalid_opt_val = { - {invalid_host, invalid_port, "1", "0"}, - {"BASIC", invalid_user, invalid_pw, valid_region}, - {use_ssl, false, "", "", "", ""}}; -runtime_options missing_opt_val = {{"", "", "1", "0"}, - {"BASIC", "", invalid_pw, valid_region}, - {use_ssl, false, "", "", "", ""}}; - -TEST(TestOpenSearchConnConnectionOptions, ValidParameters) { - OpenSearchCommunication conn; - EXPECT_EQ(true, - conn.ConnectionOptions(valid_opt_val, 1, 1, valid_option_count)); -} - -TEST(TestOpenSearchConnConnectionOptions, MissingParameters) { - OpenSearchCommunication conn; - EXPECT_EQ(false, conn.ConnectionOptions(missing_opt_val, 1, 1, - missing_option_count)); -} - -class TestOpenSearchConnConnectDBStart : public testing::Test { - public: - TestOpenSearchConnConnectDBStart() { - } - - void SetUp() { - } - - void TearDown() { - m_conn.DropDBConnection(); - } - - ~TestOpenSearchConnConnectDBStart() { - // cleanup any pending stuff, but no exceptions allowed - } - - OpenSearchCommunication m_conn; -}; - -TEST_F(TestOpenSearchConnConnectDBStart, ValidParameters) { - ASSERT_NE(false, m_conn.ConnectionOptions(valid_opt_val, 1, 1, - valid_option_count)); - EXPECT_EQ(true, m_conn.ConnectDBStart()); - EXPECT_EQ(CONNECTION_OK, m_conn.GetConnectionStatus()); -} - -TEST_F(TestOpenSearchConnConnectDBStart, InvalidParameters) { - ASSERT_TRUE( - m_conn.ConnectionOptions(invalid_opt_val, 1, 1, invalid_option_count)); - EXPECT_EQ(false, m_conn.ConnectDBStart()); - EXPECT_EQ(CONNECTION_BAD, m_conn.GetConnectionStatus()); -} - -TEST_F(TestOpenSearchConnConnectDBStart, MissingParameters) { - ASSERT_NE(true, m_conn.ConnectionOptions(missing_opt_val, 1, 1, - missing_option_count)); - EXPECT_EQ(false, m_conn.ConnectDBStart()); - EXPECT_EQ(CONNECTION_BAD, m_conn.GetConnectionStatus()); -} - -TEST(TestOpenSearchConnDropDBConnection, InvalidParameters) { - OpenSearchCommunication conn; - ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); - ASSERT_TRUE( - conn.ConnectionOptions(invalid_opt_val, 1, 1, invalid_option_count)); - ASSERT_NE(true, conn.ConnectDBStart()); - ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); - conn.DropDBConnection(); - EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); -} - -TEST(TestOpenSearchConnDropDBConnection, MissingParameters) { - OpenSearchCommunication conn; - ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); - ASSERT_NE(true, conn.ConnectionOptions(missing_opt_val, 1, 1, - missing_option_count)); - ASSERT_NE(true, conn.ConnectDBStart()); - ASSERT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); - conn.DropDBConnection(); - EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); -} - -TEST(TestOpenSearchConnDropDBConnection, ValidParameters) { - OpenSearchCommunication conn; - ASSERT_NE(false, - conn.ConnectionOptions(valid_opt_val, 1, 1, valid_option_count)); - ASSERT_NE(false, conn.ConnectDBStart()); - ASSERT_EQ(CONNECTION_OK, conn.GetConnectionStatus()); - conn.DropDBConnection(); - 
EXPECT_EQ(CONNECTION_BAD, conn.GetConnectionStatus()); -} - -int main(int argc, char** argv) { - testing::internal::CaptureStdout(); - ::testing::InitGoogleTest(&argc, argv); - - int failures = RUN_ALL_TESTS(); - - std::string output = testing::internal::GetCapturedStdout(); - std::cout << output << std::endl; - std::cout << (failures ? "Not all tests passed." : "All tests passed") - << std::endl; - WriteFileIfSpecified(argv, argv + argc, "-fout", output); - - return failures; -} diff --git a/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp b/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp deleted file mode 100644 index f3899123dd..0000000000 --- a/sql-odbc/src/UnitTests/UTConn/test_query_execution.cpp +++ /dev/null @@ -1,115 +0,0 @@ -// clang-format off -#include "pch.h" -#include "unit_test_helper.h" -#include "opensearch_communication.h" -#include "opensearch_helper.h" -// clang-format on - -const std::string valid_host = (use_ssl ? "https://localhost" : "localhost"); -const std::string valid_port = "9200"; -const std::string valid_user = "admin"; -const std::string valid_pw = "admin"; -const std::string valid_region = "us-west-3"; -const std::string query = - "SELECT Origin FROM opensearch_dashboards_sample_data_flights LIMIT 5"; -const std::string all_columns_flights_query = - "SELECT * FROM opensearch_dashboards_sample_data_flights LIMIT 5"; -const std::string some_columns_flights_query = - "SELECT Origin, OriginWeather FROM opensearch_dashboards_sample_data_flights LIMIT 5"; -const std::string invalid_query = "SELECT"; -const int EXECUTION_SUCCESS = 0; -const int EXECUTION_ERROR = -1; -const std::string fetch_size = "0"; -const int all_columns_flights_count = 25; -const int some_columns_flights_count = 2; -runtime_options valid_conn_opt_val = { - {valid_host, valid_port, "1", "0"}, - {"BASIC", valid_user, valid_pw, valid_region}, - {use_ssl, false, "", "", "", ""}}; - -TEST(TestOpenSearchExecDirect, ValidQuery) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - EXPECT_EQ(EXECUTION_SUCCESS, - OpenSearchExecDirect(&conn, some_columns_flights_query.c_str(), fetch_size.c_str())); -} - -TEST(TestOpenSearchExecDirect, MissingQuery) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - EXPECT_EQ(EXECUTION_ERROR, - OpenSearchExecDirect(&conn, NULL, fetch_size.c_str())); -} - -TEST(TestOpenSearchExecDirect, MissingConnection) { - EXPECT_EQ(EXECUTION_ERROR, - OpenSearchExecDirect(NULL, query.c_str(), fetch_size.c_str())); -} - -// Conn::ExecDirect - -TEST(TestConnExecDirect, ValidQueryAllColumns) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - - conn.ExecDirect(all_columns_flights_query.c_str(), fetch_size.c_str()); - OpenSearchResult* result = conn.PopResult(); - EXPECT_EQ("SELECT", result->command_type); - EXPECT_FALSE(result->result_json.empty()); - EXPECT_EQ(all_columns_flights_count, result->num_fields); - EXPECT_EQ((size_t)all_columns_flights_count, result->column_info.size()); -} - -TEST(TestConnExecDirect, ValidQuerySomeColumns) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - - conn.ExecDirect(some_columns_flights_query.c_str(), fetch_size.c_str()); - OpenSearchResult* result = conn.PopResult(); 
- EXPECT_EQ("SELECT", result->command_type); - EXPECT_FALSE(result->result_json.empty()); - EXPECT_EQ(some_columns_flights_count, result->num_fields); - EXPECT_EQ((size_t)some_columns_flights_count, result->column_info.size()); -} - -TEST(TestConnExecDirect, InvalidQuery) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - - conn.ExecDirect(invalid_query.c_str(), fetch_size.c_str()); - OpenSearchResult* result = conn.PopResult(); - EXPECT_EQ(NULL, (void*)result); -} - -// Conn::PopResult - -TEST(TestConnPopResult, PopEmptyQueue) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - - OpenSearchResult* result = conn.PopResult(); - EXPECT_EQ(NULL, (void*)result); -} - -TEST(TestConnPopResult, PopTwoQueryResults) { - OpenSearchCommunication conn; - ASSERT_TRUE(conn.ConnectionOptions(valid_conn_opt_val, false, 0, 0)); - ASSERT_TRUE(conn.ConnectDBStart()); - - conn.ExecDirect(some_columns_flights_query.c_str(), fetch_size.c_str()); - conn.ExecDirect(all_columns_flights_query.c_str(), fetch_size.c_str()); - - // Pop some_columns - OpenSearchResult* result = conn.PopResult(); - EXPECT_EQ(some_columns_flights_count, result->num_fields); - - // Pop all_columns - result = conn.PopResult(); - EXPECT_EQ(all_columns_flights_count, result->num_fields); -} diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt b/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt deleted file mode 100644 index e806ef9f4b..0000000000 --- a/sql-odbc/src/UnitTests/UTCriticalSection/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -project(ut_critical_section) - -# Source, headers, and include dirs -set(SOURCE_FILES test_critical_section.cpp) -include_directories( ${UT_HELPER} - ${OPENSEARCHODBC_SRC} - ${RAPIDJSON_SRC} - ${VLD_SRC} - ${RABBIT_SRC} ) - -# Generate executable -add_executable(ut_critical_section ${SOURCE_FILES}) - -# Find packages from vcpkg -find_package(GTest CONFIG REQUIRED) - -# Library dependencies -target_link_libraries(ut_critical_section sqlodbc ut_helper GTest::gtest_main) -target_compile_definitions(ut_critical_section PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/packages.config b/sql-odbc/src/UnitTests/UTCriticalSection/packages.config deleted file mode 100644 index 3c6fe17f54..0000000000 --- a/sql-odbc/src/UnitTests/UTCriticalSection/packages.config +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp b/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp deleted file mode 100644 index 97b544ec11..0000000000 --- a/sql-odbc/src/UnitTests/UTCriticalSection/pch.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// -// pch.cpp -// Include the standard header and generate the precompiled header. -// - -#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/pch.h b/sql-odbc/src/UnitTests/UTCriticalSection/pch.h deleted file mode 100644 index 29c81fffa1..0000000000 --- a/sql-odbc/src/UnitTests/UTCriticalSection/pch.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// pch.h -// Header for standard system include files. 
-// - -#pragma once - -#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp b/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp deleted file mode 100644 index 31a3f7ea2b..0000000000 --- a/sql-odbc/src/UnitTests/UTCriticalSection/test_critical_section.cpp +++ /dev/null @@ -1,136 +0,0 @@ -#include <thread> - -#include <sstream> -#include <string> -#include <vector> - -#include "opensearch_helper.h" -#include "pch.h" -#include "unit_test_helper.h" - -const size_t loop_count = 10; -const size_t thread_count = 1000; - -#define INIT_CS(x) XPlatformInitializeCriticalSection(&(x)) -#define ENTER_CS(x) XPlatformEnterCriticalSection((x)) -#define LEAVE_CS(x) XPlatformLeaveCriticalSection((x)) -#define DELETE_CS(x) XPlatformDeleteCriticalSection(&(x)) - -class TestCriticalSection : public testing::Test { - public: - TestCriticalSection() : m_lock(NULL) { - } - - void SetUp() { - INIT_CS(m_lock); - } - - void TearDown() { - DELETE_CS(m_lock); - } - - ~TestCriticalSection() { - } - void* m_lock; - - typedef struct CriticalInfo { - volatile size_t* shared_mem; - void* lock; - } CriticalInfo; -}; - -TEST_F(TestCriticalSection, SingleEnterExit) { - ENTER_CS(m_lock); - LEAVE_CS(m_lock); -} - -TEST_F(TestCriticalSection, MultipleEntersMultipleExits) { - for (size_t i = 0; i < loop_count; i++) - ENTER_CS(m_lock); - for (size_t i = 0; i < loop_count; i++) - LEAVE_CS(m_lock); -} - -TEST_F(TestCriticalSection, MultipleEnterExit) { - for (size_t i = 0; i < loop_count; i++) { - ENTER_CS(m_lock); - LEAVE_CS(m_lock); - } -} - -TEST_F(TestCriticalSection, MultiThreadSingleLock) { - auto f = [](CriticalInfo* info) { - *info->shared_mem = static_cast< size_t >(1); - ENTER_CS(info->lock); - *info->shared_mem = static_cast< size_t >(2); - LEAVE_CS(info->lock); - }; - - volatile size_t shared_mem = 0; - CriticalInfo crit_info; - crit_info.shared_mem = &shared_mem; - crit_info.lock = m_lock; - - ENTER_CS(m_lock); - std::thread thread_object(f, &crit_info); -#ifdef WIN32 - Sleep(1000); -#else - usleep(1000 * 1000); -#endif - EXPECT_EQ(shared_mem, static_cast< size_t >(1)); - LEAVE_CS(m_lock); -#ifdef WIN32 - Sleep(1000); -#else - usleep(1000 * 1000); -#endif - EXPECT_EQ(shared_mem, static_cast< size_t >(2)); - thread_object.join(); -} - -// Make many threads to see if multiple simultaneous attempts at locking cause -// any issues -TEST_F(TestCriticalSection, RaceConditions) { - auto f = [](CriticalInfo* info) { - std::stringstream ss_thread_id; - ss_thread_id << std::this_thread::get_id(); - size_t thread_id = static_cast< size_t >( - strtoull(ss_thread_id.str().c_str(), NULL, 10)); - ENTER_CS(info->lock); - // Update shared memory, release thread priority, then check if memory - // is still the same - *info->shared_mem = static_cast< size_t >(thread_id); -#ifdef WIN32 - Sleep(0); -#else - usleep(0); -#endif - EXPECT_EQ(thread_id, *info->shared_mem); - LEAVE_CS(info->lock); - }; - - volatile size_t shared_mem = 0; - CriticalInfo crit_info; - crit_info.shared_mem = &shared_mem; - crit_info.lock = m_lock; - std::vector< std::thread > threads; - threads.reserve(thread_count); - - for (size_t i = 0; i < thread_count; i++) - threads.emplace_back(std::thread(f, &crit_info)); - - for (auto& it : threads) - it.join(); -} - -int main(int argc, char** argv) { - testing::internal::CaptureStdout(); - ::testing::InitGoogleTest(&argc, argv); - int failures = RUN_ALL_TESTS(); - std::string output = testing::internal::GetCapturedStdout(); - std::cout << output << std::endl; - std::cout << (failures ?
"Not all tests passed." : "All tests passed") - << std::endl; - WriteFileIfSpecified(argv, argv + argc, "-fout", output); -} diff --git a/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt b/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt deleted file mode 100644 index 1bfad42485..0000000000 --- a/sql-odbc/src/UnitTests/UTHelper/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -project(ut_helper) - -# Source, headers, and include dirs -set(SOURCE_FILES unit_test_helper.cpp) -set(HEADER_FILES unit_test_helper.h) -include_directories(${OPENSEARCHODBC_SRC} ${VLD_SRC}) - -# Generate dll (SHARED) -add_library(ut_helper SHARED ${SOURCE_FILES} ${HEADER_FILES}) - -if (WIN32 AND BITNESS EQUAL 64) -find_library( VLD - vld - HINTS "${LIBRARY_DIRECTORY}/VisualLeakDetector/lib64" - ) -target_link_libraries(ut_helper ${VLD}) -elseif (WIN32 AND BITNESS EQUAL 32) -find_library( VLD - vld - HINTS "${LIBRARY_DIRECTORY}/VisualLeakDetector/lib" - ) -target_link_libraries(ut_helper ${VLD}) -endif() - -# Find packages from vcpkg -find_package(GTest CONFIG REQUIRED) - -# Library dependencies -target_link_libraries(ut_helper sqlodbc GTest::gtest_main) -target_compile_definitions(ut_helper PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp deleted file mode 100644 index 9f8fbd7508..0000000000 --- a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.cpp +++ /dev/null @@ -1,15 +0,0 @@ -#include "unit_test_helper.h" - -#include -#include - -void WriteFileIfSpecified(char** begin, char** end, const std::string& option, - std::string& output) { - char** itr = std::find(begin, end, option); - if (itr != end && ++itr != end) { - std::ofstream out_file(*itr); - if (out_file.good()) - out_file << output; - } - return; -} diff --git a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h b/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h deleted file mode 100644 index fcdbfa95b2..0000000000 --- a/sql-odbc/src/UnitTests/UTHelper/unit_test_helper.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef UNIT_TEST_HELPER -#define UNIT_TEST_HELPER - -#if defined(WIN32) || defined (WIN64) -#ifdef _DEBUG -#define VLD_FORCE_ENABLE 1 -#include -#endif -#endif - -#include -#ifdef USE_SSL -const bool use_ssl = true; -#else -const bool use_ssl = false; -#endif - -void WriteFileIfSpecified(char** begin, char** end, const std::string& option, - std::string& output); - -#endif diff --git a/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt b/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt deleted file mode 100644 index b5f638fe01..0000000000 --- a/sql-odbc/src/UnitTests/UTRabbit/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -project(ut_rabbit) - -# Source, headers, and include dirs -set(SOURCE_FILES test_rabbit.cpp) -include_directories( ${UT_HELPER} - ${RAPIDJSON_SRC} - ${RABBIT_SRC} - ${VLD_SRC} ) - -# Generate executable -add_executable(ut_rabbit ${SOURCE_FILES}) - -# Find packages from vcpkg -find_package(GTest CONFIG REQUIRED) - -target_link_libraries(ut_rabbit ut_helper GTest::gtest_main ${VLD}) -target_compile_definitions(ut_rabbit PUBLIC _UNICODE UNICODE) diff --git a/sql-odbc/src/UnitTests/UTRabbit/packages.config b/sql-odbc/src/UnitTests/UTRabbit/packages.config deleted file mode 100644 index 3c6fe17f54..0000000000 --- a/sql-odbc/src/UnitTests/UTRabbit/packages.config +++ /dev/null @@ -1,4 +0,0 @@ - - - - \ No newline at end of file diff --git a/sql-odbc/src/UnitTests/UTRabbit/pch.cpp b/sql-odbc/src/UnitTests/UTRabbit/pch.cpp deleted file mode 100644 index 
97b544ec11..0000000000 --- a/sql-odbc/src/UnitTests/UTRabbit/pch.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// -// pch.cpp -// Include the standard header and generate the precompiled header. -// - -#include "pch.h" diff --git a/sql-odbc/src/UnitTests/UTRabbit/pch.h b/sql-odbc/src/UnitTests/UTRabbit/pch.h deleted file mode 100644 index 29c81fffa1..0000000000 --- a/sql-odbc/src/UnitTests/UTRabbit/pch.h +++ /dev/null @@ -1,8 +0,0 @@ -// -// pch.h -// Header for standard system include files. -// - -#pragma once - -#include "gtest/gtest.h" diff --git a/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp b/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp deleted file mode 100644 index 84f53605a7..0000000000 --- a/sql-odbc/src/UnitTests/UTRabbit/test_rabbit.cpp +++ /dev/null @@ -1,268 +0,0 @@ -// clang-format off -#include "pch.h" -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -#include "rabbit.hpp" -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -#include "unit_test_helper.h" - -const std::string invalid_json_schema = "{ invalid schema }"; -const std::string valid_json_schema = "{" // This was generated from the example OpenSearch data - "\"type\": \"object\"," - "\"properties\": {" - "\"schema\": {" - "\"type\": \"array\"," - "\"items\": [{" - "\"type\": \"object\"," - "\"properties\": {" - "\"name\": { \"type\": \"string\" }," - "\"type\": { \"type\": \"string\" }" - "}," - "\"required\": [ \"name\", \"type\" ]" - "}]" - "}," - "\"total\": { \"type\": \"integer\" }," - "\"datarows\": {" - "\"type\": \"array\"," - "\"items\": {}" - "}," - "\"size\": { \"type\": \"integer\" }," - "\"status\": { \"type\": \"integer\" }" - "}," - "\"required\": [\"schema\", \"total\", \"datarows\", \"size\", \"status\"]" -"}"; -const std::string valid_json_for_schema = "{" // This was taken from the example OpenSearch data - "\"schema\": [{" - "\"name\": \"valid_name1\"," - "\"type\": \"valid_type1\"},{" - "\"name\": \"valid_name2\"," - "\"type\": \"valid_type2\"},{" - "\"name\": \"valid_name3\"," - "\"type\": \"valid_type3\"}]," - "\"total\": 10," - "\"datarows\": []," - "\"size\": 3," - "\"status\": 200" -"}"; -const std::string invalid_json_for_schema = "{" - "\"schema\": [{" - "\"name\": 1," - "\"type\": \"valid_type1\"},{" - "\"name\": 2," - "\"type\": \"valid_type2\"},{" - "\"name\": 3," - "\"type\": \"valid_type3\"}]," - "\"total\": \"10\"," - "\"datarows\": {}," - "\"size\": \"string_size\"," - "\"status\": 200" -"}"; -const std::string invalid_json = "invalid json"; -const std::string valid_json_int = "{ \"value\" : 123 }"; -const std::string invalid_json_int = "{ \"value\" : invalid }"; -const std::string valid_json_str = "{ \"value\" : \"123\"}"; -const std::string invalid_json_str = "{ \"value\" : \"123}"; -const std::string valid_json_arr = "{ \"value\" : [ 1, \"2\", true] }"; -const std::string invalid_json_arr = "{ \"value\" : [ 1, 2 3] }"; -const std::string valid_json_obj = "{" - "\"value\" : {" - "\"subval_str\" : \"1\"," - "\"subval_int\" : 2," - "\"subval_bool\" : true," - "\"subval_flt\" : 3.4" - "}" -"}"; -const std::string invalid_json_obj = "{" - "\"value\" : {" - "\"subval_str\" : \"1\"" - "\"subval_int\" : 2," - "\"subval_bool\" : true," - "\"subval_flt\" : 3.4" - "}" -"}"; -// Intentionally serialized because it will be compared to a str parsed by rabbit, which is serialized by default -const std::string valid_sub_obj_for_conversion = 
"{\"subval_obj\":{\"subval_str\":\"1\",\"subval_int\":2,\"subval_bool\":true,\"subval_flt\":3.4}}"; -const std::string valid_obj_for_conversion = "{ \"value\" : " + valid_sub_obj_for_conversion + "}"; -// clang-format on - -const std::vector< size_t > distances = {0, 1, 5, 30}; - -TEST(StandardDistance, ValidIterator) { - rabbit::array arr; - for (size_t i = 0; i < distances.size(); i++) { - rabbit::array sub_array; - for (size_t j = 0; j < distances[i]; j++) { - sub_array.push_back(static_cast< uint64_t >(j)); - } - arr.push_back(sub_array); - } - - ASSERT_EQ(static_cast< size_t >(std::distance(arr.begin(), arr.end())), - distances.size()); - size_t i = 0; - for (auto it = arr.begin(); it < arr.end(); it++, i++) { - EXPECT_EQ(static_cast< size_t >( - std::distance(it->value_begin(), it->value_end())), - distances[i]); - } -} - -TEST(ConvertObjectToString, IteratorAtStringConvert) { - rabbit::document doc; - ASSERT_NO_THROW(doc.parse(valid_json_for_schema)); - rabbit::array arr; - ASSERT_NO_THROW(arr = doc["schema"]); - size_t i = 1; - std::string valid_name = "valid_name"; - std::string valid_type = "valid_type"; - for (auto it = arr.begin(); it < arr.end(); ++it, ++i) { - std::string name, type; - ASSERT_NO_THROW(name = it->at("name").as_string()); - ASSERT_NO_THROW(type = it->at("type").as_string()); - EXPECT_EQ(name, valid_name + std::to_string(i)); - EXPECT_EQ(type, valid_type + std::to_string(i)); - } -} - -TEST(ConvertObjectToString, ValidObject) { - rabbit::document doc; - EXPECT_NO_THROW(doc.parse(valid_obj_for_conversion)); - ASSERT_TRUE(doc.is_object()); - ASSERT_TRUE(doc.has("value")); - ASSERT_TRUE(doc["value"].is_object()); - std::string value_str = doc["value"].str(); - EXPECT_EQ(value_str, valid_sub_obj_for_conversion); -} - -TEST(ParseSchema, ValidSchemaValidDoc) { - rabbit::document doc; - EXPECT_NO_THROW(doc.parse(valid_json_for_schema, valid_json_schema)); -} - -TEST(ParseSchema, InvalidSchemaValidDoc) { - rabbit::document doc; - EXPECT_THROW(doc.parse(valid_json_for_schema, invalid_json_schema), - rabbit::parse_error); -} - -TEST(ParseSchema, ValidSchemaInvalidDoc) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json_for_schema, valid_json_schema), - rabbit::parse_error); -} - -TEST(ParseSchema, InvalidSchemaInvalidDoc) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json, invalid_json_schema), - rabbit::parse_error); -} - -TEST(ParseObj, ValidObj) { - rabbit::document doc; - EXPECT_NO_THROW(doc.parse(valid_json_obj)); - ASSERT_TRUE(doc.is_object()); - ASSERT_TRUE(doc.has("value")); - ASSERT_TRUE(doc["value"].is_object()); - ASSERT_TRUE(doc["value"].has("subval_str")); - ASSERT_TRUE(doc["value"].has("subval_int")); - ASSERT_TRUE(doc["value"].has("subval_bool")); - ASSERT_TRUE(doc["value"].has("subval_flt")); - ASSERT_TRUE(doc["value"]["subval_str"].is_string()); - ASSERT_TRUE(doc["value"]["subval_int"].is_int()); - ASSERT_TRUE(doc["value"]["subval_bool"].is_bool()); - ASSERT_TRUE(doc["value"]["subval_flt"].is_number()); - EXPECT_EQ("1", doc["value"]["subval_str"].as_string()); - EXPECT_EQ(2, doc["value"]["subval_int"].as_int()); - EXPECT_EQ(true, doc["value"]["subval_bool"].as_bool()); - EXPECT_EQ(3.4, doc["value"]["subval_flt"].as_double()); -} - -TEST(ParseObj, InvalidObj) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json_obj), rabbit::parse_error); -} - -TEST(ParseArr, ValidArr) { - rabbit::document doc; - ASSERT_NO_THROW(doc.parse(valid_json_arr)); - ASSERT_TRUE(doc.is_object()); - ASSERT_TRUE(doc.has("value")); - 
ASSERT_TRUE(doc["value"].is_array()); - - rabbit::array arr; - ASSERT_NO_THROW(arr = doc["value"]); - size_t i = 0; - for (rabbit::array::iterator it = arr.begin(); it != arr.end(); ++it, ++i) { - switch (i) { - case 0: - ASSERT_TRUE(it->is_int()); - EXPECT_EQ(1, it->as_int()); - break; - case 1: - ASSERT_TRUE(it->is_string()); - EXPECT_EQ("2", it->as_string()); - break; - case 2: - ASSERT_TRUE(it->is_bool()); - EXPECT_EQ(true, it->as_bool()); - break; - default: - FAIL() << "Array iterator exceeded bounds"; - return; - } - } -} -TEST(ParseArr, InvalidArr) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json_arr), rabbit::parse_error); -} - -TEST(ParseStr, ValidStr) { - rabbit::document doc; - ASSERT_NO_THROW(doc.parse(valid_json_str)); - ASSERT_TRUE(doc.is_object()); - ASSERT_TRUE(doc.has("value")); - ASSERT_TRUE(doc["value"].is_string()); - EXPECT_EQ("123", doc["value"].as_string()); -} - -TEST(ParseStr, InvalidStr) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json_str), rabbit::parse_error); -} - -TEST(ParseInt, ValidInt) { - rabbit::document doc; - ASSERT_NO_THROW(doc.parse(valid_json_int)); - ASSERT_TRUE(doc.is_object()); - ASSERT_TRUE(doc.has("value")); - ASSERT_TRUE(doc["value"].is_int()); - EXPECT_EQ(123, doc["value"].as_int()); -} - -TEST(ParseInt, InvalidInt) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json_int), rabbit::parse_error); -} - -TEST(Parse, InvalidJson) { - rabbit::document doc; - EXPECT_THROW(doc.parse(invalid_json), rabbit::parse_error); -} - -int main(int argc, char** argv) { - testing::internal::CaptureStdout(); - ::testing::InitGoogleTest(&argc, argv); - int failures = RUN_ALL_TESTS(); - std::string output = testing::internal::GetCapturedStdout(); - std::cout << output << std::endl; - std::cout << (failures ? "Not all tests passed." 
: "All tests passed") - << std::endl; - WriteFileIfSpecified(argv, argv + argc, "-fout", output); - - return failures; -} diff --git a/sql-odbc/src/autoconf.h.in b/sql-odbc/src/autoconf.h.in deleted file mode 100644 index 5abb4109e0..0000000000 --- a/sql-odbc/src/autoconf.h.in +++ /dev/null @@ -1,2 +0,0 @@ -#cmakedefine AUTOCONF_ENABLE -#cmakedefine AUTOCONF_STRING "@AUTOCONF_STRING@" diff --git a/sql-odbc/src/installer/CMakeLists.txt b/sql-odbc/src/installer/CMakeLists.txt deleted file mode 100644 index d172b91ac0..0000000000 --- a/sql-odbc/src/installer/CMakeLists.txt +++ /dev/null @@ -1,93 +0,0 @@ -include(InstallRequiredSystemLibraries) -include(CPackComponent) - -set(CMAKE_INSTALL_PREFIX ${INSTALL_ROOT}) - -# General package info -set(CPACK_PACKAGE_NAME "OpenSearch SQL ODBC Driver ${BITNESS}-bit") -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "OpenSearch SQL ODBC Driver ${BITNESS}-bit") -set(CPACK_PACKAGE_VENDOR "Amazon") -set(CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_NAME}") -set(CPACK_SYSTEM_NAME "${CMAKE_SYSTEM_NAME}") -set(CPACK_PACKAGE_VERSION "${DRIVER_PACKAGE_VERSION}") - -# OS-specific package info -if(WIN32) - # Set generator to WIX - set(CPACK_GENERATOR "WIX") - - # This is a unique id for the installer - required for Windows - # Generated at https://www.guidgen.com/ - set(CPACK_WIX_UPGRADE_GUID "2D325BD7-1176-40E8-8AB8-C52DD2F7B792") - - # The Icon shown next to the program name in Add/Remove programs - set(CPACK_WIX_PRODUCT_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icon.ico") - - # The bitmap will appear at the top of all installer pages other than the welcome and completion dialogs - set(CPACK_WIX_UI_BANNER "${CMAKE_CURRENT_SOURCE_DIR}/banner.bmp") - - # Background bitmap used on the welcome and completion dialogs - set(CPACK_WIX_UI_DIALOG "${CMAKE_CURRENT_SOURCE_DIR}/dialog.bmp") - - # This XML file is used for registry setup - set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_SOURCE_DIR}/patch.xml") - - # Add license file - set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_ROOT}/LICENSE.txt") -else() - set(CPACK_GENERATOR "productbuild") - - # This script will be run once the Driver component has finished installing. 
- set(CPACK_POSTFLIGHT_DRIVER_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/postinstall") - - # The productbuild generator copies files from this directory - set(CPACK_PRODUCTBUILD_RESOURCES_DIR "${CMAKE_CURRENT_SOURCE_DIR}/Resources") - - # Background setup to Distribution XML - set(CPACK_PRODUCTBUILD_BACKGROUND "background.bmp") - set(CPACK_PRODUCTBUILD_BACKGROUND_ALIGNMENT "bottomleft") - set(CPACK_PRODUCTBUILD_BACKGROUND_SCALING "none") - - # Background setup for the Dark Aqua theme to Distribution XML - set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA "background_darkaqua.bmp") - set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA_ALIGNMENT "bottomleft") - set(CPACK_PRODUCTBUILD_BACKGROUND_DARKAQUA_SCALING "none") - - # Add license file - set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_ROOT}/LICENSE.txt") - set(CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/Resources/README.txt") - set(CPACK_RESOURCE_FILE_WELCOME "${CMAKE_CURRENT_SOURCE_DIR}/Resources/Welcome.txt") -endif() - -# Set up components for installer -cpack_add_component(Docs - DISPLAY_NAME "Documentation" - DESCRIPTION "Documentation about OpenSearch SQL ODBC Driver" -) -cpack_add_component(Driver - DISPLAY_NAME "Driver" - DESCRIPTION "Library files for running the OpenSearch SQL ODBC Driver" - REQUIRED -) -cpack_add_component(Resources - DISPLAY_NAME "Resources" - DESCRIPTION "Resources for OpenSearch SQL ODBC Driver" -) -# Install driver files -install(TARGETS sqlodbc DESTINATION bin COMPONENT "Driver") - -# TODO: look into DSN Installer failure -# if(APPLE) -# install(FILES "${PROJECT_ROOT}/bin64/dsn_installer" DESTINATION bin COMPONENT "Driver") -# install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/remove-opensearch-dsn.sh" DESTINATION bin COMPONENT "Driver") -# endif() - -# Install documentation files -install(FILES "${PROJECT_ROOT}/README.md" DESTINATION doc COMPONENT "Docs") -install(FILES "${PROJECT_ROOT}/LICENSE.txt" DESTINATION doc COMPONENT "Docs") -install(FILES "${PROJECT_ROOT}/THIRD-PARTY" DESTINATION doc COMPONENT "Docs") - -# Install resource files -install(FILES "${CMAKE_CURRENT_SOURCE_DIR}/Resources/opensearch_sql_odbc.tdc" DESTINATION resources COMPONENT "Resources") - -include(CPack) diff --git a/sql-odbc/src/installer/Resources/README.txt b/sql-odbc/src/installer/Resources/README.txt deleted file mode 100644 index 86ec3cd375..0000000000 --- a/sql-odbc/src/installer/Resources/README.txt +++ /dev/null @@ -1,19 +0,0 @@ -All files are available in '/Library/ODBC/opensearch-sql-odbc' after installation. - -To set up a connection, you can use a DSN to store your data source connection information, -1. Open 'iODBC Data Source Administrator'. -2. Go to 'User DSN'. -3. Select 'OpenSearch SQL ODBC DSN' and click on 'Configure'. -4. Update the connection string values. For the list of all supported options, check '/Library/ODBC/opensearch-sql-odbc/doc/README.md'. -5. Click 'Ok' to save changes. - -If using the driver with ODBC-compatible BI tools, refer to the tool documentation on configuring a new ODBC driver. The typical requirement is to make the tool aware of the location of the driver library file and then use it to set up database (i.e. OpenSearch) connections. - -For example, if you want to use Tableau with OpenSearch Server, -1. Open 'Tableau'. -2. Click on 'Other Databases (ODBC)'. -3. Select 'OpenSearch SQL ODBC DSN' from the DSN list or 'OpenSearch SQL ODBC Driver' from the driver list. If using the driver, you need to enter connection string values. -4. Click on 'Connect'. All connection attributes will be retrieved. -5. Click on 'Sign In'.
You will be successfully connected to the OpenSearch server. - -For more details, check 'https://github.com/opensearch-project/sql/tree/main/sql-odbc'. \ No newline at end of file diff --git a/sql-odbc/src/installer/Resources/Welcome.txt b/sql-odbc/src/installer/Resources/Welcome.txt deleted file mode 100644 index b4412f3294..0000000000 --- a/sql-odbc/src/installer/Resources/Welcome.txt +++ /dev/null @@ -1 +0,0 @@ -OpenSearch SQL ODBC is a read-only ODBC driver for connecting to OpenSearch via its SQL support. \ No newline at end of file diff --git a/sql-odbc/src/installer/Resources/background.bmp b/sql-odbc/src/installer/Resources/background.bmp deleted file mode 100644 index 7ddbfeecd7..0000000000 Binary files a/sql-odbc/src/installer/Resources/background.bmp and /dev/null differ diff --git a/sql-odbc/src/installer/Resources/background_darkaqua.bmp b/sql-odbc/src/installer/Resources/background_darkaqua.bmp deleted file mode 100644 index dbd5fc3e50..0000000000 Binary files a/sql-odbc/src/installer/Resources/background_darkaqua.bmp and /dev/null differ diff --git a/sql-odbc/src/installer/Resources/opensearch_sql_odbc.tdc b/sql-odbc/src/installer/Resources/opensearch_sql_odbc.tdc deleted file mode 100644 index 3574dfdbff..0000000000 --- a/sql-odbc/src/installer/Resources/opensearch_sql_odbc.tdc +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/sql-odbc/src/installer/banner.bmp b/sql-odbc/src/installer/banner.bmp deleted file mode 100644 index c0e4b9506f..0000000000 Binary files a/sql-odbc/src/installer/banner.bmp and /dev/null differ diff --git a/sql-odbc/src/installer/dialog.bmp b/sql-odbc/src/installer/dialog.bmp deleted file mode 100644 index 72d7858622..0000000000 Binary files a/sql-odbc/src/installer/dialog.bmp and /dev/null differ diff --git a/sql-odbc/src/installer/icon.ico b/sql-odbc/src/installer/icon.ico deleted file mode 100644 index 10fc89a562..0000000000 Binary files a/sql-odbc/src/installer/icon.ico and /dev/null differ diff --git a/sql-odbc/src/installer/patch.xml b/sql-odbc/src/installer/patch.xml deleted file mode 100644 index 869b624ab5..0000000000 --- a/sql-odbc/src/installer/patch.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/sql-odbc/src/installer/postinstall b/sql-odbc/src/installer/postinstall deleted file mode 100644 index c95783c053..0000000000 --- a/sql-odbc/src/installer/postinstall +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -PKG_INSTALL_DIR=/Applications -FINAL_INSTALL_DIR=/Library/ODBC/opensearch-sql-odbc - -# Remove install directory if it already exists -if [ -d "${FINAL_INSTALL_DIR}" ]; then - # Fail if FINAL_INSTALL_DIR is not set for whatever reason - if [ -z ${FINAL_INSTALL_DIR} ]; then exit 1; fi - rm -rf ${FINAL_INSTALL_DIR} -fi - -# Move PKG installed folders to intended install directory -mkdir -p ${FINAL_INSTALL_DIR} -mv ${PKG_INSTALL_DIR}/bin ${FINAL_INSTALL_DIR}/bin -mv ${PKG_INSTALL_DIR}/doc ${FINAL_INSTALL_DIR}/doc -mv ${PKG_INSTALL_DIR}/resources ${FINAL_INSTALL_DIR}/resources - -# TODO: look into why DSN installer is not working for fresh Mac install -# Current issue: "General installer error" when adding driver entry - -# Run the DSN installer to configure the driver and DSN on the system for easy setup.
-# chmod a+x ${FINAL_INSTALL_DIR}/bin/dsn_installer -# chmod a+x ${FINAL_INSTALL_DIR}/bin/remove-opensearch-dsn.sh -# echo "I can write to this file" > /tmp/dsn_installer.log -# ${FINAL_INSTALL_DIR}/bin/dsn_installer ${FINAL_INSTALL_DIR}/bin/ >> /tmp/dsn_installer.log -# echo "After DSN Installer finishes" >> /tmp/dsn_installer.log diff --git a/sql-odbc/src/installer/remove-opensearch-dsn.sh b/sql-odbc/src/installer/remove-opensearch-dsn.sh deleted file mode 100644 index ecf7121f5f..0000000000 --- a/sql-odbc/src/installer/remove-opensearch-dsn.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -echo "This script will only remove the default DSN and Driver entries from your ODBC configuration." -echo "You will be responsible for removing installed files from the system." -if [[ $EUID -ne 0 ]]; then - echo "ERROR: This script must be run as root" - exit 1 -fi - -# check for "Yes" -while true; do - read -p "Do you want to continue? (Y/y) " yn - case $yn in - [Yy]* ) break;; - [Nn]* ) exit;; - * ) echo "Please answer yes or no.";; - esac -done - -# Run dsn_installer uninstall -${BASH_SOURCE%/*}/dsn_installer uninstall -if [ $? -ne 0 ]; then - echo "Error while removing DSN and Driver entries." -else - echo "DSN and Driver entries have been removed successfully." -fi diff --git a/sql-odbc/src/modules/code-coverage.cmake b/sql-odbc/src/modules/code-coverage.cmake deleted file mode 100644 index 0d24190c5d..0000000000 --- a/sql-odbc/src/modules/code-coverage.cmake +++ /dev/null @@ -1,610 +0,0 @@ -# -# Copyright (C) 2018-2020 by George Cave - gcave@stablecoder.ca -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy of -# the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. - -# USAGE: To enable any code coverage instrumentation/targets, the single CMake -# option of `CODE_COVERAGE` needs to be set to 'ON', either by GUI, ccmake, or -# on the command line. -# -# From this point, there are two primary methods for adding instrumentation to -# targets: 1 - A blanket instrumentation by calling `add_code_coverage()`, where -# all targets in that directory and all subdirectories are automatically -# instrumented. 2 - Per-target instrumentation by calling -# `target_code_coverage()`, where the target is given and thus only -# that target is instrumented. This applies to both libraries and executables. -# -# To add coverage targets, such as calling `make ccov` to generate the actual -# coverage information for perusal or consumption, call -# `target_code_coverage()` on an *executable* target. -# -# Example 1: All targets instrumented -# -# In this case, the coverage information reported will be that of the -# `theLib` library target and `theExe` executable. -# -# 1a: Via global command -# -# ~~~ -# add_code_coverage() # Adds instrumentation to all targets -# -# add_library(theLib lib.cpp) -# -# add_executable(theExe main.cpp) -# target_link_libraries(theExe PRIVATE theLib) -# target_code_coverage(theExe) # As an executable target, adds the 'ccov-theExe' target (instrumentation already added via global anyway) for generating code coverage reports.
-# ~~~ -# -# 1b: Via target commands -# -# ~~~ -# add_library(theLib lib.cpp) -# target_code_coverage(theLib) # As a library target, adds coverage instrumentation but no targets. -# -# add_executable(theExe main.cpp) -# target_link_libraries(theExe PRIVATE theLib) -# target_code_coverage(theExe) # As an executable target, adds the 'ccov-theExe' target and instrumentation for generating code coverage reports. -# ~~~ -# -# Example 2: Target instrumented, but with regex pattern of files to be excluded -# from report -# -# ~~~ -# add_executable(theExe main.cpp non_covered.cpp) -# target_code_coverage(theExe EXCLUDE non_covered.cpp test/*) # As an executable target, the reports will exclude the non-covered.cpp file, and any files in a test/ folder. -# ~~~ -# -# Example 3: Target added to the 'ccov' and 'ccov-all' targets -# -# ~~~ -# add_code_coverage_all_targets(EXCLUDE test/*) # Adds the 'ccov-all' target set and sets it to exclude all files in test/ folders. -# -# add_executable(theExe main.cpp non_covered.cpp) -# target_code_coverage(theExe AUTO ALL EXCLUDE non_covered.cpp test/*) # As an executable target, adds to the 'ccov' and ccov-all' targets, and the reports will exclude the non-covered.cpp file, and any files in a test/ folder. -# ~~~ - -# Options -option( - CODE_COVERAGE - "Builds targets with code coverage instrumentation. (Requires GCC or Clang)" - OFF) - -# Programs -find_program(LLVM_COV_PATH llvm-cov) -find_program(LLVM_PROFDATA_PATH llvm-profdata) -find_program(LCOV_PATH lcov) -find_program(GENHTML_PATH genhtml) -# Hide behind the 'advanced' mode flag for GUI/ccmake -mark_as_advanced( - FORCE - LLVM_COV_PATH - LLVM_PROFDATA_PATH - LCOV_PATH - GENHTML_PATH) - -# Variables -set(CMAKE_COVERAGE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/ccov) - -# Common initialization/checks -if(CODE_COVERAGE AND NOT CODE_COVERAGE_ADDED) - set(CODE_COVERAGE_ADDED ON) - - # Common Targets - add_custom_target( - ccov-preprocessing - COMMAND ${CMAKE_COMMAND} -E make_directory - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY} - DEPENDS ccov-clean) - - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - # Messages - message(STATUS "Building with llvm Code Coverage Tools") - - if(NOT LLVM_COV_PATH) - message(FATAL_ERROR "llvm-cov not found! 
Aborting.") - else() - # Version number checking for 'EXCLUDE' compatibility - execute_process(COMMAND ${LLVM_COV_PATH} --version - OUTPUT_VARIABLE LLVM_COV_VERSION_CALL_OUTPUT) - string( - REGEX MATCH - "[0-9]+\\.[0-9]+\\.[0-9]+" - LLVM_COV_VERSION - ${LLVM_COV_VERSION_CALL_OUTPUT}) - - if(LLVM_COV_VERSION VERSION_LESS "7.0.0") - message( - WARNING - "target_code_coverage()/add_code_coverage_all_targets() 'EXCLUDE' option only available on llvm-cov >= 7.0.0" - ) - endif() - endif() - - # Targets - add_custom_target( - ccov-clean - COMMAND rm -f ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list - COMMAND rm -f ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list) - - # Used to get the shared object file list before doing the main all- - # processing - add_custom_target( - ccov-libs - COMMAND ; - COMMENT "libs ready for coverage report.") - - elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" - OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - # Messages - message(STATUS "Building with lcov Code Coverage Tools") - - if(CMAKE_BUILD_TYPE) - string(TOUPPER ${CMAKE_BUILD_TYPE} upper_build_type) - if(NOT - ${upper_build_type} - STREQUAL - "DEBUG") - message( - WARNING - "Code coverage results with an optimized (non-Debug) build may be misleading" - ) - endif() - else() - message( - WARNING - "Code coverage results with an optimized (non-Debug) build may be misleading" - ) - endif() - if(NOT LCOV_PATH) - message(FATAL_ERROR "lcov not found! Aborting...") - endif() - if(NOT GENHTML_PATH) - message(FATAL_ERROR "genhtml not found! Aborting...") - endif() - - # Targets - add_custom_target(ccov-clean COMMAND ${LCOV_PATH} --directory - ${CMAKE_BINARY_DIR} --zerocounters) - - else() - message(FATAL_ERROR "Code coverage requires Clang or GCC. Aborting.") - endif() -endif() - -# Adds code coverage instrumentation to a library, or instrumentation/targets -# for an executable target. -# ~~~ -# EXECUTABLE ADDED TARGETS: -# GCOV/LCOV: -# ccov : Generates HTML code coverage report for every target added with 'AUTO' parameter. -# ccov-${TARGET_NAME} : Generates HTML code coverage report for the associated named target. -# ccov-all : Generates HTML code coverage report, merging every target added with 'ALL' parameter into a single detailed report. -# -# LLVM-COV: -# ccov : Generates HTML code coverage report for every target added with 'AUTO' parameter. -# ccov-report : Generates HTML code coverage report for every target added with 'AUTO' parameter. -# ccov-${TARGET_NAME} : Generates HTML code coverage report. -# ccov-report-${TARGET_NAME} : Prints to command line summary per-file coverage information. -# ccov-show-${TARGET_NAME} : Prints to command line detailed per-line coverage information. -# ccov-all : Generates HTML code coverage report, merging every target added with 'ALL' parameter into a single detailed report. -# ccov-all-report : Prints summary per-file coverage information for every target added with ALL' parameter to the command line. -# -# Required: -# TARGET_NAME - Name of the target to generate code coverage for. -# Optional: -# PUBLIC - Sets the visibility for added compile options to targets to PUBLIC instead of the default of PRIVATE. -# PUBLIC - Sets the visibility for added compile options to targets to INTERFACE instead of the default of PRIVATE. -# AUTO - Adds the target to the 'ccov' target so that it can be run in a batch with others easily. Effective on executable targets. 
-# ALL - Adds the target to the 'ccov-all' and 'ccov-all-report' targets, which merge several executable targets' coverage data to a single report. Effective on executable targets. -# EXTERNAL - For GCC's lcov, allows the profiling of 'external' files from the processing directory -# COVERAGE_TARGET_NAME - For executables ONLY, changes the outgoing target name so instead of `ccov-${TARGET_NAME}` it becomes `ccov-${COVERAGE_TARGET_NAME}`. -# EXCLUDE - Excludes files of the patterns provided from coverage. **These do not copy to the 'all' targets.** -# OBJECTS - For executables ONLY, if the provided targets are shared libraries, adds coverage information to the output -# ARGS - For executables ONLY, appends the given arguments to the associated ccov-* executable call -# ~~~ -function(target_code_coverage TARGET_NAME) - # Argument parsing - set(options - AUTO - ALL - EXTERNAL - PUBLIC - INTERFACE) - set(single_value_keywords COVERAGE_TARGET_NAME) - set(multi_value_keywords EXCLUDE OBJECTS ARGS) - cmake_parse_arguments( - target_code_coverage - "${options}" - "${single_value_keywords}" - "${multi_value_keywords}" - ${ARGN}) - - # Set the visibility of target functions to PUBLIC, INTERFACE or default to - # PRIVATE. - if(target_code_coverage_PUBLIC) - set(TARGET_VISIBILITY PUBLIC) - elseif(target_code_coverage_INTERFACE) - set(TARGET_VISIBILITY INTERFACE) - else() - set(TARGET_VISIBILITY PRIVATE) - endif() - - if(NOT target_code_coverage_COVERAGE_TARGET_NAME) - # If no specific name was given, default to the target name. - set(target_code_coverage_COVERAGE_TARGET_NAME ${TARGET_NAME}) - endif() - - if(CODE_COVERAGE) - - # Add code coverage instrumentation to the target's linker command - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - target_compile_options( - ${TARGET_NAME} - ${TARGET_VISIBILITY} - -fprofile-instr-generate - -fcoverage-mapping) - target_link_options( - ${TARGET_NAME} - ${TARGET_VISIBILITY} - -fprofile-instr-generate - -fcoverage-mapping) - elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" - OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - target_compile_options( - ${TARGET_NAME} - ${TARGET_VISIBILITY} - -fprofile-arcs - -ftest-coverage) - target_link_libraries(${TARGET_NAME} ${TARGET_VISIBILITY} gcov) - endif() - - # Targets - get_target_property(target_type ${TARGET_NAME} TYPE) - - # Add shared library to processing for 'all' targets - if(target_type STREQUAL "SHARED_LIBRARY" AND target_code_coverage_ALL) - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - add_custom_target( - ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND echo "-object=$<TARGET_FILE:${TARGET_NAME}>" >> - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list - DEPENDS ccov-preprocessing ${TARGET_NAME}) - - if(NOT TARGET ccov-libs) - message( - FATAL_ERROR - "Calling target_code_coverage with 'ALL' must be after a call to 'add_code_coverage_all_targets'."
- ) - endif() - - add_dependencies(ccov-libs - ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) - endif() - endif() - - # For executables add targets to run and produce output - if(target_type STREQUAL "EXECUTABLE") - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - - # If there are shared objects to also work with, generate the string to - # add them here - foreach(SO_TARGET ${target_code_coverage_OBJECTS}) - # Check to see if the target is a shared object - if(TARGET ${SO_TARGET}) - get_target_property(SO_TARGET_TYPE ${SO_TARGET} TYPE) - if(${SO_TARGET_TYPE} STREQUAL "SHARED_LIBRARY") - set(SO_OBJECTS ${SO_OBJECTS} -object=$<TARGET_FILE:${SO_TARGET}>) - endif() - endif() - endforeach() - - # Run the executable, generating raw profile data - add_custom_target( - ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - LLVM_PROFILE_FILE=${target_code_coverage_COVERAGE_TARGET_NAME}.profraw - $<TARGET_FILE:${TARGET_NAME}> ${target_code_coverage_ARGS} - COMMAND echo "-object=$<TARGET_FILE:${TARGET_NAME}>" >> - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list - COMMAND - echo - "${CMAKE_CURRENT_BINARY_DIR}/${target_code_coverage_COVERAGE_TARGET_NAME}.profraw " - >> ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list - DEPENDS ccov-preprocessing ccov-libs ${TARGET_NAME}) - - # Merge the generated profile data so llvm-cov can process it - add_custom_target( - ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - ${LLVM_PROFDATA_PATH} merge -sparse - ${target_code_coverage_COVERAGE_TARGET_NAME}.profraw -o - ${target_code_coverage_COVERAGE_TARGET_NAME}.profdata - DEPENDS ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) - - # Ignore regex only works on LLVM >= 7 - if(LLVM_COV_VERSION VERSION_GREATER_EQUAL "7.0.0") - foreach(EXCLUDE_ITEM ${target_code_coverage_EXCLUDE}) - set(EXCLUDE_REGEX ${EXCLUDE_REGEX} - -ignore-filename-regex='${EXCLUDE_ITEM}') - endforeach() - endif() - - # Print out details of the coverage information to the command line - add_custom_target( - ccov-show-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - ${LLVM_COV_PATH} show $<TARGET_FILE:${TARGET_NAME}> ${SO_OBJECTS} - -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata - -show-line-counts-or-regions ${EXCLUDE_REGEX} - DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) - - # Print out a summary of the coverage information to the command line - add_custom_target( - ccov-report-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - ${LLVM_COV_PATH} report $<TARGET_FILE:${TARGET_NAME}> ${SO_OBJECTS} - -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata - ${EXCLUDE_REGEX} - DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) - - # Generates HTML output of the coverage information for perusal - add_custom_target( - ccov-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - ${LLVM_COV_PATH} show $<TARGET_FILE:${TARGET_NAME}> ${SO_OBJECTS} - -instr-profile=${target_code_coverage_COVERAGE_TARGET_NAME}.profdata - -show-line-counts-or-regions - -output-dir=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME} - -format="html" ${EXCLUDE_REGEX} - DEPENDS ccov-processing-${target_code_coverage_COVERAGE_TARGET_NAME}) - - elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" - OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - set(COVERAGE_INFO - "${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME}.info" - ) - - # Run the executable, generating coverage information - add_custom_target( - ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND $<TARGET_FILE:${TARGET_NAME}> ${target_code_coverage_ARGS} - DEPENDS
ccov-preprocessing ${TARGET_NAME}) - - # Generate exclusion string for use - foreach(EXCLUDE_ITEM ${target_code_coverage_EXCLUDE}) - set(EXCLUDE_REGEX - ${EXCLUDE_REGEX} - --remove - ${COVERAGE_INFO} - '${EXCLUDE_ITEM}') - endforeach() - - if(EXCLUDE_REGEX) - set(EXCLUDE_COMMAND ${LCOV_PATH} ${EXCLUDE_REGEX} --output-file - ${COVERAGE_INFO}) - else() - set(EXCLUDE_COMMAND ;) - endif() - - if(NOT ${target_code_coverage_EXTERNAL}) - set(EXTERNAL_OPTION --no-external) - endif() - - # Capture coverage data - add_custom_target( - ccov-capture-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND ${CMAKE_COMMAND} -E remove ${COVERAGE_INFO} - COMMAND ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --zerocounters - COMMAND $<TARGET_FILE:${TARGET_NAME}> ${target_code_coverage_ARGS} - COMMAND - ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --base-directory - ${CMAKE_SOURCE_DIR} --capture ${EXTERNAL_OPTION} --output-file - ${COVERAGE_INFO} - COMMAND ${EXCLUDE_COMMAND} - DEPENDS ccov-preprocessing ${TARGET_NAME}) - - # Generates HTML output of the coverage information for perusal - add_custom_target( - ccov-${target_code_coverage_COVERAGE_TARGET_NAME} - COMMAND - ${GENHTML_PATH} -o - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME} - ${COVERAGE_INFO} - DEPENDS ccov-capture-${target_code_coverage_COVERAGE_TARGET_NAME}) - endif() - - add_custom_command( - TARGET ccov-${target_code_coverage_COVERAGE_TARGET_NAME} - POST_BUILD - COMMAND ; - COMMENT - "Open ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/${target_code_coverage_COVERAGE_TARGET_NAME}/index.html in your browser to view the coverage report." - ) - - # AUTO - if(target_code_coverage_AUTO) - if(NOT TARGET ccov) - add_custom_target(ccov) - endif() - add_dependencies(ccov ccov-${target_code_coverage_COVERAGE_TARGET_NAME}) - - if(NOT CMAKE_C_COMPILER_ID MATCHES "GNU" - OR NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU") - if(NOT TARGET ccov-report) - add_custom_target(ccov-report) - endif() - add_dependencies( - ccov-report - ccov-report-${target_code_coverage_COVERAGE_TARGET_NAME}) - endif() - endif() - - # ALL - if(target_code_coverage_ALL) - if(NOT TARGET ccov-all-processing) - message( - FATAL_ERROR - "Calling target_code_coverage with 'ALL' must be after a call to 'add_code_coverage_all_targets'." - ) - endif() - - add_dependencies(ccov-all-processing - ccov-run-${target_code_coverage_COVERAGE_TARGET_NAME}) - endif() - endif() - endif() -endfunction() - -# Adds code coverage instrumentation to all targets in the current directory and -# any subdirectories. To add coverage instrumentation to only specific targets, -# use `target_code_coverage`. -function(add_code_coverage) - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - add_compile_options(-fprofile-instr-generate -fcoverage-mapping) - add_link_options(-fprofile-instr-generate -fcoverage-mapping) - elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" - OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - add_compile_options(-fprofile-arcs -ftest-coverage) - link_libraries(gcov) - endif() -endfunction() - -# Adds the 'ccov-all' type targets that call all targets added via -# `target_code_coverage` with the `ALL` parameter, but merges all the coverage -# data from them into a single large report instead of the numerous smaller -# reports. Also adds the ccov-all-capture target, which generates an all-merged.info -# file for use with coverage dashboards (e.g. codecov.io, coveralls). -# ~~~ -# Optional: -# EXCLUDE - Excludes files of the regex patterns provided from coverage.
-# ~~~ -function(add_code_coverage_all_targets) - # Argument parsing - set(multi_value_keywords EXCLUDE) - cmake_parse_arguments( - add_code_coverage_all_targets - "" - "" - "${multi_value_keywords}" - ${ARGN}) - - if(CODE_COVERAGE) - if(CMAKE_C_COMPILER_ID MATCHES "(Apple)?[Cc]lang" - OR CMAKE_CXX_COMPILER_ID MATCHES "(Apple)?[Cc]lang") - - # Merge the profile data for all of the run executables - add_custom_target( - ccov-all-processing - COMMAND - ${LLVM_PROFDATA_PATH} merge -o - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata -sparse `cat - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/profraw.list`) - - # Regex exclude only available for LLVM >= 7 - if(LLVM_COV_VERSION VERSION_GREATER_EQUAL "7.0.0") - foreach(EXCLUDE_ITEM ${add_code_coverage_all_targets_EXCLUDE}) - set(EXCLUDE_REGEX ${EXCLUDE_REGEX} - -ignore-filename-regex='${EXCLUDE_ITEM}') - endforeach() - endif() - - # Print summary of the code coverage information to the command line - add_custom_target( - ccov-all-report - COMMAND - ${LLVM_COV_PATH} report `cat - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` - -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata - ${EXCLUDE_REGEX} - DEPENDS ccov-all-processing) - - # Export coverage information so continuous integration tools (e.g. - # Jenkins) can consume it - add_custom_target( - ccov-all-export - COMMAND - ${LLVM_COV_PATH} export `cat - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` - -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata - -format="text" ${EXCLUDE_REGEX} > - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/coverage.json - DEPENDS ccov-all-processing) - - # Generate HTML output of all added targets for perusal - add_custom_target( - ccov-all - COMMAND - ${LLVM_COV_PATH} show `cat - ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/binaries.list` - -instr-profile=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.profdata - -show-line-counts-or-regions - -output-dir=${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged - -format="html" ${EXCLUDE_REGEX} - DEPENDS ccov-all-processing) - - elseif(CMAKE_C_COMPILER_ID MATCHES "GNU" - OR CMAKE_CXX_COMPILER_ID MATCHES "GNU") - set(COVERAGE_INFO "${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged.info") - - # Nothing required for gcov - add_custom_target(ccov-all-processing COMMAND ;) - - # Exclusion regex string creation - set(EXCLUDE_REGEX) - foreach(EXCLUDE_ITEM ${add_code_coverage_all_targets_EXCLUDE}) - set(EXCLUDE_REGEX - ${EXCLUDE_REGEX} - --remove - ${COVERAGE_INFO} - '${EXCLUDE_ITEM}') - endforeach() - - if(EXCLUDE_REGEX) - set(EXCLUDE_COMMAND ${LCOV_PATH} ${EXCLUDE_REGEX} --output-file - ${COVERAGE_INFO}) - else() - set(EXCLUDE_COMMAND ;) - endif() - - # Capture coverage data - add_custom_target( - ccov-all-capture - COMMAND ${CMAKE_COMMAND} -E remove ${COVERAGE_INFO} - COMMAND ${LCOV_PATH} --directory ${CMAKE_BINARY_DIR} --capture - --output-file ${COVERAGE_INFO} - COMMAND ${EXCLUDE_COMMAND} - DEPENDS ccov-all-processing) - - # Generates HTML output of all targets for perusal - add_custom_target( - ccov-all - COMMAND ${GENHTML_PATH} -o ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged - ${COVERAGE_INFO} - DEPENDS ccov-all-capture) - - endif() - - add_custom_command( - TARGET ccov-all - POST_BUILD - COMMAND ; - COMMENT - "Open ${CMAKE_COVERAGE_OUTPUT_DIRECTORY}/all-merged/index.html in your browser to view the coverage report." 
- ) - endif() -endfunction() diff --git a/sql-odbc/src/opensearchenlist/CMakeLists.txt b/sql-odbc/src/opensearchenlist/CMakeLists.txt deleted file mode 100644 index 6e4c00524d..0000000000 --- a/sql-odbc/src/opensearchenlist/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -project(opensearchenlist) - -# Source files for opensearchenlist -set(SOURCE_FILES msdtc_enlist.cpp) -set(HEADER_FILES opensearch_enlist.h) - -# Generate static lib -add_library(opensearchenlist STATIC ${SOURCE_FILES} ${HEADER_FILES}) - -# Library dependencies -target_link_libraries(opensearchenlist kernel32 advapi32 Delayimp XOleHlp) - -# Platform specific library dependencies -if(WIN32) - # Windows specifiec - target_link_libraries(opensearchenlist wsock32 winspool user32 gdi32 comdlg32 shell32 uuid) -else() - # Unix specific -endif() diff --git a/sql-odbc/src/opensearchenlist/msdtc_enlist.cpp b/sql-odbc/src/opensearchenlist/msdtc_enlist.cpp deleted file mode 100644 index f427d837ad..0000000000 --- a/sql-odbc/src/opensearchenlist/msdtc_enlist.cpp +++ /dev/null @@ -1,1279 +0,0 @@ -#ifdef _HANDLE_ENLIST_IN_DTC_ - -#undef _MEMORY_DEBUG_ -#ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0400 -#endif /* _WIN32_WINNT */ - -#define WIN32_LEAN_AND_MEAN -#include -#include -/*#include */ -#define _ESDTC_FUNCS_IMPORT_ -#include "connexp.h" - -/*#define _SLEEP_FOR_TEST_*/ -#include -#include -#include -#include -#include -#ifndef WIN32 -#include -#endif /* WIN32 */ - -#include -#define _MYLOG_FUNCS_IMPORT_ -#include "mylog.h" -#define _OPENSEARCH_ENLIST_FUNCS_IMPLEMENT_ -#include "opensearch_enlist.h" -#include "xalibname.h" - -#ifdef WIN32 -#ifndef snprintf -#define snprintf _snprintf -#endif /* snprintf */ -#endif /* WIN32 */ - -/* Define a type for defining a constant string expression */ -#ifndef CSTR -#define CSTR static const char *const -#endif /* CSTR */ - -EXTERN_C { - HINSTANCE s_hModule; /* Saved module handle. */ -} -/* This is where the Driver Manager attaches to this Driver */ -BOOL WINAPI DllMain(HANDLE hInst, ULONG ul_reason_for_call, LPVOID lpReserved) { - switch (ul_reason_for_call) { - case DLL_PROCESS_ATTACH: - s_hModule = (HINSTANCE)hInst; /* Save for dialog boxes */ - break; - case DLL_PROCESS_DETACH: - mylog("DETACHING opensearch_enlist\n"); - break; - } - return TRUE; -} - -/* - * A comment About locks used in this module - * - * the locks should be acquired with stronger to weaker order. - * - * 1:ELOCK -- the strongest per IAsyncES object lock - * When the *isolated* or *dtcconn* member of an IAsyncES object - * is changed, this lock should be held. - * While an IAsyncES object accesses a es_odbc connection, - * this lock should be held. - * - * 2:[CONN_CS] -- per es_odbc connection lock - * This lock would be held for a pretty long time while accessing - * the es_odbc connection assigned to an IAsyncES object. You - * can use the connecion safely by holding a ELOCK for the - * IAsyncES object because the assignment is ensured to be - * fixed while the ELOCK is held. - * - * 3:LIFELOCK -- a global lock to ensure the lives of IAsyncES objects - * While this lock is held, IAsyncES objects would never die. - * - * 4:SLOCK -- the short term per IAsyncES object lock - * When any member of an IAsyncES object is changed, this lock - * should be held. 
- */ - -// #define _LOCK_DEBUG_ -static class INIT_CRIT { - public: - CRITICAL_SECTION life_cs; /* for asdum member of ConnectionClass */ - INIT_CRIT() { - InitializeCriticalSection(&life_cs); - } - ~INIT_CRIT() { - DeleteCriticalSection(&life_cs); - } -} init_crit; -#define LIFELOCK_ACQUIRE EnterCriticalSection(&init_crit.life_cs) -#define LIFELOCK_RELEASE LeaveCriticalSection(&init_crit.life_cs) - -/* - * Some helper macros about connection handling. - */ -#define CONN_CS_ACQUIRE(conn) EsDtc_lock_cntrl((conn), TRUE, FALSE) -#define TRY_CONN_CS_ACQUIRE(conn) EsDtc_lock_cntrl((conn), TRUE, TRUE) -#define CONN_CS_RELEASE(conn) EsDtc_lock_cntrl((conn), FALSE, FALSE) - -#define CONN_IS_IN_TRANS(conn) EsDtc_get_property((conn), inTrans) - -static const char *XidToText(const XID &xid, char *rtext) { - int glen = xid.gtrid_length, blen = xid.bqual_length; - int i, j; - - for (i = 0, j = 0; i < glen; i++, j += 2) - sprintf(rtext + j, "%02x", (unsigned char)xid.data[i]); - strcat(rtext, "-"); - j++; - for (; i < glen + blen; i++, j += 2) - sprintf(rtext + j, "%02x", (unsigned char)xid.data[i]); - return rtext; -} - -static LONG g_cComponents = 0; -static LONG g_cServerLocks = 0; - -// -// The ITransactionResourceAsync object below is implemented so that it -// can be accessed freely from any thread. The ITransactionEnlistmentAsync -// interface used to return the result of each Request appears to be -// implemented the same way (see below), so the caller does not need to be -// aware of COM apartments (i.e. does not need to use -// CoMarshalInterThreadInterfaceInStream/CoGetInterfaceAndReleaseStream). -// The ITransactionResourceAsync and ITransactionEnlistmentAsync interface -// pointers used inside this DLL can be used directly from any thread. -// - -// OLE Transactions Standard -// -// OLE Transactions is the Microsoft interface standard for transaction -// management. Applications use OLE Transactions-compliant interfaces to -// initiate, commit, abort, and inquire about transactions. Resource -// managers use OLE Transactions-compliant interfaces to enlist in -// transactions, to propagate transactions to other resource managers, -// to propagate transactions from process to process or from system to -// system, and to participate in the two-phase commit protocol. -// -// The Microsoft DTC system implements most OLE Transactions-compliant -// objects, interfaces, and methods. Resource managers that wish to use -// OLE Transactions must implement some OLE Transactions-compliant objects, -// interfaces, and methods. -// -// The OLE Transactions specification is based on COM but it differs in the -// following respects: -// -// OLE Transactions objects cannot be created using the COM CoCreate APIs. -// References to OLE Transactions objects are always direct. Therefore, -// no proxies or stubs are created for inter-apartment, inter-process, -// or inter-node calls and OLE Transactions references cannot be marshaled -// using standard COM marshaling. -// All references to OLE Transactions objects and their sinks are completely -// free threaded and cannot rely upon COM concurrency control models. -// For example, you cannot pass a reference to an IResourceManagerSink -// interface on a single-threaded apartment and expect the callback to occur -// only on the same single-threaded apartment.
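For reference, the lock hierarchy the deleted comment describes can be sketched in a few lines of C++. This is an illustrative aside rather than part of the deleted sources: the class and function names below are hypothetical, and only the CRITICAL_SECTION primitives and the ELOCK -> LIFELOCK -> SLOCK ordering come from the original module.

#include <windows.h>

// LIFELOCK: a global lock (cf. the INIT_CRIT singleton above).
static struct LifeLock {
    CRITICAL_SECTION cs;
    LifeLock() { InitializeCriticalSection(&cs); }
    ~LifeLock() { DeleteCriticalSection(&cs); }
} life;

class EnlistSketch {
    CRITICAL_SECTION as_exec;  // ELOCK: strongest, long-term, per object
    CRITICAL_SECTION as_spin;  // SLOCK: weakest, short-term, per object

 public:
    EnlistSketch() {
        InitializeCriticalSection(&as_exec);
        InitializeCriticalSection(&as_spin);
    }
    ~EnlistSketch() {
        DeleteCriticalSection(&as_spin);
        DeleteCriticalSection(&as_exec);
    }
    // Mirrors the shape of IAsyncES::Release() below: acquire in
    // stronger-to-weaker order and release in the reverse order.
    void TeardownSequence() {
        EnterCriticalSection(&as_exec);  // 1: ELOCK
        EnterCriticalSection(&life.cs);  // 3: LIFELOCK
        EnterCriticalSection(&as_spin);  // 4: SLOCK
        // ... members guarded by SLOCK may be changed here ...
        LeaveCriticalSection(&as_spin);
        LeaveCriticalSection(&life.cs);
        LeaveCriticalSection(&as_exec);
    }
};

Taking every lock in the same global order is what keeps the four-level hierarchy deadlock-free; a thread that holds only SLOCK must drop it before it may take ELOCK, which is exactly what IAsyncES::Release() does before deleting the object.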
- -class IAsyncES : public ITransactionResourceAsync { - private: - IDtcToXaHelperSinglePipe *helper; - DWORD RMCookie; - void *dtcconn; - LONG refcnt; - CRITICAL_SECTION as_spin; // to make this object Both - CRITICAL_SECTION as_exec; // to make this object Both - XID xid; - bool isolated; - bool prepared; - bool done; - bool abort; - HANDLE eThread[3]; - bool eFin[3]; - bool requestAccepted; - HRESULT prepare_result; - HRESULT commit_result; -#ifdef _LOCK_DEBUG_ - int spin_cnt; - int cs_cnt; -#endif /* _LOCK_DEBUG_ */ - - public: - enum { PrepareExec = 0, CommitExec, AbortExec }; - - ITransactionEnlistmentAsync *enlist; - - HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void **ppvObject); - ULONG STDMETHODCALLTYPE AddRef(void); - ULONG STDMETHODCALLTYPE Release(void); - - HRESULT STDMETHODCALLTYPE PrepareRequest(BOOL fRetaining, DWORD grfRM, - BOOL fWantMoniker, - BOOL fSinglePhase); - HRESULT STDMETHODCALLTYPE CommitRequest(DWORD grfRM, XACTUOW *pNewUOW); - HRESULT STDMETHODCALLTYPE AbortRequest(BOID *pboidReason, BOOL fRetaining, - XACTUOW *pNewUOW); - HRESULT STDMETHODCALLTYPE TMDown(void); - - IAsyncES(); - void SetHelper(IDtcToXaHelperSinglePipe *pHelper, DWORD dwRMCookie) { - helper = pHelper; - RMCookie = dwRMCookie; - } - - HRESULT RequestExec(DWORD type, HRESULT res); - HRESULT ReleaseConnection(void); - void SetConnection(void *sconn) { - SLOCK_ACQUIRE(); - dtcconn = sconn; - SLOCK_RELEASE(); - } - void SetXid(const XID *ixid) { - SLOCK_ACQUIRE(); - xid = *ixid; - SLOCK_RELEASE(); - } - void *separateXAConn(bool spinAcquired, bool continueConnection); - bool CloseThread(DWORD type); - - private: - ~IAsyncES(); - void SLOCK_ACQUIRE() { - EnterCriticalSection(&as_spin); - } - void SLOCK_RELEASE() { - LeaveCriticalSection(&as_spin); - } - void ELOCK_ACQUIRE() { - EnterCriticalSection(&as_exec); - } - void ELOCK_RELEASE() { - LeaveCriticalSection(&as_exec); - } - void *getLockedXAConn(void); - void *generateXAConn(bool spinAcquired); - void *isolateXAConn(bool spinAcquired, bool continueConnection); - void SetPrepareResult(HRESULT res) { - SLOCK_ACQUIRE(); - prepared = true; - prepare_result = res; - SLOCK_RELEASE(); - } - void SetDone(HRESULT); - void Wait_pThread(bool slock_hold); - void Wait_cThread(bool slock_hold, bool once); -}; - -IAsyncES::IAsyncES(void) - : helper(NULL), - RMCookie(0), - enlist(NULL), - dtcconn(NULL), - refcnt(1), - isolated(false), - done(false), - abort(false), - prepared(false), - requestAccepted(false) { - InterlockedIncrement(&g_cComponents); - InitializeCriticalSection(&as_spin); - InitializeCriticalSection(&as_exec); - eThread[0] = eThread[1] = eThread[2] = NULL; - eFin[0] = eFin[1] = eFin[2] = false; - memset(&xid, 0, sizeof(xid)); -#ifdef _LOCK_DEBUG_ - spin_cnt = 0; - cs_cnt = 0; -#endif /* _LOCK_DEBUG_ */ -} - -// -// invoked from *delete*. -// When entered ELOCK -> LIFELOCK -> SLOCK are held -// and they are released. 
-// -IAsyncES::~IAsyncES(void) { - void *fconn = NULL; - - if (dtcconn) { - if (isolated) - fconn = dtcconn; - EsDtc_set_async(dtcconn, NULL); - dtcconn = NULL; - } - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - if (fconn) { - mylog("IAsyncES Destructor is freeing the connection\n"); - EsDtc_free_connect(fconn); - } - DeleteCriticalSection(&as_spin); - ELOCK_RELEASE(); - DeleteCriticalSection(&as_exec); - InterlockedDecrement(&g_cComponents); -} -HRESULT STDMETHODCALLTYPE IAsyncES::QueryInterface(REFIID riid, - void **ppvObject) { - mylog("%p QueryInterface called\n", this); - if (riid == IID_IUnknown || riid == IID_ITransactionResourceAsync) { - *ppvObject = this; - AddRef(); - return S_OK; - } - *ppvObject = NULL; - return E_NOINTERFACE; -} -// -// acquire/releases SLOCK. -// -ULONG STDMETHODCALLTYPE IAsyncES::AddRef(void) { - mylog("%p->AddRef called\n", this); - SLOCK_ACQUIRE(); - refcnt++; - SLOCK_RELEASE(); - return refcnt; -} -// -// acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. -// -ULONG STDMETHODCALLTYPE IAsyncES::Release(void) { - mylog("%p->Release called refcnt=%d\n", this, refcnt); - SLOCK_ACQUIRE(); - refcnt--; - if (refcnt <= 0) { - SLOCK_RELEASE(); - ELOCK_ACQUIRE(); - LIFELOCK_ACQUIRE; - SLOCK_ACQUIRE(); - if (refcnt <= 0) { - const int refcnt_copy = refcnt; - mylog("delete %p\n", this); - delete this; - return refcnt_copy; - } else { - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - ELOCK_RELEASE(); - } - } else - SLOCK_RELEASE(); - return refcnt; -} - -// -// Acquire/release SLOCK. -// -void IAsyncES::Wait_pThread(bool slock_hold) { - mylog("Wait_pThread %d in\n", slock_hold); - HANDLE wThread; - int wait_idx = PrepareExec; - DWORD ret; - - if (!slock_hold) - SLOCK_ACQUIRE(); - while (NULL != (wThread = eThread[wait_idx]) && !eFin[wait_idx]) { - SLOCK_RELEASE(); - ret = WaitForSingleObject(wThread, 2000); - SLOCK_ACQUIRE(); - if (WAIT_TIMEOUT != ret) - eFin[wait_idx] = true; - } - if (!slock_hold) - SLOCK_RELEASE(); - mylog("Wait_pThread out\n"); -} - -// -// Acquire/releases SLOCK. -// -void IAsyncES::Wait_cThread(bool slock_hold, bool once) { - HANDLE wThread; - int wait_idx; - DWORD ret; - - mylog("Wait_cThread %d,%d in\n", slock_hold, once); - if (!slock_hold) - SLOCK_ACQUIRE(); - if (NULL != eThread[CommitExec]) - wait_idx = CommitExec; - else - wait_idx = AbortExec; - while (NULL != (wThread = eThread[wait_idx]) && !eFin[wait_idx]) { - SLOCK_RELEASE(); - ret = WaitForSingleObject(wThread, 2000); - SLOCK_ACQUIRE(); - if (WAIT_TIMEOUT != ret) - eFin[wait_idx] = true; - else if (once) - break; - } - if (!slock_hold) - SLOCK_RELEASE(); - mylog("Wait_cThread out\n"); -} - -/* Processing Prepare/Commit Request */ -typedef struct RequestPara { - DWORD type; - LPVOID lpr; - HRESULT res; -} RequestPara; - -// -// Acquire/releases LIFELOCK -> SLOCK. -// may acquire/release ELOCK. -// -void IAsyncES::SetDone(HRESULT res) { - LIFELOCK_ACQUIRE; - SLOCK_ACQUIRE(); - done = true; - if (E_FAIL == res || E_UNEXPECTED == res) - abort = true; - requestAccepted = true; - commit_result = res; - if (dtcconn) { - EsDtc_set_async(dtcconn, NULL); - if (isolated) { - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - ELOCK_ACQUIRE(); - if (dtcconn) { - mylog("Freeing isolated connection=%p\n", dtcconn); - EsDtc_free_connect(dtcconn); - SetConnection(NULL); - } - ELOCK_RELEASE(); - } else { - dtcconn = NULL; - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - } - } else { - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - } -} - -// -// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. 
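// The Wait_pThread/Wait_cThread helpers above always wait with a 2-second
// timeout and drop the spin lock around WaitForSingleObject, so the worker
// thread being waited on can still acquire that lock. A self-contained
// sketch of the same bounded-wait shape, assuming only the Win32/CRT APIs
// (worker_demo is a hypothetical stand-in, not driver code):

#include <windows.h>
#include <process.h>
#include <stdio.h>

static unsigned __stdcall worker_demo(void *) {
    Sleep(3000);  // stand-in for a Prepare/Commit request in flight
    return 0;
}

int main() {
    HANDLE th = (HANDLE)_beginthreadex(NULL, 0, worker_demo, NULL, 0, NULL);
    if (NULL == th)
        return 1;
    DWORD ret;
    do {
        ret = WaitForSingleObject(th, 2000);  // bounded wait, like Wait_pThread
        if (WAIT_TIMEOUT == ret)
            printf("still running; re-check shared state here\n");
    } while (WAIT_TIMEOUT == ret);
    CloseHandle(th);  // _beginthreadex handles must be closed by the waiter
    return 0;
}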
-// -void *IAsyncES::generateXAConn(bool spinAcquired) { - mylog("generateXAConn isolated=%d dtcconn=%p\n", isolated, dtcconn); - if (!spinAcquired) - SLOCK_ACQUIRE(); - if (isolated || done) { - SLOCK_RELEASE(); - return dtcconn; - } - SLOCK_RELEASE(); - ELOCK_ACQUIRE(); - LIFELOCK_ACQUIRE; - SLOCK_ACQUIRE(); - if (dtcconn && !isolated && !done && prepared) { - void *sconn = dtcconn; - - dtcconn = EsDtc_isolate(sconn, useAnotherRoom); - isolated = true; - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - // EsDtc_connect(dtcconn); may be called in getLockedXAConn - } else { - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - } - ELOCK_RELEASE(); - return dtcconn; -} - -// -// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. -// -void *IAsyncES::isolateXAConn(bool spinAcquired, bool continueConnection) { - void *sconn; - - mylog("isolateXAConn isolated=%d dtcconn=%p\n", isolated, dtcconn); - if (!spinAcquired) - SLOCK_ACQUIRE(); - if (isolated || done || NULL == dtcconn) { - SLOCK_RELEASE(); - return dtcconn; - } - SLOCK_RELEASE(); - ELOCK_ACQUIRE(); - LIFELOCK_ACQUIRE; - SLOCK_ACQUIRE(); - if (isolated || done || NULL == dtcconn) { - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - ELOCK_RELEASE(); - return dtcconn; - } - sconn = dtcconn; - - dtcconn = - EsDtc_isolate(sconn, continueConnection ? 0 : disposingConnection); - - isolated = true; - SLOCK_RELEASE(); - LIFELOCK_RELEASE; - if (continueConnection) { - EsDtc_connect(sconn); - } - ELOCK_RELEASE(); - return dtcconn; -} - -// -// Acquire/releases [ELOCK -> LIFELOCK -> ] SLOCK. -// -void *IAsyncES::separateXAConn(bool spinAcquired, bool continueConnection) { - mylog("%s isolated=%d dtcconn=%p\n", __FUNCTION__, isolated, dtcconn); - if (!spinAcquired) - SLOCK_ACQUIRE(); - if (prepared) - return generateXAConn(true); - else - return isolateXAConn(true, continueConnection); -} - -// -// [when entered] -// ELOCK is held. -// -// Acquire/releases SLOCK. -// Try to acquire CONN_CS also. -// -// [on exit] -// ELOCK is kept held. -// If the return connection != NULL -// the CONN_CS lock for the connection is held. -// -void *IAsyncES::getLockedXAConn() { - SLOCK_ACQUIRE(); - while (!done && !isolated && NULL != dtcconn) { - /* - * Note that COMMIT/ROLLBACK PREPARED command should be - * issued outside the transaction. - */ - if (!prepared || !CONN_IS_IN_TRANS(dtcconn)) { - if (TRY_CONN_CS_ACQUIRE(dtcconn)) { - if (prepared && CONN_IS_IN_TRANS(dtcconn)) { - CONN_CS_RELEASE(dtcconn); - } else - break; - } - } - separateXAConn(true, true); - SLOCK_ACQUIRE(); // SLOCK was released by separateXAConn() - } - SLOCK_RELEASE(); - if (isolated && NULL != dtcconn) { - CONN_CS_ACQUIRE(dtcconn); - if (!EsDtc_get_property(dtcconn, connected)) - EsDtc_connect(dtcconn); - } - return dtcconn; -} - -// -// Acquire/release ELOCK -> SLOCK. 
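// getLockedXAConn() above leans on TRY_CONN_CS_ACQUIRE, a non-blocking
// attempt on the per-connection lock, so a busy connection makes it separate
// off a new XA connection instead of blocking indefinitely. The internals of
// EsDtc_lock_cntrl are not shown in this file; assuming the try-acquire maps
// to the usual Win32 primitive, the shape would be (try_use is a
// hypothetical helper):

#include <windows.h>

static bool try_use(CRITICAL_SECTION *conn_cs) {
    if (!TryEnterCriticalSection(conn_cs))
        return false;  // busy: the caller falls back to separateXAConn()
    // ... issue the COMMIT/ROLLBACK PREPARED work while holding the lock ...
    LeaveCriticalSection(conn_cs);
    return true;
}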
-// -HRESULT IAsyncES::RequestExec(DWORD type, HRESULT res) { - HRESULT ret; - bool bReleaseEnlist = false; - void *econn; - char esxid[258]; - - mylog("%p->RequestExec type=%d conn=%p\n", this, type, dtcconn); - XidToText(xid, esxid); -#ifdef _SLEEP_FOR_TEST_ - /*Sleep(2000);*/ -#endif /* _SLEEP_FOR_TEST_ */ - ELOCK_ACQUIRE(); - switch (type) { - case PrepareExec: - if (done || NULL == dtcconn) { - res = E_UNEXPECTED; - break; - } - if (econn = getLockedXAConn(), NULL != econn) { - EsDtc_set_property(econn, inprogress, (void *)1); - if (E_FAIL == res) - EsDtc_one_phase_operation(econn, ABORT_GLOBAL_TRANSACTION); - else if (XACT_S_SINGLEPHASE == res) { - if (!EsDtc_one_phase_operation(econn, ONE_PHASE_COMMIT)) - res = E_FAIL; - } else { - if (!EsDtc_two_phase_operation(econn, PREPARE_TRANSACTION, - esxid)) - res = E_FAIL; - } - EsDtc_set_property(econn, inprogress, (void *)0); - CONN_CS_RELEASE(econn); - } - if (S_OK != res) { - SetDone(res); - bReleaseEnlist = true; - } - ret = enlist->PrepareRequestDone(res, NULL, NULL); - SetPrepareResult(res); - break; - case CommitExec: - Wait_pThread(false); - if (E_FAIL != res) { - econn = getLockedXAConn(); - if (econn) { - EsDtc_set_property(econn, inprogress, (void *)1); - if (!EsDtc_two_phase_operation(econn, COMMIT_PREPARED, - esxid)) - res = E_FAIL; - EsDtc_set_property(econn, inprogress, (void *)0); - CONN_CS_RELEASE(econn); - } - } - SetDone(res); - ret = enlist->CommitRequestDone(res); - bReleaseEnlist = true; - break; - case AbortExec: - Wait_pThread(false); - if (prepared && !done) { - econn = getLockedXAConn(); - if (econn) { - EsDtc_set_property(econn, inprogress, (void *)1); - if (!EsDtc_two_phase_operation(econn, ROLLBACK_PREPARED, - esxid)) - res = E_FAIL; - EsDtc_set_property(econn, inprogress, (void *)0); - CONN_CS_RELEASE(econn); - } - } - SetDone(res); - ret = enlist->AbortRequestDone(res); - bReleaseEnlist = true; - break; - default: - ret = -1; - } - if (bReleaseEnlist) { - helper->ReleaseRMCookie(RMCookie, TRUE); - enlist->Release(); - } - ELOCK_RELEASE(); - mylog("%p->Done ret=%d\n", this, ret); - return ret; -} - -// -// Acquire/releses SLOCK -// or [ELOCK -> LIFELOCK -> ] SLOCK. -// -HRESULT IAsyncES::ReleaseConnection(void) { - mylog("%p->ReleaseConnection\n", this); - - SLOCK_ACQUIRE(); - if (isolated || NULL == dtcconn) { - SLOCK_RELEASE(); - return SQL_SUCCESS; - } - Wait_pThread(true); - if (NULL != eThread[CommitExec] || NULL != eThread[AbortExec] - || requestAccepted) { - if (!done) - Wait_cThread(true, true); - } - if (!isolated && !done && dtcconn - && EsDtc_get_property(dtcconn, connected)) { - isolateXAConn(true, false); - } else - SLOCK_RELEASE(); - mylog("%p->ReleaseConnection exit\n", this); - return SQL_SUCCESS; -} - -EXTERN_C static unsigned WINAPI DtcRequestExec(LPVOID para); -EXTERN_C static void __cdecl ClosePrepareThread(LPVOID para); -EXTERN_C static void __cdecl CloseCommitThread(LPVOID para); -EXTERN_C static void __cdecl CloseAbortThread(LPVOID para); - -// -// Acquire/release [ELOCK -> ] SLOCK. 
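// The Prepare/Commit/Abort request methods below all hand work to a thread
// through a heap-allocated RequestPara, and ownership follows one rule: the
// worker deletes the block, except when _beginthreadex fails and the
// launcher must delete it itself. A stand-alone sketch of that hand-off
// (Para, request_exec, and launch are hypothetical names, not driver code):

#include <windows.h>
#include <process.h>

struct Para {        // mirrors RequestPara: request type, target, result
    DWORD type;
    void *obj;
    HRESULT res;
};

static unsigned __stdcall request_exec(void *p) {
    Para *para = (Para *)p;
    // ... dispatch on para->type against para->obj ...
    delete para;     // the worker owns the block once the thread starts
    return 0;
}

static bool launch(DWORD type, void *obj, HRESULT res) {
    Para *para = new Para{type, obj, res};
    HANDLE th = (HANDLE)_beginthreadex(NULL, 0, request_exec, para, 0, NULL);
    if (NULL == th) {
        delete para;  // hand-off failed; the launcher still owns it
        return false;
    }
    CloseHandle(th);  // detach here; IAsyncES instead parks it in eThread[type]
    return true;
}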
-// -HRESULT STDMETHODCALLTYPE IAsyncES::PrepareRequest(BOOL fRetaining, DWORD grfRM, - BOOL fWantMoniker, - BOOL fSinglePhase) { - HRESULT ret, res; - RequestPara *reqp; - const DWORD reqtype = PrepareExec; - - mylog("%p PrepareRequest called grhRM=%d enl=%p\n", this, grfRM, enlist); - SLOCK_ACQUIRE(); - if (dtcconn && 0 != EsDtc_get_property(dtcconn, errorNumber)) - res = ret = E_FAIL; - else { - ret = S_OK; - if (fSinglePhase) { - res = XACT_S_SINGLEPHASE; - mylog("XACT is singlePhase\n"); - } else - res = S_OK; - } - SLOCK_RELEASE(); - ELOCK_ACQUIRE(); -#ifdef _SLEEP_FOR_TEST_ - Sleep(2000); -#endif /* _SLEEP_FOR_TEST_ */ - reqp = new RequestPara; - reqp->type = reqtype; - reqp->lpr = (LPVOID)this; - reqp->res = res; -#define DONT_CALL_RETURN_FROM_HERE ? ? ? - AddRef(); - HANDLE hThread = - (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); - if (NULL == hThread) { - delete (reqp); - ret = E_FAIL; - } else { - SLOCK_ACQUIRE(); - eThread[reqtype] = hThread; - SLOCK_RELEASE(); - /* - * We call here _beginthread not _beginthreadex - * so as not to call CloseHandle() to clean up - * the thread. - */ - _beginthread(ClosePrepareThread, 0, (void *)this); - } - ELOCK_RELEASE(); - Release(); -#undef return - return ret; -} -// -// Acquire/release [ELOCK -> ] SLOCK. -// -HRESULT STDMETHODCALLTYPE IAsyncES::CommitRequest(DWORD grfRM, - XACTUOW *pNewUOW) { - HRESULT res = S_OK, ret = S_OK; - RequestPara *reqp; - const DWORD reqtype = CommitExec; - - mylog("%p CommitRequest called grfRM=%d enl=%p\n", this, grfRM, enlist); - - SLOCK_ACQUIRE(); - if (!prepared || done) - ret = E_UNEXPECTED; - else if (S_OK != prepare_result) - ret = E_UNEXPECTED; - SLOCK_RELEASE(); - if (S_OK != ret) - return ret; -#define DONT_CALL_RETURN_FROM_HERE ? ? ? - AddRef(); - ELOCK_ACQUIRE(); -#ifdef _SLEEP_FOR_TEST_ - Sleep(1000); -#endif /* _SLEEP_FOR_TEST_ */ - reqp = new RequestPara; - reqp->type = reqtype; - reqp->lpr = (LPVOID)this; - reqp->res = res; - enlist->AddRef(); - HANDLE hThread = - (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); - if (NULL == hThread) { - delete (reqp); - enlist->Release(); - ret = E_FAIL; - } else { - SLOCK_ACQUIRE(); - eThread[reqtype] = hThread; - SLOCK_RELEASE(); - /* - * We call here _beginthread not _beginthreadex - * so as not to call CloseHandle() to clean up - * the thread. - */ - _beginthread(CloseCommitThread, 0, (void *)this); - } - mylog("CommitRequest ret=%d\n", ret); - requestAccepted = true; - ELOCK_RELEASE(); - Release(); -#undef return - return ret; -} -// -// Acquire/release [ELOCK -> ] SLOCK. -// -HRESULT STDMETHODCALLTYPE IAsyncES::AbortRequest(BOID *pboidReason, - BOOL fRetaining, - XACTUOW *pNewUOW) { - HRESULT res = S_OK, ret = S_OK; - RequestPara *reqp; - const DWORD reqtype = AbortExec; - - mylog("%p AbortRequest called\n", this); - SLOCK_ACQUIRE(); - if (done) - ret = E_UNEXPECTED; - else if (prepared && S_OK != prepare_result) - ret = E_UNEXPECTED; - SLOCK_RELEASE(); - if (S_OK != ret) - return ret; -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? 
-#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - AddRef(); - ELOCK_ACQUIRE(); - if (!prepared && dtcconn) { - EsDtc_set_property(dtcconn, inprogress, (void *)1); - EsDtc_one_phase_operation(dtcconn, ONE_PHASE_ROLLBACK); - EsDtc_set_property(dtcconn, inprogress, (void *)0); - } - reqp = new RequestPara; - reqp->type = reqtype; - reqp->lpr = (LPVOID)this; - reqp->res = res; - enlist->AddRef(); - HANDLE hThread = - (HANDLE)_beginthreadex(NULL, 0, DtcRequestExec, reqp, 0, NULL); - if (NULL == hThread) { - delete (reqp); - enlist->Release(); - ret = E_FAIL; - } else { - SLOCK_ACQUIRE(); - eThread[reqtype] = hThread; - SLOCK_RELEASE(); - /* - * We call here _beginthread not _beginthreadex - * so as not to call CloseHandle() to clean up - * the thread. - */ - _beginthread(CloseAbortThread, 0, (void *)this); - } - mylog("AbortRequest ret=%d\n", ret); - requestAccepted = true; - ELOCK_RELEASE(); - Release(); -#undef return - return ret; -} -HRESULT STDMETHODCALLTYPE IAsyncES::TMDown(void) { - mylog("%p TMDown called\n", this); - return S_OK; -} - -bool IAsyncES::CloseThread(DWORD type) { - CSTR func = "CloseThread"; - HANDLE th; - DWORD ret, excode = S_OK; - bool rls_async = false; - - mylog("%s for %p thread=%d\n", func, this, eThread[type]); - if (th = eThread[type], NULL == th || eFin[type]) - return false; - ret = WaitForSingleObject(th, INFINITE); - if (WAIT_OBJECT_0 == ret) { - switch (type) { - case IAsyncES::AbortExec: - case IAsyncES::CommitExec: - rls_async = true; - break; - default: - GetExitCodeThread(th, &excode); - if (S_OK != excode) - rls_async = true; - } - SLOCK_ACQUIRE(); - eThread[type] = NULL; - eFin[type] = true; - SLOCK_RELEASE(); - CloseHandle(th); - } - mylog("%s ret=%d\n", func, ret); - return rls_async; -} - -EXTERN_C static void __cdecl ClosePrepareThread(LPVOID para) { - CSTR func = "ClosePrepareThread"; - IAsyncES *async = (IAsyncES *)para; - bool release; - - mylog("%s for %p", func, async); - if (release = async->CloseThread(IAsyncES::PrepareExec), release) - async->Release(); - mylog("%s release=%d\n", func, release); -} - -EXTERN_C static void __cdecl CloseCommitThread(LPVOID para) { - CSTR func = "CloseCommitThread"; - IAsyncES *async = (IAsyncES *)para; - bool release; - - mylog("%s for %p", func, async); - if (release = async->CloseThread(IAsyncES::CommitExec), release) - async->Release(); - mylog("%s release=%d\n", func, release); -} - -EXTERN_C static void __cdecl CloseAbortThread(LPVOID para) { - CSTR func = "CloseAbortThread"; - IAsyncES *async = (IAsyncES *)para; - bool release; - - mylog("%s for %p", func, async); - if (release = async->CloseThread(IAsyncES::AbortExec), release) - async->Release(); - mylog("%s release=%d\n", func, release); -} - -EXTERN_C static unsigned WINAPI DtcRequestExec(LPVOID para) { - RequestPara *reqp = (RequestPara *)para; - DWORD type = reqp->type; - IAsyncES *async = (IAsyncES *)reqp->lpr; - HRESULT res = reqp->res, ret; - - mylog("DtcRequestExec type=%d", reqp->type); - delete (reqp); - ret = async->RequestExec(type, res); - mylog(" Done ret=%d\n", ret); - return ret; -} - -CSTR regKey = "SOFTWARE\\Microsoft\\MSDTC\\XADLL"; - -static int regkeyCheck(const char *xalibname, const char *xalibpath) { - int retcode = 0; - LONG ret; - HKEY sKey; - DWORD rSize; - - ret = ::RegOpenKeyEx(HKEY_LOCAL_MACHINE, regKey, 0, - KEY_QUERY_VALUE | KEY_SET_VALUE | KEY_WOW64_64KEY, - &sKey); - switch (ret) { - case ERROR_SUCCESS: - break; - case ERROR_FILE_NOT_FOUND: - ret = ::RegCreateKeyEx(HKEY_LOCAL_MACHINE, regKey, 0, 
NULL, - REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, - NULL, &sKey, NULL); - mylog("%s:CreateKeyEx ret=%d\n", __FUNCTION__, ret); - break; - default: - mylog("%s:OpenKeyEx ret=%d\n", __FUNCTION__, ret); - } - if (ERROR_SUCCESS != ret) - return -1; - else { - char keyval[1024]; - - rSize = sizeof(keyval); - switch (ret = ::RegQueryValueEx(sKey, xalibname, NULL, NULL, - (LPBYTE)keyval, &rSize)) { - case ERROR_SUCCESS: - if (rSize > 0) { - if (0 == _stricmp(keyval, xalibpath)) - break; - mylog("%s:XADLL value %s is different from %s\n", - __FUNCTION__, keyval, xalibpath); - if (IsWow64()) { - mylog( - "%s:avoid RegSetValue operation from wow64 " - "process\n", - __FUNCTION__); - break; - } - } - case ERROR_FILE_NOT_FOUND: - mylog("%s:Setting value %s\n", __FUNCTION__, xalibpath); - ret = ::RegSetValueEx(sKey, xalibname, 0, REG_SZ, - (CONST BYTE *)xalibpath, - (DWORD)strlen(xalibpath) + 1); - if (ERROR_SUCCESS == ret) - retcode = 1; - else { - retcode = -1; - mylog("%s:SetValuEx ret=%d\n", __FUNCTION__, ret); - } - break; - default: - retcode = -1; - mylog("%s:QueryValuEx ret=%d\n", __FUNCTION__, ret); - break; - } - ::RegCloseKey(sKey); - } - return retcode; -} - -RETCODE static EnlistInDtc_1pipe(void *conn, ITransaction *pTra, - ITransactionDispenser *pDtc, int method) { - CSTR func = "EnlistInDtc_1pipe"; - static IDtcToXaHelperSinglePipe *pHelper = NULL; - ITransactionResourceAsync *pRes = NULL; - IAsyncES *asdum; - HRESULT res; - DWORD dwRMCookie; - XID xid; - const char *xalibname = GetXaLibName(); - const char *xalibpath = GetXaLibPath(); - - int recovLvl; - char errmsg[256]; - char reason[128]; - - if (!pHelper) { - res = pDtc->QueryInterface(IID_IDtcToXaHelperSinglePipe, - (void **)&pHelper); - if (res != S_OK || !pHelper) { - mylog("DtcToXaHelperSingelPipe get error %d\n", res); - pHelper = NULL; - return SQL_ERROR; - } - } - res = (NULL != (asdum = new IAsyncES)) ? 
S_OK : E_FAIL; - if (S_OK != res) { - mylog("CoCreateInstance error %d\n", res); - return SQL_ERROR; - } - - recovLvl = EsDtc_is_recovery_available(conn, reason, sizeof(reason)); - switch (method) { - case DTC_CHECK_BEFORE_LINK: - if (0 == recovLvl) { - snprintf(errmsg, sizeof(errmsg), - "%s is unavailable in distributed transactions", - reason); - EsDtc_set_error(conn, errmsg, func); - return SQL_ERROR; - } - } - /*mylog("dllname=%s dsn=%s\n", xalibname, conn->connInfo.dsn); res = 0;*/ - char dtcname[1024]; - EsDtc_create_connect_string(conn, dtcname, sizeof(dtcname)); - - bool confirmedRegkey = false, confirmingLink = false, xarmerr = false; - char error_header[64]; - while (true) { - res = pHelper->XARMCreate(dtcname, (char *)xalibname, &dwRMCookie); - - mylog("XARMcreate error code=%x (%d %d)\n", res, confirmedRegkey, - confirmingLink); - xarmerr = true; - if (!confirmingLink) - snprintf(error_header, sizeof(error_header), - "XARMcreate error code=%x", res); - switch (res) { - case S_OK: - if (confirmingLink) { - switch (recovLvl) { - case 0: - snprintf(errmsg, sizeof(errmsg), - "%s:%s is currently unavailable in " - "distributed transactions", - error_header, reason); - break; - case -1: - snprintf( - errmsg, sizeof(errmsg), - "%s:Possibly you connect to the database whose " - "authentication method is %s or ident", - error_header, reason); - break; - case 1: - snprintf( - errmsg, sizeof(errmsg), - "%s:Are you trying to connect to the database " - "whose authentication method is ident?", - error_header); - break; - } - } else - xarmerr = false; - break; - case XACT_E_XA_TX_DISABLED: - snprintf(errmsg, sizeof(errmsg), - "%s:Please enable XA transaction in MSDTC security " - "configuration", - error_header); - break; - case XACT_E_TMNOTAVAILABLE: - snprintf(errmsg, sizeof(errmsg), - "%s:Please start Distributed Transaction Coordinator " - "service", - error_header); - break; - case E_FAIL: - if (!confirmedRegkey) { - int retcode = regkeyCheck(xalibname, xalibpath); - confirmedRegkey = true; - if (retcode > 0) - continue; - } - switch (method) { - case DTC_CHECK_RM_CONNECTION: - if (!confirmingLink) { - confirmingLink = true; - strcat(dtcname, ";" KEYWORD_DTC_CHECK "=0"); - continue; - } - default: - snprintf(errmsg, sizeof(errmsg), - "%s:Failed to link with DTC service. 
Please " - "look at the log of Event Viewer etc.", - error_header); - } - break; - case XACT_E_CONNECTION_DOWN: - snprintf(errmsg, sizeof(errmsg), - "%s:Lost connection with DTC transaction " - "manager\nMSDTC has some trouble?", - error_header); - break; - default: - snprintf(errmsg, sizeof(errmsg), "%s\n", error_header); - break; - } - break; - } - if (xarmerr) { - EsDtc_set_error(conn, errmsg, func); - return SQL_ERROR; - } - - res = pHelper->ConvertTridToXID((DWORD *)pTra, dwRMCookie, &xid); - if (res != S_OK) { - mylog("ConvertTridToXid error %d\n", res); - return SQL_ERROR; - } - { - char esxid[258]; - XidToText(xid, esxid); - mylog("ConvertTridToXID -> %s\n", esxid); - } - asdum->SetXid(&xid); - /* Create an IAsyncES instance by myself */ - /* DLLGetClassObject(GUID_IAsyncES, IID_ITransactionResourceAsync, (void **) - * &asdum); */ - - asdum->SetHelper(pHelper, dwRMCookie); - res = pHelper->EnlistWithRM(dwRMCookie, pTra, asdum, &asdum->enlist); - if (res != S_OK) { - mylog("EnlistWithRM error %d\n", res); - pHelper->ReleaseRMCookie(dwRMCookie, TRUE); - return SQL_ERROR; - } - - mylog("asdum=%p start transaction\n", asdum); - asdum->SetConnection(conn); - LIFELOCK_ACQUIRE; - EsDtc_set_async(conn, asdum); - LIFELOCK_RELEASE; - - return SQL_SUCCESS; -} - -EXTERN_C RETCODE IsolateDtcConn(void *conn, BOOL continueConnection) { - IAsyncES *async; - - LIFELOCK_ACQUIRE; - if (async = (IAsyncES *)EsDtc_get_async(conn), NULL != async) { - if (EsDtc_get_property(conn, idleInGlobalTransaction)) { - async->AddRef(); - LIFELOCK_RELEASE; - async->separateXAConn(false, continueConnection ? true : false); - async->Release(); - } else - LIFELOCK_RELEASE; - } else - LIFELOCK_RELEASE; - return SQL_SUCCESS; -} - -static ITransactionDispenser *getITransactionDispenser(DWORD grfOptions, - HRESULT *hres) { - static ITransactionDispenser *pDtc = NULL; - HRESULT res = S_OK; - - if (!pDtc) { - res = DtcGetTransactionManagerEx(NULL, NULL, IID_ITransactionDispenser, - - grfOptions, NULL, (void **)&pDtc); - if (FAILED(res)) { - mylog("DtcGetTransactionManager error %x\n", res); - pDtc = NULL; - } - } - if (hres) - *hres = res; - - return pDtc; -} - -EXTERN_C void *GetTransactionObject(HRESULT *hres) { - ITransaction *pTra = NULL; - ITransactionDispenser *pDtc = NULL; - - if (pDtc = getITransactionDispenser(OLE_TM_FLAG_NONE, hres), NULL == pDtc) - return pTra; - HRESULT res = pDtc->BeginTransaction(NULL, ISOLATIONLEVEL_READCOMMITTED, 0, - NULL, &pTra); - switch (res) { - case S_OK: - break; - default: - pTra = NULL; - } - if (hres) - *hres = res; - return pTra; -} - -EXTERN_C void ReleaseTransactionObject(void *pObj) { - ITransaction *pTra = (ITransaction *)pObj; - - if (!pTra) - return; - pTra->Release(); -} - -EXTERN_C RETCODE EnlistInDtc(void *conn, void *pTra, int method) { - ITransactionDispenser *pDtc = NULL; - RETCODE ret; - - if (!pTra) { - IAsyncES *asdum = (IAsyncES *)EsDtc_get_async(conn); - EsDtc_set_property(conn, enlisted, (void *)0); - return SQL_SUCCESS; - } - if (CONN_IS_IN_TRANS(conn)) { - EsDtc_one_phase_operation(conn, SHUTDOWN_LOCAL_TRANSACTION); - } - HRESULT hres; - pDtc = getITransactionDispenser(OLE_TM_FLAG_NODEMANDSTART, &hres); - if (!pDtc) { - char errmsg[128]; - snprintf(errmsg, sizeof(errmsg), - "enlistment error:DtcGetTransactionManager error code=%x", - hres); - EsDtc_set_error(conn, errmsg, __FUNCTION__); - return SQL_ERROR; - } - ret = EnlistInDtc_1pipe(conn, (ITransaction *)pTra, pDtc, method); - if (SQL_SUCCEEDED(ret)) - EsDtc_set_property(conn, enlisted, (void *)1); - return 
ret; -} - -EXTERN_C RETCODE DtcOnDisconnect(void *conn) { - mylog("DtcOnDisconnect\n"); - LIFELOCK_ACQUIRE; - IAsyncES *asdum = (IAsyncES *)EsDtc_get_async(conn); - if (asdum) { - asdum->AddRef(); - LIFELOCK_RELEASE; - asdum->ReleaseConnection(); - asdum->Release(); - } else - LIFELOCK_RELEASE; - return SQL_SUCCESS; -} - -#endif /* _HANDLE_ENLIST_IN_DTC_ */ diff --git a/sql-odbc/src/opensearchenlist/opensearch_enlist.h b/sql-odbc/src/opensearchenlist/opensearch_enlist.h deleted file mode 100644 index a87d402bd6..0000000000 --- a/sql-odbc/src/opensearchenlist/opensearch_enlist.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef __OPENSEARCH_ENLIST_H__ -#define __OPENSEARCH_ENLIST_H__ - -#ifdef __cplusplus -extern "C" { -#endif -#ifdef WIN32 -#ifdef _HANDLE_ENLIST_IN_DTC_ - -#undef DLL_DECLARE -#ifdef _OPENSEARCH_ENLIST_FUNCS_IMPLEMENT_ -#define DLL_DECLARE _declspec(dllexport) -#else -#ifdef _OPENSEARCH_ENLIST_FUNCS_IMPORT_ -#define DLL_DECLARE _declspec(dllimport) -#else -#define DLL_DECLARE -#endif /* _OPENSEARCH_ENLIST_FUNCS_IMPORT_ */ -#endif /* _OPENSEARCH_ENLIST_FUNCS_IMPLEMENT_ */ - -RETCODE EnlistInDtc(void *conn, void *pTra, int method); -RETCODE DtcOnDisconnect(void *); -RETCODE IsolateDtcConn(void *, BOOL continueConnection); -// for testing -DLL_DECLARE void *GetTransactionObject(HRESULT *hres); -DLL_DECLARE void ReleaseTransactionObject(void *); - -#endif /* _HANDLE_ENLIST_IN_DTC_ */ -#endif /* WIN32 */ - -#ifdef __cplusplus -} -#endif -#endif /* __OPENSEARCH_ENLIST_H__ */ diff --git a/sql-odbc/src/sqlodbc/CMakeLists.txt b/sql-odbc/src/sqlodbc/CMakeLists.txt deleted file mode 100644 index f926dfe644..0000000000 --- a/sql-odbc/src/sqlodbc/CMakeLists.txt +++ /dev/null @@ -1,62 +0,0 @@ -project(sqlodbc) - -# Source files for sqlodbc -set( SOURCE_FILES - bind.c columninfo.c connection.c convert.c - descriptor.c dlg_specific.c drvconn.c options.c - environ.c execute.c info.c loadlib.c - misc.c multibyte.c mylog.c tuple.c - parse.c results.c statement.c odbcapi30.c - qresult.c odbcapi30w.c opensearch_api30.c opensearch_types.c - opensearch_utility.cpp opensearch_communication.cpp opensearch_connection.cpp opensearch_odbc.c - opensearch_driver_connect.cpp opensearch_helper.cpp opensearch_info.cpp opensearch_parse_result.cpp - opensearch_semaphore.cpp opensearch_statement.cpp win_unicode.c odbcapi.c - odbcapiw.c opensearch_result_queue.cpp - ) -if(WIN32) -set(SOURCE_FILES ${SOURCE_FILES} dlg_wingui.c setup.c) -endif() - -set( HEADER_FILES - bind.h catfunc.h columninfo.h - convert.h descriptor.h dlg_specific.h drvconn.h - environ.h opensearch_apifunc.h opensearch_communication.h opensearch_parse_result.h - opensearch_connection.h opensearch_driver_connect.h opensearch_helper.h opensearch_info.h - opensearch_statement.h opensearch_types.h loadlib.h - misc.h multibyte.h mylog.h opensearch_utility.h - resource.h statement.h tuple.h unicode_support.h - opensearch_apifunc.h opensearch_odbc.h opensearch_semaphore.h qresult.h - version.h win_setup.h opensearch_result_queue.h - ) - -# Generate dll (SHARED) -if(WIN32) -set(RESOURCE_FILES opensearch_odbc.rc) -add_library(sqlodbc SHARED ${SOURCE_FILES} ${HEADER_FILES} ${RESOURCE_FILES} ${AWSSDK_LIB_DIR}) -else() -add_library(sqlodbc SHARED ${SOURCE_FILES} ${HEADER_FILES}) -endif() - -include_directories( - ${LIBRARY_DIRECTORY}/../src - ${CMAKE_CURRENT_SOURCE_DIR} - ${OPENSEARCHENLIST_SRC} - ${RABBIT_SRC} - ${RAPIDJSON_SRC} - ${AWSSDK_INCLUDE_DIR} - ) - -# Platform specific library dependencies -if(WIN32) - # Windows specifiec - target_link_libraries(sqlodbc 
wsock32 ws2_32 winmm user32 gdi32 legacy_stdio_definitions aws-cpp-sdk-core kernel32 advapi32 secur32 XOleHlp Wldap32 crypt32 Normaliz odbccp32 odbc32) - target_link_libraries(sqlodbc debug msvcrtd) - target_link_libraries(sqlodbc optimized msvcrt) -elseif(APPLE) - # Apple specific - target_link_libraries(sqlodbc iodbc iodbcinst aws-cpp-sdk-core) -elseif(UNIX) - # Unix specific - include_directories(/usr/src/linux-headers-5.0.0-27/include) - target_link_libraries(sqlodbc aws-cpp-sdk-core odbc odbcinst) -endif() diff --git a/sql-odbc/src/sqlodbc/LICENSE.txt b/sql-odbc/src/sqlodbc/LICENSE.txt deleted file mode 100644 index f2ff00e7d8..0000000000 --- a/sql-odbc/src/sqlodbc/LICENSE.txt +++ /dev/null @@ -1,961 +0,0 @@ - GNU LIBRARY GENERAL PUBLIC LICENSE - - Version 2, June 1991 - - - - Copyright (C) 1991 Free Software Foundation, Inc. - - 675 Mass Ave, Cambridge, MA 02139, USA - - Everyone is permitted to copy and distribute verbatim copies - - of this license document, but changing it is not allowed. - - - -[This is the first released version of the library GPL. It is - - numbered 2 because it goes with version 2 of the ordinary GPL.] - - - - Preamble - - - - The licenses for most software are designed to take away your - -freedom to share and change it. By contrast, the GNU General Public - -Licenses are intended to guarantee your freedom to share and change - -free software--to make sure the software is free for all its users. - - - - This license, the Library General Public License, applies to some - -specially designated Free Software Foundation software, and to any - -other libraries whose authors decide to use it. You can use it for - -your libraries, too. - - - - When we speak of free software, we are referring to freedom, not - -price. Our General Public Licenses are designed to make sure that you - -have the freedom to distribute copies of free software (and charge for - -this service if you wish), that you receive source code or can get it - -if you want it, that you can change the software or use pieces of it - -in new free programs; and that you know you can do these things. - - - - To protect your rights, we need to make restrictions that forbid - -anyone to deny you these rights or to ask you to surrender the rights. - -These restrictions translate to certain responsibilities for you if - -you distribute copies of the library, or if you modify it. - - - - For example, if you distribute copies of the library, whether gratis - -or for a fee, you must give the recipients all the rights that we gave - -you. You must make sure that they, too, receive or can get the source - -code. If you link a program with the library, you must provide - -complete object files to the recipients so that they can relink them - -with the library, after making changes to the library and recompiling - -it. And you must show them these terms so they know their rights. - - - - Our method of protecting your rights has two steps: (1) copyright - -the library, and (2) offer you this license which gives you legal - -permission to copy, distribute and/or modify the library. - - - - Also, for each distributor's protection, we want to make certain - -that everyone understands that there is no warranty for this free - -library. If the library is modified by someone else and passed on, we - -want its recipients to know that what they have is not the original - -version, so that any problems introduced by others will not reflect on - -the original authors' reputations. 
- - - - Finally, any free program is threatened constantly by software - -patents. We wish to avoid the danger that companies distributing free - -software will individually obtain patent licenses, thus in effect - -transforming the program into proprietary software. To prevent this, - -we have made it clear that any patent must be licensed for everyone's - -free use or not licensed at all. - - - - Most GNU software, including some libraries, is covered by the ordinary - -GNU General Public License, which was designed for utility programs. This - -license, the GNU Library General Public License, applies to certain - -designated libraries. This license is quite different from the ordinary - -one; be sure to read it in full, and don't assume that anything in it is - -the same as in the ordinary license. - - - - The reason we have a separate public license for some libraries is that - -they blur the distinction we usually make between modifying or adding to a - -program and simply using it. Linking a program with a library, without - -changing the library, is in some sense simply using the library, and is - -analogous to running a utility program or application program. However, in - -a textual and legal sense, the linked executable is a combined work, a - -derivative of the original library, and the ordinary General Public License - -treats it as such. - - - - Because of this blurred distinction, using the ordinary General - -Public License for libraries did not effectively promote software - -sharing, because most developers did not use the libraries. We - -concluded that weaker conditions might promote sharing better. - - - - However, unrestricted linking of non-free programs would deprive the - -users of those programs of all benefit from the free status of the - -libraries themselves. This Library General Public License is intended to - -permit developers of non-free programs to use free libraries, while - -preserving your freedom as a user of such programs to change the free - -libraries that are incorporated in them. (We have not seen how to achieve - -this as regards changes in header files, but we have achieved it as regards - -changes in the actual functions of the Library.) The hope is that this - -will lead to faster development of free libraries. - - - - The precise terms and conditions for copying, distribution and - -modification follow. Pay close attention to the difference between a - -"work based on the library" and a "work that uses the library". The - -former contains code derived from the library, while the latter only - -works together with the library. - - - - Note that it is possible for a library to be covered by the ordinary - -General Public License rather than by this special one. - - - - GNU LIBRARY GENERAL PUBLIC LICENSE - - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - - - 0. This License Agreement applies to any software library which - -contains a notice placed by the copyright holder or other authorized - -party saying it may be distributed under the terms of this Library - -General Public License (also called "this License"). Each licensee is - -addressed as "you". - - - - A "library" means a collection of software functions and/or data - -prepared so as to be conveniently linked with application programs - -(which use some of those functions and data) to form executables. - - - - The "Library", below, refers to any such software library or work - -which has been distributed under these terms. 
A "work based on the - -Library" means either the Library or any derivative work under - -copyright law: that is to say, a work containing the Library or a - -portion of it, either verbatim or with modifications and/or translated - -straightforwardly into another language. (Hereinafter, translation is - -included without limitation in the term "modification".) - - - - "Source code" for a work means the preferred form of the work for - -making modifications to it. For a library, complete source code means - -all the source code for all modules it contains, plus any associated - -interface definition files, plus the scripts used to control compilation - -and installation of the library. - - - - Activities other than copying, distribution and modification are not - -covered by this License; they are outside its scope. The act of - -running a program using the Library is not restricted, and output from - -such a program is covered only if its contents constitute a work based - -on the Library (independent of the use of the Library in a tool for - -writing it). Whether that is true depends on what the Library does - -and what the program that uses the Library does. - - - - 1. You may copy and distribute verbatim copies of the Library's - -complete source code as you receive it, in any medium, provided that - -you conspicuously and appropriately publish on each copy an - -appropriate copyright notice and disclaimer of warranty; keep intact - -all the notices that refer to this License and to the absence of any - -warranty; and distribute a copy of this License along with the - -Library. - - - - You may charge a fee for the physical act of transferring a copy, - -and you may at your option offer warranty protection in exchange for a - -fee. - - - - 2. You may modify your copy or copies of the Library or any portion - -of it, thus forming a work based on the Library, and copy and - -distribute such modifications or work under the terms of Section 1 - -above, provided that you also meet all of these conditions: - - - - a) The modified work must itself be a software library. - - - - b) You must cause the files modified to carry prominent notices - - stating that you changed the files and the date of any change. - - - - c) You must cause the whole of the work to be licensed at no - - charge to all third parties under the terms of this License. - - - - d) If a facility in the modified Library refers to a function or a - - table of data to be supplied by an application program that uses - - the facility, other than as an argument passed when the facility - - is invoked, then you must make a good faith effort to ensure that, - - in the event an application does not supply such function or - - table, the facility still operates, and performs whatever part of - - its purpose remains meaningful. - - - - (For example, a function in a library to compute square roots has - - a purpose that is entirely well-defined independent of the - - application. Therefore, Subsection 2d requires that any - - application-supplied function or table used by this function must - - be optional: if the application does not supply it, the square - - root function must still compute square roots.) - - - -These requirements apply to the modified work as a whole. If - -identifiable sections of that work are not derived from the Library, - -and can be reasonably considered independent and separate works in - -themselves, then this License, and its terms, do not apply to those - -sections when you distribute them as separate works. 
But when you - -distribute the same sections as part of a whole which is a work based - -on the Library, the distribution of the whole must be on the terms of - -this License, whose permissions for other licensees extend to the - -entire whole, and thus to each and every part regardless of who wrote - -it. - - - -Thus, it is not the intent of this section to claim rights or contest - -your rights to work written entirely by you; rather, the intent is to - -exercise the right to control the distribution of derivative or - -collective works based on the Library. - - - -In addition, mere aggregation of another work not based on the Library - -with the Library (or with a work based on the Library) on a volume of - -a storage or distribution medium does not bring the other work under - -the scope of this License. - - - - 3. You may opt to apply the terms of the ordinary GNU General Public - -License instead of this License to a given copy of the Library. To do - -this, you must alter all the notices that refer to this License, so - -that they refer to the ordinary GNU General Public License, version 2, - -instead of to this License. (If a newer version than version 2 of the - -ordinary GNU General Public License has appeared, then you can specify - -that version instead if you wish.) Do not make any other change in - -these notices. - - - - Once this change is made in a given copy, it is irreversible for - -that copy, so the ordinary GNU General Public License applies to all - -subsequent copies and derivative works made from that copy. - - - - This option is useful when you wish to copy part of the code of - -the Library into a program that is not a library. - - - - 4. You may copy and distribute the Library (or a portion or - -derivative of it, under Section 2) in object code or executable form - -under the terms of Sections 1 and 2 above provided that you accompany - -it with the complete corresponding machine-readable source code, which - -must be distributed under the terms of Sections 1 and 2 above on a - -medium customarily used for software interchange. - - - - If distribution of object code is made by offering access to copy - -from a designated place, then offering equivalent access to copy the - -source code from the same place satisfies the requirement to - -distribute the source code, even though third parties are not - -compelled to copy the source along with the object code. - - - - 5. A program that contains no derivative of any portion of the - -Library, but is designed to work with the Library by being compiled or - -linked with it, is called a "work that uses the Library". Such a - -work, in isolation, is not a derivative work of the Library, and - -therefore falls outside the scope of this License. - - - - However, linking a "work that uses the Library" with the Library - -creates an executable that is a derivative of the Library (because it - -contains portions of the Library), rather than a "work that uses the - -library". The executable is therefore covered by this License. - -Section 6 states terms for distribution of such executables. - - - - When a "work that uses the Library" uses material from a header file - -that is part of the Library, the object code for the work may be a - -derivative work of the Library even though the source code is not. - -Whether this is true is especially significant if the work can be - -linked without the Library, or if the work is itself a library. The - -threshold for this to be true is not precisely defined by law. 
- - - - If such an object file uses only numerical parameters, data - -structure layouts and accessors, and small macros and small inline - -functions (ten lines or less in length), then the use of the object - -file is unrestricted, regardless of whether it is legally a derivative - -work. (Executables containing this object code plus portions of the - -Library will still fall under Section 6.) - - - - Otherwise, if the work is a derivative of the Library, you may - -distribute the object code for the work under the terms of Section 6. - -Any executables containing that work also fall under Section 6, - -whether or not they are linked directly with the Library itself. - - - - 6. As an exception to the Sections above, you may also compile or - -link a "work that uses the Library" with the Library to produce a - -work containing portions of the Library, and distribute that work - -under terms of your choice, provided that the terms permit - -modification of the work for the customer's own use and reverse - -engineering for debugging such modifications. - - - - You must give prominent notice with each copy of the work that the - -Library is used in it and that the Library and its use are covered by - -this License. You must supply a copy of this License. If the work - -during execution displays copyright notices, you must include the - -copyright notice for the Library among them, as well as a reference - -directing the user to the copy of this License. Also, you must do one - -of these things: - - - - a) Accompany the work with the complete corresponding - - machine-readable source code for the Library including whatever - - changes were used in the work (which must be distributed under - - Sections 1 and 2 above); and, if the work is an executable linked - - with the Library, with the complete machine-readable "work that - - uses the Library", as object code and/or source code, so that the - - user can modify the Library and then relink to produce a modified - - executable containing the modified Library. (It is understood - - that the user who changes the contents of definitions files in the - - Library will not necessarily be able to recompile the application - - to use the modified definitions.) - - - - b) Accompany the work with a written offer, valid for at - - least three years, to give the same user the materials - - specified in Subsection 6a, above, for a charge no more - - than the cost of performing this distribution. - - - - c) If distribution of the work is made by offering access to copy - - from a designated place, offer equivalent access to copy the above - - specified materials from the same place. - - - - d) Verify that the user has already received a copy of these - - materials or that you have already sent this user a copy. - - - - For an executable, the required form of the "work that uses the - -Library" must include any data and utility programs needed for - -reproducing the executable from it. However, as a special exception, - -the source code distributed need not include anything that is normally - -distributed (in either source or binary form) with the major - -components (compiler, kernel, and so on) of the operating system on - -which the executable runs, unless that component itself accompanies - -the executable. - - - - It may happen that this requirement contradicts the license - -restrictions of other proprietary libraries that do not normally - -accompany the operating system. 
Such a contradiction means you cannot - -use both them and the Library together in an executable that you - -distribute. - - - - 7. You may place library facilities that are a work based on the - -Library side-by-side in a single library together with other library - -facilities not covered by this License, and distribute such a combined - -library, provided that the separate distribution of the work based on - -the Library and of the other library facilities is otherwise - -permitted, and provided that you do these two things: - - - - a) Accompany the combined library with a copy of the same work - - based on the Library, uncombined with any other library - - facilities. This must be distributed under the terms of the - - Sections above. - - - - b) Give prominent notice with the combined library of the fact - - that part of it is a work based on the Library, and explaining - - where to find the accompanying uncombined form of the same work. - - - - 8. You may not copy, modify, sublicense, link with, or distribute - -the Library except as expressly provided under this License. Any - -attempt otherwise to copy, modify, sublicense, link with, or - -distribute the Library is void, and will automatically terminate your - -rights under this License. However, parties who have received copies, - -or rights, from you under this License will not have their licenses - -terminated so long as such parties remain in full compliance. - - - - 9. You are not required to accept this License, since you have not - -signed it. However, nothing else grants you permission to modify or - -distribute the Library or its derivative works. These actions are - -prohibited by law if you do not accept this License. Therefore, by - -modifying or distributing the Library (or any work based on the - -Library), you indicate your acceptance of this License to do so, and - -all its terms and conditions for copying, distributing or modifying - -the Library or works based on it. - - - - 10. Each time you redistribute the Library (or any work based on the - -Library), the recipient automatically receives a license from the - -original licensor to copy, distribute, link with or modify the Library - -subject to these terms and conditions. You may not impose any further - -restrictions on the recipients' exercise of the rights granted herein. - -You are not responsible for enforcing compliance by third parties to - -this License. - - - - 11. If, as a consequence of a court judgment or allegation of patent - -infringement or for any other reason (not limited to patent issues), - -conditions are imposed on you (whether by court order, agreement or - -otherwise) that contradict the conditions of this License, they do not - -excuse you from the conditions of this License. If you cannot - -distribute so as to satisfy simultaneously your obligations under this - -License and any other pertinent obligations, then as a consequence you - -may not distribute the Library at all. For example, if a patent - -license would not permit royalty-free redistribution of the Library by - -all those who receive copies directly or indirectly through you, then - -the only way you could satisfy both it and this License would be to - -refrain entirely from distribution of the Library. - - - -If any portion of this section is held invalid or unenforceable under any - -particular circumstance, the balance of the section is intended to apply, - -and the section as a whole is intended to apply in other circumstances. 
- - - -It is not the purpose of this section to induce you to infringe any - -patents or other property right claims or to contest validity of any - -such claims; this section has the sole purpose of protecting the - -integrity of the free software distribution system which is - -implemented by public license practices. Many people have made - -generous contributions to the wide range of software distributed - -through that system in reliance on consistent application of that - -system; it is up to the author/donor to decide if he or she is willing - -to distribute software through any other system and a licensee cannot - -impose that choice. - - - -This section is intended to make thoroughly clear what is believed to - -be a consequence of the rest of this License. - - - - 12. If the distribution and/or use of the Library is restricted in - -certain countries either by patents or by copyrighted interfaces, the - -original copyright holder who places the Library under this License may add - -an explicit geographical distribution limitation excluding those countries, - -so that distribution is permitted only in or among countries not thus - -excluded. In such case, this License incorporates the limitation as if - -written in the body of this License. - - - - 13. The Free Software Foundation may publish revised and/or new - -versions of the Library General Public License from time to time. - -Such new versions will be similar in spirit to the present version, - -but may differ in detail to address new problems or concerns. - - - -Each version is given a distinguishing version number. If the Library - -specifies a version number of this License which applies to it and - -"any later version", you have the option of following the terms and - -conditions either of that version or of any later version published by - -the Free Software Foundation. If the Library does not specify a - -license version number, you may choose any version ever published by - -the Free Software Foundation. - - - - 14. If you wish to incorporate parts of the Library into other free - -programs whose distribution conditions are incompatible with these, - -write to the author to ask for permission. For software which is - -copyrighted by the Free Software Foundation, write to the Free - -Software Foundation; we sometimes make exceptions for this. Our - -decision will be guided by the two goals of preserving the free status - -of all derivatives of our free software and of promoting the sharing - -and reuse of software generally. - - - - NO WARRANTY - - - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO - -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. - -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR - -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY - -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE - -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE - -LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME - -THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - - - 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN - -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY - -AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU - -FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR - -CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE - -LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING - -RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A - -FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF - -SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - -DAMAGES. - - - - END OF TERMS AND CONDITIONS - - - - Appendix: How to Apply These Terms to Your New Libraries - - - - If you develop a new library, and you want it to be of the greatest - -possible use to the public, we recommend making it free software that - -everyone can redistribute and change. You can do so by permitting - -redistribution under these terms (or, alternatively, under the terms of the - -ordinary General Public License). - - - - To apply these terms, attach the following notices to the library. It is - -safest to attach them to the start of each source file to most effectively - -convey the exclusion of warranty; and each file should have at least the - -"copyright" line and a pointer to where the full notice is found. - - - - - - Copyright (C) - - - - This library is free software; you can redistribute it and/or - - modify it under the terms of the GNU Library General Public - - License as published by the Free Software Foundation; either - - version 2 of the License, or (at your option) any later version. - - - - This library is distributed in the hope that it will be useful, - - but WITHOUT ANY WARRANTY; without even the implied warranty of - - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - - Library General Public License for more details. - - - - You should have received a copy of the GNU Library General Public - - License along with this library; if not, write to the Free - - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - - - -Also add information on how to contact you by electronic and paper mail. - - - -You should also get your employer (if you work as a programmer) or your - -school, if any, to sign a "copyright disclaimer" for the library, if - -necessary. Here is a sample; alter the names: - - - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - - library `Frob' (a library for tweaking knobs) written by James Random Hacker. - - - - , 1 April 1990 - - Ty Coon, President of Vice - - - -That's all there is to it! diff --git a/sql-odbc/src/sqlodbc/bind.c b/sql-odbc/src/sqlodbc/bind.c deleted file mode 100644 index 889c4c66c1..0000000000 --- a/sql-odbc/src/sqlodbc/bind.c +++ /dev/null @@ -1,687 +0,0 @@ -#include "bind.h" - -#include -#include - -#include "descriptor.h" -#include "environ.h" -#include "misc.h" -#include "multibyte.h" -#include "opensearch_apifunc.h" -#include "opensearch_types.h" -#include "qresult.h" -#include "statement.h" - -/* Associate a user-supplied buffer with a database column. 
*/ -RETCODE SQL_API OPENSEARCHAPI_BindCol(HSTMT hstmt, SQLUSMALLINT icol, - SQLSMALLINT fCType, PTR rgbValue, - SQLLEN cbValueMax, SQLLEN *pcbValue) { - StatementClass *stmt = (StatementClass *)hstmt; - CSTR func = "OPENSEARCHAPI_BindCol"; - ARDFields *opts; - GetDataInfo *gdata_info; - BindInfoClass *bookmark; - RETCODE ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - MYLOG(OPENSEARCH_DEBUG, "**** : stmt = %p, icol = %d\n", stmt, icol); - MYLOG(OPENSEARCH_DEBUG, "**** : fCType=%d rgb=%p valusMax=" FORMAT_LEN " pcb=%p\n", - fCType, rgbValue, cbValueMax, pcbValue); - - if (!stmt) { - SC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - opts = SC_get_ARDF(stmt); - if (stmt->status == STMT_EXECUTING) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Can't bind columns while statement is still executing.", - func); - return SQL_ERROR; - } - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - SC_clear_error(stmt); - /* If the bookmark column is being bound, then just save it */ - if (icol == 0) { - bookmark = opts->bookmark; - if (rgbValue == NULL) { - if (bookmark) { - bookmark->buffer = NULL; - bookmark->used = bookmark->indicator = NULL; - } - } else { - /* Make sure it is the bookmark data type */ - switch (fCType) { - case SQL_C_BOOKMARK: - case SQL_C_VARBOOKMARK: - break; - default: - SC_set_error(stmt, STMT_PROGRAM_TYPE_OUT_OF_RANGE, - "Bind column 0 is not of type SQL_C_BOOKMARK", - func); - MYLOG( - OPENSEARCH_ERROR, - "Bind column 0 is type %d not of type SQL_C_BOOKMARK\n", - fCType); - ret = SQL_ERROR; - goto cleanup; - } - - bookmark = ARD_AllocBookmark(opts); - bookmark->buffer = rgbValue; - bookmark->used = bookmark->indicator = pcbValue; - bookmark->buflen = cbValueMax; - bookmark->returntype = fCType; - } - goto cleanup; - } - - /* - * Allocate enough bindings if not already done. Most likely, - * execution of a statement would have setup the necessary bindings. - * But some apps call BindCol before any statement is executed. 
- */ - if (icol > opts->allocated) - extend_column_bindings(opts, icol); - gdata_info = SC_get_GDTI(stmt); - if (icol > gdata_info->allocated) - extend_getdata_info(gdata_info, icol, FALSE); - - /* check to see if the bindings were allocated */ - if (!opts->bindings || !gdata_info->gdata) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Could not allocate memory for bindings.", func); - ret = SQL_ERROR; - goto cleanup; - } - - /* use zero based col numbers from here out */ - icol--; - - /* Reset for SQLGetData */ - GETDATA_RESET(gdata_info->gdata[icol]); - - if (rgbValue == NULL) { - /* we have to unbind the column */ - opts->bindings[icol].buflen = 0; - opts->bindings[icol].buffer = NULL; - opts->bindings[icol].used = opts->bindings[icol].indicator = NULL; - opts->bindings[icol].returntype = SQL_C_CHAR; - opts->bindings[icol].precision = 0; - opts->bindings[icol].scale = 0; - if (gdata_info->gdata[icol].ttlbuf) - free(gdata_info->gdata[icol].ttlbuf); - gdata_info->gdata[icol].ttlbuf = NULL; - gdata_info->gdata[icol].ttlbuflen = 0; - gdata_info->gdata[icol].ttlbufused = 0; - } else { - /* ok, bind that column */ - opts->bindings[icol].buflen = cbValueMax; - opts->bindings[icol].buffer = rgbValue; - opts->bindings[icol].used = opts->bindings[icol].indicator = pcbValue; - opts->bindings[icol].returntype = fCType; - opts->bindings[icol].precision = 0; - switch (fCType) { - case SQL_C_NUMERIC: - opts->bindings[icol].precision = 32; - break; - case SQL_C_TIMESTAMP: - case SQL_C_INTERVAL_DAY_TO_SECOND: - case SQL_C_INTERVAL_HOUR_TO_SECOND: - case SQL_C_INTERVAL_MINUTE_TO_SECOND: - case SQL_C_INTERVAL_SECOND: - opts->bindings[icol].precision = 6; - break; - } - opts->bindings[icol].scale = 0; - - MYLOG(OPENSEARCH_DEBUG, " bound buffer[%d] = %p\n", icol, - opts->bindings[icol].buffer); - } - -cleanup: -#undef return - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_NumParams(HSTMT hstmt, SQLSMALLINT *pcpar) { - StatementClass *stmt = (StatementClass *)hstmt; - if (pcpar != NULL) { - *pcpar = 0; - } else { - SC_set_error(stmt, STMT_EXEC_ERROR, "Parameter count address is null", - "OPENSEARCHAPI_NumParams"); - return SQL_ERROR; - } - return SQL_SUCCESS; -} - -/* - * Bindings Implementation - */ -static BindInfoClass *create_empty_bindings(int num_columns) { - BindInfoClass *new_bindings; - int i; - - new_bindings = (BindInfoClass *)malloc(num_columns * sizeof(BindInfoClass)); - if (!new_bindings) - return NULL; - - for (i = 0; i < num_columns; i++) { - new_bindings[i].buflen = 0; - new_bindings[i].buffer = NULL; - new_bindings[i].used = new_bindings[i].indicator = NULL; - } - - return new_bindings; -} - -void extend_parameter_bindings(APDFields *self, SQLSMALLINT num_params) { - ParameterInfoClass *new_bindings; - - MYLOG(OPENSEARCH_TRACE, - "entering ... 
self=%p, parameters_allocated=%d, num_params=%d,%p\n", - self, self->allocated, num_params, self->parameters); - - /* - * if we have too few, allocate room for more, and copy the old - * entries into the new structure - */ - if (self->allocated < num_params) { - new_bindings = (ParameterInfoClass *)realloc( - self->parameters, sizeof(ParameterInfoClass) * num_params); - if (!new_bindings) { - MYLOG(OPENSEARCH_DEBUG, - "unable to create %d new bindings from %d old bindings\n", - num_params, self->allocated); - - if (self->parameters) - free(self->parameters); - self->parameters = NULL; - self->allocated = 0; - return; - } - memset(&new_bindings[self->allocated], 0, - sizeof(ParameterInfoClass) * (num_params - self->allocated)); - - self->parameters = new_bindings; - self->allocated = num_params; - } - - MYLOG(OPENSEARCH_TRACE, "leaving %p\n", self->parameters); -} - -void extend_iparameter_bindings(IPDFields *self, SQLSMALLINT num_params) { - ParameterImplClass *new_bindings; - - MYLOG(OPENSEARCH_TRACE, - "entering ... self=%p, parameters_allocated=%d, num_params=%d\n", - self, self->allocated, num_params); - - /* - * if we have too few, allocate room for more, and copy the old - * entries into the new structure - */ - if (self->allocated < num_params) { - new_bindings = (ParameterImplClass *)realloc( - self->parameters, sizeof(ParameterImplClass) * num_params); - if (!new_bindings) { - MYLOG(OPENSEARCH_DEBUG, - "unable to create %d new bindings from %d old bindings\n", - num_params, self->allocated); - - if (self->parameters) - free(self->parameters); - self->parameters = NULL; - self->allocated = 0; - return; - } - memset(&new_bindings[self->allocated], 0, - sizeof(ParameterImplClass) * (num_params - self->allocated)); - - self->parameters = new_bindings; - self->allocated = num_params; - } - - MYLOG(OPENSEARCH_TRACE, "leaving %p\n", self->parameters); -} - -void reset_a_parameter_binding(APDFields *self, int ipar) { - MYLOG(OPENSEARCH_TRACE, "entering ... self=%p, parameters_allocated=%d, ipar=%d\n", - self, self->allocated, ipar); - - if (ipar < 1 || ipar > self->allocated) - return; - - ipar--; - self->parameters[ipar].buflen = 0; - self->parameters[ipar].buffer = NULL; - self->parameters[ipar].used = self->parameters[ipar].indicator = NULL; - self->parameters[ipar].CType = 0; - self->parameters[ipar].data_at_exec = FALSE; - self->parameters[ipar].precision = 0; - self->parameters[ipar].scale = 0; -} - -void reset_a_iparameter_binding(IPDFields *self, int ipar) { - MYLOG(OPENSEARCH_TRACE, "entering ... 
self=%p, parameters_allocated=%d, ipar=%d\n", - self, self->allocated, ipar); - - if (ipar < 1 || ipar > self->allocated) - return; - - ipar--; - NULL_THE_NAME(self->parameters[ipar].paramName); - self->parameters[ipar].paramType = 0; - self->parameters[ipar].SQLType = 0; - self->parameters[ipar].column_size = 0; - self->parameters[ipar].decimal_digits = 0; - self->parameters[ipar].precision = 0; - self->parameters[ipar].scale = 0; - PIC_set_opensearch_type(self->parameters[ipar], 0); -} - -int CountParameters(const StatementClass *self, Int2 *inputCount, Int2 *ioCount, - Int2 *outputCount) { - IPDFields *ipdopts = SC_get_IPDF(self); - int i, num_params, valid_count; - - if (inputCount) - *inputCount = 0; - if (ioCount) - *ioCount = 0; - if (outputCount) - *outputCount = 0; - if (!ipdopts) - return -1; - num_params = self->num_params; - if (ipdopts->allocated < num_params) - num_params = ipdopts->allocated; - for (i = 0, valid_count = 0; i < num_params; i++) { - if (SQL_PARAM_OUTPUT == ipdopts->parameters[i].paramType) { - if (outputCount) { - (*outputCount)++; - valid_count++; - } - } else if (SQL_PARAM_INPUT_OUTPUT == ipdopts->parameters[i].paramType) { - if (ioCount) { - (*ioCount)++; - valid_count++; - } - } else if (inputCount) { - (*inputCount)++; - valid_count++; - } - } - return valid_count; -} - -/* - * Free parameters and free the memory. - */ -void APD_free_params(APDFields *apdopts, char option) { - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", apdopts); - - if (!apdopts->parameters) - return; - - if (option == STMT_FREE_PARAMS_ALL) { - free(apdopts->parameters); - apdopts->parameters = NULL; - apdopts->allocated = 0; - } - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void PDATA_free_params(PutDataInfo *pdata, char option) { - int i; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", pdata); - - if (!pdata->pdata) - return; - - for (i = 0; i < pdata->allocated; i++) { - if (pdata->pdata[i].EXEC_used) { - free(pdata->pdata[i].EXEC_used); - pdata->pdata[i].EXEC_used = NULL; - } - if (pdata->pdata[i].EXEC_buffer) { - free(pdata->pdata[i].EXEC_buffer); - pdata->pdata[i].EXEC_buffer = NULL; - } - } - - if (option == STMT_FREE_PARAMS_ALL) { - free(pdata->pdata); - pdata->pdata = NULL; - pdata->allocated = 0; - } - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -/* - * Free parameters and free the memory. - */ -void IPD_free_params(IPDFields *ipdopts, char option) { - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", ipdopts); - - if (!ipdopts->parameters) - return; - if (option == STMT_FREE_PARAMS_ALL) { - free(ipdopts->parameters); - ipdopts->parameters = NULL; - ipdopts->allocated = 0; - } - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void extend_column_bindings(ARDFields *self, SQLSMALLINT num_columns) { - BindInfoClass *new_bindings; - SQLSMALLINT i; - - MYLOG(OPENSEARCH_TRACE, - "entering ... 
self=%p, bindings_allocated=%d, num_columns=%d\n", self, - self->allocated, num_columns); - - /* - * if we have too few, allocate room for more, and copy the old - * entries into the new structure - */ - if (self->allocated < num_columns) { - new_bindings = create_empty_bindings(num_columns); - if (!new_bindings) { - MYLOG(OPENSEARCH_DEBUG, - "unable to create %d new bindings from %d old bindings\n", - num_columns, self->allocated); - - if (self->bindings) { - free(self->bindings); - self->bindings = NULL; - } - self->allocated = 0; - return; - } - - if (self->bindings) { - for (i = 0; i < self->allocated; i++) - new_bindings[i] = self->bindings[i]; - - free(self->bindings); - } - - self->bindings = new_bindings; - self->allocated = num_columns; - } - - /* - * There is no reason to zero out extra bindings if there are more - * than needed. If an app has allocated extra bindings, let it worry - * about it by unbinding those columns. - */ - - /* SQLBindCol(1..) ... SQLBindCol(10...) # got 10 bindings */ - /* SQLExecDirect(...) # returns 5 cols */ - /* SQLExecDirect(...) # returns 10 cols (now OK) */ - - MYLOG(OPENSEARCH_TRACE, "leaving %p\n", self->bindings); -} - -void reset_a_column_binding(ARDFields *self, int icol) { - BindInfoClass *bookmark; - - MYLOG(OPENSEARCH_TRACE, "entering ... self=%p, bindings_allocated=%d, icol=%d\n", - self, self->allocated, icol); - - if (icol > self->allocated) - return; - - /* use zero based col numbers from here out */ - if (0 == icol) { - if (bookmark = self->bookmark, bookmark != NULL) { - bookmark->buffer = NULL; - bookmark->used = bookmark->indicator = NULL; - } - } else { - icol--; - - /* we have to unbind the column */ - self->bindings[icol].buflen = 0; - self->bindings[icol].buffer = NULL; - self->bindings[icol].used = self->bindings[icol].indicator = NULL; - self->bindings[icol].returntype = SQL_C_CHAR; - } -} - -void ARD_unbind_cols(ARDFields *self, BOOL freeall) { - Int2 lf; - - MYLOG(OPENSEARCH_ALL, "freeall=%d allocated=%d bindings=%p\n", freeall, - self->allocated, self->bindings); - for (lf = 1; lf <= self->allocated; lf++) - reset_a_column_binding(self, lf); - if (freeall) { - if (self->bindings) - free(self->bindings); - self->bindings = NULL; - self->allocated = 0; - } -} -void GDATA_unbind_cols(GetDataInfo *self, BOOL freeall) { - Int2 lf; - - MYLOG(OPENSEARCH_ALL, "freeall=%d allocated=%d gdata=%p\n", freeall, - self->allocated, self->gdata); - if (self->fdata.ttlbuf) { - free(self->fdata.ttlbuf); - self->fdata.ttlbuf = NULL; - } - self->fdata.ttlbuflen = self->fdata.ttlbufused = 0; - GETDATA_RESET(self->fdata); - for (lf = 1; lf <= self->allocated; lf++) - reset_a_getdata_info(self, lf); - if (freeall) { - if (self->gdata) - free(self->gdata); - self->gdata = NULL; - self->allocated = 0; - } -} - -void GetDataInfoInitialize(GetDataInfo *gdata_info) { - GETDATA_RESET(gdata_info->fdata); - gdata_info->fdata.ttlbuf = NULL; - gdata_info->fdata.ttlbuflen = gdata_info->fdata.ttlbufused = 0; - gdata_info->allocated = 0; - gdata_info->gdata = NULL; -} -static GetDataClass *create_empty_gdata(int num_columns) { - GetDataClass *new_gdata; - int i; - - new_gdata = (GetDataClass *)malloc(num_columns * sizeof(GetDataClass)); - if (!new_gdata) - return NULL; - for (i = 0; i < num_columns; i++) { - GETDATA_RESET(new_gdata[i]); - new_gdata[i].ttlbuf = NULL; - new_gdata[i].ttlbuflen = 0; - new_gdata[i].ttlbufused = 0; - } - - return new_gdata; -} -void extend_getdata_info(GetDataInfo *self, SQLSMALLINT num_columns, - BOOL shrink) { - GetDataClass 
*new_gdata; - - MYLOG(OPENSEARCH_TRACE, - "entering ... self=%p, gdata_allocated=%d, num_columns=%d\n", self, - self->allocated, num_columns); - - /* - * if we have too few, allocate room for more, and copy the old - * entries into the new structure - */ - if (self->allocated < num_columns) { - new_gdata = create_empty_gdata(num_columns); - if (!new_gdata) { - MYLOG(OPENSEARCH_DEBUG, "unable to create %d new gdata from %d old gdata\n", - num_columns, self->allocated); - - if (self->gdata) { - free(self->gdata); - self->gdata = NULL; - } - self->allocated = 0; - return; - } - if (self->gdata) { - SQLSMALLINT i; - - for (i = 0; i < self->allocated; i++) - new_gdata[i] = self->gdata[i]; - free(self->gdata); - } - self->gdata = new_gdata; - self->allocated = num_columns; - } else if (shrink && self->allocated > num_columns) { - int i; - - for (i = self->allocated; i > num_columns; i--) - reset_a_getdata_info(self, i); - self->allocated = num_columns; - if (0 == num_columns) { - free(self->gdata); - self->gdata = NULL; - } - } - - /* - * There is no reason to zero out extra gdata if there are more - * than needed. If an app has allocated extra gdata, let it worry - * about it by unbinding those columns. - */ - - MYLOG(OPENSEARCH_TRACE, "leaving %p\n", self->gdata); -} -void reset_a_getdata_info(GetDataInfo *gdata_info, int icol) { - if (icol < 1 || icol > gdata_info->allocated) - return; - icol--; - if (gdata_info->gdata[icol].ttlbuf) { - free(gdata_info->gdata[icol].ttlbuf); - gdata_info->gdata[icol].ttlbuf = NULL; - } - gdata_info->gdata[icol].ttlbuflen = gdata_info->gdata[icol].ttlbufused = 0; - GETDATA_RESET(gdata_info->gdata[icol]); -} - -void PutDataInfoInitialize(PutDataInfo *pdata_info) { - pdata_info->allocated = 0; - pdata_info->pdata = NULL; -} -void extend_putdata_info(PutDataInfo *self, SQLSMALLINT num_params, - BOOL shrink) { - PutDataClass *new_pdata; - - MYLOG(OPENSEARCH_TRACE, - "entering ... self=%p, parameters_allocated=%d, num_params=%d\n", - self, self->allocated, num_params); - - /* - * if we have too few, allocate room for more, and copy the old - * entries into the new structure - */ - if (self->allocated < num_params) { - if (self->allocated <= 0 && self->pdata) { - MYLOG(OPENSEARCH_DEBUG, "??? 
pdata is not null while allocated == 0\n"); - self->pdata = NULL; - } - new_pdata = (PutDataClass *)realloc(self->pdata, - sizeof(PutDataClass) * num_params); - if (!new_pdata) { - MYLOG(OPENSEARCH_DEBUG, "unable to create %d new pdata from %d old pdata\n", - num_params, self->allocated); - - self->pdata = NULL; - self->allocated = 0; - return; - } - memset(&new_pdata[self->allocated], 0, - sizeof(PutDataClass) * (num_params - self->allocated)); - - self->pdata = new_pdata; - self->allocated = num_params; - } else if (shrink && self->allocated > num_params) { - int i; - - for (i = self->allocated; i > num_params; i--) - reset_a_putdata_info(self, i); - self->allocated = num_params; - if (0 == num_params) { - free(self->pdata); - self->pdata = NULL; - } - } - - MYLOG(OPENSEARCH_TRACE, "leaving %p\n", self->pdata); -} -void reset_a_putdata_info(PutDataInfo *pdata_info, int ipar) { - if (ipar < 1 || ipar > pdata_info->allocated) - return; - ipar--; - if (pdata_info->pdata[ipar].EXEC_used) { - free(pdata_info->pdata[ipar].EXEC_used); - pdata_info->pdata[ipar].EXEC_used = NULL; - } - if (pdata_info->pdata[ipar].EXEC_buffer) { - free(pdata_info->pdata[ipar].EXEC_buffer); - pdata_info->pdata[ipar].EXEC_buffer = NULL; - } - pdata_info->pdata[ipar].lobj_oid = 0; -} - -void SC_param_next(const StatementClass *stmt, int *param_number, - ParameterInfoClass **apara, ParameterImplClass **ipara) { - int next; - IPDFields *ipdopts = SC_get_IPDF(stmt); - - if (*param_number < 0) - next = stmt->proc_return; - else - next = *param_number + 1; - if (stmt->discard_output_params) { - for (; next < ipdopts->allocated - && SQL_PARAM_OUTPUT == ipdopts->parameters[next].paramType; - next++) - ; - } - *param_number = next; - if (ipara) { - if (next < ipdopts->allocated) - *ipara = ipdopts->parameters + next; - else - *ipara = NULL; - } - if (apara) { - APDFields *apdopts = SC_get_APDF(stmt); - if (next < apdopts->allocated) - *apara = apdopts->parameters + next; - else - *apara = NULL; - } -} diff --git a/sql-odbc/src/sqlodbc/bind.h b/sql-odbc/src/sqlodbc/bind.h deleted file mode 100644 index adcdb66999..0000000000 --- a/sql-odbc/src/sqlodbc/bind.h +++ /dev/null @@ -1,132 +0,0 @@ -#ifndef __BIND_H__ -#define __BIND_H__ - -#include "descriptor.h" -#include "opensearch_odbc.h" - -// C Interface -#ifdef __cplusplus -extern "C" { -#endif -/* - * BindInfoClass -- stores information about a bound column - */ -struct BindInfoClass_ { - SQLLEN buflen; /* size of buffer */ - char *buffer; /* pointer to the buffer */ - SQLLEN *used; /* used space in the buffer (for strings - * not counting the '\0') */ - SQLLEN *indicator; /* indicator == used in many cases ? */ - SQLSMALLINT returntype; /* kind of conversion to be applied when - * returning (SQL_C_DEFAULT, - * SQL_C_CHAR... 
etc) */ - SQLSMALLINT precision; /* the precision for numeric or timestamp type */ - SQLSMALLINT scale; /* the scale for numeric type */ - /* area for work variables */ - char dummy_data; /* currently not used */ -}; - -/* struct for SQLGetData */ -typedef struct { - /* for BLOBs which don't hold the data */ - struct GetBlobDataClass { - Int8 data_left64; /* amount of large object data - left to read before conversion */ - } blob; - /* for non-BLOBs which hold the data in ttlbuf after conversion */ - char *ttlbuf; /* to save the large result */ - SQLLEN ttlbuflen; /* the buffer length */ - SQLLEN ttlbufused; /* used length of the buffer */ - SQLLEN data_left; /* amount of data left to read */ -} GetDataClass; -#define GETDATA_RESET(gdc) ((gdc).blob.data_left64 = (gdc).data_left = -1) - -/* - * ParameterInfoClass -- stores information about a bound parameter - */ -struct ParameterInfoClass_ { - SQLLEN buflen; - char *buffer; - SQLLEN *used; - SQLLEN *indicator; /* indicator == used in many cases ? */ - SQLSMALLINT CType; - SQLSMALLINT precision; /* the precision for numeric or timestamp type */ - SQLSMALLINT scale; /* the scale for numeric type */ - /* area for work variables */ - char data_at_exec; -}; - -typedef struct { - SQLLEN *EXEC_used; /* amount of data */ - char *EXEC_buffer; /* the data */ - OID lobj_oid; -} PutDataClass; - -/* - * ParameterImplClass -- stores implementation information about a parameter - */ -struct ParameterImplClass_ { - opensearchNAME paramName; /* this is unavailable even in 8.1 */ - SQLSMALLINT paramType; - SQLSMALLINT SQLType; - OID OpenSearchType; - SQLULEN column_size; - SQLSMALLINT decimal_digits; - SQLSMALLINT precision; /* the precision for numeric or timestamp type */ - SQLSMALLINT scale; /* the scale for numeric type */ -}; - -typedef struct { - GetDataClass fdata; - SQLSMALLINT allocated; - GetDataClass *gdata; -} GetDataInfo; -typedef struct { - SQLSMALLINT allocated; - PutDataClass *pdata; -} PutDataInfo; - -#define PARSE_PARAM_CAST FALSE -#define EXEC_PARAM_CAST TRUE -#define SIMPLE_PARAM_CAST TRUE - -#define CALC_BOOKMARK_ADDR(book, offset, bind_size, index) \ - (book->buffer + offset \ - + (bind_size > 0 \ - ? bind_size \ - : (SQL_C_VARBOOKMARK == book->returntype ? book->buflen \ - : sizeof(UInt4))) \ - * index) - -/* Macros to handle opensearch type of parameters */ -#define PIC_get_opensearch_type(pari) ((pari).OpenSearchType) -#define PIC_set_opensearch_type(pari, type) ((pari).OpenSearchType = (type)) -#define PIC_dsp_opensearch_type(conn, pari) \ - ((pari).OpenSearchType ? 
(pari).OpenSearchType : sqltype_to_opensearchtype(conn, (pari).SQLType)) - -void extend_column_bindings(ARDFields *opts, SQLSMALLINT num_columns); -void reset_a_column_binding(ARDFields *opts, int icol); -void extend_parameter_bindings(APDFields *opts, SQLSMALLINT num_params); -void extend_iparameter_bindings(IPDFields *opts, SQLSMALLINT num_params); -void reset_a_parameter_binding(APDFields *opts, int ipar); -void reset_a_iparameter_binding(IPDFields *opts, int ipar); -int CountParameters(const StatementClass *stmt, Int2 *inCount, Int2 *ioCount, - Int2 *outputCount); -void GetDataInfoInitialize(GetDataInfo *gdata); -void extend_getdata_info(GetDataInfo *gdata, SQLSMALLINT num_columns, - BOOL shrink); -void reset_a_getdata_info(GetDataInfo *gdata, int icol); -void GDATA_unbind_cols(GetDataInfo *gdata, BOOL freeall); -void PutDataInfoInitialize(PutDataInfo *pdata); -void extend_putdata_info(PutDataInfo *pdata, SQLSMALLINT num_params, - BOOL shrink); -void reset_a_putdata_info(PutDataInfo *pdata, int ipar); -void PDATA_free_params(PutDataInfo *pdata, char option); -void SC_param_next(const StatementClass *, int *param_number, - ParameterInfoClass **, ParameterImplClass **); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sql-odbc/src/sqlodbc/catfunc.h b/sql-odbc/src/sqlodbc/catfunc.h deleted file mode 100644 index 8378454256..0000000000 --- a/sql-odbc/src/sqlodbc/catfunc.h +++ /dev/null @@ -1,220 +0,0 @@ -#ifndef __CATFUNC_H__ -#define __CATFUNC_H__ - -#include "opensearch_odbc.h" - -/* SQLTables field position */ -enum { - TABLES_CATALOG_NAME = 0, - TABLES_SCHEMA_NAME, - TABLES_TABLE_NAME, - TABLES_TABLE_TYPE, - TABLES_REMARKS, - NUM_OF_TABLES_FIELDS -}; - -/* SQLColumns field position */ -enum { - COLUMNS_CATALOG_NAME = 0, - COLUMNS_SCHEMA_NAME, - COLUMNS_TABLE_NAME, - COLUMNS_COLUMN_NAME, - COLUMNS_DATA_TYPE, - COLUMNS_TYPE_NAME, - COLUMNS_PRECISION, - COLUMNS_LENGTH, - COLUMNS_SCALE, - COLUMNS_RADIX, - COLUMNS_NULLABLE, - COLUMNS_REMARKS, - COLUMNS_COLUMN_DEF /* ODBC 3.0 but always use it */ - , - COLUMNS_SQL_DATA_TYPE, - COLUMNS_SQL_DATETIME_SUB, - COLUMNS_CHAR_OCTET_LENGTH, - COLUMNS_ORDINAL_POSITION, - COLUMNS_IS_NULLABLE, - COLUMNS_DISPLAY_SIZE, - COLUMNS_FIELD_TYPE, - COLUMNS_AUTO_INCREMENT, - COLUMNS_PHYSICAL_NUMBER, - COLUMNS_TABLE_OID, - COLUMNS_BASE_TYPEID, - COLUMNS_ATTTYPMOD, - COLUMNS_TABLE_INFO, - NUM_OF_COLUMNS_FIELDS -}; -/* SQLPrimaryKeys field position */ -enum { - PKS_TABLE_CAT = 0, - PKS_TABLE_SCHEM, - PKS_TABLE_NAME, - PKS_COLUMN_NAME, - PKS_KEY_SQ, - PKS_PK_NAME, - NUM_OF_PKS_FIELDS -}; -/* SQLForeignKeys field position */ -enum { - FKS_PKTABLE_CAT = 0, - FKS_PKTABLE_SCHEM, - FKS_PKTABLE_NAME, - FKS_PKCOLUMN_NAME, - FKS_FKTABLE_CAT, - FKS_FKTABLE_SCHEM, - FKS_FKTABLE_NAME, - FKS_FKCOLUMN_NAME, - FKS_KEY_SEQ, - FKS_UPDATE_RULE, - FKS_DELETE_RULE, - FKS_FK_NAME, - FKS_PK_NAME, - FKS_DEFERRABILITY, - FKS_TRIGGER_NAME, - NUM_OF_FKS_FIELDS -}; -/* SQLColAttribute */ -enum { - COLATTR_DESC_COUNT = -1, - COLATTR_DESC_AUTO_UNIQUE_VALUE = 0, - COLATTR_DESC_BASE_COLUMN_NAME, - COLATTR_DESC_BASE_TABLE_NAME, - COLATTR_DESC_CASE_SENSITIVE, - COLATTR_DESC_CATALOG_NAME, - COLATTR_DESC_CONCISE_TYPE, - COLATTR_DESC_DISPLAY_SIZE, - COLATTR_DESC_FIXED_PREC_SCALE, - COLATTR_DESC_LABEL, - COLATTR_DESC_LENGTH, - COLATTR_DESC_LITERAL_PREFIX, - COLATTR_DESC_LITERAL_SUFFIX, - COLATTR_DESC_LOCAL_TYPE_NAME, - COLATTR_DESC_NAME, - COLATTR_DESC_NULLABLE, - COLATTR_DESC_NUM_PREX_RADIX, - COLATTR_DESC_OCTET_LENGTH, - COLATTR_DESC_PRECISION, - COLATTR_DESC_SCALE, - COLATTR_DESC_SCHEMA_NAME, 
- COLATTR_DESC_SEARCHABLE, - COLATTR_DESC_TABLE_NAME, - COLATTR_DESC_TYPE, - COLATTR_DESC_TYPE_NAME, - COLATTR_DESC_UNNAMED, - COLATTR_DESC_UNSIGNED, - COLATTR_DESC_UPDATABLE -}; - -/* SQLStatistics field position */ -enum { - STATS_CATALOG_NAME = 0, - STATS_SCHEMA_NAME, - STATS_TABLE_NAME, - STATS_NON_UNIQUE, - STATS_INDEX_QUALIFIER, - STATS_INDEX_NAME, - STATS_TYPE, - STATS_SEQ_IN_INDEX, - STATS_COLUMN_NAME, - STATS_COLLATION, - STATS_CARDINALITY, - STATS_PAGES, - STATS_FILTER_CONDITION, - NUM_OF_STATS_FIELDS -}; - -/* SQLProcedure field position */ -enum { - PRO_PROCEDURE_CAT = 0, - PRO_PROCEDURE_SCHEM, - PRO_PROCEDURE_NAME, - PRO_NUM_INPUT_PARAMS, - PRO_NUM_OUTPUT_PARAMS, - PRO_RESULT_SETS, - PRO_REMARKS, - PRO_PROCEDURE_TYPE, - NUM_OF_PRO_FIELDS -}; - -/* SQLProcedureColumns field position */ -enum { - PROCOLS_PROCEDURE_CAT = 0, - PROCOLS_PROCEDURE_SCHEM, - PROCOLS_PROCEDURE_NAME, - PROCOLS_COLUMN_NAME, - PROCOLS_COLUMN_TYPE, - PROCOLS_DATA_TYPE, - PROCOLS_TYPE_NAME, - PROCOLS_COLUMN_SIZE, - PROCOLS_BUFFER_LENGTH, - PROCOLS_DECIMAL_DIGITS, - PROCOLS_NUM_PREC_RADIX, - PROCOLS_NULLABLE, - PROCOLS_REMARKS, - PROCOLS_COLUMN_DEF, - PROCOLS_SQL_DATA_TYPE, - PROCOLS_SQL_DATETIME_SUB, - PROCOLS_CHAR_OCTET_LENGTH, - PROCOLS_ORDINAL_POSITION, - PROCOLS_IS_NULLABLE, - NUM_OF_PROCOLS_FIELDS -}; -/* SQLGetTypeInfo field position */ -enum { - GETTYPE_TYPE_NAME = 0, - GETTYPE_DATA_TYPE, - GETTYPE_COLUMN_SIZE, - GETTYPE_LITERAL_PREFIX, - GETTYPE_LITERAL_SUFFIX, - GETTYPE_CREATE_PARAMS, - GETTYPE_NULLABLE, - GETTYPE_CASE_SENSITIVE, - GETTYPE_SEARCHABLE, - GETTYPE_UNSIGNED_ATTRIBUTE, - GETTYPE_FIXED_PREC_SCALE, - GETTYPE_AUTO_UNIQUE_VALUE, - GETTYPE_LOCAL_TYPE_NAME, - GETTYPE_MINIMUM_SCALE, - GETTYPE_MAXIMUM_SCALE, - GETTYPE_SQL_DATA_TYPE, - GETTYPE_SQL_DATETIME_SUB, - GETTYPE_NUM_PREC_RADIX, - GETTYPE_INTERVAL_PRECISION, - NUM_OF_GETTYPE_FIELDS -}; -/* SQLSpecialColumns field position */ -enum { - SPECOLS_SCOPE = 0, - SPECOLS_COLUMN_NAME, - SPECOLS_DATA_TYPE, - SPECOLS_TYPE_NAME, - SPECOLS_COLUMN_SIZE, - SPECOLS_BUFFER_LENGTH, - SPECOLS_DECIMAL_DIGITS, - SPECOLS_PSEUDO_COLUMN, - NUM_OF_SPECOLS_FIELDS -}; -/* SQLColumnPrivileges field position */ -enum { - COLPRIV_TABLE_CAT = 0, - COLPRIV_TABLE_SCHEM, - COLPRIV_TABLE_NAME, - COLPRIV_COLUMN_NAME, - COLPRIV_GRANTOR, - COLPRIV_GRANTEE, - COLPRIV_PRIVILEGE, - COLPRIV_IS_GRANTABLE, - NUM_OF_COLPRIV_FIELDS -}; -/* SQLTablePrivileges field position */ -enum { - TABPRIV_TABLE_CAT = 0, - TABPRIV_TABLE_SCHEM, - TABPRIV_TABLE_NAME, - TABPRIV_GRANTOR, - TABPRIV_GRANTEE, - TABPRIV_PRIVILEGE, - TABPRIV_IS_GRANTABLE, - NUM_OF_TABPRIV_FIELDS -}; -#endif /* __CARFUNC_H__ */ diff --git a/sql-odbc/src/sqlodbc/columninfo.c b/sql-odbc/src/sqlodbc/columninfo.c deleted file mode 100644 index 8c31901b2d..0000000000 --- a/sql-odbc/src/sqlodbc/columninfo.c +++ /dev/null @@ -1,73 +0,0 @@ -#include "columninfo.h" - -#include -#include - -#include "opensearch_types.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" - -ColumnInfoClass *CI_Constructor(void) { - ColumnInfoClass *rv; - - rv = (ColumnInfoClass *)malloc(sizeof(ColumnInfoClass)); - - if (rv) { - rv->refcount = 0; - rv->num_fields = 0; - rv->coli_array = NULL; - } - - return rv; -} - -void CI_Destructor(ColumnInfoClass *self) { - CI_free_memory(self); - - free(self); -} - -void CI_free_memory(ColumnInfoClass *self) { - register Int2 lf; - int num_fields = self->num_fields; - - /* Safe to call even if null */ - self->num_fields = 0; - if (self->coli_array) { - for (lf = 0; lf < num_fields; lf++) { - if 
(self->coli_array[lf].name) { - free(self->coli_array[lf].name); - self->coli_array[lf].name = NULL; - } - } - free(self->coli_array); - self->coli_array = NULL; - } -} - -void CI_set_num_fields(ColumnInfoClass *self, SQLSMALLINT new_num_fields) { - CI_free_memory(self); /* always safe to call */ - - self->num_fields = new_num_fields; - - self->coli_array = - (struct srvr_info *)calloc(sizeof(struct srvr_info), self->num_fields); -} - -void CI_set_field_info(ColumnInfoClass *self, int field_num, - const char *new_name, OID new_adtid, Int2 new_adtsize, - Int4 new_atttypmod, OID new_relid, OID new_attid) { - /* check bounds */ - if ((field_num < 0) || (field_num >= self->num_fields)) - return; - - /* store the info */ - self->coli_array[field_num].name = strdup(new_name); - self->coli_array[field_num].adtid = new_adtid; - self->coli_array[field_num].adtsize = new_adtsize; - self->coli_array[field_num].atttypmod = new_atttypmod; - - self->coli_array[field_num].display_size = OPENSEARCH_ADT_UNSET; - self->coli_array[field_num].relid = new_relid; - self->coli_array[field_num].attid = (short)new_attid; -} diff --git a/sql-odbc/src/sqlodbc/columninfo.h b/sql-odbc/src/sqlodbc/columninfo.h deleted file mode 100644 index 01afe68db4..0000000000 --- a/sql-odbc/src/sqlodbc/columninfo.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef __COLUMNINFO_H__ -#define __COLUMNINFO_H__ - -#include "opensearch_odbc.h" - -struct ColumnInfoClass_ { - UInt4 refcount; /* reference count. A ColumnInfo can be shared by - * several qresults. */ - Int2 num_fields; - struct srvr_info { - char *name; /* field name */ - OID adtid; /* type oid */ - Int2 adtsize; /* type size */ - Int4 display_size; /* the display size (longest row) */ - Int4 atttypmod; /* the length of bpchar/varchar */ - OID relid; /* the relation id */ - Int2 attid; /* the attribute number */ - } * coli_array; -}; - -#define CI_get_num_fields(self) (self->num_fields) -#define CI_get_oid(self, col) (self->coli_array[col].adtid) -#define CI_get_fieldname(self, col) (self->coli_array[col].name) -#define CI_get_fieldsize(self, col) (self->coli_array[col].adtsize) -#define CI_get_display_size(self, col) (self->coli_array[col].display_size) -#define CI_get_atttypmod(self, col) (self->coli_array[col].atttypmod) -#define CI_get_relid(self, col) (self->coli_array[col].relid) -#define CI_get_attid(self, col) (self->coli_array[col].attid) - -ColumnInfoClass *CI_Constructor(void); -void CI_Destructor(ColumnInfoClass *self); -void CI_free_memory(ColumnInfoClass *self); - -/* functions for setting up the fields from within the program, */ -/* without reading from a socket */ -void CI_set_num_fields(ColumnInfoClass *self, SQLSMALLINT new_num_fields); - -// Used in opensearch_parse_results.cpp -#ifdef __cplusplus -extern "C" { -#endif -void CI_set_field_info(ColumnInfoClass *self, int field_num, - const char *new_name, OID new_adtid, Int2 new_adtsize, - Int4 atttypmod, OID new_relid, OID new_attid); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sql-odbc/src/sqlodbc/connection.c b/sql-odbc/src/sqlodbc/connection.c deleted file mode 100644 index 597ab12b4d..0000000000 --- a/sql-odbc/src/sqlodbc/connection.c +++ /dev/null @@ -1,696 +0,0 @@ -/* TryEnterCritiaclSection needs the following #define */ -#ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0400 -#endif /* _WIN32_WINNT */ - -#include -#include -#include - -#include "opensearch_connection.h" -#include "misc.h" - -/* for htonl */ -#ifdef WIN32 -#include -#else -#include -#endif - -#include "dlg_specific.h" -#include 
"environ.h" -#include "loadlib.h" -#include "multibyte.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_helper.h" -#include "qresult.h" -#include "statement.h" -#ifndef WIN32 -#include -#endif -#define SAFE_STR(s) (NULL != (s) ? (s) : "(null)") - -#define ELASTIC_MAXIMUM_ID_LEN SHRT_MAX // Max 16-bit signed int -#define ELASTIC_TRANSACTION_SUPPORT 0 // Not supported -#define STMT_INCREMENT \ - 16 /* how many statement holders to allocate \ \ - * at a time */ - -#define PROTOCOL3_OPTS_MAX 30 - -RETCODE SQL_API OPENSEARCHAPI_AllocConnect(HENV henv, HDBC *phdbc) { - EnvironmentClass *env = (EnvironmentClass *)henv; - ConnectionClass *conn; - CSTR func = "OPENSEARCHAPI_AllocConnect"; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - conn = CC_Constructor(); - MYLOG(OPENSEARCH_DEBUG, "**** henv = %p, conn = %p\n", henv, conn); - - if (!conn) { - env->errormsg = "Couldn't allocate memory for Connection object."; - env->errornumber = ENV_ALLOC_ERROR; - *phdbc = SQL_NULL_HDBC; - EN_log_error(func, "", env); - return SQL_ERROR; - } - - if (!EN_add_connection(env, conn)) { - env->errormsg = "Maximum number of connections exceeded."; - env->errornumber = ENV_ALLOC_ERROR; - CC_Destructor(conn); - *phdbc = SQL_NULL_HDBC; - EN_log_error(func, "", env); - return SQL_ERROR; - } - - if (phdbc) - *phdbc = (HDBC)conn; - - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_Connect(HDBC hdbc, const SQLCHAR *szDSN, - SQLSMALLINT cbDSN, const SQLCHAR *szUID, - SQLSMALLINT cbUID, const SQLCHAR *szAuthStr, - SQLSMALLINT cbAuthStr) { - ConnectionClass *conn = (ConnectionClass *)hdbc; - ConnInfo *ci; - CSTR func = "OPENSEARCHAPI_Connect"; - RETCODE ret = SQL_SUCCESS; - char fchar, *tmpstr; - - MYLOG(OPENSEARCH_TRACE, "entering..cbDSN=%hi.\n", cbDSN); - - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - ci = &conn->connInfo; - CC_conninfo_init(ci, INIT_GLOBALS); - - make_string(szDSN, cbDSN, ci->dsn, sizeof(ci->dsn)); - - /* get the values for the DSN from the registry */ - getDSNinfo(ci, NULL); - - logs_on_off(1, ci->drivers.loglevel, ci->drivers.loglevel); - /* initialize opensearch_version from connInfo.protocol */ - CC_initialize_opensearch_version(conn); - - /* - * override values from DSN info with UID and authStr(pwd) This only - * occurs if the values are actually there. - */ - fchar = ci->username[0]; /* save the first byte */ - make_string(szUID, cbUID, ci->username, sizeof(ci->username)); - if ('\0' == ci->username[0]) /* an empty string is specified */ - ci->username[0] = fchar; /* restore the original username */ - tmpstr = make_string(szAuthStr, cbAuthStr, NULL, 0); - if (tmpstr) { - if (tmpstr[0]) /* non-empty string is specified */ - STR_TO_NAME(ci->password, tmpstr); - free(tmpstr); - } - - MYLOG(OPENSEARCH_DEBUG, "conn = %p (DSN='%s', UID='%s', PWD='%s')\n", conn, ci->dsn, - ci->username, NAME_IS_VALID(ci->password) ? 
"xxxxx" : ""); - - if ((fchar = CC_connect(conn)) <= 0) { - /* Error messages are filled in */ - CC_log_error(func, "Error on CC_connect", conn); - ret = SQL_ERROR; - } - if (SQL_SUCCESS == ret && 2 == fchar) - ret = SQL_SUCCESS_WITH_INFO; - - MYLOG(OPENSEARCH_TRACE, "leaving..%d.\n", ret); - - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_BrowseConnect(HDBC hdbc, const SQLCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, - SQLCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut) { - UNUSED(szConnStrIn, cbConnStrIn, szConnStrOut, cbConnStrOutMax, - cbConnStrOutMax, pcbConnStrOut); - CSTR func = "OPENSEARCHAPI_BrowseConnect"; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, "Function not implemented", - func); - return SQL_ERROR; -} - -/* Drop any hstmts open on hdbc and disconnect from database */ -RETCODE SQL_API OPENSEARCHAPI_Disconnect(HDBC hdbc) { - ConnectionClass *conn = (ConnectionClass *)hdbc; - CSTR func = "OPENSEARCHAPI_Disconnect"; - RETCODE ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - if (conn->status == CONN_EXECUTING) { - // This should only be possible if transactions are supported, but they - // are not. Return an error regardless - CC_set_error(conn, CONN_IN_USE, "Connection is currently in use!", - func); - return SQL_ERROR; - } - - logs_on_off(-1, conn->connInfo.drivers.loglevel, - conn->connInfo.drivers.loglevel); - MYLOG(OPENSEARCH_DEBUG, "about to CC_cleanup\n"); - - /* Close the connection and free statements */ - ret = CC_cleanup(conn, FALSE); - - MYLOG(OPENSEARCH_DEBUG, "done CC_cleanup\n"); - MYLOG(OPENSEARCH_TRACE, "leaving...\n"); - - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_FreeConnect(HDBC hdbc) { - ConnectionClass *conn = (ConnectionClass *)hdbc; - CSTR func = "OPENSEARCHAPI_FreeConnect"; - EnvironmentClass *env; - - MYLOG(OPENSEARCH_TRACE, "entering...hdbc=%p\n", hdbc); - - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - /* Remove the connection from the environment */ - if (NULL != (env = CC_get_env(conn)) && !EN_remove_connection(env, conn)) { - // This should only be possible if transactions are supported, but they - // are not. 
Return an error regardless - CC_set_error(conn, CONN_IN_USE, "Connection is currently in use!", - func); - return SQL_ERROR; - } - - CC_Destructor(conn); - - MYLOG(OPENSEARCH_TRACE, "leaving...\n"); - - return SQL_SUCCESS; -} - -/* - * IMPLEMENTATION CONNECTION CLASS - */ - -static void reset_current_schema(ConnectionClass *self) { - if (self->current_schema) { - free(self->current_schema); - self->current_schema = NULL; - } - self->current_schema_valid = FALSE; -} - -static ConnectionClass *CC_alloc(void) { - return (ConnectionClass *)calloc(sizeof(ConnectionClass), 1); -} - -static void CC_lockinit(ConnectionClass *self) { - UNUSED(self); - INIT_CONNLOCK(self); - INIT_CONN_CS(self); -} - -static ConnectionClass *CC_initialize(ConnectionClass *rv, BOOL lockinit) { - size_t clear_size; - - clear_size = (char *)&(rv->cs) - (char *)rv; - - memset(rv, 0, clear_size); - rv->status = CONN_NOT_CONNECTED; - rv->transact_status = CONN_IN_AUTOCOMMIT; /* autocommit by default */ - rv->unnamed_prepared_stmt = NULL; - - rv->stmts = - (StatementClass **)malloc(sizeof(StatementClass *) * STMT_INCREMENT); - if (!rv->stmts) - goto cleanup; - memset(rv->stmts, 0, sizeof(StatementClass *) * STMT_INCREMENT); - - rv->num_stmts = STMT_INCREMENT; - rv->descs = - (DescriptorClass **)malloc(sizeof(DescriptorClass *) * STMT_INCREMENT); - if (!rv->descs) - goto cleanup; - memset(rv->descs, 0, sizeof(DescriptorClass *) * STMT_INCREMENT); - - rv->num_descs = STMT_INCREMENT; - - rv->lobj_type = OPENSEARCH_TYPE_LO_UNDEFINED; - if (isMsAccess()) - rv->ms_jet = 1; - rv->isolation = 0; // means initially unknown server's default isolation - rv->mb_maxbyte_per_char = 1; - rv->max_identifier_length = ELASTIC_MAXIMUM_ID_LEN; - rv->autocommit_public = SQL_AUTOCOMMIT_ON; - - /* Initialize statement options to defaults */ - /* Statements under this conn will inherit these options */ - - InitializeStatementOptions(&rv->stmtOptions); - InitializeARDFields(&rv->ardOptions); - InitializeAPDFields(&rv->apdOptions); -#ifdef _HANDLE_ENLIST_IN_DTC_ - rv->asdum = NULL; - rv->gTranInfo = 0; -#endif /* _HANDLE_ENLIST_IN_DTC_ */ - if (lockinit) - CC_lockinit(rv); - - return rv; - -cleanup: - CC_Destructor(rv); - return NULL; -} - -ConnectionClass *CC_Constructor() { - ConnectionClass *rv, *retrv = NULL; - - if (rv = CC_alloc(), NULL != rv) - retrv = CC_initialize(rv, TRUE); - return retrv; -} - -char CC_Destructor(ConnectionClass *self) { - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", self); - - if (self->status == CONN_EXECUTING) - return 0; - - CC_cleanup(self, FALSE); /* cleanup socket and statements */ - - MYLOG(OPENSEARCH_DEBUG, "after CC_Cleanup\n"); - - /* Free up statement holders */ - if (self->stmts) { - free(self->stmts); - self->stmts = NULL; - } - if (self->descs) { - free(self->descs); - self->descs = NULL; - } - MYLOG(OPENSEARCH_DEBUG, "after free statement holders\n"); - - NULL_THE_NAME(self->schemaIns); - NULL_THE_NAME(self->tableIns); - CC_conninfo_release(&self->connInfo); - if (self->__error_message) - free(self->__error_message); - DELETE_CONN_CS(self); - DELETE_CONNLOCK(self); - free(self); - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); - - return 1; -} - -void CC_clear_error(ConnectionClass *self) { - if (!self) - return; - CONNLOCK_ACQUIRE(self); - self->__error_number = 0; - if (self->__error_message) { - free(self->__error_message); - self->__error_message = NULL; - } - self->sqlstate[0] = '\0'; - CONNLOCK_RELEASE(self); -} - -/* This is called by SQLSetConnectOption etc also */ -BOOL CC_set_autocommit(ConnectionClass 
*self, BOOL on) { - BOOL currsts = CC_is_in_autocommit(self); - - if ((on && currsts) || (!on && !currsts)) - return on; - MYLOG(OPENSEARCH_DEBUG, " %d->%d\n", currsts, on); - if (on) - self->transact_status |= CONN_IN_AUTOCOMMIT; - else - self->transact_status &= ~CONN_IN_AUTOCOMMIT; - - return on; -} - -/* Clear cached table info */ -static void CC_clear_col_info(ConnectionClass *self, BOOL destroy) { - if (self->col_info) { - int i; - COL_INFO *coli; - - for (i = 0; i < self->ntables; i++) { - if (coli = self->col_info[i], NULL != coli) { - if (destroy || coli->refcnt == 0) { - free_col_info_contents(coli); - free(coli); - self->col_info[i] = NULL; - } else - coli->acc_time = 0; - } - } - self->ntables = 0; - if (destroy) { - free(self->col_info); - self->col_info = NULL; - self->coli_allocated = 0; - } - } -} - -/* This is called by SQLDisconnect also */ -RETCODE -CC_cleanup(ConnectionClass *self, BOOL keepCommunication) { - int i; - StatementClass *stmt; - DescriptorClass *desc; - RETCODE ret = SQL_SUCCESS; - CSTR func = "CC_cleanup"; - - if (self->status == CONN_EXECUTING) - return FALSE; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", self); - - ENTER_CONN_CS(self); - /* Cancel an ongoing transaction */ - /* We are always in the middle of a transaction, */ - /* even if we are in auto commit. */ - if (self->opensearchconn) { - QLOG(0, "LIBOPENSEARCH_disconnect: %p\n", self->opensearchconn); - LIBOPENSEARCH_disconnect(self->opensearchconn); - self->opensearchconn = NULL; - } else { - ret = SQL_ERROR; - CC_set_error(self, CC_not_connected(self), "Connection not open", func); - } - - MYLOG(OPENSEARCH_DEBUG, "after LIBOPENSEARCH_disconnect\n"); - - /* Free all the stmts on this connection */ - for (i = 0; i < self->num_stmts; i++) { - stmt = self->stmts[i]; - if (stmt) { - stmt->hdbc = NULL; /* prevent any more dbase interactions */ - - SC_Destructor(stmt); - - self->stmts[i] = NULL; - } - } - /* Free all the descs on this connection */ - for (i = 0; i < self->num_descs; i++) { - desc = self->descs[i]; - if (desc) { - DC_get_conn(desc) = NULL; /* prevent any more dbase interactions */ - DC_Destructor(desc); - free(desc); - self->descs[i] = NULL; - } - } - - /* Check for translation dll */ -#ifdef WIN32 - if (!keepCommunication && self->translation_handle) { - FreeLibrary(self->translation_handle); - self->translation_handle = NULL; - } -#endif - - if (!keepCommunication) { - self->status = CONN_NOT_CONNECTED; - self->transact_status = CONN_IN_AUTOCOMMIT; - self->unnamed_prepared_stmt = NULL; - } - if (!keepCommunication) { - CC_conninfo_init(&(self->connInfo), CLEANUP_FOR_REUSE); - if (self->original_client_encoding) { - free(self->original_client_encoding); - self->original_client_encoding = NULL; - } - if (self->locale_encoding) { - free(self->locale_encoding); - self->locale_encoding = NULL; - } - if (self->server_encoding) { - free(self->server_encoding); - self->server_encoding = NULL; - } - reset_current_schema(self); - } - /* Free cached table info */ - CC_clear_col_info(self, TRUE); - if (self->num_discardp > 0 && self->discardp) { - for (i = 0; i < self->num_discardp; i++) - free(self->discardp[i]); - self->num_discardp = 0; - } - if (self->discardp) { - free(self->discardp); - self->discardp = NULL; - } - - LEAVE_CONN_CS(self); - MYLOG(OPENSEARCH_TRACE, "leaving\n"); - return ret; -} - -#ifndef OPENSEARCH_DIAG_SEVERITY_NONLOCALIZED -#define OPENSEARCH_DIAG_SEVERITY_NONLOCALIZED 'V' -#endif - -#define TRANSACTION_ISOLATION "transaction_isolation" -#define ISOLATION_SHOW_QUERY 
"show " TRANSACTION_ISOLATION - -char CC_add_statement(ConnectionClass *self, StatementClass *stmt) { - int i; - char ret = TRUE; - - MYLOG(OPENSEARCH_DEBUG, "self=%p, stmt=%p\n", self, stmt); - - CONNLOCK_ACQUIRE(self); - for (i = 0; i < self->num_stmts; i++) { - if (!self->stmts[i]) { - stmt->hdbc = self; - self->stmts[i] = stmt; - break; - } - } - - if (i >= self->num_stmts) /* no more room -- allocate more memory */ - { - StatementClass **newstmts; - Int2 new_num_stmts; - - new_num_stmts = STMT_INCREMENT + self->num_stmts; - - if (new_num_stmts > 0) - newstmts = (StatementClass **)realloc( - self->stmts, sizeof(StatementClass *) * new_num_stmts); - else - newstmts = NULL; /* num_stmts overflowed */ - if (!newstmts) - ret = FALSE; - else { - self->stmts = newstmts; - memset(&self->stmts[self->num_stmts], 0, - sizeof(StatementClass *) * STMT_INCREMENT); - - stmt->hdbc = self; - self->stmts[self->num_stmts] = stmt; - - self->num_stmts = new_num_stmts; - } - } - CONNLOCK_RELEASE(self); - - return ret; -} - -static void CC_set_error_statements(ConnectionClass *self) { - int i; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", self); - - for (i = 0; i < self->num_stmts; i++) { - if (NULL != self->stmts[i]) - SC_ref_CC_error(self->stmts[i]); - } -} - -char CC_remove_statement(ConnectionClass *self, StatementClass *stmt) { - int i; - char ret = FALSE; - - CONNLOCK_ACQUIRE(self); - for (i = 0; i < self->num_stmts; i++) { - if (self->stmts[i] == stmt && stmt->status != STMT_EXECUTING) { - self->stmts[i] = NULL; - ret = TRUE; - break; - } - } - CONNLOCK_RELEASE(self); - - return ret; -} - -char CC_get_escape(const ConnectionClass *self) { - UNUSED(self); - return ESCAPE_IN_LITERAL; -} - -int CC_get_max_idlen(ConnectionClass *self) { - UNUSED(self); - return self->max_identifier_length; -} - -SQLUINTEGER CC_get_isolation(ConnectionClass *self) { - UNUSED(self); - return ELASTIC_TRANSACTION_SUPPORT; -} - -void CC_set_error(ConnectionClass *self, int number, const char *message, - const char *func) { - CONNLOCK_ACQUIRE(self); - if (self->__error_message) - free(self->__error_message); - self->__error_number = number; - self->__error_message = message ? strdup(message) : NULL; - if (0 != number) - CC_set_error_statements(self); - if (func && number != 0) - CC_log_error(func, "", self); - CONNLOCK_RELEASE(self); -} - -void CC_set_errormsg(ConnectionClass *self, const char *message) { - CONNLOCK_ACQUIRE(self); - if (self->__error_message) - free(self->__error_message); - self->__error_message = message ? strdup(message) : NULL; - CONNLOCK_RELEASE(self); -} - -int CC_get_error(ConnectionClass *self, int *number, char **message) { - int rv; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - CONNLOCK_ACQUIRE(self); - - if (CC_get_errornumber(self)) { - *number = CC_get_errornumber(self); - *message = CC_get_errormsg(self); - } - rv = (CC_get_errornumber(self) != 0); - - CONNLOCK_RELEASE(self); - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); - - return rv; -} -void CC_log_error(const char *func, const char *desc, - const ConnectionClass *self) { -#define NULLCHECK(a) (a ? 
a : "(NULL)") - - if (self) { - MYLOG(OPENSEARCH_ERROR, - "CONN ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", func, - desc, self->__error_number, NULLCHECK(self->__error_message)); - MYLOG(OPENSEARCH_ERROR, - " " - "------------------------------------------------------------\n"); - MYLOG(OPENSEARCH_ERROR, - " henv=%p, conn=%p, status=%u, num_stmts=%d\n", - self->henv, self, self->status, self->num_stmts); - MYLOG(OPENSEARCH_ERROR, " opensearchconn=%p, stmts=%p, lobj_type=%d\n", - self->opensearchconn, self->stmts, self->lobj_type); - } else { - MYLOG(OPENSEARCH_ERROR, "INVALID CONNECTION HANDLE ERROR: func=%s, desc='%s'\n", - func, desc); - } -} - -const char *CurrCat(const ConnectionClass *conn) { - UNUSED(conn); - return NULL; -} - -const char *CurrCatString(const ConnectionClass *conn) { - const char *cat = CurrCat(conn); - - if (!cat) - cat = NULL_STRING; - return cat; -} - -/*------ - * Create a null terminated lower-case string if the - * original string contains upper-case characters. - * The SQL_NTS length is considered. - *------ - */ -SQLCHAR *make_lstring_ifneeded(ConnectionClass *conn, const SQLCHAR *s, - ssize_t len, BOOL ifallupper) { - ssize_t length = len; - char *str = NULL; - const char *ccs = (const char *)s; - - if (s && (len > 0 || (len == SQL_NTS && (length = strlen(ccs)) > 0))) { - int i; - int tchar; - encoded_str encstr; - - make_encoded_str(&encstr, conn, ccs); - for (i = 0; i < length; i++) { - tchar = encoded_nextchar(&encstr); - if (MBCS_NON_ASCII(encstr)) - continue; - if (ifallupper && islower(tchar)) { - if (str) { - free(str); - str = NULL; - } - break; - } - if (tolower(tchar) != tchar) { - if (!str) { - str = malloc(length + 1); - if (!str) - return NULL; - memcpy(str, s, length); - str[length] = '\0'; - } - str[i] = (char)tolower(tchar); - } - } - } - - return (SQLCHAR *)str; -} diff --git a/sql-odbc/src/sqlodbc/convert.c b/sql-odbc/src/sqlodbc/convert.c deleted file mode 100644 index 8cc686aaed..0000000000 --- a/sql-odbc/src/sqlodbc/convert.c +++ /dev/null @@ -1,2282 +0,0 @@ -#include "convert.h" - -#include "misc.h" -#include "unicode_support.h" -#ifdef WIN32 -#include -#define HAVE_LOCALE_H -#endif /* WIN32 */ - -#include -#include -#include -#include - -#include "multibyte.h" -#ifdef HAVE_LOCALE_H -#include -#endif -#include -#include -#include - -#include "bind.h" -#include "catfunc.h" -#include "opensearch_types.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "qresult.h" -#include "statement.h" - -CSTR NAN_STRING = "NaN"; -CSTR INFINITY_STRING = "Infinity"; -CSTR MINFINITY_STRING = "-Infinity"; - -#if defined(WIN32) || defined(__CYGWIN__) -#define TIMEZONE_GLOBAL _timezone -#define TZNAME_GLOBAL _tzname -#define DAYLIGHT_GLOBAL _daylight -#elif defined(HAVE_INT_TIMEZONE) -#define TIMEZONE_GLOBAL timezone -#define TZNAME_GLOBAL tzname -#define DAYLIGHT_GLOBAL daylight -#endif - -typedef struct { - int infinity; - int m; - int d; - int y; - int hh; - int mm; - int ss; - int fr; -} SIMPLE_TIME; - -static BOOL convert_money(const char *s, char *sout, size_t soutmax); -size_t convert_linefeeds(const char *s, char *dst, size_t max, BOOL convlf, - BOOL *changed); -static size_t convert_from_opensearchbinary(const char *value, char *rgbValue, - SQLLEN cbValueMax); -static int convert_lo(StatementClass *stmt, const void *value, - SQLSMALLINT fCType, PTR rgbValue, SQLLEN cbValueMax, - SQLLEN *pcbValue); -static int conv_from_octal(const char *s); -static SQLLEN opensearch_bin2hex(const char *src, char *dst, SQLLEN 
length); -#ifdef UNICODE_SUPPORT -static SQLLEN es_bin2whex(const char *src, SQLWCHAR *dst, SQLLEN length); -#endif /* UNICODE_SUPPORT */ - -/*--------- - * A Guide for date/time/timestamp conversions - * - * field_type fCType Output - * ---------- ------ ---------- - * OPENSEARCH_TYPE_DATE SQL_C_DEFAULT SQL_C_DATE - * OPENSEARCH_TYPE_DATE SQL_C_DATE SQL_C_DATE - * OPENSEARCH_TYPE_DATE SQL_C_TIMESTAMP SQL_C_TIMESTAMP (time = 0 - *(midnight)) OPENSEARCH_TYPE_TIME SQL_C_DEFAULT SQL_C_TIME OPENSEARCH_TYPE_TIME - *SQL_C_TIME SQL_C_TIME - * OPENSEARCH_TYPE_TIME SQL_C_TIMESTAMP SQL_C_TIMESTAMP (date = - *current date) OPENSEARCH_TYPE_ABSTIME SQL_C_DEFAULT SQL_C_TIMESTAMP - *OPENSEARCH_TYPE_ABSTIME SQL_C_DATE SQL_C_DATE (time is truncated) - *OPENSEARCH_TYPE_ABSTIME SQL_C_TIME SQL_C_TIME (date is truncated) - *OPENSEARCH_TYPE_ABSTIME SQL_C_TIMESTAMP SQL_C_TIMESTAMP - *--------- - */ - -/* - * Macros for unsigned long handling. - */ -#ifdef WIN32 -#define ATOI32U(val) strtoul(val, NULL, 10) -#elif defined(HAVE_STRTOUL) -#define ATOI32U(val) strtoul(val, NULL, 10) -#else /* HAVE_STRTOUL */ -#define ATOI32U atol -#endif /* WIN32 */ - -/* - * Macros for BIGINT handling. - */ -#ifdef ODBCINT64 -#ifdef WIN32 -#define ATOI64(val) _strtoi64(val, NULL, 10) -#define ATOI64U(val) _strtoui64(val, NULL, 10) -#elif (SIZEOF_LONG == 8) -#define ATOI64(val) strtol(val, NULL, 10) -#define ATOI64U(val) strtoul(val, NULL, 10) -#else -#if defined(HAVE_STRTOLL) -#define ATOI64(val) strtoll(val, NULL, 10) -#define ATOI64U(val) strtoull(val, NULL, 10) -#else -static ODBCINT64 ATOI64(const char *val) { - ODBCINT64 ll; - sscanf(val, "%lld", &ll); - return ll; -} -static unsigned ODBCINT64 ATOI64U(const char *val) { - unsigned ODBCINT64 ll; - sscanf(val, "%llu", &ll); - return ll; -} -#endif /* HAVE_STRTOLL */ -#endif /* WIN32 */ -#endif /* ODBCINT64 */ - -static void parse_to_numeric_struct(const char *wv, SQL_NUMERIC_STRUCT *ns, - BOOL *overflow); - -/* - * TIMESTAMP <-----> SIMPLE_TIME - * precision support since 7.2. 
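The timestamp2stime() routine below leans on sscanf() field counting: the number of fields successfully converted decides whether the input was a full timestamp, a bare date, or a bare time. The core shape, minus the fraction, zone, and "BC" handling (an illustrative sketch, not the deleted code):

    #include <stdio.h>

    typedef struct { int y, m, d, hh, mm, ss; } SimpleTs;

    /* Returns 1 for "YYYY-MM-DD HH:MM:SS", 2 for a bare date, 0 otherwise. */
    static int parse_ts(const char *str, SimpleTs *st) {
        int n = sscanf(str, "%4d-%2d-%2d %2d:%2d:%2d",
                       &st->y, &st->m, &st->d, &st->hh, &st->mm, &st->ss);
        if (n == 6)
            return 1;                 /* full timestamp */
        if (n == 3) {                 /* date only: zero out the time part */
            st->hh = st->mm = st->ss = 0;
            return 2;
        }
        return 0;                     /* unrecognized */
    }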
- * time zone support is unavailable(the stuff is unreliable) - */ -static BOOL timestamp2stime(const char *str, SIMPLE_TIME *st, BOOL *bZone, - int *zone) { - char rest[64], bc[16], *ptr; - int scnt, i; - int y, m, d, hh, mm, ss; -#ifdef TIMEZONE_GLOBAL - long timediff; -#endif - BOOL withZone = *bZone; - - *bZone = FALSE; - *zone = 0; - st->fr = 0; - st->infinity = 0; - rest[0] = '\0'; - bc[0] = '\0'; - if ((scnt = sscanf(str, "%4d-%2d-%2d %2d:%2d:%2d%31s %15s", &y, &m, &d, &hh, - &mm, &ss, rest, bc)) - < 6) { - if (scnt == 3) /* date */ - { - st->y = y; - st->m = m; - st->d = d; - st->hh = 0; - st->mm = 0; - st->ss = 0; - return TRUE; - } - if ((scnt = - sscanf(str, "%2d:%2d:%2d%31s %15s", &hh, &mm, &ss, rest, bc)) - < 3) - return FALSE; - else { - st->hh = hh; - st->mm = mm; - st->ss = ss; - if (scnt == 3) /* time */ - return TRUE; - } - } else { - st->y = y; - st->m = m; - st->d = d; - st->hh = hh; - st->mm = mm; - st->ss = ss; - if (scnt == 6) - return TRUE; - } - switch (rest[0]) { - case '+': - *bZone = TRUE; - *zone = atoi(&rest[1]); - break; - case '-': - *bZone = TRUE; - *zone = -atoi(&rest[1]); - break; - case '.': - if ((ptr = strchr(rest, '+')) != NULL) { - *bZone = TRUE; - *zone = atoi(&ptr[1]); - *ptr = '\0'; - } else if ((ptr = strchr(rest, '-')) != NULL) { - *bZone = TRUE; - *zone = -atoi(&ptr[1]); - *ptr = '\0'; - } - for (i = 1; i < 10; i++) { - if (!isdigit((UCHAR)rest[i])) - break; - } - for (; i < 10; i++) - rest[i] = '0'; - rest[i] = '\0'; - st->fr = atoi(&rest[1]); - break; - case 'B': - if (stricmp(rest, "BC") == 0) - st->y *= -1; - return TRUE; - default: - return TRUE; - } - if (stricmp(bc, "BC") == 0) { - st->y *= -1; - } - if (!withZone || !*bZone || st->y < 1970) - return TRUE; -#ifdef TIMEZONE_GLOBAL - if (!TZNAME_GLOBAL[0] || !TZNAME_GLOBAL[0][0]) { - *bZone = FALSE; - return TRUE; - } - timediff = TIMEZONE_GLOBAL + (*zone) * 3600; - if (!DAYLIGHT_GLOBAL && timediff == 0) /* the same timezone */ - return TRUE; - else { - struct tm tm, *tm2; - time_t time0; - - *bZone = FALSE; - tm.tm_year = st->y - 1900; - tm.tm_mon = st->m - 1; - tm.tm_mday = st->d; - tm.tm_hour = st->hh; - tm.tm_min = st->mm; - tm.tm_sec = st->ss; - tm.tm_isdst = -1; - time0 = mktime(&tm); - if (time0 < 0) - return TRUE; - if (tm.tm_isdst > 0) - timediff -= 3600; - if (timediff == 0) /* the same time zone */ - return TRUE; - time0 -= timediff; -#ifdef HAVE_LOCALTIME_R - if (time0 >= 0 && (tm2 = localtime_r(&time0, &tm)) != NULL) -#else - if (time0 >= 0 && (tm2 = localtime(&time0)) != NULL) -#endif /* HAVE_LOCALTIME_R */ - { - st->y = tm2->tm_year + 1900; - st->m = tm2->tm_mon + 1; - st->d = tm2->tm_mday; - st->hh = tm2->tm_hour; - st->mm = tm2->tm_min; - st->ss = tm2->tm_sec; - *bZone = TRUE; - } - } -#endif /* TIMEZONE_GLOBAL */ - return TRUE; -} - -static int stime2timestamp(const SIMPLE_TIME *st, char *str, size_t bufsize, - BOOL bZone, int precision) { - UNUSED(bZone); - char precstr[16], zonestr[16]; - int i; - - precstr[0] = '\0'; - if (st->infinity > 0) { - return snprintf(str, bufsize, "%s", INFINITY_STRING); - } else if (st->infinity < 0) { - return snprintf(str, bufsize, "%s", MINFINITY_STRING); - } - if (precision > 0 && st->fr) { - SPRINTF_FIXED(precstr, ".%09d", st->fr); - if (precision < 9) - precstr[precision + 1] = '\0'; - else if (precision > 9) - precision = 9; - for (i = precision; i > 0; i--) { - if (precstr[i] != '0') - break; - precstr[i] = '\0'; - } - if (i == 0) - precstr[i] = '\0'; - } - zonestr[0] = '\0'; -#ifdef TIMEZONE_GLOBAL - if (bZone && TZNAME_GLOBAL[0] && 
TZNAME_GLOBAL[0][0] && st->y >= 1970) { - long zoneint; - struct tm tm; - time_t time0; - - zoneint = TIMEZONE_GLOBAL; - if (DAYLIGHT_GLOBAL && st->y >= 1900) { - tm.tm_year = st->y - 1900; - tm.tm_mon = st->m - 1; - tm.tm_mday = st->d; - tm.tm_hour = st->hh; - tm.tm_min = st->mm; - tm.tm_sec = st->ss; - tm.tm_isdst = -1; - time0 = mktime(&tm); - if (time0 >= 0 && tm.tm_isdst > 0) - zoneint -= 3600; - } - if (zoneint > 0) - SPRINTF_FIXED(zonestr, "-%02d", (int)zoneint / 3600); - else - SPRINTF_FIXED(zonestr, "+%02d", -(int)zoneint / 3600); - } -#endif /* TIMEZONE_GLOBAL */ - if (st->y < 0) - return snprintf(str, bufsize, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d%s%s BC", - -st->y, st->m, st->d, st->hh, st->mm, st->ss, precstr, - zonestr); - else - return snprintf(str, bufsize, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d%s%s", - st->y, st->m, st->d, st->hh, st->mm, st->ss, precstr, - zonestr); -} - -static SQLINTERVAL interval2itype(SQLSMALLINT ctype) { - SQLINTERVAL sqlitv = 0; - - switch (ctype) { - case SQL_C_INTERVAL_YEAR: - sqlitv = SQL_IS_YEAR; - break; - case SQL_C_INTERVAL_MONTH: - sqlitv = SQL_IS_MONTH; - break; - case SQL_C_INTERVAL_YEAR_TO_MONTH: - sqlitv = SQL_IS_YEAR_TO_MONTH; - break; - case SQL_C_INTERVAL_DAY: - sqlitv = SQL_IS_DAY; - break; - case SQL_C_INTERVAL_HOUR: - sqlitv = SQL_IS_HOUR; - break; - case SQL_C_INTERVAL_DAY_TO_HOUR: - sqlitv = SQL_IS_DAY_TO_HOUR; - break; - case SQL_C_INTERVAL_MINUTE: - sqlitv = SQL_IS_MINUTE; - break; - case SQL_C_INTERVAL_DAY_TO_MINUTE: - sqlitv = SQL_IS_DAY_TO_MINUTE; - break; - case SQL_C_INTERVAL_HOUR_TO_MINUTE: - sqlitv = SQL_IS_HOUR_TO_MINUTE; - break; - case SQL_C_INTERVAL_SECOND: - sqlitv = SQL_IS_SECOND; - break; - case SQL_C_INTERVAL_DAY_TO_SECOND: - sqlitv = SQL_IS_DAY_TO_SECOND; - break; - case SQL_C_INTERVAL_HOUR_TO_SECOND: - sqlitv = SQL_IS_HOUR_TO_SECOND; - break; - case SQL_C_INTERVAL_MINUTE_TO_SECOND: - sqlitv = SQL_IS_MINUTE_TO_SECOND; - break; - } - return sqlitv; -} - -/* - * Interval data <-----> SQL_INTERVAL_STRUCT - */ - -static int getPrecisionPart(int precision, const char *precPart) { - char fraction[] = "000000000"; - size_t fracs = (size_t)(sizeof(fraction) - 1); - size_t cpys; - - if (precision < 0) - precision = 6; /* default */ - if (precision == 0) - return 0; - cpys = strlen(precPart); - if (cpys > fracs) - cpys = fracs; - memcpy(fraction, precPart, cpys); - fraction[precision] = '\0'; - - return atoi(fraction); -} - -static BOOL interval2istruct(SQLSMALLINT ctype, int precision, const char *str, - SQL_INTERVAL_STRUCT *st) { - char lit1[64], lit2[64]; - int scnt, years, mons, days, hours, minutes, seconds; - SQLSMALLINT sign; - SQLINTERVAL itype = interval2itype(ctype); - - memset(st, 0, sizeof(SQL_INTERVAL_STRUCT)); - if ((scnt = sscanf(str, "%d-%d", &years, &mons)) >= 2) { - if (SQL_IS_YEAR_TO_MONTH == itype) { - sign = years < 0 ? SQL_TRUE : SQL_FALSE; - st->interval_type = itype; - st->interval_sign = sign; - st->intval.year_month.year = sign ? (-years) : years; - st->intval.year_month.month = mons; - return TRUE; - } - return FALSE; - } else if (scnt = sscanf(str, "%d %02d:%02d:%02d.%09s", &days, &hours, - &minutes, &seconds, lit2), - 5 == scnt || 4 == scnt) { - sign = days < 0 ? SQL_TRUE : SQL_FALSE; - st->interval_type = itype; - st->interval_sign = sign; - st->intval.day_second.day = sign ? 
(-days) : days; - st->intval.day_second.hour = hours; - st->intval.day_second.minute = minutes; - st->intval.day_second.second = seconds; - if (scnt > 4) - st->intval.day_second.fraction = getPrecisionPart(precision, lit2); - return TRUE; - } else if ((scnt = - sscanf(str, "%d %10s %d %10s", &years, lit1, &mons, lit2)) - >= 4) { - if (strnicmp(lit1, "year", 4) == 0 && strnicmp(lit2, "mon", 2) == 0 - && (SQL_IS_MONTH == itype || SQL_IS_YEAR_TO_MONTH == itype)) { - sign = years < 0 ? SQL_TRUE : SQL_FALSE; - st->interval_type = itype; - st->interval_sign = sign; - st->intval.year_month.year = sign ? (-years) : years; - st->intval.year_month.month = sign ? (-mons) : mons; - return TRUE; - } - return FALSE; - } - if ((scnt = sscanf(str, "%d %10s %d", &years, lit1, &days)) == 2) { - sign = years < 0 ? SQL_TRUE : SQL_FALSE; - if (SQL_IS_YEAR == itype - && (stricmp(lit1, "year") == 0 || stricmp(lit1, "years") == 0)) { - st->interval_type = itype; - st->interval_sign = sign; - st->intval.year_month.year = sign ? (-years) : years; - return TRUE; - } - if (SQL_IS_MONTH == itype - && (stricmp(lit1, "mon") == 0 || stricmp(lit1, "mons") == 0)) { - st->interval_type = itype; - st->interval_sign = sign; - st->intval.year_month.month = sign ? (-years) : years; - return TRUE; - } - if (SQL_IS_DAY == itype - && (stricmp(lit1, "day") == 0 || stricmp(lit1, "days") == 0)) { - st->interval_type = itype; - st->interval_sign = sign; - st->intval.day_second.day = sign ? (-years) : years; - return TRUE; - } - return FALSE; - } - if (itype == SQL_IS_YEAR || itype == SQL_IS_MONTH - || itype == SQL_IS_YEAR_TO_MONTH) { - /* these formats should've been handled above already */ - return FALSE; - } - scnt = sscanf(str, "%d %10s %02d:%02d:%02d.%09s", &days, lit1, &hours, - &minutes, &seconds, lit2); - if (scnt == 5 || scnt == 6) { - if (strnicmp(lit1, "day", 3) != 0) - return FALSE; - sign = days < 0 ? SQL_TRUE : SQL_FALSE; - - st->interval_type = itype; - st->interval_sign = sign; - st->intval.day_second.day = sign ? (-days) : days; - st->intval.day_second.hour = sign ? (-hours) : hours; - st->intval.day_second.minute = minutes; - st->intval.day_second.second = seconds; - if (scnt > 5) - st->intval.day_second.fraction = getPrecisionPart(precision, lit2); - return TRUE; - } - scnt = sscanf(str, "%02d:%02d:%02d.%09s", &hours, &minutes, &seconds, lit2); - if (scnt == 3 || scnt == 4) { - sign = hours < 0 ? SQL_TRUE : SQL_FALSE; - - st->interval_type = itype; - st->interval_sign = sign; - st->intval.day_second.hour = sign ? (-hours) : hours; - st->intval.day_second.minute = minutes; - st->intval.day_second.second = seconds; - if (scnt > 3) - st->intval.day_second.fraction = getPrecisionPart(precision, lit2); - return TRUE; - } - - return FALSE; -} - -#ifdef HAVE_LOCALE_H -/* - * Get the decimal point of the current locale. - * - * XXX: This isn't thread-safe, if another thread changes the locale with - * setlocale() concurrently. There are two problems with that: - * - * 1. The pointer returned by localeconv(), or the lc->decimal_point string, - * might be invalidated by calls in other threads. Until someone comes up - * with a thread-safe version of localeconv(), there isn't much we can do - * about that. (libc implementations that return a static buffer (like glibc) - * happen to be safe from the lconv struct being invalidated, but the - * decimal_point string might still not point to a static buffer). - * - * 2. The between the call to sprintf() and get_current_decimal_point(), the - * decimal point might change. 
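[Editor's note: in the day-to-second branch above only the leading field carries the sign; the struct stores magnitudes and a separate sign flag. A simplified sketch with a local struct standing in for SQL_INTERVAL_STRUCT:

#include <stdio.h>
#include <stdlib.h>

struct day_second { int sign, day, hour, minute, second, fraction; };

static int parse_day_second(const char *str, struct day_second *iv) {
    char frac[10] = "";
    int n = sscanf(str, "%d %2d:%2d:%2d.%9s",
                   &iv->day, &iv->hour, &iv->minute, &iv->second, frac);
    if (n < 4)
        return 0;
    iv->sign = iv->day < 0;                /* only the leading field is signed */
    if (iv->sign)
        iv->day = -iv->day;                /* store the magnitude */
    /* raw digits; scaling to a precision is getPrecisionPart's job above */
    iv->fraction = (n == 5) ? atoi(frac) : 0;
    return 1;
}
]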
That would cause set_server_decimal_point() - * to fail to recognize a decimal separator, and we might send a numeric - * string to the server that the server won't recognize. This would cause - * the query to fail in the server. - * - * XXX: we only take into account the first byte of the decimal separator. - */ -static char get_current_decimal_point(void) { - struct lconv *lc = localeconv(); - - return lc->decimal_point[0]; -} - -/* - * Inverse of set_server_decimal_point. - */ -static void set_client_decimal_point(char *num) { - char current_decimal_point = get_current_decimal_point(); - char *str; - - if ('.' == current_decimal_point) - return; - for (str = num; '\0' != *str; str++) { - if (*str == '.') { - *str = current_decimal_point; - break; - } - } -} -#else -static void set_client_decimal_point(char *num) { - UNUSED(num); -} -#endif /* HAVE_LOCALE_H */ - -/* This is called by SQLFetch() */ -int copy_and_convert_field_bindinfo(StatementClass *stmt, OID field_type, - int atttypmod, void *value, int col) { - ARDFields *opts = SC_get_ARDF(stmt); - BindInfoClass *bic; - SQLULEN offset = opts->row_offset_ptr ? *opts->row_offset_ptr : 0; - - if (opts->allocated <= col) - extend_column_bindings(opts, (SQLSMALLINT)(col + 1)); - bic = &(opts->bindings[col]); - SC_set_current_col(stmt, -1); - return copy_and_convert_field(stmt, field_type, atttypmod, value, - bic->returntype, bic->precision, - (PTR)(bic->buffer + offset), bic->buflen, - LENADDR_SHIFT(bic->used, offset), - LENADDR_SHIFT(bic->indicator, offset)); -} - -static double get_double_value(const char *str) { - if (stricmp(str, NAN_STRING) == 0) -#ifdef NAN - return (double)NAN; -#else - { - double a = .0; - return .0 / a; - } -#endif /* NAN */ - else if (stricmp(str, INFINITY_STRING) == 0) -#ifdef INFINITY - return (double)INFINITY; -#else - return (double)(HUGE_VAL * HUGE_VAL); -#endif /* INFINITY */ - else if (stricmp(str, MINFINITY_STRING) == 0) -#ifdef INFINITY - return (double)-INFINITY; -#else - return (double)-(HUGE_VAL * HUGE_VAL); -#endif /* INFINITY */ - return atof(str); -} - -static int char2guid(const char *str, SQLGUID *g) { - /* - * SQLGUID.Data1 is an "unsigned long" on some platforms, and - * "unsigned int" on others. For format "%08X", it should be an - * "unsigned int", so use a temporary variable for it. 
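[Editor's note: set_client_decimal_point above only ever swaps the first '.' for the locale's separator, honoring the single-byte caveat in the comment. A self-contained standard-C illustration:

#include <locale.h>
#include <stdio.h>

/* Fetch the first byte of the locale's decimal separator and patch it
 * into a numeric string that currently uses '.'. */
static void localize_decimal_point(char *num) {
    char dp = localeconv()->decimal_point[0];
    if (dp == '.')
        return;                            /* C/POSIX-like locales: nothing to do */
    for (; *num; num++) {
        if (*num == '.') {
            *num = dp;
            break;                         /* at most one separator in a numeric */
        }
    }
}

int main(void) {
    setlocale(LC_NUMERIC, "");             /* honor the environment's locale */
    char buf[] = "3.14";
    localize_decimal_point(buf);
    printf("%s\n", buf);                   /* "3,14" under e.g. de_DE */
    return 0;
}
]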
- */ - unsigned int Data1; - if (sscanf(str, - "%08X-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%" - "02hhX", - &Data1, &g->Data2, &g->Data3, &g->Data4[0], &g->Data4[1], - &g->Data4[2], &g->Data4[3], &g->Data4[4], &g->Data4[5], - &g->Data4[6], &g->Data4[7]) - < 11) - return COPY_GENERAL_ERROR; - g->Data1 = Data1; - return COPY_OK; -} - -static int effective_fraction(int fraction, int *width) { - for (*width = 9; fraction % 10 == 0; (*width)--, fraction /= 10) - ; - return fraction; -} - -static int get_terminator_len(SQLSMALLINT fCType) { - switch (fCType) { -#ifdef UNICODE_SUPPORT - case SQL_C_WCHAR: - return WCLEN; -#endif /* UNICODE_SUPPORT */ - case SQL_C_BINARY: - return 0; - } - - /* SQL_C_CHAR or INTERNAL_ASIS_TYPE */ - return 1; -} - -static SQLLEN get_adjust_len(SQLSMALLINT fCType, SQLLEN len) { - switch (fCType) { -#ifdef UNICODE_SUPPORT - case SQL_C_WCHAR: - return (len / WCLEN) * WCLEN; -#endif /* UNICODE_SUPPORT */ - } - - return len; -} - -#define BYTEA_PROCESS_ESCAPE 1 -#define BYTEA_PROCESS_BINARY 2 - -static int setup_getdataclass(SQLLEN *const length_return, - const char **const ptr_return, - int *needbuflen_return, GetDataClass *const esdc, - const char *neut_str, const OID field_type, - const SQLSMALLINT fCType, const SQLLEN cbValueMax, - const ConnectionClass *const conn) { - SQLLEN len = (-2); - const char *ptr = NULL; - int needbuflen = 0; - int result = COPY_OK; - - BOOL lf_conv = 0; - int bytea_process_kind = 0; - BOOL already_processed = FALSE; - BOOL changed = FALSE; - int len_for_wcs_term = 0; - -#ifdef UNICODE_SUPPORT - char *allocbuf = NULL; - int unicode_count = -1; - BOOL localize_needed = FALSE; - BOOL hybrid = FALSE; -#endif /* UNICODE_SUPPORT */ - - if (OPENSEARCH_TYPE_BYTEA == field_type) { - if (SQL_C_BINARY == fCType) - bytea_process_kind = BYTEA_PROCESS_BINARY; - else if (0 == strnicmp(neut_str, "\\x", 2)) /* hex format */ - neut_str += 2; - else - bytea_process_kind = BYTEA_PROCESS_ESCAPE; - } - -#ifdef UNICODE_SUPPORT - if (0 == bytea_process_kind) { - if (get_convtype() - > 0) /* coversion between the current locale is available */ - { - BOOL wcs_debug = 0; - BOOL same_encoding = - (conn->ccsc == opensearch_CS_code(conn->locale_encoding)); - BOOL is_utf8 = (UTF8 == conn->ccsc); - - switch (field_type) { - case OPENSEARCH_TYPE_UNKNOWN: - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_BPCHARARRAY: - case OPENSEARCH_TYPE_VARCHARARRAY: - case OPENSEARCH_TYPE_TEXTARRAY: - if (SQL_C_CHAR == fCType || SQL_C_BINARY == fCType) - localize_needed = (!same_encoding || wcs_debug); - if (SQL_C_WCHAR == fCType) - hybrid = (!is_utf8 || (same_encoding && wcs_debug)); - } - MYLOG(OPENSEARCH_DEBUG, - "localize=%d hybrid=%d is_utf8=%d same_encoding=%d " - "wcs_debug=%d\n", - localize_needed, hybrid, is_utf8, same_encoding, wcs_debug); - } - } - if (fCType == SQL_C_WCHAR) { - if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) - unicode_count = (int)convert_from_opensearchbinary(neut_str, NULL, 0) * 2; - else if (hybrid) { - MYLOG(OPENSEARCH_DEBUG, "hybrid estimate\n"); - if ((unicode_count = - (int)bindcol_hybrid_estimate(neut_str, lf_conv, &allocbuf)) - < 0) { - result = COPY_INVALID_STRING_CONVERSION; - goto cleanup; - } - } else /* normally */ - { - unicode_count = (int)utf8_to_ucs2_lf(neut_str, SQL_NTS, lf_conv, - NULL, 0, FALSE); - } - len = WCLEN * unicode_count; - already_processed = changed = TRUE; - } else if (localize_needed) { - if ((len = bindcol_localize_estimate(neut_str, lf_conv, 
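[Editor's note: the temporary `Data1` above exists because %08X requires an unsigned int while SQLGUID.Data1 may be an unsigned long. A stand-alone version with a local struct so it compiles without the ODBC headers (field widths match SQLGUID):

#include <stdio.h>

struct guid {
    unsigned int   d1;
    unsigned short d2, d3;
    unsigned char  d4[8];
};

static int parse_guid(const char *s, struct guid *g) {
    int n = sscanf(s,
        "%08X-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
        &g->d1, &g->d2, &g->d3, &g->d4[0], &g->d4[1], &g->d4[2], &g->d4[3],
        &g->d4[4], &g->d4[5], &g->d4[6], &g->d4[7]);
    return n == 11;                        /* all eleven fields must match */
}
]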
&allocbuf)) - < 0) { - result = COPY_INVALID_STRING_CONVERSION; - goto cleanup; - } - already_processed = changed = TRUE; - } -#endif /* UNICODE_SUPPORT */ - - if (already_processed) /* skip */ - ; - else if (0 != bytea_process_kind) { - len = convert_from_opensearchbinary(neut_str, NULL, 0); - if (BYTEA_PROCESS_BINARY != bytea_process_kind) - len *= 2; - changed = TRUE; - } else - /* convert linefeeds to carriage-return/linefeed */ - len = convert_linefeeds(neut_str, NULL, 0, lf_conv, &changed); - - /* just returns length info */ - if (cbValueMax == 0) { - result = COPY_RESULT_TRUNCATED; - goto cleanup; - } - - if (!esdc->ttlbuf) - esdc->ttlbuflen = 0; - needbuflen = (int)len + get_terminator_len(fCType); - if (SQL_C_BINARY == fCType) { - /* - * Though Binary doesn't have NULL terminator, - * bindcol_localize_exec() needs output buffer - * for NULL terminator. - */ - len_for_wcs_term = 1; - } - if (changed || needbuflen > cbValueMax) { - if (needbuflen > (SQLLEN)esdc->ttlbuflen) { - esdc->ttlbuf = realloc(esdc->ttlbuf, needbuflen + len_for_wcs_term); - esdc->ttlbuflen = needbuflen; - } - - already_processed = FALSE; -#ifdef UNICODE_SUPPORT - if (fCType == SQL_C_WCHAR) { - if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) { - len = convert_from_opensearchbinary(neut_str, esdc->ttlbuf, - esdc->ttlbuflen); - len = es_bin2whex(esdc->ttlbuf, (SQLWCHAR *)esdc->ttlbuf, len); - } else { - if (!hybrid) /* normally */ - utf8_to_ucs2_lf(neut_str, SQL_NTS, lf_conv, - (SQLWCHAR *)esdc->ttlbuf, unicode_count, - FALSE); - else /* hybrid */ - { - MYLOG(OPENSEARCH_DEBUG, "hybrid convert\n"); - if (bindcol_hybrid_exec((SQLWCHAR *)esdc->ttlbuf, neut_str, - unicode_count + 1, lf_conv, - &allocbuf) - < 0) { - result = COPY_INVALID_STRING_CONVERSION; - goto cleanup; - } - } - } - already_processed = TRUE; - } else if (localize_needed) { - if (bindcol_localize_exec(esdc->ttlbuf, len + 1, lf_conv, &allocbuf) - < 0) { - result = COPY_INVALID_STRING_CONVERSION; - goto cleanup; - } - already_processed = TRUE; - } -#endif /* UNICODE_SUPPORT */ - - if (already_processed) - ; - else if (0 != bytea_process_kind) { - len = convert_from_opensearchbinary(neut_str, esdc->ttlbuf, - esdc->ttlbuflen); - if (BYTEA_PROCESS_ESCAPE == bytea_process_kind) - len = opensearch_bin2hex(esdc->ttlbuf, esdc->ttlbuf, len); - } else - convert_linefeeds(neut_str, esdc->ttlbuf, esdc->ttlbuflen, lf_conv, - &changed); - ptr = esdc->ttlbuf; - esdc->ttlbufused = len; - } else { - if (esdc->ttlbuf) { - free(esdc->ttlbuf); - esdc->ttlbuf = NULL; - } - ptr = neut_str; - } -cleanup: -#ifdef UNICODE_SUPPORT - if (allocbuf) - free(allocbuf); -#endif /* UNICODE_SUPPORT */ - - *length_return = len; - *ptr_return = ptr; - *needbuflen_return = needbuflen; - - return result; -} - -/* - gdata SC_get_GDTI(stmt) - current_col stmt->current_col - */ - -/* - * fCType treated in the following function is - * - * SQL_C_CHAR, SQL_C_BINARY, SQL_C_WCHAR or INTERNAL_ASIS_TYPE - */ -static int convert_text_field_to_sql_c( - GetDataInfo *const gdata, const int current_col, const char *const neut_str, - const OID field_type, const SQLSMALLINT fCType, char *const rgbValueBindRow, - const SQLLEN cbValueMax, const ConnectionClass *const conn, - SQLLEN *const length_return) { - int result = COPY_OK; - SQLLEN len = (-2); - GetDataClass *esdc; - int copy_len = 0, needbuflen = 0, i; - const char *ptr; - - MYLOG(OPENSEARCH_DEBUG, "field_type=%u type=%d\n", field_type, fCType); - - switch (field_type) { - case OPENSEARCH_TYPE_FLOAT4: - case OPENSEARCH_TYPE_FLOAT8: - case 
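[Editor's note: stripped of the charset-conversion and bytea branches, setup_getdataclass above follows one discipline: measure the converted length first, stage into a reusable per-column buffer only when the data changed or will not fit, and report length, pointer, and needed size separately. A heavily simplified sketch (no encoding work, hypothetical names):

#include <stdlib.h>
#include <string.h>

struct stage { char *buf; size_t cap, used; };

static const char *stage_text(struct stage *st, const char *src,
                              size_t out_cap, size_t *len_out) {
    size_t need = strlen(src) + 1;         /* payload plus NUL terminator */
    *len_out = need - 1;
    if (need <= out_cap)
        return src;                        /* fits: caller copies straight from src */
    if (need > st->cap) {
        char *p = realloc(st->buf, need);  /* grow the staging buffer */
        if (!p)
            return NULL;
        st->buf = p;
        st->cap = need;
    }
    memcpy(st->buf, src, need);            /* stage for later partial reads */
    st->used = need - 1;
    return st->buf;
}
]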
OPENSEARCH_TYPE_NUMERIC: - set_client_decimal_point((char *)neut_str); - break; - } - - if (current_col < 0) { - esdc = &(gdata->fdata); - esdc->data_left = -1; - } else - esdc = &gdata->gdata[current_col]; - if (esdc->data_left < 0) { - if (COPY_OK - != (result = - setup_getdataclass(&len, &ptr, &needbuflen, esdc, neut_str, - field_type, fCType, cbValueMax, conn))) - goto cleanup; - } else { - ptr = esdc->ttlbuf; - len = esdc->ttlbufused; - } - - MYLOG(OPENSEARCH_DEBUG, "DEFAULT: len = " FORMAT_LEN ", ptr = '%.*s'\n", len, - (int)len, ptr); - - if (current_col >= 0) { - if (esdc->data_left > 0) { - ptr += (len - esdc->data_left); - len = esdc->data_left; - needbuflen = (int)len + (int)(esdc->ttlbuflen - esdc->ttlbufused); - } else - esdc->data_left = len; - } - - if (cbValueMax > 0) { - BOOL already_copied = FALSE; - int terminatorlen; - - terminatorlen = get_terminator_len(fCType); - if (terminatorlen >= cbValueMax) - copy_len = 0; - else if (len + terminatorlen > cbValueMax) - copy_len = (int)get_adjust_len(fCType, cbValueMax - terminatorlen); - else - copy_len = (int)len; - - if (!already_copied) { - /* Copy the data */ - if (copy_len > 0) - memcpy(rgbValueBindRow, ptr, copy_len); - /* Add null terminator */ - for (i = 0; i < terminatorlen && copy_len + i < cbValueMax; i++) - rgbValueBindRow[copy_len + i] = '\0'; - } - /* Adjust data_left for next time */ - if (current_col >= 0) - esdc->data_left -= copy_len; - } - - /* - * Finally, check for truncation so that proper status can - * be returned - */ - if (cbValueMax > 0 && needbuflen > cbValueMax) - result = COPY_RESULT_TRUNCATED; - else { - if (esdc->ttlbuf != NULL) { - free(esdc->ttlbuf); - esdc->ttlbuf = NULL; - } - } - -#ifdef UNICODE_SUPPORT - if (SQL_C_WCHAR == fCType) - MYLOG(OPENSEARCH_DEBUG, - " SQL_C_WCHAR, default: len = " FORMAT_LEN - ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%s'\n", - len, cbValueMax, rgbValueBindRow); - else -#endif /* UNICODE_SUPPORT */ - if (SQL_C_BINARY == fCType) - MYLOG(OPENSEARCH_DEBUG, - " SQL_C_BINARY, default: len = " FORMAT_LEN - ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%.*s'\n", - len, cbValueMax, copy_len, rgbValueBindRow); - else - MYLOG(OPENSEARCH_DEBUG, - " SQL_C_CHAR, default: len = " FORMAT_LEN - ", cbValueMax = " FORMAT_LEN ", rgbValueBindRow = '%s'\n", - len, cbValueMax, rgbValueBindRow); - -cleanup: - *length_return = len; - - return result; -} - -/* This is called by SQLGetData() */ -int copy_and_convert_field(StatementClass *stmt, OID field_type, int atttypmod, - void *valuei, SQLSMALLINT fCType, int precision, - PTR rgbValue, SQLLEN cbValueMax, SQLLEN *pcbValue, - SQLLEN *pIndicator) { - CSTR func = "copy_and_convert_field"; - const char *value = valuei; - ARDFields *opts = SC_get_ARDF(stmt); - GetDataInfo *gdata = SC_get_GDTI(stmt); - SQLLEN len = 0; - SIMPLE_TIME std_time; -#ifdef HAVE_LOCALTIME_R - struct tm tm; -#endif /* HAVE_LOCALTIME_R */ - SQLLEN pcbValueOffset, rgbValueOffset; - char *rgbValueBindRow = NULL; - SQLLEN *pcbValueBindRow = NULL, *pIndicatorBindRow = NULL; - SQLSETPOSIROW bind_row = stmt->bind_row; - int bind_size = opts->bind_size; - int result = COPY_OK; - const ConnectionClass *conn = SC_get_conn(stmt); - BOOL text_bin_handling; - const char *neut_str = value; - char booltemp[3]; - char midtemp[64]; - GetDataClass *esdc; - - if (stmt->current_col >= 0) { - if (stmt->current_col >= opts->allocated) { - return SQL_ERROR; - } - if (gdata->allocated != opts->allocated) - extend_getdata_info(gdata, opts->allocated, TRUE); - esdc = 
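[Editor's note: the data_left field above is what lets SQLGetData return a long value across several calls: each call copies what fits (minus the terminator) and the next call resumes where the previous one stopped. A toy model of that bookkeeping:

#include <stdio.h>
#include <string.h>

static long data_left = -1;                /* -1 means "no fetch in progress" */

/* Assumes cap >= 1. Nonzero return means "truncated, call again". */
static int get_chunk(const char *full, char *out, size_t cap, int *done) {
    size_t total = strlen(full), off, n;
    if (data_left < 0)
        data_left = (long)total;           /* first call: remember the total */
    off = total - (size_t)data_left;
    n = (size_t)data_left < cap - 1 ? (size_t)data_left : cap - 1;
    memcpy(out, full + off, n);
    out[n] = '\0';
    data_left -= (long)n;
    *done = (data_left == 0);
    if (*done)
        data_left = -1;                    /* reset for the next column/row */
    return !*done;
}

int main(void) {
    char buf[6];
    int done;
    while (get_chunk("hello world", buf, sizeof buf, &done))
        printf("chunk: %s\n", buf);        /* "hello", " worl" */
    printf("last:  %s\n", buf);            /* "d" */
    return 0;
}
]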
&gdata->gdata[stmt->current_col]; - if (esdc->data_left == -2) - esdc->data_left = (cbValueMax > 0) ? 0 : -1; /* This seems to be * - * needed by ADO ? */ - if (esdc->data_left == 0) { - if (esdc->ttlbuf != NULL) { - free(esdc->ttlbuf); - esdc->ttlbuf = NULL; - esdc->ttlbuflen = 0; - } - esdc->data_left = -2; /* needed by ADO ? */ - return COPY_NO_DATA_FOUND; - } - } - /*--------- - * rgbValueOffset is *ONLY* for character and binary data. - * pcbValueOffset is for computing any pcbValue location - *--------- - */ - - if (bind_size > 0) - pcbValueOffset = rgbValueOffset = (bind_size * bind_row); - else { - pcbValueOffset = bind_row * sizeof(SQLLEN); - rgbValueOffset = bind_row * cbValueMax; - } - /* - * The following is applicable in case bind_size > 0 - * or the fCType is of variable length. - */ - if (rgbValue) - rgbValueBindRow = (char *)rgbValue + rgbValueOffset; - if (pcbValue) - pcbValueBindRow = LENADDR_SHIFT(pcbValue, pcbValueOffset); - if (pIndicator) { - pIndicatorBindRow = (SQLLEN *)((char *)pIndicator + pcbValueOffset); - *pIndicatorBindRow = 0; - } - - memset(&std_time, 0, sizeof(SIMPLE_TIME)); - - MYLOG(OPENSEARCH_DEBUG, - "field_type = %d, fctype = %d, value = '%s', cbValueMax=" FORMAT_LEN - "\n", - field_type, fCType, (value == NULL) ? "" : value, cbValueMax); - - if (!value) { - /* - * handle a null just by returning SQL_NULL_DATA in pcbValue, and - * doing nothing to the buffer. - */ - if (pIndicator) { - *pIndicatorBindRow = SQL_NULL_DATA; - return COPY_OK; - } else { - SC_set_error(stmt, STMT_RETURN_NULL_WITHOUT_INDICATOR, - "StrLen_or_IndPtr was a null pointer and NULL data " - "was retrieved", - func); - return SQL_ERROR; - } - } - - if (stmt->hdbc->DataSourceToDriver != NULL) { - size_t length = strlen(value); - - stmt->hdbc->DataSourceToDriver(stmt->hdbc->translation_option, SQL_CHAR, - valuei, (SDWORD)length, valuei, - (SDWORD)length, NULL, NULL, 0, NULL); - } - - /* - * First convert any specific OpenSearch types into more useable data. 
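[Editor's note: the offset computation above distinguishes row-wise binding (bind_size > 0, one stride covering both the data and the length/indicator arrays) from column-wise binding (each array has its own element stride). The arithmetic in isolation, with long standing in for SQLLEN:

#include <stdio.h>

static void bind_offsets(long bind_size, long bind_row, long cbValueMax,
                         long *rgb_off, long *pcb_off) {
    if (bind_size > 0) {                   /* row-wise: one struct per row */
        *rgb_off = *pcb_off = bind_size * bind_row;
    } else {                               /* column-wise: parallel arrays */
        *rgb_off = bind_row * cbValueMax;
        *pcb_off = bind_row * (long)sizeof(long);
    }
}

int main(void) {
    long rgb, pcb;
    bind_offsets(0, 3, 64, &rgb, &pcb);    /* column-wise, fourth row */
    printf("rgb=%ld pcb=%ld\n", rgb, pcb); /* rgb=192, pcb=3*sizeof(long) */
    return 0;
}
]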
- * - * NOTE: Conversions from ES char/varchar of a date/time/timestamp value - * to SQL_C_DATE,SQL_C_TIME, SQL_C_TIMESTAMP not supported - */ - switch (field_type) { - /* - * $$$ need to add parsing for date/time/timestamp strings in - * OPENSEARCH_TYPE_CHAR,VARCHAR $$$ - */ - case OPENSEARCH_TYPE_DATE: - sscanf(value, "%4d-%2d-%2d", &std_time.y, &std_time.m, &std_time.d); - break; - - case OPENSEARCH_TYPE_TIME: { - BOOL bZone = FALSE; /* time zone stuff is unreliable */ - int zone; - timestamp2stime(value, &std_time, &bZone, &zone); - } break; - - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - case OPENSEARCH_TYPE_TIMESTAMP: - std_time.fr = 0; - std_time.infinity = 0; - if (strnicmp(value, INFINITY_STRING, 8) == 0) { - std_time.infinity = 1; - std_time.m = 12; - std_time.d = 31; - std_time.y = 9999; - std_time.hh = 23; - std_time.mm = 59; - std_time.ss = 59; - } - if (strnicmp(value, MINFINITY_STRING, 9) == 0) { - std_time.infinity = -1; - std_time.m = 1; - std_time.d = 1; - // std_time.y = -4713; - std_time.y = -9999; - std_time.hh = 0; - std_time.mm = 0; - std_time.ss = 0; - } - if (strnicmp(value, "invalid", 7) != 0) { - BOOL bZone = field_type != OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE; - int zone; - - /* - * sscanf(value, "%4d-%2d-%2d %2d:%2d:%2d", &std_time.y, - * &std_time.m, &std_time.d, &std_time.hh, &std_time.mm, - * &std_time.ss); - */ - bZone = FALSE; /* time zone stuff is unreliable */ - timestamp2stime(value, &std_time, &bZone, &zone); - MYLOG(OPENSEARCH_ALL, "2stime fr=%d\n", std_time.fr); - } else { - /* - * The timestamp is invalid so set something conspicuous, - * like the epoch - */ - struct tm *tim; - time_t t = 0; -#ifdef HAVE_LOCALTIME_R - tim = localtime_r(&t, &tm); -#else - tim = localtime(&t); -#endif /* HAVE_LOCALTIME_R */ - std_time.m = tim->tm_mon + 1; - std_time.d = tim->tm_mday; - std_time.y = tim->tm_year + 1900; - std_time.hh = tim->tm_hour; - std_time.mm = tim->tm_min; - std_time.ss = tim->tm_sec; - } - break; - - case OPENSEARCH_TYPE_BOOL: { /* change T/F to 1/0 */ - switch (((char *)value)[0]) { - case 'f': - case 'F': - case 'n': - case 'N': - case '0': - STRCPY_FIXED(booltemp, "0"); - break; - default: - STRCPY_FIXED(booltemp, "1"); - } - neut_str = booltemp; - } break; - - /* This is for internal use by SQLStatistics() */ - case OPENSEARCH_TYPE_INT2VECTOR: - if (SQL_C_DEFAULT == fCType) { - int i, nval, maxc; - const char *vp; - /* this is an array of eight integers */ - short *short_array = (short *)rgbValueBindRow, shortv; - - maxc = 0; - if (NULL != short_array) - maxc = (int)cbValueMax / sizeof(short); - vp = value; - nval = 0; - MYLOG(OPENSEARCH_DEBUG, "index=("); - for (i = 0;; i++) { - if (sscanf(vp, "%hi", &shortv) != 1) - break; - MYPRINTF(0, " %hi", shortv); - nval++; - if (nval < maxc) - short_array[i + 1] = shortv; - - /* skip the current token */ - while (IS_NOT_SPACE(*vp)) - vp++; - /* and skip the space to the next token */ - while ((*vp != '\0') && (isspace(*vp))) - vp++; - if (*vp == '\0') - break; - } - MYPRINTF(0, ") nval = %i\n", nval); - if (maxc > 0) - short_array[0] = (short)nval; - - /* There is no corresponding fCType for this. */ - len = (nval + 1) * sizeof(short); - if (pcbValue) - *pcbValueBindRow = len; - - if (len <= cbValueMax) - return COPY_OK; /* dont go any further or the data will be - * trashed */ - else - return COPY_RESULT_TRUNCATED; - } - break; - - /* - * This is a large object OID, which is used to store - * LONGVARBINARY objects. 
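[Editor's note: the int2vector branch above scans one short per token, then advances past the token and the following whitespace by hand, reserving slot 0 for the element count. As a stand-alone function:

#include <stdio.h>
#include <ctype.h>

static int parse_short_vector(const char *vp, short *out, int maxc) {
    int nval = 0;
    short v;
    for (;;) {
        if (sscanf(vp, "%hi", &v) != 1)
            break;
        if (nval + 1 < maxc)
            out[nval + 1] = v;             /* slot 0 is reserved for the count */
        nval++;
        while (*vp && !isspace((unsigned char)*vp))
            vp++;                          /* skip the token just read */
        while (*vp && isspace((unsigned char)*vp))
            vp++;                          /* and the separating spaces */
        if (*vp == '\0')
            break;
    }
    if (maxc > 0)
        out[0] = (short)nval;
    return nval;
}
]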
- */ - case OPENSEARCH_TYPE_LO_UNDEFINED: - - return convert_lo(stmt, value, fCType, rgbValueBindRow, cbValueMax, - pcbValueBindRow); - - case 0: - break; - - default: - if (field_type - == (OID)stmt->hdbc - ->lobj_type /* hack until permanent type available */ - || (OPENSEARCH_TYPE_OID == field_type && SQL_C_BINARY == fCType - && conn->lo_is_domain)) - return convert_lo(stmt, value, fCType, rgbValueBindRow, - cbValueMax, pcbValueBindRow); - } - - /* Change default into something useable */ - if (fCType == SQL_C_DEFAULT) { - fCType = opensearchtype_attr_to_ctype(conn, field_type, atttypmod); -#ifdef UNICODE_SUPPORT - if (fCType == SQL_C_WCHAR && CC_default_is_c(conn)) - fCType = SQL_C_CHAR; -#endif - - MYLOG(OPENSEARCH_DEBUG, ", SQL_C_DEFAULT: fCType = %d\n", fCType); - } - - text_bin_handling = FALSE; - switch (fCType) { - case INTERNAL_ASIS_TYPE: -#ifdef UNICODE_SUPPORT - case SQL_C_WCHAR: -#endif /* UNICODE_SUPPORT */ - case SQL_C_CHAR: - text_bin_handling = TRUE; - break; - case SQL_C_BINARY: - switch (field_type) { - case OPENSEARCH_TYPE_UNKNOWN: - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_XML: - case OPENSEARCH_TYPE_BPCHARARRAY: - case OPENSEARCH_TYPE_VARCHARARRAY: - case OPENSEARCH_TYPE_TEXTARRAY: - case OPENSEARCH_TYPE_XMLARRAY: - case OPENSEARCH_TYPE_BYTEA: - text_bin_handling = TRUE; - break; - } - break; - } - - if (text_bin_handling) { - BOOL pre_convert = TRUE; - int midsize = sizeof(midtemp); - int i; - - /* Special character formatting as required */ - - /* - * These really should return error if cbValueMax is not big - * enough. - */ - switch (field_type) { - case OPENSEARCH_TYPE_DATE: - len = SPRINTF_FIXED(midtemp, "%.4d-%.2d-%.2d", std_time.y, - std_time.m, std_time.d); - break; - - case OPENSEARCH_TYPE_TIME: - len = SPRINTF_FIXED(midtemp, "%.2d:%.2d:%.2d", std_time.hh, - std_time.mm, std_time.ss); - if (std_time.fr > 0) { - int wdt; - int fr = effective_fraction(std_time.fr, &wdt); - - char *fraction = NULL; - len = sprintf(fraction, ".%0*d", wdt, fr); - strcat(midtemp, fraction); - } - break; - - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - case OPENSEARCH_TYPE_TIMESTAMP: - len = stime2timestamp(&std_time, midtemp, midsize, FALSE, - (int)(midsize - 19 - 2)); - break; - - case OPENSEARCH_TYPE_UUID: - len = strlen(neut_str); - for (i = 0; i < len && i < midsize - 2; i++) - midtemp[i] = (char)toupper((UCHAR)neut_str[i]); - midtemp[i] = '\0'; - MYLOG(OPENSEARCH_DEBUG, "OPENSEARCH_TYPE_UUID: rgbValueBindRow = '%s'\n", - rgbValueBindRow); - break; - - /* - * Currently, data is SILENTLY TRUNCATED for BYTEA and - * character data types if there is not enough room in - * cbValueMax because the driver can't handle multiple - * calls to SQLGetData for these, yet. Most likely, the - * buffer passed in will be big enough to handle the - * maximum limit of OpenSearch, anyway. - * - * LongVarBinary types are handled correctly above, observing - * truncation and all that stuff since there is - * essentially no limit on the large object used to store - * those. 
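[Editor's note: there is a latent bug in the OPENSEARCH_TYPE_TIME branch further below: `fraction` is a null pointer when sprintf writes through it, which is undefined behavior, so the fractional-seconds path would crash if ever taken. A corrected sketch formats into a stack buffer before appending:

#include <stdio.h>
#include <string.h>

static int format_time(char *out, size_t cap, int hh, int mm, int ss,
                       int fr, int width) {
    int len = snprintf(out, cap, "%.2d:%.2d:%.2d", hh, mm, ss);
    if (fr > 0 && (size_t)len < cap) {
        char frac[16];
        snprintf(frac, sizeof frac, ".%0*d", width, fr);
        strncat(out, frac, cap - (size_t)len - 1);  /* bounded append */
        len = (int)strlen(out);
    }
    return len;
}
]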
- */ - case OPENSEARCH_TYPE_BYTEA: /* convert binary data to hex strings - * (i.e, 255 = "FF") */ - - default: - pre_convert = FALSE; - } - if (pre_convert) - neut_str = midtemp; - result = convert_text_field_to_sql_c( - gdata, stmt->current_col, neut_str, field_type, fCType, - rgbValueBindRow, cbValueMax, conn, &len); - } else { - SQLGUID g; - - /* - * for SQL_C_CHAR, it's probably ok to leave currency symbols in. - * But to convert to numeric types, it is necessary to get rid of - * those. - */ - if (field_type == OPENSEARCH_TYPE_MONEY) { - if (convert_money(neut_str, midtemp, sizeof(midtemp))) - neut_str = midtemp; - else { - MYLOG(OPENSEARCH_DEBUG, "couldn't convert money type to %d\n", fCType); - return COPY_UNSUPPORTED_TYPE; - } - } - - switch (fCType) { - case SQL_C_DATE: - case SQL_C_TYPE_DATE: /* 91 */ - len = 6; - { - DATE_STRUCT *ds; - struct tm *tim; - - if (bind_size > 0) - ds = (DATE_STRUCT *)rgbValueBindRow; - else - ds = (DATE_STRUCT *)rgbValue + bind_row; - - /* - * Initialize date in case conversion destination - * expects date part from this source time data. - * A value may be partially set here, so do some - * sanity checks on the existing values before - * setting them. - */ - tim = SC_get_localtime(stmt); - if (std_time.m == 0) - std_time.m = tim->tm_mon + 1; - if (std_time.d == 0) - std_time.d = tim->tm_mday; - if (std_time.y == 0) - std_time.y = tim->tm_year + 1900; - ds->year = (SQLSMALLINT)std_time.y; - ds->month = (SQLUSMALLINT)std_time.m; - ds->day = (SQLUSMALLINT)std_time.d; - } - break; - - case SQL_C_TIME: - case SQL_C_TYPE_TIME: /* 92 */ - len = 6; - { - TIME_STRUCT *ts; - - if (bind_size > 0) - ts = (TIME_STRUCT *)rgbValueBindRow; - else - ts = (TIME_STRUCT *)rgbValue + bind_row; - ts->hour = (SQLUSMALLINT)std_time.hh; - ts->minute = (SQLUSMALLINT)std_time.mm; - ts->second = (SQLUSMALLINT)std_time.ss; - } - break; - - case SQL_C_TIMESTAMP: - case SQL_C_TYPE_TIMESTAMP: /* 93 */ - len = 16; - { - struct tm *tim; - TIMESTAMP_STRUCT *ts; - - if (bind_size > 0) - ts = (TIMESTAMP_STRUCT *)rgbValueBindRow; - else - ts = (TIMESTAMP_STRUCT *)rgbValue + bind_row; - - /* - * Initialize date in case conversion destination - * expects date part from this source time data. - * A value may be partially set here, so do some - * sanity checks on the existing values before - * setting them. 
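[Editor's note: the SQL_C_DATE and SQL_C_TIMESTAMP branches above backfill any missing date component from the local clock before filling the ODBC struct. The same defaulting in plain C, following the file's own HAVE_LOCALTIME_R convention:

#include <time.h>

static void default_date_parts(int *y, int *m, int *d) {
    time_t t = time(NULL);
    struct tm tmv, *tim;
#ifdef HAVE_LOCALTIME_R
    tim = localtime_r(&t, &tmv);
#else
    tim = localtime(&t);                   /* not thread-safe, as noted above */
    (void)tmv;
#endif
    if (*m == 0) *m = tim->tm_mon + 1;
    if (*d == 0) *d = tim->tm_mday;
    if (*y == 0) *y = tim->tm_year + 1900;
}
]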
- */ - tim = SC_get_localtime(stmt); - if (std_time.m == 0) - std_time.m = tim->tm_mon + 1; - if (std_time.d == 0) - std_time.d = tim->tm_mday; - if (std_time.y == 0) - std_time.y = tim->tm_year + 1900; - - ts->year = (SQLSMALLINT)std_time.y; - ts->month = (SQLUSMALLINT)std_time.m; - ts->day = (SQLUSMALLINT)std_time.d; - ts->hour = (SQLUSMALLINT)std_time.hh; - ts->minute = (SQLUSMALLINT)std_time.mm; - ts->second = (SQLUSMALLINT)std_time.ss; - ts->fraction = (SQLUINTEGER)std_time.fr; - } - break; - - case SQL_C_BIT: - len = 1; - if (bind_size > 0) - *((UCHAR *)rgbValueBindRow) = (UCHAR)atoi(neut_str); - else - *((UCHAR *)rgbValue + bind_row) = (UCHAR)atoi(neut_str); - - MYLOG(99, - "SQL_C_BIT: bind_row = " FORMAT_POSIROW - " val = %d, cb = " FORMAT_LEN ", rgb=%d\n", - bind_row, atoi(neut_str), cbValueMax, - *((UCHAR *)rgbValue)); - break; - - case SQL_C_STINYINT: - case SQL_C_TINYINT: - len = 1; - if (bind_size > 0) - *((SCHAR *)rgbValueBindRow) = (SCHAR)atoi(neut_str); - else - *((SCHAR *)rgbValue + bind_row) = (SCHAR)atoi(neut_str); - break; - - case SQL_C_UTINYINT: - len = 1; - if (bind_size > 0) - *((UCHAR *)rgbValueBindRow) = (UCHAR)atoi(neut_str); - else - *((UCHAR *)rgbValue + bind_row) = (UCHAR)atoi(neut_str); - break; - - case SQL_C_FLOAT: - set_client_decimal_point((char *)neut_str); - len = 4; - if (bind_size > 0) - *((SFLOAT *)rgbValueBindRow) = - (SFLOAT)get_double_value(neut_str); - else - *((SFLOAT *)rgbValue + bind_row) = - (SFLOAT)get_double_value(neut_str); - break; - - case SQL_C_DOUBLE: - set_client_decimal_point((char *)neut_str); - len = 8; - if (bind_size > 0) - *((SDOUBLE *)rgbValueBindRow) = - (SDOUBLE)get_double_value(neut_str); - else - *((SDOUBLE *)rgbValue + bind_row) = - (SDOUBLE)get_double_value(neut_str); - break; - - case SQL_C_NUMERIC: { - SQL_NUMERIC_STRUCT *ns; - BOOL overflowed; - - if (bind_size > 0) - ns = (SQL_NUMERIC_STRUCT *)rgbValueBindRow; - else - ns = (SQL_NUMERIC_STRUCT *)rgbValue + bind_row; - - parse_to_numeric_struct(neut_str, ns, &overflowed); - if (overflowed) - result = COPY_RESULT_TRUNCATED; - } break; - - case SQL_C_SSHORT: - case SQL_C_SHORT: - len = 2; - if (bind_size > 0) - *((SQLSMALLINT *)rgbValueBindRow) = - (SQLSMALLINT)atoi(neut_str); - else - *((SQLSMALLINT *)rgbValue + bind_row) = - (SQLSMALLINT)atoi(neut_str); - break; - - case SQL_C_USHORT: - len = 2; - if (bind_size > 0) - *((SQLUSMALLINT *)rgbValueBindRow) = - (SQLUSMALLINT)atoi(neut_str); - else - *((SQLUSMALLINT *)rgbValue + bind_row) = - (SQLUSMALLINT)atoi(neut_str); - break; - - case SQL_C_SLONG: - case SQL_C_LONG: - len = 4; - if (bind_size > 0) - *((SQLINTEGER *)rgbValueBindRow) = atol(neut_str); - else - *((SQLINTEGER *)rgbValue + bind_row) = atol(neut_str); - break; - - case SQL_C_ULONG: - len = 4; - if (bind_size > 0) - *((SQLUINTEGER *)rgbValueBindRow) = ATOI32U(neut_str); - else - *((SQLUINTEGER *)rgbValue + bind_row) = ATOI32U(neut_str); - break; - -#ifdef ODBCINT64 - case SQL_C_SBIGINT: - len = 8; - if (bind_size > 0) - *((SQLBIGINT *)rgbValueBindRow) = ATOI64(neut_str); - else - *((SQLBIGINT *)rgbValue + bind_row) = ATOI64(neut_str); - break; - - case SQL_C_UBIGINT: - len = 8; - if (bind_size > 0) - *((SQLUBIGINT *)rgbValueBindRow) = ATOI64U(neut_str); - else - *((SQLUBIGINT *)rgbValue + bind_row) = ATOI64U(neut_str); - break; - -#endif /* ODBCINT64 */ - case SQL_C_BINARY: - /* The following is for SQL_C_VARBOOKMARK */ - if (OPENSEARCH_TYPE_INT4 == field_type) { - UInt4 ival = ATOI32U(neut_str); - - MYLOG(OPENSEARCH_ALL, "SQL_C_VARBOOKMARK value=%d\n", ival); 
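[Editor's note: the SQL_C_VARBOOKMARK and UUID cases below share one pattern for fixed-size binary results: always report the full length, copy only when the whole value fits, otherwise signal truncation. Abstracted:

#include <string.h>

/* Returns 1 for the COPY_OK analogue, 0 for COPY_RESULT_TRUNCATED. */
static int copy_fixed(const void *val, size_t vlen,
                      void *out, size_t cap, size_t *len_out) {
    *len_out = vlen;                       /* length is reported regardless */
    if (cap < vlen)
        return 0;
    memcpy(out, val, vlen);
    return 1;
}
]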
- if (pcbValue) - *pcbValueBindRow = sizeof(ival); - if (cbValueMax >= (SQLLEN)sizeof(ival)) { - memcpy(rgbValueBindRow, &ival, sizeof(ival)); - return COPY_OK; - } else - return COPY_RESULT_TRUNCATED; - } else if (OPENSEARCH_TYPE_UUID == field_type) { - int rtn = char2guid(neut_str, &g); - - if (COPY_OK != rtn) - return rtn; - if (pcbValue) - *pcbValueBindRow = sizeof(g); - if (cbValueMax >= (SQLLEN)sizeof(g)) { - memcpy(rgbValueBindRow, &g, sizeof(g)); - return COPY_OK; - } else - return COPY_RESULT_TRUNCATED; - } else { - MYLOG(OPENSEARCH_DEBUG, - "couldn't convert the type %d to SQL_C_BINARY\n", - field_type); - return COPY_UNSUPPORTED_TYPE; - } - break; - case SQL_C_GUID: - - result = char2guid(neut_str, &g); - if (COPY_OK != result) { - MYLOG(OPENSEARCH_DEBUG, "Could not convert to SQL_C_GUID\n"); - return COPY_UNSUPPORTED_TYPE; - } - len = sizeof(g); - if (bind_size > 0) - *((SQLGUID *)rgbValueBindRow) = g; - else - *((SQLGUID *)rgbValue + bind_row) = g; - break; - case SQL_C_INTERVAL_YEAR: - case SQL_C_INTERVAL_MONTH: - case SQL_C_INTERVAL_YEAR_TO_MONTH: - case SQL_C_INTERVAL_DAY: - case SQL_C_INTERVAL_HOUR: - case SQL_C_INTERVAL_DAY_TO_HOUR: - case SQL_C_INTERVAL_MINUTE: - case SQL_C_INTERVAL_HOUR_TO_MINUTE: - case SQL_C_INTERVAL_SECOND: - case SQL_C_INTERVAL_DAY_TO_SECOND: - case SQL_C_INTERVAL_HOUR_TO_SECOND: - case SQL_C_INTERVAL_MINUTE_TO_SECOND: - interval2istruct( - fCType, precision, neut_str, - bind_size > 0 ? (SQL_INTERVAL_STRUCT *)rgbValueBindRow - : (SQL_INTERVAL_STRUCT *)rgbValue + bind_row); - break; - - default: - MYLOG(OPENSEARCH_DEBUG, "conversion to the type %d isn't supported\n", - fCType); - return COPY_UNSUPPORTED_TYPE; - } - } - - /* store the length of what was copied, if there's a place for it */ - if (pcbValue) - *pcbValueBindRow = len; - - if (result == COPY_OK && stmt->current_col >= 0) - gdata->gdata[stmt->current_col].data_left = 0; - return result; -} - -/*-------------------------------------------------------------------- - * Functions/Macros to get rid of query size limit. - * - * I always used the follwoing macros to convert from - * old_statement to new_statement. Please improve it - * if you have a better way. 
Hiroshi 2001/05/22 - *-------------------------------------------------------------------- - */ - -#define FLGP_USING_CURSOR (1L << 1) -#define FLGP_SELECT_INTO (1L << 2) -#define FLGP_SELECT_FOR_UPDATE_OR_SHARE (1L << 3) -#define FLGP_MULTIPLE_STATEMENT (1L << 5) -#define FLGP_SELECT_FOR_READONLY (1L << 6) -typedef struct _QueryParse { - const char *statement; - int statement_type; - size_t opos; - ssize_t from_pos; - ssize_t where_pos; - ssize_t stmt_len; - int in_status; - char escape_in_literal, prev_token_end; - const char *dollar_tag; - ssize_t taglen; - char token_curr[64]; - int token_len; - size_t declare_pos; - UInt4 flags, comment_level; - encoded_str encstr; -} QueryParse; - -enum { - QP_IN_IDENT_KEYWORD = 1L /* identifier or keyword */ - , - QP_IN_DQUOTE_IDENTIFIER = (1L << 1) /* "" */ - , - QP_IN_LITERAL = (1L << 2) /* '' */ - , - QP_IN_ESCAPE = (1L << 3) /* \ in literal */ - , - QP_IN_DOLLAR_QUOTE = (1L << 4) /* $...$ $...$ */ - , - QP_IN_COMMENT_BLOCK = (1L << 5) /* slash asterisk */ - , - QP_IN_LINE_COMMENT = (1L << 6) /* -- */ -}; - -#define QP_in_idle_status(qp) ((qp)->in_status == 0) - -#define QP_is_in(qp, status) (((qp)->in_status & status) != 0) -#define QP_enter(qp, status) ((qp)->in_status |= status) -#define QP_exit(qp, status) ((qp)->in_status &= (~status)) - -typedef enum { - RPM_REPLACE_PARAMS, - RPM_FAKE_PARAMS, - RPM_BUILDING_PREPARE_STATEMENT, - RPM_BUILDING_BIND_REQUEST -} ResolveParamMode; - -#define FLGB_INACCURATE_RESULT (1L << 4) -#define FLGB_CREATE_KEYSET (1L << 5) -#define FLGB_KEYSET_DRIVEN (1L << 6) -#define FLGB_CONVERT_LF (1L << 7) -#define FLGB_DISCARD_OUTPUT (1L << 8) -#define FLGB_BINARY_AS_POSSIBLE (1L << 9) -#define FLGB_LITERAL_EXTENSION (1L << 10) -#define FLGB_HEX_BIN_FORMAT (1L << 11) -#define FLGB_PARAM_CAST (1L << 12) -typedef struct _QueryBuild { - char *query_statement; - size_t str_alsize; - size_t npos; - SQLLEN current_row; - Int2 param_number; - Int2 dollar_number; - Int2 num_io_params; - Int2 num_output_params; - Int2 num_discard_params; - Int2 proc_return; - Int2 brace_level; - char parenthesize_the_first; - APDFields *apdopts; - IPDFields *ipdopts; - PutDataInfo *pdata; - size_t load_stmt_len; - size_t load_from_pos; - ResolveParamMode param_mode; - UInt4 flags; - int ccsc; - int errornumber; - const char *errormsg; - - ConnectionClass *conn; /* mainly needed for LO handling */ - StatementClass *stmt; /* needed to set error info in ENLARGE_.. */ -} QueryBuild; - -#define INIT_MIN_ALLOC 4096 - -/* - * New macros (Aceto) - *-------------------- - */ - -#define F_OldChar(qp) ((qp)->statement[(qp)->opos]) - -#define F_OldPtr(qp) ((qp)->statement + (qp)->opos) - -#define F_OldNext(qp) (++(qp)->opos) - -#define F_OldPrior(qp) (--(qp)->opos) - -#define F_OldPos(qp) (qp)->opos - -#define F_ExtractOldTo(qp, buf, ch, maxsize) \ - do { \ - size_t c = 0; \ - while ((qp)->statement[qp->opos] != '\0' \ - && (qp)->statement[qp->opos] != ch) { \ - if (c >= maxsize) \ - break; \ - buf[c++] = (qp)->statement[qp->opos++]; \ - } \ - if ((qp)->statement[qp->opos] == '\0') { \ - retval = SQL_ERROR; \ - goto cleanup; \ - } \ - buf[c] = '\0'; \ - } while (0) - -#define F_NewChar(qb) (qb->query_statement[(qb)->npos]) - -#define F_NewPtr(qb) ((qb)->query_statement + (qb)->npos) - -#define F_NewNext(qb) (++(qb)->npos) - -#define F_NewPos(qb) ((qb)->npos) - -/*---------- - * Terminate the stmt_with_params string with NULL. 
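[Editor's note: QP_enter, QP_exit, and QP_is_in above are plain bitmask operations over in_status; a tiny driver showing the scanner entering and leaving nested lexical states:

#include <stdio.h>

#define IN_LITERAL (1u << 2)               /* mirrors QP_IN_LITERAL */
#define IN_ESCAPE  (1u << 3)               /* mirrors QP_IN_ESCAPE */

int main(void) {
    unsigned in_status = 0;
    in_status |= IN_LITERAL;               /* QP_enter: saw an opening quote */
    in_status |= IN_ESCAPE;                /* QP_enter: saw a backslash */
    printf("in literal: %u\n", (in_status & IN_LITERAL) != 0);
    in_status &= ~IN_ESCAPE;               /* QP_exit: escape consumed */
    in_status &= ~IN_LITERAL;              /* QP_exit: closing quote */
    printf("idle: %u\n", in_status == 0);  /* QP_in_idle_status */
    return 0;
}
]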
- *---------- - */ -#define CVT_TERMINATE(qb) \ - do { \ - if (NULL == (qb)->query_statement) { \ - retval = SQL_ERROR; \ - goto cleanup; \ - } \ - (qb)->query_statement[(qb)->npos] = '\0'; \ - } while (0) - -/*---------- - * Append a data. - *---------- - */ -#define CVT_APPEND_DATA(qb, s, len) \ - do { \ - size_t newpos = (qb)->npos + len; \ - ENLARGE_NEWSTATEMENT((qb), newpos); \ - memcpy(&(qb)->query_statement[(qb)->npos], s, len); \ - (qb)->npos = newpos; \ - (qb)->query_statement[newpos] = '\0'; \ - } while (0) - -/*---------- - * Append a string. - *---------- - */ -#define CVT_APPEND_STR(qb, s) \ - do { \ - size_t len = strlen(s); \ - CVT_APPEND_DATA(qb, s, len); \ - } while (0) - -/*---------- - * Append a char. - *---------- - */ -#define CVT_APPEND_CHAR(qb, c) \ - do { \ - ENLARGE_NEWSTATEMENT(qb, (qb)->npos + 1); \ - (qb)->query_statement[(qb)->npos++] = c; \ - } while (0) - -int findIdentifier(const UCHAR *str, int ccsc, const UCHAR **next_token) { - int slen = -1; - encoded_str encstr; - UCHAR tchar; - BOOL dquote = FALSE; - - *next_token = NULL; - encoded_str_constr(&encstr, ccsc, (const char *)str); - for (tchar = (UCHAR)encoded_nextchar(&encstr); tchar; - tchar = (UCHAR)encoded_nextchar(&encstr)) { - if (MBCS_NON_ASCII(encstr)) - continue; - if (encstr.pos == 0) /* the first character */ - { - if (dquote = (IDENTIFIER_QUOTE == tchar), dquote) - continue; - if (!isalpha(tchar)) { - slen = 0; - if (IS_NOT_SPACE(tchar)) - *next_token = ENCODE_PTR(encstr); - break; - } - } - if (dquote) { - if (IDENTIFIER_QUOTE == tchar) { - tchar = (UCHAR)encoded_nextchar(&encstr); - if (IDENTIFIER_QUOTE == tchar) - continue; - slen = (int)encstr.pos; - break; - } - } else { - if (isalnum(tchar)) - continue; - switch (tchar) { - case '_': - case DOLLAR_QUOTE: - continue; - } - slen = (int)encstr.pos; - if (IS_NOT_SPACE(tchar)) - *next_token = ENCODE_PTR(encstr); - break; - } - } - if (slen < 0 && !dquote) - slen = (int)encstr.pos; - if (NULL == *next_token) { - for (; tchar; tchar = (UCHAR)encoded_nextchar(&encstr)) { - if (IS_NOT_SPACE((UCHAR)tchar)) { - *next_token = ENCODE_PTR(encstr); - break; - } - } - } - return slen; -} - -static opensearchNAME lower_or_remove_dquote(opensearchNAME nm, const UCHAR *src, int srclen, - int ccsc) { - int i, outlen; - char *tc; - UCHAR tchar; - BOOL idQuote; - encoded_str encstr; - - if (nm.name) - tc = realloc(nm.name, srclen + 1); - else - tc = malloc(srclen + 1); - if (!tc) { - NULL_THE_NAME(nm); - return nm; - } - nm.name = tc; - idQuote = (src[0] == IDENTIFIER_QUOTE); - encoded_str_constr(&encstr, ccsc, (const char *)src); - for (i = 0, tchar = (UCHAR)encoded_nextchar(&encstr), outlen = 0; - i < srclen; i++, tchar = (UCHAR)encoded_nextchar(&encstr)) { - if (MBCS_NON_ASCII(encstr)) { - tc[outlen++] = tchar; - continue; - } - if (idQuote) { - if (IDENTIFIER_QUOTE == tchar) { - if (0 == i) - continue; - if (i == srclen - 1) - continue; - i++; - tchar = (UCHAR)encoded_nextchar(&encstr); - } - tc[outlen++] = tchar; - } else { - tc[outlen++] = (char)tolower(tchar); - } - } - tc[outlen] = '\0'; - return nm; -} - -int eatTableIdentifiers(const UCHAR *str, int ccsc, opensearchNAME *table, - opensearchNAME *schema) { - int len; - const UCHAR *next_token; - const UCHAR *tstr = str; - - while (isspace(*tstr)) - tstr++; - - if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) - return len; /* table name doesn't exist */ - if (table) { - if (IDENTIFIER_QUOTE == *tstr) - *table = lower_or_remove_dquote(*table, tstr, len, ccsc); - else - STRN_TO_NAME(*table, 
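[Editor's note: CVT_APPEND_DATA above is an enlarge-then-copy append that keeps the statement NUL-terminated at all times. The same behavior as a function, with hypothetical names and a geometric growth policy standing in for ENLARGE_NEWSTATEMENT:

#include <stdlib.h>
#include <string.h>

struct qbuild { char *stmt; size_t cap, pos; };

static int qb_append(struct qbuild *qb, const char *s, size_t len) {
    if (qb->pos + len + 1 > qb->cap) {
        size_t ncap = qb->cap ? qb->cap * 2 : 4096;  /* INIT_MIN_ALLOC analogue */
        while (ncap < qb->pos + len + 1)
            ncap *= 2;
        char *p = realloc(qb->stmt, ncap);
        if (!p)
            return 0;
        qb->stmt = p;
        qb->cap = ncap;
    }
    memcpy(qb->stmt + qb->pos, s, len);
    qb->pos += len;
    qb->stmt[qb->pos] = '\0';              /* always terminated */
    return 1;
}
]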
tstr, len); - } - if (!next_token || '.' != *next_token || (int)(next_token - tstr) != len) - return (int)(next_token - str); /* table only */ - tstr = next_token + 1; - if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) - return -1; - if (table) { - if (schema) - MOVE_NAME(*schema, *table); - *table = lower_or_remove_dquote(*table, tstr, len, ccsc); - } - if (!next_token || '.' != *next_token || (int)(next_token - tstr) != len) - return (int)(next_token - str); /* schema.table */ - tstr = next_token + 1; - if ((len = findIdentifier(tstr, ccsc, &next_token)) <= 0) - return -1; - if (table) { - if (schema) - MOVE_NAME(*schema, *table); - *table = lower_or_remove_dquote(*table, tstr, len, ccsc); - } - return (int)(next_token - str); /* catalog.schema.table */ -} - -#define PT_TOKEN_IGNORE(pt) ((pt)->curchar_processed = TRUE) - -#define MIN_ALC_SIZE 128 - -/* - * With SQL_MAX_NUMERIC_LEN = 16, the highest representable number is - * 2^128 - 1, which fits in 39 digits. - */ -#define MAX_NUMERIC_DIGITS 39 - -/* - * Convert a string representation of a numeric into SQL_NUMERIC_STRUCT. - */ -static void parse_to_numeric_struct(const char *wv, SQL_NUMERIC_STRUCT *ns, - BOOL *overflow) { - int i, nlen, dig; - char calv[SQL_MAX_NUMERIC_LEN * 3]; - BOOL dot_exist; - - *overflow = FALSE; - - /* skip leading space */ - while (*wv && isspace((unsigned char)*wv)) - wv++; - - /* sign */ - ns->sign = 1; - if (*wv == '-') { - ns->sign = 0; - wv++; - } else if (*wv == '+') - wv++; - - /* skip leading zeros */ - while (*wv == '0') - wv++; - - /* read the digits into calv */ - ns->precision = 0; - ns->scale = 0; - for (nlen = 0, dot_exist = FALSE;; wv++) { - if (*wv == '.') { - if (dot_exist) - break; - dot_exist = TRUE; - } else if (*wv == '\0' || !isdigit((unsigned char)*wv)) - break; - else { - if (nlen >= (int)sizeof(calv)) { - if (dot_exist) - break; - else { - ns->scale--; - *overflow = TRUE; - continue; - } - } - if (dot_exist) - ns->scale++; - calv[nlen++] = *wv; - } - } - ns->precision = (SQLCHAR)nlen; - - /* Convert the decimal digits to binary */ - memset(ns->val, 0, sizeof(ns->val)); - for (dig = 0; dig < nlen; dig++) { - UInt4 carry; - - /* multiply the current value by 10, and add the next digit */ - carry = calv[dig] - '0'; - for (i = 0; i < (int)sizeof(ns->val); i++) { - UInt4 t; - - t = ((UInt4)ns->val[i]) * 10 + carry; - ns->val[i] = (unsigned char)(t & 0xFF); - carry = (t >> 8); - } - - if (carry != 0) - *overflow = TRUE; - } -} - -static BOOL convert_money(const char *s, char *sout, size_t soutmax) { - char in, decp = 0; - size_t i = 0, out = 0; - int num_in = -1, period_in = -1, comma_in = -1; - - for (i = 0; s[i]; i++) { - switch (in = s[i]) { - case '.': - if (period_in < 0) - period_in = (int)i; - break; - case ',': - if (comma_in < 0) - comma_in = (int)i; - break; - default: - if ('0' <= in && '9' >= in) - num_in = (int)i; - break; - } - } - if (period_in > comma_in) { - if (period_in >= num_in - 2) - decp = '.'; - } else if (comma_in >= 0 && comma_in >= num_in - 2) - decp = ','; - for (i = 0; s[i] && out + 1 < soutmax; i++) { - switch (in = s[i]) { - case '(': - case '-': - sout[out++] = '-'; - break; - default: - if (in >= '0' && in <= '9') - sout[out++] = in; - else if (in == decp) - sout[out++] = '.'; - } - } - sout[out] = '\0'; - return TRUE; -} - -/* Change linefeed to carriage-return/linefeed */ -size_t convert_linefeeds(const char *si, char *dst, size_t max, BOOL convlf, - BOOL *changed) { - size_t i = 0, out = 0; - - if (max == 0) - max = 0xffffffff; - *changed = FALSE; 
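[Editor's note: the digit loop in parse_to_numeric_struct above is long multiplication in base 256: multiply the little-endian val array by ten and add each decimal digit, propagating carries. A worked run for "258" (0x0102):

#include <stdio.h>
#include <string.h>

int main(void) {
    const char *digits = "258";
    unsigned char val[16] = {0};           /* SQL_MAX_NUMERIC_LEN bytes */
    for (const char *p = digits; *p; p++) {
        unsigned carry = (unsigned)(*p - '0');
        for (int i = 0; i < 16; i++) {
            unsigned t = (unsigned)val[i] * 10 + carry;
            val[i] = (unsigned char)(t & 0xFF);
            carry = t >> 8;                /* overflow spills into the next byte */
        }
    }
    printf("val[0]=0x%02X val[1]=0x%02X\n", val[0], val[1]); /* 0x02, 0x01 */
    return 0;
}
]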
- for (i = 0; si[i] && out < max - 1; i++) { - if (convlf && si[i] == '\n') { - /* Only add the carriage-return if needed */ - if (i > 0 && OPENSEARCH_CARRIAGE_RETURN == si[i - 1]) { - if (dst) - dst[out++] = si[i]; - else - out++; - continue; - } - *changed = TRUE; - - if (dst) { - dst[out++] = OPENSEARCH_CARRIAGE_RETURN; - dst[out++] = '\n'; - } else - out += 2; - } else { - if (dst) - dst[out++] = si[i]; - else - out++; - } - } - if (dst) - dst[out] = '\0'; - return out; -} - -static int conv_from_octal(const char *s) { - ssize_t i; - int y = 0; - - for (i = 1; i <= 3; i++) - y += (s[i] - '0') << (3 * (3 - i)); - - return y; -} - -/* convert octal escapes to bytes */ -static size_t convert_from_opensearchbinary(const char *value, char *rgbValue, - SQLLEN cbValueMax) { - UNUSED(cbValueMax); - size_t i, ilen = strlen(value); - size_t o = 0; - - for (i = 0; i < ilen;) { - if (value[i] == BYTEA_ESCAPE_CHAR) { - if (value[i + 1] == BYTEA_ESCAPE_CHAR) { - if (rgbValue) - rgbValue[o] = value[i]; - o++; - i += 2; - } else if (value[i + 1] == 'x') { - i += 2; - if (i < ilen) { - ilen -= i; - if (rgbValue) - opensearch_hex2bin(value + i, rgbValue + o, ilen); - o += ilen / 2; - } - break; - } else { - if (rgbValue) - rgbValue[o] = (char)conv_from_octal(&value[i]); - o++; - i += 4; - } - } else { - if (rgbValue) - rgbValue[o] = value[i]; - o++; - i++; - } - /** if (rgbValue) - MYLOG(OPENSEARCH_DEBUG, "i=%d, rgbValue[%d] = %d, %c\n", i, o, rgbValue[o], - rgbValue[o]); ***/ - } - - if (rgbValue) - rgbValue[o] = '\0'; /* extra protection */ - - MYLOG(OPENSEARCH_DEBUG, "in=" FORMAT_SIZE_T ", out = " FORMAT_SIZE_T "\n", ilen, o); - - return o; -} - -static const char *hextbl = "0123456789ABCDEF"; - -#define def_bin2hex(type) \ - (const char *src, type *dst, SQLLEN length) { \ - const char *src_wk; \ - UCHAR chr; \ - type *dst_wk; \ - BOOL backwards; \ - int i; \ - \ - backwards = FALSE; \ - if ((char *)dst < src) { \ - if ((char *)(dst + 2 * (length - 1)) > src + length - 1) \ - return -1; \ - } else if ((char *)dst < src + length) \ - backwards = TRUE; \ - if (backwards) { \ - for (i = 0, src_wk = src + length - 1, \ - dst_wk = dst + 2 * length - 1; \ - i < length; i++, src_wk--) { \ - chr = *src_wk; \ - *dst_wk-- = hextbl[chr % 16]; \ - *dst_wk-- = hextbl[chr >> 4]; \ - } \ - } else { \ - for (i = 0, src_wk = src, dst_wk = dst; i < length; \ - i++, src_wk++) { \ - chr = *src_wk; \ - *dst_wk++ = hextbl[chr >> 4]; \ - *dst_wk++ = hextbl[chr % 16]; \ - } \ - } \ - dst[2 * length] = '\0'; \ - return 2 * length * sizeof(type); \ - } -#ifdef UNICODE_SUPPORT -static SQLLEN es_bin2whex def_bin2hex(SQLWCHAR) -#endif /* UNICODE_SUPPORT */ - - static SQLLEN opensearch_bin2hex def_bin2hex(char) - - SQLLEN opensearch_hex2bin(const char *in, char *out, SQLLEN len) { - UCHAR chr; - const char *src_wk; - char *dst_wk; - SQLLEN i; - int val; - BOOL HByte = TRUE; - - for (i = 0, src_wk = in, dst_wk = out; i < len; i++, src_wk++) { - chr = *src_wk; - if (!chr) - break; - if (chr >= 'a' && chr <= 'f') - val = chr - 'a' + 10; - else if (chr >= 'A' && chr <= 'F') - val = chr - 'A' + 10; - else - val = chr - '0'; - if (HByte) - *dst_wk = (char)(val << 4); - else { - *dst_wk += (char)val; - dst_wk++; - } - HByte = !HByte; - } - *dst_wk = '\0'; - return len; -} - -static int convert_lo(StatementClass *stmt, const void *value, - SQLSMALLINT fCType, PTR rgbValue, SQLLEN cbValueMax, - SQLLEN *pcbValue) { - UNUSED(cbValueMax, pcbValue, rgbValue, fCType, value); - SC_set_error(stmt, STMT_EXEC_ERROR, - "Could not convert large 
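[Editor's note: convert_from_opensearchbinary above decodes classic bytea escapes, and conv_from_octal turns a three-digit octal escape into one byte by summing shifted digits. A worked example:

#include <stdio.h>

static int from_octal(const char *s) {     /* s points at the backslash */
    int y = 0;
    for (int i = 1; i <= 3; i++)
        y += (s[i] - '0') << (3 * (3 - i)); /* digits weighted 64, 8, 1 */
    return y;
}

int main(void) {
    printf("%c\n", from_octal("\\101"));   /* 1*64 + 0*8 + 1 = 65 = 'A' */
    return 0;
}
]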
object to c-type (large objects are " - "not supported).", - "convert_lo"); - return COPY_GENERAL_ERROR; -} diff --git a/sql-odbc/src/sqlodbc/convert.h b/sql-odbc/src/sqlodbc/convert.h deleted file mode 100644 index c833f46720..0000000000 --- a/sql-odbc/src/sqlodbc/convert.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef __CONVERT_H__ -#define __CONVERT_H__ - -#include "opensearch_odbc.h" - -#ifdef __cplusplus -extern "C" { -#endif -/* copy_and_convert results */ -#define COPY_OK 0 -#define COPY_UNSUPPORTED_TYPE 1 -#define COPY_UNSUPPORTED_CONVERSION 2 -#define COPY_RESULT_TRUNCATED 3 -#define COPY_GENERAL_ERROR 4 -#define COPY_NO_DATA_FOUND 5 -#define COPY_INVALID_STRING_CONVERSION 6 - -int copy_and_convert_field_bindinfo(StatementClass *stmt, OID field_type, - int atttypmod, void *value, int col); -int copy_and_convert_field(StatementClass *stmt, OID field_type, int atttypmod, - void *value, SQLSMALLINT fCType, int precision, - PTR rgbValue, SQLLEN cbValueMax, SQLLEN *pcbValue, - SQLLEN *pIndicator); - -SQLLEN opensearch_hex2bin(const char *in, char *out, SQLLEN len); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/sql-odbc/src/sqlodbc/descriptor.c b/sql-odbc/src/sqlodbc/descriptor.c deleted file mode 100644 index 3c668f24dc..0000000000 --- a/sql-odbc/src/sqlodbc/descriptor.c +++ /dev/null @@ -1,574 +0,0 @@ -#include "descriptor.h" - -#include -#include -#include - -#include "environ.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "qresult.h" -#include "statement.h" - -void TI_Destructor(TABLE_INFO **ti, int count) { - int i; - - MYLOG(OPENSEARCH_TRACE, "entering count=%d\n", count); - if (ti) { - for (i = 0; i < count; i++) { - if (ti[i]) { - COL_INFO *coli = ti[i]->col_info; - if (coli) { - MYLOG(OPENSEARCH_ALL, "!!!refcnt %p:%d -> %d\n", coli, coli->refcnt, - coli->refcnt - 1); - coli->refcnt--; - if (coli->refcnt <= 0 - && 0 == coli->acc_time) /* acc_time == 0 means the table - is dropped */ - free_col_info_contents(coli); - } - NULL_THE_NAME(ti[i]->schema_name); - NULL_THE_NAME(ti[i]->table_name); - NULL_THE_NAME(ti[i]->table_alias); - NULL_THE_NAME(ti[i]->bestitem); - NULL_THE_NAME(ti[i]->bestqual); - TI_Destroy_IH(ti[i]); - free(ti[i]); - ti[i] = NULL; - } - } - } -} - -void FI_Destructor(FIELD_INFO **fi, int count, BOOL freeFI) { - int i; - - MYLOG(OPENSEARCH_TRACE, "entering count=%d\n", count); - if (fi) { - for (i = 0; i < count; i++) { - if (fi[i]) { - NULL_THE_NAME(fi[i]->column_name); - NULL_THE_NAME(fi[i]->column_alias); - NULL_THE_NAME(fi[i]->schema_name); - NULL_THE_NAME(fi[i]->before_dot); - if (freeFI) { - free(fi[i]); - fi[i] = NULL; - } - } - } - if (freeFI) - free(fi); - } -} - -#define INIT_IH 32 - -void TI_Destroy_IH(TABLE_INFO *ti) { - InheritanceClass *ih; - unsigned int i; - - if (NULL == (ih = ti->ih)) - return; - for (i = 0; i < ih->count; i++) { - NULL_THE_NAME(ih->inf[i].fullTable); - } - free(ih); - ti->ih = NULL; -} - -void DC_Constructor(DescriptorClass *self, BOOL embedded, - StatementClass *stmt) { - UNUSED(stmt); - memset(self, 0, sizeof(DescriptorClass)); - self->deschd.embedded = (char)embedded; -} - -static void ARDFields_free(ARDFields *self) { - MYLOG(OPENSEARCH_TRACE, "entering %p bookmark=%p\n", self, self->bookmark); - if (self->bookmark) { - free(self->bookmark); - self->bookmark = NULL; - } - /* - * the memory pointed to by the bindings is not deallocated by the - * driver but by the application that uses that driver, so we don't - * have to care - */ - ARD_unbind_cols(self, TRUE); -} - 
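[Editor's note: TI_Destructor above releases shared column info by reference count, and acc_time == 0 marks a dropped table whose cached info must be freed as soon as the last owner lets go. A minimal sketch with a stub standing in for the driver's free helper:

#include <stdlib.h>

struct col_info { int refcnt; long acc_time; };

static void free_col_info_contents(struct col_info *ci) {
    free(ci);                              /* stands in for the driver's helper */
}

static void col_info_release(struct col_info *ci) {
    if (!ci)
        return;
    if (--ci->refcnt <= 0 && ci->acc_time == 0)
        free_col_info_contents(ci);        /* acc_time == 0: table was dropped */
}
]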
-static void APDFields_free(APDFields *self) { - if (self->bookmark) { - free(self->bookmark); - self->bookmark = NULL; - } - /* param bindings */ - APD_free_params(self, STMT_FREE_PARAMS_ALL); -} - -static void IRDFields_free(IRDFields *self) { - /* Free the parsed field information */ - if (self->fi) { - FI_Destructor(self->fi, self->allocated, TRUE); - self->fi = NULL; - } - self->allocated = 0; - self->nfields = 0; -} - -static void IPDFields_free(IPDFields *self) { - /* param bindings */ - IPD_free_params(self, STMT_FREE_PARAMS_ALL); -} - -void DC_Destructor(DescriptorClass *self) { - DescriptorHeader *deschd = &(self->deschd); - if (deschd->__error_message) { - free(deschd->__error_message); - deschd->__error_message = NULL; - } - if (deschd->opensearch_error) { - ER_Destructor(deschd->opensearch_error); - deschd->opensearch_error = NULL; - } - if (deschd->type_defined) { - switch (deschd->desc_type) { - case SQL_ATTR_APP_ROW_DESC: - ARDFields_free(&(self->ardf)); - break; - case SQL_ATTR_APP_PARAM_DESC: - APDFields_free(&(self->apdf)); - break; - case SQL_ATTR_IMP_ROW_DESC: - IRDFields_free(&(self->irdf)); - break; - case SQL_ATTR_IMP_PARAM_DESC: - IPDFields_free(&(self->ipdf)); - break; - } - } -} - -void InitializeEmbeddedDescriptor(DescriptorClass *self, StatementClass *stmt, - UInt4 desc_type) { - DescriptorHeader *deschd = &(self->deschd); - DC_Constructor(self, TRUE, stmt); - DC_get_conn(self) = SC_get_conn(stmt); - deschd->type_defined = TRUE; - deschd->desc_type = desc_type; - switch (desc_type) { - case SQL_ATTR_APP_ROW_DESC: - memset(&(self->ardf), 0, sizeof(ARDFields)); - stmt->ard = self; - break; - case SQL_ATTR_APP_PARAM_DESC: - memset(&(self->apdf), 0, sizeof(APDFields)); - stmt->apd = self; - break; - case SQL_ATTR_IMP_ROW_DESC: - memset(&(self->irdf), 0, sizeof(IRDFields)); - stmt->ird = self; - stmt->ird->irdf.stmt = stmt; - break; - case SQL_ATTR_IMP_PARAM_DESC: - memset(&(self->ipdf), 0, sizeof(IPDFields)); - stmt->ipd = self; - break; - } -} - -/* - * ARDFields initialize - */ -void InitializeARDFields(ARDFields *opt) { - memset(opt, 0, sizeof(ARDFields)); - opt->size_of_rowset = 1; - opt->bind_size = 0; /* default is to bind by column */ - opt->size_of_rowset_odbc2 = 1; -} -/* - * APDFields initialize - */ -void InitializeAPDFields(APDFields *opt) { - memset(opt, 0, sizeof(APDFields)); - opt->paramset_size = 1; - opt->param_bind_type = 0; /* default is to bind by column */ - opt->paramset_size_dummy = 1; /* dummy setting */ -} - -BindInfoClass *ARD_AllocBookmark(ARDFields *ardopts) { - if (!ardopts->bookmark) { - ardopts->bookmark = (BindInfoClass *)malloc(sizeof(BindInfoClass)); - memset(ardopts->bookmark, 0, sizeof(BindInfoClass)); - } - return ardopts->bookmark; -} - -#define DESC_INCREMENT 10 -char CC_add_descriptor(ConnectionClass *self, DescriptorClass *desc) { - int i; - int new_num_descs; - DescriptorClass **descs; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p, desc=%p\n", self, desc); - - for (i = 0; i < self->num_descs; i++) { - if (!self->descs[i]) { - DC_get_conn(desc) = self; - self->descs[i] = desc; - return TRUE; - } - } - /* no more room -- allocate more memory */ - new_num_descs = DESC_INCREMENT + self->num_descs; - descs = (DescriptorClass **)realloc( - self->descs, sizeof(DescriptorClass *) * new_num_descs); - if (!descs) - return FALSE; - self->descs = descs; - - memset(&self->descs[self->num_descs], 0, - sizeof(DescriptorClass *) * DESC_INCREMENT); - DC_get_conn(desc) = self; - self->descs[self->num_descs] = desc; - self->num_descs = 
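[Editor's note: CC_add_descriptor above reuses the first empty slot and otherwise grows the pointer table by a fixed increment, zero-filling the new tail so those slots read as empty. The same logic in isolation:

#include <stdlib.h>
#include <string.h>

#define DESC_INCREMENT 10

static int registry_add(void ***slots, int *nslots, void *item) {
    for (int i = 0; i < *nslots; i++) {
        if (!(*slots)[i]) {                /* reuse a freed slot first */
            (*slots)[i] = item;
            return 1;
        }
    }
    int ncount = *nslots + DESC_INCREMENT;
    void **p = realloc(*slots, sizeof(void *) * (size_t)ncount);
    if (!p)
        return 0;
    memset(&p[*nslots], 0, sizeof(void *) * DESC_INCREMENT);
    p[*nslots] = item;                     /* first new slot takes the item */
    *slots = p;
    *nslots = ncount;
    return 1;
}
]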
new_num_descs; - - return TRUE; -} - -/* - * This API allocates a Application descriptor. - */ -RETCODE SQL_API OPENSEARCHAPI_AllocDesc(HDBC ConnectionHandle, - SQLHDESC *DescriptorHandle) { - CSTR func = "OPENSEARCHAPI_AllocDesc"; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - RETCODE ret = SQL_SUCCESS; - DescriptorClass *desc; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - desc = (DescriptorClass *)malloc(sizeof(DescriptorClass)); - if (desc) { - memset(desc, 0, sizeof(DescriptorClass)); - DC_get_conn(desc) = conn; - if (CC_add_descriptor(conn, desc)) - *DescriptorHandle = desc; - else { - free(desc); - CC_set_error(conn, CONN_STMT_ALLOC_ERROR, - "Maximum number of descriptors exceeded", func); - ret = SQL_ERROR; - } - } else { - CC_set_error(conn, CONN_STMT_ALLOC_ERROR, - "No more memory ti allocate a further descriptor", func); - ret = SQL_ERROR; - } - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_FreeDesc(SQLHDESC DescriptorHandle) { - DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; - RETCODE ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - DC_Destructor(desc); - if (!desc->deschd.embedded) { - int i; - ConnectionClass *conn = DC_get_conn(desc); - - for (i = 0; i < conn->num_descs; i++) { - if (conn->descs[i] == desc) { - conn->descs[i] = NULL; - break; - } - } - free(desc); - } - return ret; -} - -static void BindInfoClass_copy(const BindInfoClass *src, - BindInfoClass *target) { - memcpy(target, src, sizeof(BindInfoClass)); -} -static void ARDFields_copy(const ARDFields *src, ARDFields *target) { - memcpy(target, src, sizeof(ARDFields)); - target->bookmark = NULL; - if (src->bookmark) { - BindInfoClass *bookmark = ARD_AllocBookmark(target); - if (bookmark) - BindInfoClass_copy(src->bookmark, bookmark); - } - if (src->allocated <= 0) { - target->allocated = 0; - target->bindings = NULL; - } else { - int i; - - target->bindings = malloc(target->allocated * sizeof(BindInfoClass)); - if (!target->bindings) - target->allocated = 0; - for (i = 0; i < target->allocated; i++) - BindInfoClass_copy(&src->bindings[i], &target->bindings[i]); - } -} - -static void ParameterInfoClass_copy(const ParameterInfoClass *src, - ParameterInfoClass *target) { - memcpy(target, src, sizeof(ParameterInfoClass)); -} -static void APDFields_copy(const APDFields *src, APDFields *target) { - memcpy(target, src, sizeof(APDFields)); - if (src->bookmark) { - target->bookmark = malloc(sizeof(ParameterInfoClass)); - if (target->bookmark) - ParameterInfoClass_copy(src->bookmark, target->bookmark); - } - if (src->allocated <= 0) { - target->allocated = 0; - target->parameters = NULL; - } else { - int i; - - target->parameters = - malloc(target->allocated * sizeof(ParameterInfoClass)); - if (!target->parameters) - target->allocated = 0; - for (i = 0; i < target->allocated; i++) - ParameterInfoClass_copy(&src->parameters[i], - &target->parameters[i]); - } -} - -static void ParameterImplClass_copy(const ParameterImplClass *src, - ParameterImplClass *target) { - memcpy(target, src, sizeof(ParameterImplClass)); -} -static void IPDFields_copy(const IPDFields *src, IPDFields *target) { - memcpy(target, src, sizeof(IPDFields)); - if (src->allocated <= 0) { - target->allocated = 0; - target->parameters = NULL; - } else { - int i; - - target->parameters = (ParameterImplClass *)malloc( - target->allocated * sizeof(ParameterImplClass)); - if (!target->parameters) - target->allocated = 0; - for (i = 0; i < target->allocated; i++) - ParameterImplClass_copy(&src->parameters[i], - 
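[Editor's note: the *Fields_copy helpers above all follow one pattern: shallow-copy the struct, then re-allocate every owned pointer so source and target never share storage, degrading to an empty array on allocation failure. A generic sketch with a hypothetical `fields` struct:

#include <stdlib.h>
#include <string.h>

struct fields { int allocated; int *bindings; };

static int fields_copy(const struct fields *src, struct fields *dst) {
    *dst = *src;                           /* shallow copy of scalar members */
    if (src->allocated <= 0 || !src->bindings) {
        dst->allocated = 0;
        dst->bindings = NULL;
        return 1;
    }
    dst->bindings = malloc((size_t)src->allocated * sizeof *dst->bindings);
    if (!dst->bindings) {                  /* mirror the driver: degrade to empty */
        dst->allocated = 0;
        return 0;
    }
    memcpy(dst->bindings, src->bindings,
           (size_t)src->allocated * sizeof *dst->bindings);
    return 1;
}
]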
&target->parameters[i]); - } -} - -RETCODE SQL_API OPENSEARCHAPI_CopyDesc(SQLHDESC SourceDescHandle, - SQLHDESC TargetDescHandle) { - RETCODE ret = SQL_ERROR; - DescriptorClass *src, *target; - DescriptorHeader *srchd, *targethd; - ARDFields *ard_src, *ard_tgt; - APDFields *apd_src, *apd_tgt; - IPDFields *ipd_src, *ipd_tgt; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - src = (DescriptorClass *)SourceDescHandle; - target = (DescriptorClass *)TargetDescHandle; - srchd = &(src->deschd); - targethd = &(target->deschd); - if (!srchd->type_defined) { - MYLOG(OPENSEARCH_ERROR, "source type undefined\n"); - DC_set_error(target, DESC_EXEC_ERROR, "source handle type undefined"); - return ret; - } - if (targethd->type_defined) { - MYLOG(OPENSEARCH_DEBUG, "source type=%d -> target type=%d\n", srchd->desc_type, - targethd->desc_type); - if (SQL_ATTR_IMP_ROW_DESC == targethd->desc_type) { - MYLOG(OPENSEARCH_DEBUG, "can't modify IRD\n"); - DC_set_error(target, DESC_EXEC_ERROR, "can't copy to IRD"); - return ret; - } else if (targethd->desc_type != srchd->desc_type) { - if (targethd->embedded) { - MYLOG(OPENSEARCH_DEBUG, "src type != target type\n"); - DC_set_error( - target, DESC_EXEC_ERROR, - "copying different type descriptor to embedded one"); - return ret; - } - } - DC_Destructor(target); - } - ret = SQL_SUCCESS; - switch (srchd->desc_type) { - case SQL_ATTR_APP_ROW_DESC: - MYLOG(OPENSEARCH_DEBUG, "src=%p target=%p type=%d", src, target, - srchd->desc_type); - if (!targethd->type_defined) { - targethd->desc_type = srchd->desc_type; - } - ard_src = &(src->ardf); - MYPRINTF(OPENSEARCH_DEBUG, - " rowset_size=" FORMAT_LEN " bind_size=" FORMAT_UINTEGER - " ope_ptr=%p off_ptr=%p\n", - ard_src->size_of_rowset, ard_src->bind_size, - ard_src->row_operation_ptr, ard_src->row_offset_ptr); - ard_tgt = &(target->ardf); - MYPRINTF(OPENSEARCH_DEBUG, " target=%p", ard_tgt); - ARDFields_copy(ard_src, ard_tgt); - MYPRINTF(OPENSEARCH_DEBUG, " offset_ptr=%p\n", ard_tgt->row_offset_ptr); - break; - case SQL_ATTR_APP_PARAM_DESC: - if (!targethd->type_defined) { - targethd->desc_type = srchd->desc_type; - } - apd_src = &(src->apdf); - apd_tgt = &(target->apdf); - APDFields_copy(apd_src, apd_tgt); - break; - case SQL_ATTR_IMP_PARAM_DESC: - if (!targethd->type_defined) { - targethd->desc_type = srchd->desc_type; - } - ipd_src = &(src->ipdf); - ipd_tgt = &(target->ipdf); - IPDFields_copy(ipd_src, ipd_tgt); - break; - default: - MYLOG(OPENSEARCH_DEBUG, "invalid descriptor handle type=%d\n", - srchd->desc_type); - DC_set_error(target, DESC_EXEC_ERROR, "invalid descriptor type"); - ret = SQL_ERROR; - } - - if (SQL_SUCCESS == ret) - targethd->type_defined = TRUE; - return ret; -} - -void DC_set_error(DescriptorClass *self, int errornumber, - const char *errormsg) { - DescriptorHeader *deschd = &(self->deschd); - if (deschd->__error_message) - free(deschd->__error_message); - deschd->__error_number = errornumber; - deschd->__error_message = errormsg ? strdup(errormsg) : NULL; -} -void DC_set_errormsg(DescriptorClass *self, const char *errormsg) { - DescriptorHeader *deschd = &(self->deschd); - if (deschd->__error_message) - free(deschd->__error_message); - deschd->__error_message = errormsg ? 
strdup(errormsg) : NULL; -} -const char *DC_get_errormsg(const DescriptorClass *desc) { - return desc->deschd.__error_message; -} -int DC_get_errornumber(const DescriptorClass *desc) { - return desc->deschd.__error_number; -} - -/* Map descriptor error numbers to SQLSTATE strings */ -static const struct { - int number; - const char ver3str[6]; - const char ver2str[6]; -} Descriptor_sqlstate[] = - - { - {DESC_ERROR_IN_ROW, "01S01", "01S01"}, - {DESC_OPTION_VALUE_CHANGED, "01S02", "01S02"}, - {DESC_OK, "00000", "00000"}, /* OK */ - {DESC_EXEC_ERROR, "HY000", "S1000"}, /* also a general error */ - {DESC_STATUS_ERROR, "HY010", "S1010"}, - {DESC_SEQUENCE_ERROR, "HY010", "S1010"}, /* Function sequence error */ - {DESC_NO_MEMORY_ERROR, "HY001", - "S1001"}, /* memory allocation failure */ - {DESC_COLNUM_ERROR, "07009", "S1002"}, /* invalid column number */ - {DESC_NO_STMTSTRING, "HY001", - "S1001"}, /* having no stmtstring is also a malloc problem */ - {DESC_ERROR_TAKEN_FROM_BACKEND, "HY000", "S1000"}, /* general error */ - {DESC_INTERNAL_ERROR, "HY000", "S1000"}, /* general error */ - {DESC_STILL_EXECUTING, "HY010", "S1010"}, - {DESC_NOT_IMPLEMENTED_ERROR, "HYC00", "S1C00"}, /* == 'driver not - * capable' */ - {DESC_BAD_PARAMETER_NUMBER_ERROR, "07009", "S1093"}, - {DESC_OPTION_OUT_OF_RANGE_ERROR, "HY092", "S1092"}, - {DESC_INVALID_COLUMN_NUMBER_ERROR, "07009", "S1002"}, - {DESC_RESTRICTED_DATA_TYPE_ERROR, "07006", "07006"}, - {DESC_INVALID_CURSOR_STATE_ERROR, "07005", "24000"}, - {DESC_CREATE_TABLE_ERROR, "42S01", "S0001"}, /* table already exists */ - {DESC_NO_CURSOR_NAME, "S1015", "S1015"}, - {DESC_INVALID_CURSOR_NAME, "34000", "34000"}, - {DESC_INVALID_ARGUMENT_NO, "HY024", - "S1009"}, /* invalid argument value */ - {DESC_ROW_OUT_OF_RANGE, "HY107", "S1107"}, - {DESC_OPERATION_CANCELLED, "HY008", "S1008"}, - {DESC_INVALID_CURSOR_POSITION, "HY109", "S1109"}, - {DESC_VALUE_OUT_OF_RANGE, "HY019", "22003"}, - {DESC_OPERATION_INVALID, "HY011", "S1011"}, - {DESC_PROGRAM_TYPE_OUT_OF_RANGE, "?????", "?????"}, - {DESC_BAD_ERROR, "08S01", "08S01"}, /* communication link failure */ - {DESC_INVALID_OPTION_IDENTIFIER, "HY092", "HY092"}, - {DESC_RETURN_NULL_WITHOUT_INDICATOR, "22002", "22002"}, - {DESC_INVALID_DESCRIPTOR_IDENTIFIER, "HY091", "HY091"}, - {DESC_OPTION_NOT_FOR_THE_DRIVER, "HYC00", "HYC00"}, - {DESC_FETCH_OUT_OF_RANGE, "HY106", "S1106"}, - {DESC_COUNT_FIELD_INCORRECT, "07002", "07002"}, -}; - -static OpenSearch_ErrorInfo *DC_create_errorinfo(const DescriptorClass *self) { - const DescriptorHeader *deschd = &(self->deschd); - OpenSearch_ErrorInfo *error; - ConnectionClass *conn; - EnvironmentClass *env; - Int4 errornum; - BOOL env_is_odbc3 = TRUE; - - if (deschd->opensearch_error) - return deschd->opensearch_error; - errornum = deschd->__error_number; - error = ER_Constructor(errornum, deschd->__error_message); - if (!error) - return error; - conn = DC_get_conn(self); - if (conn && (env = (EnvironmentClass *)conn->henv, env)) - env_is_odbc3 = EN_is_odbc3(env); - errornum -= LOWEST_DESC_ERROR; - if (errornum < 0 - || errornum >= (int)(sizeof(Descriptor_sqlstate) - / sizeof(Descriptor_sqlstate[0]))) - errornum = 1 - LOWEST_DESC_ERROR; - STRCPY_FIXED(error->sqlstate, env_is_odbc3 - ? Descriptor_sqlstate[errornum].ver3str - : Descriptor_sqlstate[errornum].ver2str); - return error; -} -void DC_log_error(const char *func, const char *desc, - const DescriptorClass *self) { -#define nullcheck(a) (a ?
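
DC_create_errorinfo above indexes Descriptor_sqlstate by errornum - LOWEST_DESC_ERROR, clamps out-of-range numbers onto the general-error row, and then picks the ODBC 3.x or 2.x state depending on the environment. A reduced sketch of that lookup, with an abbreviated table and hypothetical names:

```c
#include <stdio.h>

#define LOWEST_ERR (-2)   /* table rows are ordered by error number */

static const struct {
    int number;
    const char v3[6];
    const char v2[6];
} states[] = {
    {-2, "01S01", "01S01"},
    {-1, "01S02", "01S02"},
    { 0, "00000", "00000"},
    { 1, "HY000", "S1000"},   /* general error: the clamp target */
};

static const char *sqlstate_for(int errnum, int env_is_odbc3) {
    int idx = errnum - LOWEST_ERR;
    if (idx < 0 || idx >= (int)(sizeof(states) / sizeof(states[0])))
        idx = 1 - LOWEST_ERR;                 /* fall back to general error */
    return env_is_odbc3 ? states[idx].v3 : states[idx].v2;
}

int main(void) {
    printf("%s\n", sqlstate_for(99, 1));      /* out of range -> HY000 */
    return 0;
}
```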
a : "(NULL)") - if (self) { - MYLOG(OPENSEARCH_DEBUG, - "DESCRIPTOR ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", - func, desc, self->deschd.__error_number, - nullcheck(self->deschd.__error_message)); - } -} - -/* Returns the next SQL error information. */ -RETCODE SQL_API OPENSEARCHAPI_DescError(SQLHDESC hdesc, SQLSMALLINT RecNumber, - SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, - SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg, UWORD flag) { - /* CC: return an error of a hdesc */ - DescriptorClass *desc = (DescriptorClass *)hdesc; - DescriptorHeader *deschd = &(desc->deschd); - - MYLOG(OPENSEARCH_TRACE, "entering RecN=%hd\n", RecNumber); - deschd->opensearch_error = DC_create_errorinfo(desc); - return ER_ReturnError(deschd->opensearch_error, RecNumber, szSqlState, pfNativeError, - szErrorMsg, cbErrorMsgMax, pcbErrorMsg, flag); -} diff --git a/sql-odbc/src/sqlodbc/descriptor.h b/sql-odbc/src/sqlodbc/descriptor.h deleted file mode 100644 index 59831d8d97..0000000000 --- a/sql-odbc/src/sqlodbc/descriptor.h +++ /dev/null @@ -1,254 +0,0 @@ -#ifndef __DESCRIPTOR_H__ -#define __DESCRIPTOR_H__ - -#include "opensearch_odbc.h" - -#ifdef WIN32 -#pragma warning(push) -#pragma warning(disable : 4201) // nonstandard extension used: nameless - // struct/union warning -#endif // WIN32 - -typedef struct InheritanceClass { - UInt4 allocated; - UInt4 count; - OID cur_tableoid; - opensearchNAME cur_fullTable; - struct { - OID tableoid; - opensearchNAME fullTable; - } inf[1]; -} InheritanceClass; - -enum { - TI_UPDATABLE = 1L, - TI_HASOIDS_CHECKED = (1L << 1), - TI_HASOIDS = (1L << 2), - TI_COLATTRIBUTE = (1L << 3), - TI_HASSUBCLASS = (1L << 4) -}; -typedef struct { - OID table_oid; - COL_INFO *col_info; /* cached SQLColumns info for this table */ - opensearchNAME schema_name; - opensearchNAME table_name; - opensearchNAME table_alias; - opensearchNAME bestitem; - opensearchNAME bestqual; - UInt4 flags; - InheritanceClass *ih; -} TABLE_INFO; -#define TI_set_updatable(ti) (ti->flags |= TI_UPDATABLE) -#define TI_is_updatable(ti) (0 != (ti->flags & TI_UPDATABLE)) -#define TI_no_updatable(ti) (ti->flags &= (~TI_UPDATABLE)) -#define TI_set_hasoids_checked(ti) (ti->flags |= TI_HASOIDS_CHECKED) -#define TI_checked_hasoids(ti) (0 != (ti->flags & TI_HASOIDS)) -#define TI_set_hasoids(ti) (ti->flags |= TI_HASOIDS) -#define TI_has_oids(ti) (0 != (ti->flags & TI_HASOIDS)) -#define TI_set_has_no_oids(ti) (ti->flags &= (~TI_HASOIDS)) -#define TI_set_hassubclass(ti) (ti->flags |= TI_HASSUBCLASS) -#define TI_has_subclass(ti) (0 != (ti->flags & TI_HASSUBCLASS)) -#define TI_set_has_no_subclass(ti) (ti->flags &= (~TI_HASSUBCLASS)) -void TI_Destructor(TABLE_INFO **, int); -void TI_Destroy_IH(TABLE_INFO *ti); - -enum { - FIELD_INITIALIZED = 0, - FIELD_PARSING = 1L, - FIELD_TEMP_SET = (1L << 1), - FIELD_COL_ATTRIBUTE = (1L << 2), - FIELD_PARSED_OK = (1L << 3), - FIELD_PARSED_INCOMPLETE = (1L << 4) -}; -typedef struct { - char flag; - char updatable; - Int2 attnum; - opensearchNAME schema_name; - TABLE_INFO *ti; /* to resolve explicit table names */ - opensearchNAME column_name; - opensearchNAME column_alias; - char nullable; - char auto_increment; - char func; - char columnkey; - int column_size; /* precision in 2.x */ - int decimal_digits; /* scale in 2.x */ - int display_size; - SQLLEN length; - OID columntype; - OID basetype; /* may be the basetype when the column type is a domain */ - int typmod; - char expr; - char quote; - char dquote; - char numeric; - opensearchNAME before_dot; -} 
FIELD_INFO; -Int4 FI_precision(const FIELD_INFO *); -void FI_Destructor(FIELD_INFO **, int, BOOL freeFI); -#define FI_is_applicable(fi) \ - (NULL != fi && (fi->flag & (FIELD_PARSED_OK | FIELD_COL_ATTRIBUTE)) != 0) -#define FI_type(fi) (0 == (fi)->basetype ? (fi)->columntype : (fi)->basetype) - -typedef struct DescriptorHeader_ { - ConnectionClass *conn_conn; - char embedded; - char type_defined; - UInt4 desc_type; - UInt4 error_row; /* 1-based row */ - UInt4 error_index; /* 1-based index */ - Int4 __error_number; - char *__error_message; - OpenSearch_ErrorInfo *opensearch_error; -} DescriptorHeader; - -/* - * ARD and APD are(must be) of the same format - */ -struct ARDFields_ { - SQLLEN size_of_rowset; /* for ODBC3 fetch operation */ - SQLUINTEGER bind_size; /* size of each structure if using - * Row-wise Binding */ - SQLUSMALLINT *row_operation_ptr; - SQLULEN *row_offset_ptr; - BindInfoClass *bookmark; - BindInfoClass *bindings; - SQLSMALLINT allocated; - SQLLEN size_of_rowset_odbc2; /* for SQLExtendedFetch */ -}; - -/* - * APD must be of the same format as ARD - */ -struct APDFields_ { - SQLLEN paramset_size; /* really an SQLINTEGER type */ - SQLUINTEGER param_bind_type; /* size of each structure if using - * Row-wise Parameter Binding */ - SQLUSMALLINT *param_operation_ptr; - SQLULEN *param_offset_ptr; - ParameterInfoClass *bookmark; /* dummy item to fit APD to ARD */ - ParameterInfoClass *parameters; - SQLSMALLINT allocated; - SQLLEN paramset_size_dummy; /* dummy item to fit APD to ARD */ -}; - -struct IRDFields_ { - StatementClass *stmt; - SQLULEN *rowsFetched; - SQLUSMALLINT *rowStatusArray; - UInt4 nfields; - SQLSMALLINT allocated; - FIELD_INFO **fi; -}; - -struct IPDFields_ { - SQLULEN *param_processed_ptr; - SQLUSMALLINT *param_status_ptr; - SQLSMALLINT allocated; - ParameterImplClass *parameters; -}; - -/*** -typedef struct -{ - DescriptorHeader deschd; - ARDFields ardopts; -} ARDClass; -typedef struct -{ - DescriptorHeader deschd; - APDFields apdopts; -} APDClass; -typedef struct -{ - DescriptorHeader deschd; - IRDFields irdopts; -} IRDClass; -typedef struct -{ - DescriptorHeader deschd; - IPDFields ipdopts; -} IPDClass; -***/ -typedef struct { - DescriptorHeader deschd; - union { - ARDFields ardf; - APDFields apdf; - IRDFields irdf; - IPDFields ipdf; - }; -} DescriptorClass; - -#define DC_get_conn(a) ((a)->deschd.conn_conn) -#define DC_get_desc_type(a) ((a)->deschd.desc_type) -#define DC_get_embedded(a) ((a)->deschd.embedded) - -void InitializeEmbeddedDescriptor(DescriptorClass *, StatementClass *stmt, - UInt4 desc_type); -void DC_Destructor(DescriptorClass *desc); -void InitializeARDFields(ARDFields *self); -void InitializeAPDFields(APDFields *self); -/* void InitializeIRDFields(IRDFields *self); -void InitializeIPDFiedls(IPDFields *self); */ -BindInfoClass *ARD_AllocBookmark(ARDFields *self); -void ARD_unbind_cols(ARDFields *self, BOOL freeall); -void APD_free_params(APDFields *self, char option); -void IPD_free_params(IPDFields *self, char option); -RETCODE DC_set_stmt(DescriptorClass *desc, StatementClass *stmt); -void DC_set_error(DescriptorClass *desc, int errornumber, const char *errormsg); -void DC_set_errormsg(DescriptorClass *desc, const char *errormsg); -OpenSearch_ErrorInfo *DC_get_error(DescriptorClass *self); -int DC_get_errornumber(const DescriptorClass *self); -const char *DC_get_errormsg(const DescriptorClass *self); -void DC_log_error(const char *func, const char *desc, - const DescriptorClass *self); - -/* Error numbers about descriptor handle */ -enum { - 
LOWEST_DESC_ERROR = -2 - /* minus means warning/notice message */ - , - DESC_ERROR_IN_ROW = -2, - DESC_OPTION_VALUE_CHANGED = -1, - DESC_OK = 0, - DESC_EXEC_ERROR, - DESC_STATUS_ERROR, - DESC_SEQUENCE_ERROR, - DESC_NO_MEMORY_ERROR, - DESC_COLNUM_ERROR, - DESC_NO_STMTSTRING, - DESC_ERROR_TAKEN_FROM_BACKEND, - DESC_INTERNAL_ERROR, - DESC_STILL_EXECUTING, - DESC_NOT_IMPLEMENTED_ERROR, - DESC_BAD_PARAMETER_NUMBER_ERROR, - DESC_OPTION_OUT_OF_RANGE_ERROR, - DESC_INVALID_COLUMN_NUMBER_ERROR, - DESC_RESTRICTED_DATA_TYPE_ERROR, - DESC_INVALID_CURSOR_STATE_ERROR, - DESC_CREATE_TABLE_ERROR, - DESC_NO_CURSOR_NAME, - DESC_INVALID_CURSOR_NAME, - DESC_INVALID_ARGUMENT_NO, - DESC_ROW_OUT_OF_RANGE, - DESC_OPERATION_CANCELLED, - DESC_INVALID_CURSOR_POSITION, - DESC_VALUE_OUT_OF_RANGE, - DESC_OPERATION_INVALID, - DESC_PROGRAM_TYPE_OUT_OF_RANGE, - DESC_BAD_ERROR, - DESC_INVALID_OPTION_IDENTIFIER, - DESC_RETURN_NULL_WITHOUT_INDICATOR, - DESC_INVALID_DESCRIPTOR_IDENTIFIER, - DESC_OPTION_NOT_FOR_THE_DRIVER, - DESC_FETCH_OUT_OF_RANGE, - DESC_COUNT_FIELD_INCORRECT -}; - -#ifdef WIN32 -#pragma warning(pop) -#endif // WIN32 - -#endif /* __DESCRIPTOR_H__ */ diff --git a/sql-odbc/src/sqlodbc/dlg_specific.c b/sql-odbc/src/sqlodbc/dlg_specific.c deleted file mode 100644 index f210bf0257..0000000000 --- a/sql-odbc/src/sqlodbc/dlg_specific.c +++ /dev/null @@ -1,506 +0,0 @@ -#include "dlg_specific.h" - -#include - -#include "misc.h" -#include "opensearch_apifunc.h" - -#define NULL_IF_NULL(a) ((a) ? ((const char *)(a)) : "(null)") - -static void encode(const opensearchNAME, char *out, int outlen); -static opensearchNAME decode(const char *in); -static opensearchNAME decode_or_remove_braces(const char *in); - -#define OVR_EXTRA_BITS \ - (BIT_FORCEABBREVCONNSTR | BIT_FAKE_MSS | BIT_BDE_ENVIRONMENT \ - | BIT_CVT_NULL_DATE | BIT_ACCESSIBLE_ONLY | BIT_IGNORE_ROUND_TRIP_TIME \ - | BIT_DISABLE_KEEPALIVE) - -#define OPENING_BRACKET '{' -#define CLOSING_BRACKET '}' - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wembedded-directive" -#endif // __APPLE__ -void makeConnectString(char *connect_string, const ConnInfo *ci, UWORD len) { - UNUSED(len); - char got_dsn = (ci->dsn[0] != '\0'); - char encoded_item[LARGE_REGISTRY_LEN]; - char *connsetStr = NULL; - char *esoptStr = NULL; -#ifdef _HANDLE_ENLIST_IN_DTC_ - char xaOptStr[16]; -#endif - ssize_t hlen, nlen, olen; - - encode(ci->password, encoded_item, sizeof(encoded_item)); - /* fundamental info */ - nlen = MAX_CONNECT_STRING; - olen = snprintf( - connect_string, nlen, - "%s=%s;" INI_SERVER - "=%s;" - "database=OpenSearch;" INI_PORT "=%s;" INI_USERNAME_ABBR - "=%s;" INI_PASSWORD_ABBR "=%s;" INI_AUTH_MODE "=%s;" INI_REGION - "=%s;" INI_SSL_USE "=%d;" INI_SSL_HOST_VERIFY "=%d;" INI_LOG_LEVEL - "=%d;" INI_LOG_OUTPUT "=%s;" INI_TIMEOUT "=%s;" INI_FETCH_SIZE "=%s;", - got_dsn ? "DSN" : "DRIVER", got_dsn ? 
ci->dsn : ci->drivername, - ci->server, ci->port, ci->username, encoded_item, ci->authtype, - ci->region, (int)ci->use_ssl, (int)ci->verify_server, - (int)ci->drivers.loglevel, ci->drivers.output_dir, - ci->response_timeout, ci->fetch_size); - if (olen < 0 || olen >= nlen) { - connect_string[0] = '\0'; - return; - } - - /* extra info */ - hlen = strlen(connect_string); - nlen = MAX_CONNECT_STRING - hlen; - if (olen < 0 || olen >= nlen) /* failed */ - connect_string[0] = '\0'; - - if (NULL != connsetStr) - free(connsetStr); - if (NULL != esoptStr) - free(esoptStr); -} -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - -BOOL get_DSN_or_Driver(ConnInfo *ci, const char *attribute, const char *value) { - BOOL found = TRUE; - - if (stricmp(attribute, "DSN") == 0) - STRCPY_FIXED(ci->dsn, value); - else if (stricmp(attribute, "driver") == 0) - STRCPY_FIXED(ci->drivername, value); - else - found = FALSE; - - return found; -} - -BOOL copyConnAttributes(ConnInfo *ci, const char *attribute, - const char *value) { - BOOL found = TRUE, printed = FALSE; - if (stricmp(attribute, "DSN") == 0) - STRCPY_FIXED(ci->dsn, value); - else if (stricmp(attribute, "driver") == 0) - STRCPY_FIXED(ci->drivername, value); - else if ((stricmp(attribute, INI_HOST) == 0) - || (stricmp(attribute, INI_SERVER) == 0)) - STRCPY_FIXED(ci->server, value); - else if (stricmp(attribute, INI_PORT) == 0) - STRCPY_FIXED(ci->port, value); - else if ((stricmp(attribute, INI_USERNAME) == 0) - || (stricmp(attribute, INI_USERNAME_ABBR) == 0)) - STRCPY_FIXED(ci->username, value); - else if ((stricmp(attribute, INI_PASSWORD) == 0) - || (stricmp(attribute, INI_PASSWORD_ABBR) == 0)) { - ci->password = decode_or_remove_braces(value); -#ifndef FORCE_PASSWORD_DISPLAY - MYLOG(OPENSEARCH_DEBUG, "key='%s' value='xxxxxxxx'\n", attribute); - printed = TRUE; -#endif - } else if (stricmp(attribute, INI_AUTH_MODE) == 0) - STRCPY_FIXED(ci->authtype, value); - else if (stricmp(attribute, INI_REGION) == 0) - STRCPY_FIXED(ci->region, value); - else if (stricmp(attribute, INI_SSL_USE) == 0) - ci->use_ssl = (char)atoi(value); - else if (stricmp(attribute, INI_SSL_HOST_VERIFY) == 0) - ci->verify_server = (char)atoi(value); - else if (stricmp(attribute, INI_LOG_LEVEL) == 0) - ci->drivers.loglevel = (char)atoi(value); - else if (stricmp(attribute, INI_LOG_OUTPUT) == 0) - STRCPY_FIXED(ci->drivers.output_dir, value); - else if (stricmp(attribute, INI_TIMEOUT) == 0) - STRCPY_FIXED(ci->response_timeout, value); - else if (stricmp(attribute, INI_FETCH_SIZE) == 0) - STRCPY_FIXED(ci->fetch_size, value); - else - found = FALSE; - - if (!printed) - MYLOG(OPENSEARCH_DEBUG, "key='%s' value='%s'%s\n", attribute, value, - found ?
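
makeConnectString above relies on snprintf's return value to detect truncation: a negative result, or one greater than or equal to the buffer size, means the keyword list did not fit, and the function hands back an empty string rather than a silently clipped one. A minimal sketch of that guard, with hypothetical names:

```c
#include <stdio.h>

static int build_connstr(char *out, size_t outlen, const char *dsn,
                         const char *server, const char *port) {
    int olen = snprintf(out, outlen, "DSN=%s;server=%s;port=%s;",
                        dsn, server, port);
    if (olen < 0 || (size_t)olen >= outlen) {
        out[0] = '\0';   /* did not fit: return empty, as the driver does */
        return 0;
    }
    return 1;
}

int main(void) {
    char buf[64];
    return build_connstr(buf, sizeof(buf), "MyDSN", "localhost", "9200") ? 0 : 1;
}
```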
NULL_STRING : " not found"); - - return found; -} - -static void getCiDefaults(ConnInfo *ci) { - strncpy(ci->desc, DEFAULT_DESC, MEDIUM_REGISTRY_LEN); - strncpy(ci->drivername, DEFAULT_DRIVERNAME, MEDIUM_REGISTRY_LEN); - strncpy(ci->server, DEFAULT_HOST, MEDIUM_REGISTRY_LEN); - strncpy(ci->port, DEFAULT_PORT, SMALL_REGISTRY_LEN); - strncpy(ci->response_timeout, DEFAULT_RESPONSE_TIMEOUT_STR, - SMALL_REGISTRY_LEN); - strncpy(ci->fetch_size, DEFAULT_FETCH_SIZE_STR, - SMALL_REGISTRY_LEN); - strncpy(ci->authtype, DEFAULT_AUTHTYPE, MEDIUM_REGISTRY_LEN); - if (ci->password.name != NULL) - free(ci->password.name); - ci->password.name = NULL; - strncpy(ci->username, DEFAULT_USERNAME, MEDIUM_REGISTRY_LEN); - strncpy(ci->region, DEFAULT_REGION, MEDIUM_REGISTRY_LEN); - ci->use_ssl = DEFAULT_USE_SSL; - ci->verify_server = DEFAULT_VERIFY_SERVER; - strcpy(ci->drivers.output_dir, "C:\\"); -} - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wembedded-directive" -#endif // __APPLE__ -int getDriverNameFromDSN(const char *dsn, char *driver_name, int namelen) { -#ifdef WIN32 - return SQLGetPrivateProfileString(ODBC_DATASOURCES, dsn, NULL_STRING, - driver_name, namelen, ODBC_INI); -#else /* WIN32 */ - int cnt; - - cnt = SQLGetPrivateProfileString(dsn, "Driver", NULL_STRING, driver_name, - namelen, ODBC_INI); - if (!driver_name[0]) - return cnt; - if (strchr(driver_name, '/') || /* path to the driver */ - strchr(driver_name, '.')) { - driver_name[0] = '\0'; - return 0; - } - return cnt; -#endif /* WIN32 */ -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -} - -void getDriversDefaults(const char *drivername, GLOBAL_VALUES *comval) { - if (NULL != drivername) - STR_TO_NAME(comval->drivername, drivername); -} - -void getDSNinfo(ConnInfo *ci, const char *configDrvrname) { - char *DSN = ci->dsn; - char temp[LARGE_REGISTRY_LEN]; - const char *drivername; - getCiDefaults(ci); - drivername = ci->drivername; - if (DSN[0] == '\0') { - if (drivername[0] == '\0') /* adding new DSN via configDSN */ - { - if (configDrvrname) - drivername = configDrvrname; - strncpy_null(DSN, INI_DSN, sizeof(ci->dsn)); - } - /* else DSN-less connections */ - } - - /* brute-force chop off trailing blanks... */ - while (*(DSN + strlen(DSN) - 1) == ' ') - *(DSN + strlen(DSN) - 1) = '\0'; - - if (!drivername[0] && DSN[0]) - getDriverNameFromDSN(DSN, (char *)drivername, sizeof(ci->drivername)); - MYLOG(OPENSEARCH_DEBUG, "drivername=%s\n", drivername); - if (!drivername[0]) - drivername = INVALID_DRIVER; - getDriversDefaults(drivername, &(ci->drivers)); - - if (DSN[0] == '\0') - return; - - /* Proceed with getting info for the given DSN.
*/ - if (SQLGetPrivateProfileString(DSN, INI_SERVER, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->server, temp); - if (SQLGetPrivateProfileString(DSN, INI_HOST, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->server, temp); - if (SQLGetPrivateProfileString(DSN, INI_PORT, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->port, temp); - if (SQLGetPrivateProfileString(DSN, INI_USERNAME, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->username, temp); - if (SQLGetPrivateProfileString(DSN, INI_USERNAME_ABBR, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->username, temp); - if (SQLGetPrivateProfileString(DSN, INI_PASSWORD, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - ci->password = decode(temp); - if (SQLGetPrivateProfileString(DSN, INI_PASSWORD_ABBR, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - ci->password = decode(temp); - if (SQLGetPrivateProfileString(DSN, INI_AUTH_MODE, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->authtype, temp); - if (SQLGetPrivateProfileString(DSN, INI_REGION, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->region, temp); - if (SQLGetPrivateProfileString(DSN, INI_SSL_USE, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - ci->use_ssl = (char)atoi(temp); - if (SQLGetPrivateProfileString(DSN, INI_SSL_HOST_VERIFY, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - ci->verify_server = (char)atoi(temp); - if (SQLGetPrivateProfileString(DSN, INI_LOG_LEVEL, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - ci->drivers.loglevel = (char)atoi(temp); - if (SQLGetPrivateProfileString(DSN, INI_LOG_OUTPUT, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->drivers.output_dir, temp); - if (SQLGetPrivateProfileString(DSN, INI_TIMEOUT, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->response_timeout, temp); - if (SQLGetPrivateProfileString(DSN, INI_FETCH_SIZE, NULL_STRING, temp, - sizeof(temp), ODBC_INI) - > 0) - STRCPY_FIXED(ci->fetch_size, temp); - STR_TO_NAME(ci->drivers.drivername, drivername); -} -/* - * This function writes any global parameters (that can be manipulated) - * to the ODBCINST.INI portion of the registry - */ -int write_Ci_Drivers(const char *fileName, const char *sectionName, - const GLOBAL_VALUES *comval) { - UNUSED(comval, fileName, sectionName); - - // We don't need anything here - return 0; -} - -int writeDriversDefaults(const char *drivername, const GLOBAL_VALUES *comval) { - return write_Ci_Drivers(ODBCINST_INI, drivername, comval); -} - -/* This is for datasource based options only */ -void writeDSNinfo(const ConnInfo *ci) { - const char *DSN = ci->dsn; - char encoded_item[MEDIUM_REGISTRY_LEN], temp[SMALL_REGISTRY_LEN]; - - SQLWritePrivateProfileString(DSN, INI_HOST, ci->server, ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_PORT, ci->port, ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_USERNAME, ci->username, ODBC_INI); - encode(ci->password, encoded_item, sizeof(encoded_item)); - SQLWritePrivateProfileString(DSN, INI_PASSWORD, encoded_item, ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_AUTH_MODE, ci->authtype, ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_REGION, ci->region, ODBC_INI); - ITOA_FIXED(temp, ci->use_ssl); - SQLWritePrivateProfileString(DSN, INI_SSL_USE, temp, ODBC_INI); - ITOA_FIXED(temp, ci->verify_server); - SQLWritePrivateProfileString(DSN, INI_SSL_HOST_VERIFY, temp, ODBC_INI); - 
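
Each getDSNinfo read above follows the same shape: look the key up in the DSN's section of the initialization file and keep the compiled-in default unless SQLGetPrivateProfileString returns a non-empty value. A sketch of that per-key pattern; the helper name is hypothetical, and the "ODBC.INI" literal mirrors the Windows ODBC_INI value defined later in this patch:

```c
#include <string.h>
#ifdef _WIN32
#include <windows.h>   /* must precede odbcinst.h on Windows */
#endif
#include <odbcinst.h>  /* ODBC installer API: SQLGetPrivateProfileString */

static void read_key_with_default(const char *dsn, const char *key,
                                  char *dst, size_t dstlen) {
    char temp[256];
    /* return value > 0 means the key exists and has a non-empty value */
    if (SQLGetPrivateProfileString(dsn, key, "", temp,
                                   (int)sizeof(temp), "ODBC.INI") > 0) {
        strncpy(dst, temp, dstlen - 1);   /* key present: override default */
        dst[dstlen - 1] = '\0';
    }
    /* else: keep whatever default the caller already placed in dst */
}
```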
ITOA_FIXED(temp, ci->drivers.loglevel); - SQLWritePrivateProfileString(DSN, INI_LOG_LEVEL, temp, ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_LOG_OUTPUT, ci->drivers.output_dir, - ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_TIMEOUT, ci->response_timeout, - ODBC_INI); - SQLWritePrivateProfileString(DSN, INI_FETCH_SIZE, ci->fetch_size, - ODBC_INI); - -} - -static void encode(const opensearchNAME in, char *out, int outlen) { - size_t i, ilen = 0; - int o = 0; - char inc, *ins; - - if (NAME_IS_NULL(in)) { - out[0] = '\0'; - return; - } - ins = GET_NAME(in); - ilen = strlen(ins); - for (i = 0; i < ilen && o < outlen - 1; i++) { - inc = ins[i]; - if (inc == '+') { - if (o + 2 >= outlen) - break; - snprintf(&out[o], outlen - o, "%%2B"); - o += 3; - } else if (isspace((unsigned char)inc)) - out[o++] = '+'; - else if (!isalnum((unsigned char)inc)) { - if (o + 2 >= outlen) - break; - snprintf(&out[o], outlen - o, "%%%02x", inc); - o += 3; - } else - out[o++] = inc; - } - out[o++] = '\0'; -} - -static unsigned int conv_from_hex(const char *s) { - int i, y = 0, val; - - for (i = 1; i <= 2; i++) { - if (s[i] >= 'a' && s[i] <= 'f') - val = s[i] - 'a' + 10; - else if (s[i] >= 'A' && s[i] <= 'F') - val = s[i] - 'A' + 10; - else - val = s[i] - '0'; - - y += val << (4 * (2 - i)); - } - - return y; -} - -static opensearchNAME decode(const char *in) { - size_t i, ilen = strlen(in), o = 0; - char inc, *outs; - opensearchNAME out; - - INIT_NAME(out); - if (0 == ilen) { - return out; - } - outs = (char *)malloc(ilen + 1); - if (!outs) - return out; - for (i = 0; i < ilen; i++) { - inc = in[i]; - if (inc == '+') - outs[o++] = ' '; - else if (inc == '%') { - snprintf(&outs[o], ilen + 1 - o, "%c", conv_from_hex(&in[i])); - o++; - i += 2; - } else - outs[o++] = inc; - } - outs[o++] = '\0'; - STR_TO_NAME(out, outs); - free(outs); - return out; -} - -/* - * Remove braces if the input value is enclosed by braces({}). - * Otherwise decode the input value.
- */ -static opensearchNAME decode_or_remove_braces(const char *in) { - if (OPENING_BRACKET == in[0]) { - size_t inlen = strlen(in); - if (CLOSING_BRACKET == in[inlen - 1]) /* enclosed with braces */ - { - int i; - const char *istr, *eptr; - char *ostr; - opensearchNAME out; - - INIT_NAME(out); - if (NULL == (ostr = (char *)malloc(inlen))) - return out; - eptr = in + inlen - 1; - for (istr = in + 1, i = 0; *istr && istr < eptr; i++) { - if (CLOSING_BRACKET == istr[0] && CLOSING_BRACKET == istr[1]) - istr++; - ostr[i] = *(istr++); - } - ostr[i] = '\0'; - SET_NAME_DIRECTLY(out, ostr); - return out; - } - } - return decode(in); -} - -void CC_conninfo_release(ConnInfo *conninfo) { - NULL_THE_NAME(conninfo->password); - finalize_globals(&conninfo->drivers); -} - -void CC_conninfo_init(ConnInfo *conninfo, UInt4 option) { - MYLOG(OPENSEARCH_TRACE, "entering opt=%d\n", option); - - if (0 != (CLEANUP_FOR_REUSE & option)) - CC_conninfo_release(conninfo); - memset(conninfo, 0, sizeof(ConnInfo)); - - strncpy(conninfo->dsn, DEFAULT_DSN, MEDIUM_REGISTRY_LEN); - strncpy(conninfo->desc, DEFAULT_DESC, MEDIUM_REGISTRY_LEN); - strncpy(conninfo->drivername, DEFAULT_DRIVERNAME, MEDIUM_REGISTRY_LEN); - strncpy(conninfo->server, DEFAULT_HOST, MEDIUM_REGISTRY_LEN); - strncpy(conninfo->port, DEFAULT_PORT, SMALL_REGISTRY_LEN); - strncpy(conninfo->response_timeout, DEFAULT_RESPONSE_TIMEOUT_STR, - SMALL_REGISTRY_LEN); - strncpy(conninfo->fetch_size, DEFAULT_FETCH_SIZE_STR, - SMALL_REGISTRY_LEN); - strncpy(conninfo->authtype, DEFAULT_AUTHTYPE, MEDIUM_REGISTRY_LEN); - if (conninfo->password.name != NULL) - free(conninfo->password.name); - conninfo->password.name = NULL; - strncpy(conninfo->username, DEFAULT_USERNAME, MEDIUM_REGISTRY_LEN); - strncpy(conninfo->region, DEFAULT_REGION, MEDIUM_REGISTRY_LEN); - conninfo->use_ssl = DEFAULT_USE_SSL; - conninfo->verify_server = DEFAULT_VERIFY_SERVER; - - if (0 != (INIT_GLOBALS & option)) - init_globals(&(conninfo->drivers)); -} - -void init_globals(GLOBAL_VALUES *glbv) { - memset(glbv, 0, sizeof(*glbv)); - glbv->loglevel = DEFAULT_LOGLEVEL; - glbv->output_dir[0] = '\0'; -} - -#define CORR_STRCPY(item) strncpy_null(to->item, from->item, sizeof(to->item)) -#define CORR_VALCPY(item) (to->item = from->item) - -void copy_globals(GLOBAL_VALUES *to, const GLOBAL_VALUES *from) { - memset(to, 0, sizeof(*to)); - NAME_TO_NAME(to->drivername, from->drivername); - CORR_VALCPY(loglevel); -} - -void finalize_globals(GLOBAL_VALUES *glbv) { - NULL_THE_NAME(glbv->drivername); -} - -#undef CORR_STRCPY -#undef CORR_VALCPY -#define CORR_STRCPY(item) strncpy_null(ci->item, sci->item, sizeof(ci->item)) -#define CORR_VALCPY(item) (ci->item = sci->item) - -void CC_copy_conninfo(ConnInfo *ci, const ConnInfo *sci) { - memset(ci, 0, sizeof(ConnInfo)); - CORR_STRCPY(dsn); - CORR_STRCPY(desc); - CORR_STRCPY(drivername); - CORR_STRCPY(server); - CORR_STRCPY(username); - CORR_STRCPY(authtype); - CORR_STRCPY(region); - NAME_TO_NAME(ci->password, sci->password); - CORR_VALCPY(use_ssl); - CORR_VALCPY(verify_server); - CORR_STRCPY(port); - CORR_STRCPY(response_timeout); - CORR_STRCPY(fetch_size); - copy_globals(&(ci->drivers), &(sci->drivers)); -} -#undef CORR_STRCPY -#undef CORR_VALCPY diff --git a/sql-odbc/src/sqlodbc/dlg_specific.h b/sql-odbc/src/sqlodbc/dlg_specific.h deleted file mode 100644 index 9b67d84cd5..0000000000 --- a/sql-odbc/src/sqlodbc/dlg_specific.h +++ /dev/null @@ -1,198 +0,0 @@ -#ifndef __DLG_SPECIFIC_H__ -#define __DLG_SPECIFIC_H__ - -#include "opensearch_odbc.h" - -#ifdef WIN32 -#include - 
-#include "resource.h" -#endif - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -/* Unknown data type sizes */ -#define UNKNOWNS_AS_MAX 0 -#define UNKNOWNS_AS_DONTKNOW 1 -#define UNKNOWNS_AS_LONGEST 2 - -/* ODBC initialization files */ -#ifndef WIN32 -#define ODBC_INI ".odbc.ini" -#define ODBCINST_INI "odbcinst.ini" -#else -#define ODBC_INI "ODBC.INI" -#define ODBCINST_INI "ODBCINST.INI" -#endif - -#define ODBC_DATASOURCES "ODBC Data Sources" -#define INVALID_DRIVER " @@driver not exist@@ " - -#ifdef UNICODE_SUPPORT -#define INI_DSN "OpenSearch35W" -#else -#define INI_DSN "OpenSearch30" -#endif /* UNICODE_SUPPORT */ - -#define INI_HOST "host" -#define INI_SERVER "server" -#define INI_PORT "port" -#define INI_USERNAME "user" -#define INI_USERNAME_ABBR "UID" -#define INI_PASSWORD "password" -#define INI_PASSWORD_ABBR "PWD" -#define INI_AUTH_MODE "auth" -#define INI_REGION "region" -#define INI_SSL_USE "useSSL" -#define INI_SSL_HOST_VERIFY "hostnameVerification" -#define INI_LOG_LEVEL "logLevel" -#define INI_LOG_OUTPUT "logOutput" -#define INI_TIMEOUT "responseTimeout" -#define INI_FETCH_SIZE "fetchSize" - -#define DEFAULT_FETCH_SIZE -1 -#define DEFAULT_FETCH_SIZE_STR "-1" -#define DEFAULT_RESPONSE_TIMEOUT 10 // Seconds -#define DEFAULT_RESPONSE_TIMEOUT_STR "10" -#define DEFAULT_AUTHTYPE "NONE" -#define DEFAULT_HOST "" -#define DEFAULT_PORT "" -#define DEFAULT_USERNAME "" -#define DEFAULT_PASSWORD "" -#define DEFAULT_DRIVERNAME "opensearchodbc" -#define DEFAULT_DESC "" -#define DEFAULT_DSN "" -#define DEFAULT_REGION "" -#define DEFAULT_VERIFY_SERVER 1 - -#define AUTHTYPE_NONE "NONE" -#define AUTHTYPE_BASIC "BASIC" -#define AUTHTYPE_IAM "AWS_SIGV4" - -#ifdef _HANDLE_ENLIST_IN_DTC_ -#define INI_XAOPT "XaOpt" -#endif /* _HANDLE_ENLIST_IN_DTC_ */ -/* Bit representation for abbreviated connection strings */ -#define BIT_LFCONVERSION (1L) -#define BIT_UPDATABLECURSORS (1L << 1) -/* #define BIT_DISALLOWPREMATURE (1L<<2) */ -#define BIT_UNIQUEINDEX (1L << 3) -#define BIT_UNKNOWN_DONTKNOW (1L << 6) -#define BIT_UNKNOWN_ASMAX (1L << 7) -#define BIT_COMMLOG (1L << 10) -#define BIT_DEBUG (1L << 11) -#define BIT_PARSE (1L << 12) -#define BIT_CANCELASFREESTMT (1L << 13) -#define BIT_USEDECLAREFETCH (1L << 14) -#define BIT_READONLY (1L << 15) -#define BIT_TEXTASLONGVARCHAR (1L << 16) -#define BIT_UNKNOWNSASLONGVARCHAR (1L << 17) -#define BIT_BOOLSASCHAR (1L << 18) -#define BIT_ROWVERSIONING (1L << 19) -#define BIT_SHOWSYSTEMTABLES (1L << 20) -#define BIT_SHOWOIDCOLUMN (1L << 21) -#define BIT_FAKEOIDINDEX (1L << 22) -#define BIT_TRUEISMINUS1 (1L << 23) -#define BIT_BYTEAASLONGVARBINARY (1L << 24) -#define BIT_USESERVERSIDEPREPARE (1L << 25) -#define BIT_LOWERCASEIDENTIFIER (1L << 26) - -#define EFFECTIVE_BIT_COUNT 28 - -/* Mask for extra options */ -#define BIT_FORCEABBREVCONNSTR 1L -#define BIT_FAKE_MSS (1L << 1) -#define BIT_BDE_ENVIRONMENT (1L << 2) -#define BIT_CVT_NULL_DATE (1L << 3) -#define BIT_ACCESSIBLE_ONLY (1L << 4) -#define BIT_IGNORE_ROUND_TRIP_TIME (1L << 5) -#define BIT_DISABLE_KEEPALIVE (1L << 6) - -/* Connection Defaults */ -#define DEFAULT_READONLY 1 -#define DEFAULT_PROTOCOL \ - "7.4" /* the latest protocol is \ \ - * the default */ -#define DEFAULT_USEDECLAREFETCH 0 -#define DEFAULT_TEXTASLONGVARCHAR 0 -#define DEFAULT_UNKNOWNSASLONGVARCHAR 0 -#define DEFAULT_BOOLSASCHAR 0 -#define DEFAULT_UNIQUEINDEX 1 /* dont recognize */ -#define DEFAULT_LOGLEVEL OPENSEARCH_WARNING -#define DEFAULT_USE_SSL 0 -#define DEFAULT_TRUST_SELF_SIGNED 0 -#define DEFAULT_AUTH_MODE "NONE" -#define 
DEFAULT_REGION "" -#define DEFAULT_CERTIFICATE "" -#define DEFAULT_KEY "" -#define DEFAULT_UNKNOWNSIZES UNKNOWNS_AS_MAX - -#define DEFAULT_FAKEOIDINDEX 0 -#define DEFAULT_SHOWOIDCOLUMN 0 -#define DEFAULT_ROWVERSIONING 0 -#define DEFAULT_SHOWSYSTEMTABLES 0 /* dont show system tables */ -#define DEFAULT_LIE 0 -#define DEFAULT_PARSE 0 - -#define DEFAULT_CANCELASFREESTMT 0 - -#define DEFAULT_EXTRASYSTABLEPREFIXES "" - -#define DEFAULT_TRUEISMINUS1 0 -#define DEFAULT_UPDATABLECURSORS 1 -#ifdef WIN32 -#define DEFAULT_LFCONVERSION 1 -#else -#define DEFAULT_LFCONVERSION 0 -#endif /* WIN32 */ -#define DEFAULT_INT8AS 0 -#define DEFAULT_BYTEAASLONGVARBINARY 0 -#define DEFAULT_USESERVERSIDEPREPARE 1 -#define DEFAULT_LOWERCASEIDENTIFIER 0 -#define DEFAULT_NUMERIC_AS (-101) - -#ifdef _HANDLE_ENLIST_IN_DTC_ -#define DEFAULT_XAOPT 1 -#endif /* _HANDLE_ENLIST_IN_DTC_ */ - -/* for CC_DSN_info */ -#define CONN_DONT_OVERWRITE 0 -#define CONN_OVERWRITE 1 - -struct authmode { - int authtype_id; - const char *authtype_str; -}; -const struct authmode *GetAuthModes(); - -/* prototypes */ - -#ifdef WIN32 -void SetDlgStuff(HWND hdlg, const ConnInfo *ci); -void GetDlgStuff(HWND hdlg, ConnInfo *ci); -INT_PTR CALLBACK advancedOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam); -INT_PTR CALLBACK logOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, LPARAM lParam); -#endif /* WIN32 */ - -int write_Ci_Drivers(const char *fileName, const char *sectionName, - const GLOBAL_VALUES *); -int writeDriversDefaults(const char *drivername, const GLOBAL_VALUES *); -void writeDSNinfo(const ConnInfo *ci); -void getDriversDefaults(const char *drivername, GLOBAL_VALUES *); -void getDSNinfo(ConnInfo *ci, const char *configDrvrname); -void makeConnectString(char *connect_string, const ConnInfo *ci, UWORD); -BOOL get_DSN_or_Driver(ConnInfo *ci, const char *attribute, const char *value); -BOOL copyConnAttributes(ConnInfo *ci, const char *attribute, const char *value); -int getDriverNameFromDSN(const char *dsn, char *driver_name, int namelen); -UInt4 getExtraOptions(const ConnInfo *); -void SetAuthenticationVisibility(HWND hdlg, const struct authmode *am); -const struct authmode *GetCurrentAuthMode(HWND hdlg); -int *GetLogLevels(); -int GetCurrentLogLevel(HWND hdlg); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* __DLG_SPECIFIC_H__ */ diff --git a/sql-odbc/src/sqlodbc/dlg_wingui.c b/sql-odbc/src/sqlodbc/dlg_wingui.c deleted file mode 100644 index 376a1438aa..0000000000 --- a/sql-odbc/src/sqlodbc/dlg_wingui.c +++ /dev/null @@ -1,278 +0,0 @@ -#ifdef WIN32 - -#include "dlg_specific.h" -#include "opensearch_apifunc.h" -#include "loadlib.h" -#include "misc.h" // strncpy_null -#include "win_setup.h" -#ifdef _HANDLE_ENLIST_IN_DTC_ -#include "connexp.h" -#include "xalibname.h" -#endif /* _HANDLE_ENLIST_IN_DTC_ */ - -#define HTTP_PREFIX "http://" -#define HTTPS_PREFIX "https://" - -#define AUTHMODE_CNT 3 -#define LOGLEVEL_CNT 8 -extern HINSTANCE s_hModule; - -int loglevels[LOGLEVEL_CNT] = { - {IDS_LOGTYPE_OFF}, - {IDS_LOGTYPE_FATAL}, - {IDS_LOGTYPE_ERROR}, - {IDS_LOGTYPE_WARNING}, - {IDS_LOGTYPE_INFO}, - {IDS_LOGTYPE_DEBUG}, - {IDS_LOGTYPE_TRACE}, - {IDS_LOGTYPE_ALL}}; - -static const struct authmode authmodes[AUTHMODE_CNT] = { - {IDS_AUTHTYPE_NONE, AUTHTYPE_IAM}, - {IDS_AUTHTYPE_BASIC, AUTHTYPE_BASIC}, - {IDS_AUTHTYPE_IAM, AUTHTYPE_NONE}}; - -const struct authmode *GetCurrentAuthMode(HWND hdlg) { - unsigned int ams_cnt = 0; - const struct authmode *ams = GetAuthModes(&ams_cnt); - unsigned int authtype_selection_idx = 
(unsigned int)(DWORD)SendMessage( - GetDlgItem(hdlg, IDC_AUTHTYPE), CB_GETCURSEL, 0L, 0L); - if (authtype_selection_idx >= ams_cnt) - authtype_selection_idx = 0; - return &ams[authtype_selection_idx]; -} - -int *GetLogLevels(unsigned int *count) { - *count = LOGLEVEL_CNT; - return loglevels; -} - -int GetCurrentLogLevel(HWND hdlg) { - unsigned int log_cnt = 0; - int *log = GetLogLevels(&log_cnt); - unsigned int loglevel_selection_idx = (unsigned int)(DWORD)SendMessage( - GetDlgItem(hdlg, IDC_LOG_LEVEL), CB_GETCURSEL, 0L, 0L); - if (loglevel_selection_idx >= log_cnt) - loglevel_selection_idx = 0; - return log[loglevel_selection_idx]; -} - - -void SetAuthenticationVisibility(HWND hdlg, const struct authmode *am) { - if (strcmp(am->authtype_str, AUTHTYPE_BASIC) == 0) { - EnableWindow(GetDlgItem(hdlg, IDC_USER), TRUE); - EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), TRUE); - EnableWindow(GetDlgItem(hdlg, IDC_REGION), FALSE); - } else if (strcmp(am->authtype_str, AUTHTYPE_IAM) == 0) { - EnableWindow(GetDlgItem(hdlg, IDC_USER), FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_REGION), TRUE); - } else { - EnableWindow(GetDlgItem(hdlg, IDC_USER), FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_PASSWORD), FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_REGION), FALSE); - } -} - -void SetDlgStuff(HWND hdlg, const ConnInfo *ci) { - // Connection - SetDlgItemText(hdlg, IDC_DRIVER_VERSION, "V."OPENSEARCHDRIVERVERSION); - SetDlgItemText(hdlg, IDC_DSNAME, ci->dsn); - SetDlgItemText(hdlg, IDC_SERVER, ci->server); - SetDlgItemText(hdlg, IDC_PORT, ci->port); - - // Authentication - int authtype_selection_idx = 0; - unsigned int ams_cnt = 0; - const struct authmode *ams = GetAuthModes(&ams_cnt); - char buff[MEDIUM_REGISTRY_LEN + 1]; - for (unsigned int i = 0; i < ams_cnt; i++) { - LoadString(GetWindowInstance(hdlg), ams[i].authtype_id, buff, - MEDIUM_REGISTRY_LEN); - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_ADDSTRING, 0, (WPARAM)buff); - if (!stricmp(ci->authtype, ams[i].authtype_str)) { - authtype_selection_idx = i; - } - } - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, - ams[authtype_selection_idx].authtype_id, (WPARAM)0); - SetDlgItemText(hdlg, IDC_USER, ci->username); - SetDlgItemText(hdlg, IDC_PASSWORD, SAFE_NAME(ci->password)); - SetDlgItemText(hdlg, IDC_REGION, ci->region); -} - -static void GetNameField(HWND hdlg, int item, opensearchNAME *name) { - char medium_buf[MEDIUM_REGISTRY_LEN + 1]; - GetDlgItemText(hdlg, item, medium_buf, sizeof(medium_buf)); - STR_TO_NAME((*name), medium_buf); -} - -void GetDlgStuff(HWND hdlg, ConnInfo *ci) { - // Connection - GetDlgItemText(hdlg, IDC_DESC, ci->desc, sizeof(ci->desc)); - GetDlgItemText(hdlg, IDC_SERVER, ci->server, sizeof(ci->server)); - GetDlgItemText(hdlg, IDC_PORT, ci->port, sizeof(ci->port)); - - // Authentication - GetDlgItemText(hdlg, IDC_USER, ci->username, sizeof(ci->username)); - GetNameField(hdlg, IDC_PASSWORD, &ci->password); - GetDlgItemText(hdlg, IDC_REGION, ci->region, sizeof(ci->region)); - const struct authmode *am = GetCurrentAuthMode(hdlg); - SetAuthenticationVisibility(hdlg, am); - STRCPY_FIXED(ci->authtype, am->authtype_str); - -} - -const struct authmode *GetAuthModes(unsigned int *count) { - *count = AUTHMODE_CNT; - return authmodes; -} -static void getDriversDefaultsOfCi(const ConnInfo *ci, GLOBAL_VALUES *glbv) { - const char *drivername = NULL; - - if (ci->drivername[0]) - drivername = ci->drivername; - else if (NAME_IS_VALID(ci->drivers.drivername)) - drivername = 
SAFE_NAME(ci->drivers.drivername); - if (drivername && drivername[0]) - getDriversDefaults(drivername, glbv); - else - getDriversDefaults(INVALID_DRIVER, glbv); -} - -/** - * @brief Initializes and closes the advanced dialog box. - * - * @param hdlg : Handle to dialog box - * @param wMsg : Dialog box command message - * @param wParam : Handle to the control to receive keyboard focus - * @param lParam : Dialog connection data - * @return INT_PTR : Returns true on successful command of advanced dialog box - */ -INT_PTR CALLBACK advancedOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam) { - switch (wMsg) { - case WM_INITDIALOG: { - SetWindowLongPtr(hdlg, DWLP_USER, lParam); - ConnInfo *ci = (ConnInfo *)lParam; - - // To avoid cases in which the "UseSSL" flag is different from a specified server protocol - if (strncmp(HTTP_PREFIX, ci->server, strlen(HTTP_PREFIX)) == 0) { - CheckDlgButton(hdlg, IDC_USESSL, FALSE); - CheckDlgButton(hdlg, IDC_HOST_VER, FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_USESSL), FALSE); - EnableWindow(GetDlgItem(hdlg, IDC_HOST_VER), FALSE); - } else if (strncmp(HTTPS_PREFIX, ci->server, strlen(HTTPS_PREFIX)) == 0) { - CheckDlgButton(hdlg, IDC_USESSL, TRUE); - CheckDlgButton(hdlg, IDC_HOST_VER, ci->verify_server); - EnableWindow(GetDlgItem(hdlg, IDC_USESSL), FALSE); - } else { - CheckDlgButton(hdlg, IDC_USESSL, ci->use_ssl); - CheckDlgButton(hdlg, IDC_HOST_VER, ci->verify_server); - } - - SetDlgItemText(hdlg, IDC_CONNTIMEOUT, ci->response_timeout); - SetDlgItemText(hdlg, IDC_FETCH_SIZE, ci->fetch_size); - break; - } - - case WM_COMMAND: { - ConnInfo *ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - switch (GET_WM_COMMAND_ID(wParam, lParam)) { - case IDOK: - // Get Dialog Values - ci->use_ssl = (IsDlgButtonChecked(hdlg, IDC_USESSL) ? 1 : 0); - ci->verify_server = (IsDlgButtonChecked(hdlg, IDC_HOST_VER) ? 
1 : 0); - GetDlgItemText(hdlg, IDC_CONNTIMEOUT, ci->response_timeout, - sizeof(ci->response_timeout)); - GetDlgItemText(hdlg, IDC_FETCH_SIZE, ci->fetch_size, - sizeof(ci->fetch_size)); - case IDCANCEL: - EndDialog(hdlg, FALSE); - return TRUE; - } - } - } - return FALSE; -} - -INT_PTR CALLBACK logOptionsProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam) { - switch (wMsg) { - case WM_INITDIALOG: { - ConnInfo *ci = (ConnInfo *)lParam; - SetWindowLongPtr(hdlg, DWLP_USER, lParam); - - // Logging - int loglevel_selection_idx = 0; - unsigned int log_cnt = 0; - int *log = GetLogLevels(&log_cnt); - char buff[MEDIUM_REGISTRY_LEN + 1]; - for (unsigned int i = 0; i < log_cnt; i++) { - LoadString(GetWindowInstance(hdlg), log[i], buff, - MEDIUM_REGISTRY_LEN); - SendDlgItemMessage(hdlg, IDC_LOG_LEVEL, CB_ADDSTRING, 0, - (WPARAM)buff); - if ((unsigned int)ci->drivers.loglevel == i) { - loglevel_selection_idx = i; - } - } - SendDlgItemMessage(hdlg, IDC_LOG_LEVEL, CB_SETCURSEL, - loglevel_selection_idx, (WPARAM)0); - SetDlgItemText(hdlg, IDC_LOG_PATH, ci->drivers.output_dir); - break; - } - - case WM_COMMAND: { - ConnInfo *ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - switch (GET_WM_COMMAND_ID(wParam, lParam)) { - case IDOK: { - // Get Dialog Values - int log = GetCurrentLogLevel(hdlg); - switch (log) { - case IDS_LOGTYPE_OFF: - ci->drivers.loglevel = (char)OPENSEARCH_OFF; - break; - case IDS_LOGTYPE_FATAL: - ci->drivers.loglevel = (char)OPENSEARCH_FATAL; - break; - case IDS_LOGTYPE_ERROR: - ci->drivers.loglevel = (char)OPENSEARCH_ERROR; - break; - case IDS_LOGTYPE_WARNING: - ci->drivers.loglevel = (char)OPENSEARCH_WARNING; - break; - case IDS_LOGTYPE_INFO: - ci->drivers.loglevel = (char)OPENSEARCH_INFO; - break; - case IDS_LOGTYPE_DEBUG: - ci->drivers.loglevel = (char)OPENSEARCH_DEBUG; - break; - case IDS_LOGTYPE_TRACE: - ci->drivers.loglevel = (char)OPENSEARCH_TRACE; - break; - case IDS_LOGTYPE_ALL: - ci->drivers.loglevel = (char)OPENSEARCH_ALL; - break; - default: - ci->drivers.loglevel = (char)OPENSEARCH_OFF; - break; - } - setGlobalCommlog(ci->drivers.loglevel); - setGlobalDebug(ci->drivers.loglevel); - writeGlobalLogs(); - GetDlgItemText(hdlg, IDC_LOG_PATH, ci->drivers.output_dir, - sizeof(ci->drivers.output_dir)); - setLogDir(ci->drivers.output_dir); - } - - case IDCANCEL: - EndDialog(hdlg, FALSE); - return TRUE; - } - } - } - return FALSE; -} - -#endif /* WIN32 */ diff --git a/sql-odbc/src/sqlodbc/drvconn.c b/sql-odbc/src/sqlodbc/drvconn.c deleted file mode 100644 index e4e53def22..0000000000 --- a/sql-odbc/src/sqlodbc/drvconn.c +++ /dev/null @@ -1,320 +0,0 @@ -#include "drvconn.h" - -#include -#include - -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_connection.h" - -#ifndef WIN32 -#include -#include -#else -#include -#endif - -#include - -#ifdef WIN32 -#include - -#include "resource.h" -#include "win_setup.h" -#endif -#include "dlg_specific.h" -#include "opensearch_apifunc.h" - -#ifdef WIN32 -INT_PTR CALLBACK dconn_FDriverConnectProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam); -extern HINSTANCE s_hModule; /* Saved module handle. 
*/ -#endif - -char *hide_password(const char *str) { - char *outstr, *pwdp; - - if (!str) - return NULL; - outstr = strdup(str); - if (!outstr) - return NULL; - if (pwdp = strstr(outstr, "PWD="), !pwdp) - pwdp = strstr(outstr, "pwd="); - if (pwdp) { - char *p; - - for (p = pwdp + 4; *p && *p != ';'; p++) - *p = 'x'; - } - return outstr; -} - -int paramRequired(const ConnInfo *ci, int reqs) { - int required = 0; - const char *pw = SAFE_NAME(ci->password); - - /* Password is not necessarily a required parameter. */ - if ((reqs & PASSWORD_IS_REQUIRED) != 0) - if ('\0' == pw[0]) - required |= PASSWORD_IS_REQUIRED; - - return required; -} - -#ifdef WIN32 -RETCODE -dconn_DoDialog(HWND hwnd, ConnInfo *ci) { - INT_PTR dialog_result; - - MYLOG(OPENSEARCH_TRACE, "entering ci = %p\n", ci); - - if (hwnd) { - dialog_result = - DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_CONFIG), hwnd, - dconn_FDriverConnectProc, (LPARAM)ci); - if (-1 == dialog_result) { - int errc = GetLastError(); - MYLOG(OPENSEARCH_DEBUG, " LastError=%d\n", errc); - } - if (!dialog_result || (dialog_result == -1)) - return SQL_NO_DATA_FOUND; - else - return SQL_SUCCESS; - } - - MYLOG(OPENSEARCH_DEBUG, " No window specified\n"); - return SQL_ERROR; -} - -INT_PTR CALLBACK dconn_FDriverConnectProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam) { - MYLOG(OPENSEARCH_DEBUG, "dconn_FDriverConnectProc\n"); - ConnInfo *ci; - - switch (wMsg) { - case WM_INITDIALOG: - ci = (ConnInfo *)lParam; - - /* Change the caption for the setup dialog */ - SetWindowText(hdlg, "OpenSearch Connection"); - - /* Hide the DSN and description fields */ - ShowWindow(GetDlgItem(hdlg, IDC_DSNAMETEXT), SW_HIDE); - ShowWindow(GetDlgItem(hdlg, IDC_DSNAME), SW_HIDE); - - SetWindowLongPtr(hdlg, DWLP_USER, - lParam); /* Save the ConnInfo for the "OK" */ - SetDlgStuff(hdlg, ci); - - if (ci->server[0] == '\0') - SetFocus(GetDlgItem(hdlg, IDC_SERVER)); - else if (ci->port[0] == '\0') - SetFocus(GetDlgItem(hdlg, IDC_PORT)); - else if (ci->username[0] == '\0') - SetFocus(GetDlgItem(hdlg, IDC_USER)); - else if (ci->region[0] == '\0') - SetFocus(GetDlgItem(hdlg, IDC_REGION)); - - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 2, (WPARAM)0); - - // Encryption - ci->use_ssl = (IsDlgButtonChecked(hdlg, IDC_USESSL) ? 
1 : 0); - break; - - case WM_COMMAND: - switch (GET_WM_COMMAND_ID(wParam, lParam)) { - case IDOK: - ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - GetDlgStuff(hdlg, ci); - case IDCANCEL: - EndDialog(hdlg, GET_WM_COMMAND_ID(wParam, lParam) == IDOK); - return TRUE; - - case IDOK2: // <== TEST button - { - ConnInfo tmp_info; - ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - GetDlgStuff(hdlg, ci); - CC_copy_conninfo(&tmp_info, ci); - test_connection(hdlg, &tmp_info, FALSE); - CC_conninfo_release(&tmp_info); - break; - } - case ID_ADVANCED_OPTIONS: { - ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - DialogBoxParam(s_hModule, - MAKEINTRESOURCE(DLG_ADVANCED_OPTIONS), hdlg, - advancedOptionsProc, (LPARAM)ci); - break; - } - case ID_LOG_OPTIONS: { - ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_LOG_OPTIONS), - hdlg, logOptionsProc, (LPARAM)ci); - break; - } - case IDC_AUTHTYPE: { - ci = (ConnInfo *)GetWindowLongPtr(hdlg, DWLP_USER); - const struct authmode *am = GetCurrentAuthMode(hdlg); - SetAuthenticationVisibility(hdlg, am); - break; - } - } - break; - case WM_CTLCOLORSTATIC: - if (lParam == (LPARAM)GetDlgItem(hdlg, IDC_NOTICE_USER)) { - HBRUSH hBrush = (HBRUSH)GetStockObject(WHITE_BRUSH); - SetTextColor((HDC)wParam, RGB(255, 0, 0)); - return (INT_PTR)hBrush; - } - break; - } - - return FALSE; -} -#endif /* WIN32 */ - -#define ATTRIBUTE_DELIMITER ';' -#define OPENING_BRACKET '{' -#define CLOSING_BRACKET '}' - -typedef BOOL (*copyfunc)(ConnInfo *, const char *attribute, const char *value); -BOOL dconn_get_attributes(copyfunc func, const char *connect_string, - ConnInfo *ci) { - BOOL ret = TRUE; - char *our_connect_string; - const char *pair, *attribute, *value, *termp; - BOOL eoftok; - char *equals, *delp; - char *strtok_arg; -#ifdef HAVE_STRTOK_R - char *last = NULL; -#endif /* HAVE_STRTOK_R */ - - if (our_connect_string = strdup(connect_string), NULL == our_connect_string) { - ret = FALSE; - goto cleanup; - } - strtok_arg = our_connect_string; - -#ifdef FORCE_PASSWORD_DISPLAY - MYLOG(OPENSEARCH_DEBUG, "our_connect_string = '%s'\n", our_connect_string); -#else - if (get_mylog()) { - char *hide_str = hide_password(our_connect_string); - - MYLOG(OPENSEARCH_DEBUG, "our_connect_string = '%s'\n", hide_str); - free(hide_str); - } -#endif /* FORCE_PASSWORD_DISPLAY */ - - termp = strchr(our_connect_string, '\0'); - eoftok = FALSE; - while (!eoftok) { - if (strtok_arg != NULL && strtok_arg >= termp) /* for safety */ - break; -#ifdef HAVE_STRTOK_R - pair = strtok_r(strtok_arg, ";", &last); -#else - pair = strtok(strtok_arg, ";"); -#endif /* HAVE_STRTOK_R */ - if (strtok_arg) - strtok_arg = NULL; - if (!pair) - break; - - equals = strchr(pair, '='); - if (!equals) - continue; - - *equals = '\0'; - attribute = pair; /* ex. DSN */ - value = equals + 1; /* ex. 'CEO co1' */ - /* - * Values enclosed with braces({}) can contain ; etc - * We don't remove the braces here because - * decode_or_remove_braces() in dlg_specific.c - * would remove them later. - * Just correct the misdetected delimiter(;). - */ - switch (*value) { - const char *valuen, *closep; - - case OPENING_BRACKET: - delp = strchr(value, '\0'); - if (delp >= termp) { - eoftok = TRUE; - break; - } - /* Where's a corresponding closing bracket?
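
The comment above describes the brace convention this parser protects: a value wrapped in {} may legally contain ';', and decode_or_remove_braces() in the dlg_specific.c hunk earlier strips the braces later, treating a doubled "}}" as a literal '}'. A standalone sketch of that unescaping step; the unbrace helper is hypothetical:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *unbrace(const char *in) {
    size_t inlen = strlen(in);
    if (inlen < 2 || in[0] != '{' || in[inlen - 1] != '}')
        return strdup(in);                 /* not brace-enclosed: keep as-is */
    char *out = malloc(inlen);             /* result is strictly shorter */
    if (!out)
        return NULL;
    size_t o = 0;
    for (const char *p = in + 1; p < in + inlen - 1; p++) {
        if (p[0] == '}' && p[1] == '}')
            p++;                           /* "}}" -> literal "}" */
        out[o++] = *p;
    }
    out[o] = '\0';
    return out;
}

int main(void) {
    char *s = unbrace("{p;w{d}}x}");       /* -> p;w{d}x */
    puts(s ? s : "(alloc failed)");
    free(s);
    return 0;
}
```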
*/ - closep = strchr(value, CLOSING_BRACKET); - if (NULL != closep && closep[1] == '\0') - break; - - for (valuen = value; valuen < termp; - closep = strchr(valuen, CLOSING_BRACKET)) { - if (NULL == closep) { - if (!delp) /* error */ - { - MYLOG(OPENSEARCH_DEBUG, - "closing bracket doesn't exist 1\n"); - ret = FALSE; - goto cleanup; - } - closep = strchr(delp + 1, CLOSING_BRACKET); - if (!closep) /* error */ - { - MYLOG(OPENSEARCH_DEBUG, - "closing bracket doesn't exist 2\n"); - ret = FALSE; - goto cleanup; - } - *delp = ATTRIBUTE_DELIMITER; /* restore delimiter */ - delp = NULL; - } - if (CLOSING_BRACKET == closep[1]) { - valuen = closep + 2; - if (valuen >= termp) - break; - else if (valuen == delp) { - *delp = ATTRIBUTE_DELIMITER; - delp = NULL; - } - continue; - } else if (ATTRIBUTE_DELIMITER == closep[1] - || '\0' == closep[1] || delp == closep + 1) { - delp = (char *)(closep + 1); - *delp = '\0'; - strtok_arg = delp + 1; - if (strtok_arg + 1 >= termp) - eoftok = TRUE; - break; - } - MYLOG(OPENSEARCH_DEBUG, - "subsequent char to the closing bracket is %c " - "value=%s\n", - closep[1], value); - ret = FALSE; - goto cleanup; - } - } - - /* Copy the appropriate value to the conninfo */ - (*func)(ci, attribute, value); - } - -cleanup: - free(our_connect_string); - - return ret; -} - -BOOL dconn_get_DSN_or_Driver(const char *connect_string, ConnInfo *ci) { - CC_conninfo_init(ci, INIT_GLOBALS); - return dconn_get_attributes(get_DSN_or_Driver, connect_string, ci); -} - -BOOL dconn_get_connect_attributes(const char *connect_string, ConnInfo *ci) { - return dconn_get_attributes(copyConnAttributes, connect_string, ci); -} diff --git a/sql-odbc/src/sqlodbc/drvconn.h b/sql-odbc/src/sqlodbc/drvconn.h deleted file mode 100644 index 3a00957b8b..0000000000 --- a/sql-odbc/src/sqlodbc/drvconn.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _DRVCONN_H_ -#define _DRVCONN_H_ - -#include -#include - -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_connection.h" - -#ifndef WIN32 -#include -#include -#else -#include -#endif - -#include - -#ifdef WIN32 -#include - -#include "resource.h" -#endif -#include "dlg_specific.h" -#include "opensearch_apifunc.h" - -#define PASSWORD_IS_REQUIRED 1 - -#ifdef __cplusplus -extern "C" { -#endif -char *hide_password(const char *str); -BOOL dconn_get_connect_attributes(const char *connect_string, ConnInfo *ci); -BOOL dconn_get_DSN_or_Driver(const char *connect_string, ConnInfo *ci); -int paramRequired(const ConnInfo *ci, int reqs); -#ifdef WIN32 -RETCODE dconn_DoDialog(HWND hwnd, ConnInfo *ci); -#endif -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sql-odbc/src/sqlodbc/environ.c b/sql-odbc/src/sqlodbc/environ.c deleted file mode 100644 index c12dc085a7..0000000000 --- a/sql-odbc/src/sqlodbc/environ.c +++ /dev/null @@ -1,547 +0,0 @@ -#include "environ.h" - -#include -#include - -#include "dlg_specific.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "statement.h" -#ifdef WIN32 -#include -#endif /* WIN32 */ -#include "loadlib.h" - -/* The one instance of the handles */ -static int conns_count = 0; -static ConnectionClass **conns = NULL; - -void *conns_cs = NULL; -void *common_cs = NULL; -void *common_lcs = NULL; - -RETCODE SQL_API OPENSEARCHAPI_AllocEnv(HENV *phenv) { - CSTR func = "OPENSEARCHAPI_AllocEnv"; - SQLRETURN ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - /* - * For systems on which none of the constructor-making - * techniques in elasticodbc.c work: - * It's ok to call 
initialize_global_cs() twice. - */ - { initialize_global_cs(); } - - *phenv = (HENV)EN_Constructor(); - if (!*phenv) { - *phenv = SQL_NULL_HENV; - EN_log_error(func, "Error allocating environment", NULL); - ret = SQL_ERROR; - } - - MYLOG(OPENSEARCH_TRACE, "leaving phenv=%p\n", *phenv); - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_FreeEnv(HENV henv) { - CSTR func = "OPENSEARCHAPI_FreeEnv"; - SQLRETURN ret = SQL_SUCCESS; - EnvironmentClass *env = (EnvironmentClass *)henv; - - MYLOG(OPENSEARCH_TRACE, "entering env=%p\n", env); - - if (env && EN_Destructor(env)) { - MYLOG(OPENSEARCH_DEBUG, " ok\n"); - goto cleanup; - } - - ret = SQL_ERROR; - EN_log_error(func, "Error freeing environment", NULL); -cleanup: - return ret; -} - -#define SIZEOF_SQLSTATE 6 - -static void opensearch_sqlstate_set(const EnvironmentClass *env, UCHAR *szSqlState, - const char *ver3str, const char *ver2str) { - strncpy_null((char *)szSqlState, EN_is_odbc3(env) ? ver3str : ver2str, - SIZEOF_SQLSTATE); -} - -OpenSearch_ErrorInfo *ER_Constructor(SDWORD errnumber, const char *msg) { - OpenSearch_ErrorInfo *error; - ssize_t aladd, errsize; - - if (DESC_OK == errnumber) - return NULL; - if (msg) { - errsize = strlen(msg); - aladd = errsize - sizeof(error->__error_message) + 1; - if (aladd < 0) - aladd = 0; - } else { - errsize = -1; - aladd = 0; - } - error = (OpenSearch_ErrorInfo *)malloc(sizeof(OpenSearch_ErrorInfo) + aladd); - if (error) { - memset(error, 0, sizeof(OpenSearch_ErrorInfo)); - error->status = errnumber; - error->errorsize = (Int2)errsize; - if (errsize > 0) - memcpy(error->__error_message, msg, errsize); - error->__error_message[errsize] = '\0'; - error->recsize = -1; - } - return error; -} - -void ER_Destructor(OpenSearch_ErrorInfo *self) { - free(self); -} - -OpenSearch_ErrorInfo *ER_Dup(const OpenSearch_ErrorInfo *self) { - OpenSearch_ErrorInfo *new; - Int4 alsize; - - if (!self) - return NULL; - alsize = sizeof(OpenSearch_ErrorInfo); - if (self->errorsize > 0) - alsize += self->errorsize; - new = (OpenSearch_ErrorInfo *)malloc(alsize); - if (new) - memcpy(new, self, alsize); - - return new; -} - -#define DRVMNGRDIV 511 -/* Returns the next SQL error information. */ -RETCODE SQL_API ER_ReturnError(OpenSearch_ErrorInfo *openSearchError, SQLSMALLINT RecNumber, - SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, - SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg, UWORD flag) { - /* CC: return an error of a hstmt */ - OpenSearch_ErrorInfo *error; - BOOL partial_ok = ((flag & PODBC_ALLOW_PARTIAL_EXTRACT) != 0); - const char *msg; - SWORD msglen, stapos, wrtlen, pcblen; - - if (!openSearchError) - return SQL_NO_DATA_FOUND; - error = openSearchError; - msg = error->__error_message; - MYLOG(OPENSEARCH_TRACE, "entering status = %d, msg = #%s#\n", error->status, msg); - msglen = (SQLSMALLINT)strlen(msg); - /* - * Even though an application specifies a larger error message - * buffer, the driver manager changes it silently. - * Therefore we divide the error message into ... 
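
The truncated comment above refers to ER_ReturnError's paging scheme: because the driver manager silently caps the error-message buffer, the driver hands a long message out in recsize-byte records, with record N starting at (N - 1) * recsize and a request past the end yielding SQL_NO_DATA_FOUND. A minimal sketch of that chunking, with hypothetical names:

```c
#include <stdio.h>
#include <string.h>

/* Copy record `recnum` (1-based) of `msg` into buf; 0 means no such record. */
static int get_record(const char *msg, int recsize, int recnum,
                      char *buf, int buflen) {
    int msglen = (int)strlen(msg);
    int start = (recnum - 1) * recsize;    /* where this record begins */
    if (start > msglen)
        return 0;                          /* past the end: no data */
    int len = msglen - start;
    if (len > recsize)
        len = recsize;                     /* clamp to one record */
    if (len >= buflen)
        len = buflen - 1;                  /* partial extract into small buf */
    memcpy(buf, msg + start, len);
    buf[len] = '\0';
    return 1;
}

int main(void) {
    char rec[8];
    /* with recsize 5, record 2 of "hello world" is " worl" */
    if (get_record("hello world", 5, 2, rec, sizeof(rec)))
        puts(rec);
    return 0;
}
```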
- */ - if (error->recsize < 0) { - if (cbErrorMsgMax > 0) - error->recsize = cbErrorMsgMax - 1; /* apply the first request */ - else - error->recsize = DRVMNGRDIV; - } else if (1 == RecNumber && cbErrorMsgMax > 0) - error->recsize = cbErrorMsgMax - 1; - if (RecNumber < 0) { - if (0 == error->errorpos) - RecNumber = 1; - else - RecNumber = 2 + (error->errorpos - 1) / error->recsize; - } - stapos = (RecNumber - 1) * error->recsize; - if (stapos > msglen) - return SQL_NO_DATA_FOUND; - pcblen = wrtlen = msglen - stapos; - if (pcblen > error->recsize) - pcblen = error->recsize; - if (0 == cbErrorMsgMax) - wrtlen = 0; - else if (wrtlen >= cbErrorMsgMax) { - if (partial_ok) - wrtlen = cbErrorMsgMax - 1; - else if (cbErrorMsgMax <= error->recsize) - wrtlen = cbErrorMsgMax - 1; - else - wrtlen = error->recsize; - } - if (wrtlen > pcblen) - wrtlen = pcblen; - if (NULL != pcbErrorMsg) - *pcbErrorMsg = pcblen; - - if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) { - memcpy(szErrorMsg, msg + stapos, wrtlen); - szErrorMsg[wrtlen] = '\0'; - } - - if (NULL != pfNativeError) - *pfNativeError = error->status; - - if (NULL != szSqlState) - strncpy_null((char *)szSqlState, error->sqlstate, 6); - - MYLOG(OPENSEARCH_DEBUG, " szSqlState = '%s',len=%d, szError='%s'\n", - szSqlState, pcblen, szErrorMsg); - if (wrtlen < pcblen) - return SQL_SUCCESS_WITH_INFO; - else - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_ConnectError(HDBC hdbc, SQLSMALLINT RecNumber, - SQLCHAR *szSqlState, - SQLINTEGER *pfNativeError, - SQLCHAR *szErrorMsg, - SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg, UWORD flag) { - UNUSED(flag); - ConnectionClass *conn = (ConnectionClass *)hdbc; - EnvironmentClass *env = (EnvironmentClass *)conn->henv; - char *msg; - int status; - BOOL once_again = FALSE; - ssize_t msglen; - - MYLOG(OPENSEARCH_ERROR, "entering hdbc=%p <%d>\n", hdbc, cbErrorMsgMax); - if (RecNumber != 1 && RecNumber != -1) - return SQL_NO_DATA_FOUND; - if (cbErrorMsgMax < 0) - return SQL_ERROR; - if (CONN_EXECUTING == conn->status || !CC_get_error(conn, &status, &msg) - || NULL == msg) { - MYLOG(OPENSEARCH_ERROR, "CC_Get_error returned nothing.\n"); - if (NULL != szSqlState) - strncpy_null((char *)szSqlState, "00000", SIZEOF_SQLSTATE); - if (NULL != pcbErrorMsg) - *pcbErrorMsg = 0; - if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) - szErrorMsg[0] = '\0'; - - return SQL_NO_DATA_FOUND; - } - MYLOG(OPENSEARCH_ERROR, "CC_get_error: status = %d, msg = #%s#\n", status, msg); - - msglen = strlen(msg); - if (NULL != pcbErrorMsg) { - *pcbErrorMsg = (SQLSMALLINT)msglen; - if (cbErrorMsgMax == 0) - once_again = TRUE; - else if (msglen >= cbErrorMsgMax) - *pcbErrorMsg = cbErrorMsgMax - 1; - } - if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) - strncpy_null((char *)szErrorMsg, msg, cbErrorMsgMax); - if (NULL != pfNativeError) - *pfNativeError = status; - - if (NULL != szSqlState) { - if (conn->sqlstate[0]) - strncpy_null((char *)szSqlState, conn->sqlstate, SIZEOF_SQLSTATE); - else - switch (status) { - case CONN_OPTION_VALUE_CHANGED: - opensearch_sqlstate_set(env, szSqlState, "01S02", "01S02"); - break; - case CONN_TRUNCATED: - opensearch_sqlstate_set(env, szSqlState, "01004", "01004"); - /* data truncated */ - break; - case CONN_INIREAD_ERROR: - opensearch_sqlstate_set(env, szSqlState, "IM002", "IM002"); - /* data source not found */ - break; - case CONNECTION_SERVER_NOT_REACHED: - case CONN_OPENDB_ERROR: - opensearch_sqlstate_set(env, szSqlState, "08001", "08001"); - /* unable to connect to data source */ - break; - case 
CONN_INVALID_AUTHENTICATION: - case CONN_AUTH_TYPE_UNSUPPORTED: - opensearch_sqlstate_set(env, szSqlState, "28000", "28000"); - break; - case CONN_STMT_ALLOC_ERROR: - opensearch_sqlstate_set(env, szSqlState, "HY001", "S1001"); - /* memory allocation failure */ - break; - case CONN_IN_USE: - opensearch_sqlstate_set(env, szSqlState, "HY000", "S1000"); - /* general error */ - break; - case CONN_UNSUPPORTED_OPTION: - opensearch_sqlstate_set(env, szSqlState, "HYC00", "IM001"); - /* driver does not support this function */ - break; - case CONN_INVALID_ARGUMENT_NO: - opensearch_sqlstate_set(env, szSqlState, "HY009", "S1009"); - /* invalid argument value */ - break; - case CONN_TRANSACT_IN_PROGRES: - opensearch_sqlstate_set(env, szSqlState, "HY011", "S1011"); - break; - case CONN_NO_MEMORY_ERROR: - opensearch_sqlstate_set(env, szSqlState, "HY001", "S1001"); - break; - case CONN_NOT_IMPLEMENTED_ERROR: - opensearch_sqlstate_set(env, szSqlState, "HYC00", "S1C00"); - break; - case CONN_ILLEGAL_TRANSACT_STATE: - opensearch_sqlstate_set(env, szSqlState, "25000", "S1010"); - break; - case CONN_VALUE_OUT_OF_RANGE: - opensearch_sqlstate_set(env, szSqlState, "HY019", "22003"); - break; - case CONNECTION_COULD_NOT_SEND: - case CONNECTION_COULD_NOT_RECEIVE: - case CONNECTION_COMMUNICATION_ERROR: - case CONNECTION_NO_RESPONSE: - opensearch_sqlstate_set(env, szSqlState, "08S01", "08S01"); - break; - default: - opensearch_sqlstate_set(env, szSqlState, "HY000", "S1000"); - /* general error */ - break; - } - } - - MYLOG(OPENSEARCH_DEBUG, - " szSqlState = '%s',len=" FORMAT_SSIZE_T ", szError='%s'\n", - szSqlState ? (char *)szSqlState : PRINT_NULL, msglen, - szErrorMsg ? (char *)szErrorMsg : PRINT_NULL); - if (once_again) { - CC_set_errornumber(conn, status); - return SQL_SUCCESS_WITH_INFO; - } else - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_EnvError(HENV henv, SQLSMALLINT RecNumber, - SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, - SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg, UWORD flag) { - UNUSED(flag); - EnvironmentClass *env = (EnvironmentClass *)henv; - char *msg = NULL; - int status; - - MYLOG(OPENSEARCH_ERROR, "entering henv=%p <%d>\n", henv, cbErrorMsgMax); - if (RecNumber != 1 && RecNumber != -1) - return SQL_NO_DATA_FOUND; - if (cbErrorMsgMax < 0) - return SQL_ERROR; - if (!EN_get_error(env, &status, &msg) || NULL == msg) { - MYLOG(OPENSEARCH_ERROR, "EN_get_error: msg = #%s#\n", msg); - - if (NULL != szSqlState) - opensearch_sqlstate_set(env, szSqlState, "00000", "00000"); - if (NULL != pcbErrorMsg) - *pcbErrorMsg = 0; - if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) - szErrorMsg[0] = '\0'; - - return SQL_NO_DATA_FOUND; - } - MYLOG(OPENSEARCH_ERROR, "EN_get_error: status = %d, msg = #%s#\n", status, msg); - - if (NULL != pcbErrorMsg) - *pcbErrorMsg = (SQLSMALLINT)strlen(msg); - if ((NULL != szErrorMsg) && (cbErrorMsgMax > 0)) - strncpy_null((char *)szErrorMsg, msg, cbErrorMsgMax); - if (NULL != pfNativeError) - *pfNativeError = status; - - if (szSqlState) { - switch (status) { - case ENV_ALLOC_ERROR: - /* memory allocation failure */ - opensearch_sqlstate_set(env, szSqlState, "HY001", "S1001"); - break; - default: - opensearch_sqlstate_set(env, szSqlState, "HY000", "S1000"); - /* general error */ - break; - } - } - - return SQL_SUCCESS; -} - -/* - * EnvironmentClass implementation - */ -EnvironmentClass *EN_Constructor(void) { - EnvironmentClass *rv = NULL; -#ifdef WIN32 - WORD wVersionRequested; - WSADATA wsaData; - const int major = 2, minor = 2; - - /* 
Load the WinSock Library */ - wVersionRequested = MAKEWORD(major, minor); - - if (WSAStartup(wVersionRequested, &wsaData)) { - MYLOG(OPENSEARCH_ERROR, " WSAStartup error\n"); - return rv; - } - /* Verify that this is the minimum version of WinSock */ - if (LOBYTE(wsaData.wVersion) >= 1 - && (LOBYTE(wsaData.wVersion) >= 2 || HIBYTE(wsaData.wVersion) >= 1)) - ; - else { - MYLOG(OPENSEARCH_DEBUG, " WSAStartup version=(%d,%d)\n", - LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion)); - goto cleanup; - } -#endif /* WIN32 */ - - rv = (EnvironmentClass *)malloc(sizeof(EnvironmentClass)); - if (NULL == rv) { - MYLOG(OPENSEARCH_ERROR, " malloc error\n"); - goto cleanup; - } - rv->errormsg = 0; - rv->errornumber = 0; - rv->flag = 0; - INIT_ENV_CS(rv); -cleanup: -#ifdef WIN32 - if (NULL == rv) { - WSACleanup(); - } -#endif /* WIN32 */ - - return rv; -} - -char EN_Destructor(EnvironmentClass *self) { - int lf, nullcnt; - char rv = 1; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", self); - if (!self) - return 0; - - /* - * the error messages are static strings distributed throughout the - * source--they should not be freed - */ - - /* Free any connections belonging to this environment */ - ENTER_CONNS_CS; - for (lf = 0, nullcnt = 0; lf < conns_count; lf++) { - if (NULL == conns[lf]) - nullcnt++; - else if (conns[lf]->henv == self) { - if (CC_Destructor(conns[lf])) - conns[lf] = NULL; - else - rv = 0; - nullcnt++; - } - } - if (conns && nullcnt >= conns_count) { - MYLOG(OPENSEARCH_DEBUG, "clearing conns count=%d\n", conns_count); - free(conns); - conns = NULL; - conns_count = 0; - } - LEAVE_CONNS_CS; - DELETE_ENV_CS(self); - free(self); - -#ifdef WIN32 - WSACleanup(); -#endif - MYLOG(OPENSEARCH_TRACE, "leaving rv=%d\n", rv); -#ifdef _MEMORY_DEBUG_ - debug_memory_check(); -#endif /* _MEMORY_DEBUG_ */ - return rv; -} - -char EN_get_error(EnvironmentClass *self, int *number, char **message) { - if (self && self->errormsg && self->errornumber) { - *message = self->errormsg; - *number = self->errornumber; - self->errormsg = 0; - self->errornumber = 0; - return 1; - } else - return 0; -} - -#define INIT_CONN_COUNT 128 - -char EN_add_connection(EnvironmentClass *self, ConnectionClass *conn) { - int i, alloc; - ConnectionClass **newa; - char ret = FALSE; - - MYLOG(OPENSEARCH_TRACE, "entering self = %p, conn = %p\n", self, conn); - - ENTER_CONNS_CS; - for (i = 0; i < conns_count; i++) { - if (!conns[i]) { - conn->henv = self; - conns[i] = conn; - ret = TRUE; - MYLOG( - 0, - " added at i=%d, conn->henv = %p, conns[i]->henv = %p\n", - i, conn->henv, conns[i]->henv); - goto cleanup; - } - } - if (conns_count > 0) - alloc = 2 * conns_count; - else - alloc = INIT_CONN_COUNT; - if (newa = (ConnectionClass **)realloc(conns, - alloc * sizeof(ConnectionClass *)), - NULL == newa) - goto cleanup; - conn->henv = self; - newa[conns_count] = conn; - conns = newa; - ret = TRUE; - MYLOG(OPENSEARCH_DEBUG, - " added at %d, conn->henv = %p, conns[%d]->henv = %p\n", - conns_count, conn->henv, conns_count, conns[conns_count]->henv); - for (i = conns_count + 1; i < alloc; i++) - conns[i] = NULL; - conns_count = alloc; -cleanup: - LEAVE_CONNS_CS; - return ret; -} - -char EN_remove_connection(EnvironmentClass *self, ConnectionClass *conn) { - UNUSED(self); - int i; - - for (i = 0; i < conns_count; i++) - if (conns[i] == conn && conns[i]->status != CONN_EXECUTING) { - ENTER_CONNS_CS; - conns[i] = NULL; - LEAVE_CONNS_CS; - return TRUE; - } - - return FALSE; -} - -void EN_log_error(const char *func, char *desc, EnvironmentClass *self) 
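/*
 * For reference, a condensed sketch of the connection-registry growth policy
 * implemented by EN_add_connection above. The names Registry and registry_add
 * are hypothetical, for illustration only; the real code additionally guards
 * the array with ENTER_CONNS_CS / LEAVE_CONNS_CS.
 *
 *   typedef struct {
 *       ConnectionClass **slots;
 *       int count;                          // allocated slot count, 0 initially
 *   } Registry;
 *
 *   static char registry_add(Registry *r, ConnectionClass *conn) {
 *       for (int i = 0; i < r->count; i++)
 *           if (!r->slots[i]) {             // reuse a vacated slot first
 *               r->slots[i] = conn;
 *               return TRUE;
 *           }
 *       int alloc = r->count > 0 ? 2 * r->count : INIT_CONN_COUNT;
 *       ConnectionClass **newa =
 *           realloc(r->slots, alloc * sizeof(ConnectionClass *));
 *       if (!newa)
 *           return FALSE;                   // old array left intact on failure
 *       newa[r->count] = conn;              // first fresh slot takes the entry
 *       for (int i = r->count + 1; i < alloc; i++)
 *           newa[i] = NULL;                 // zero the remaining new slots
 *       r->slots = newa;
 *       r->count = alloc;
 *       return TRUE;
 *   }
 *
 * Doubling keeps the amortized reallocation cost of N registrations at O(N)
 * instead of the O(N^2) a grow-by-one policy would incur.
 */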
{ - if (self) - MYLOG(OPENSEARCH_ERROR, - "ENVIRON ERROR: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", - func, desc, self->errornumber, self->errormsg); - else - MYLOG(OPENSEARCH_ERROR, "INVALID ENVIRON HANDLE ERROR: func=%s, desc='%s'\n", - func, desc); -} diff --git a/sql-odbc/src/sqlodbc/environ.h b/sql-odbc/src/sqlodbc/environ.h deleted file mode 100644 index bcbb04b78a..0000000000 --- a/sql-odbc/src/sqlodbc/environ.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef __ENVIRON_H__ -#define __ENVIRON_H__ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "opensearch_helper.h" -#include "opensearch_odbc.h" - -#define ENV_ALLOC_ERROR 1 - -/********** Environment Handle *************/ -struct EnvironmentClass_ { - char *errormsg; - int errornumber; - Int4 flag; - void *cs; -}; - -/* Environment prototypes */ -EnvironmentClass *EN_Constructor(void); -char EN_Destructor(EnvironmentClass *self); -char EN_get_error(EnvironmentClass *self, int *number, char **message); -char EN_add_connection(EnvironmentClass *self, ConnectionClass *conn); -char EN_remove_connection(EnvironmentClass *self, ConnectionClass *conn); -void EN_log_error(const char *func, char *desc, EnvironmentClass *self); - -#define EN_OV_ODBC2 1L -#define EN_CONN_POOLING (1L << 1) -#define EN_is_odbc2(env) ((env->flag & EN_OV_ODBC2) != 0) -#define EN_is_odbc3(env) (env && (env->flag & EN_OV_ODBC2) == 0) -#define EN_set_odbc2(env) (env->flag |= EN_OV_ODBC2) -#define EN_set_odbc3(env) (env->flag &= ~EN_OV_ODBC2) -#define EN_is_pooling(env) (env && (env->flag & EN_CONN_POOLING) != 0) -#define EN_set_pooling(env) (env->flag |= EN_CONN_POOLING) -#define EN_unset_pooling(env) (env->flag &= ~EN_CONN_POOLING) - -/* For Multi-thread */ -#define INIT_CONNS_CS XPlatformInitializeCriticalSection(&conns_cs) -#define ENTER_CONNS_CS XPlatformEnterCriticalSection(conns_cs) -#define LEAVE_CONNS_CS XPlatformLeaveCriticalSection(conns_cs) -#define DELETE_CONNS_CS XPlatformDeleteCriticalSection(&conns_cs) -#define INIT_ENV_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) -#define ENTER_ENV_CS(x) XPlatformEnterCriticalSection(((x)->cs)) -#define LEAVE_ENV_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) -#define DELETE_ENV_CS(x) XPlatformDeleteCriticalSection(&((x)->cs)) -#define INIT_COMMON_CS XPlatformInitializeCriticalSection(&common_cs) -#define ENTER_COMMON_CS XPlatformEnterCriticalSection(common_cs) -#define LEAVE_COMMON_CS XPlatformLeaveCriticalSection(common_cs) -#define DELETE_COMMON_CS XPlatformDeleteCriticalSection(&common_cs) - -#ifdef __cplusplus -} -#endif -#endif /* __ENVIRON_H_ */ diff --git a/sql-odbc/src/sqlodbc/execute.c b/sql-odbc/src/sqlodbc/execute.c deleted file mode 100644 index f2f2cd24e3..0000000000 --- a/sql-odbc/src/sqlodbc/execute.c +++ /dev/null @@ -1,134 +0,0 @@ -#include -#include - -#include "opensearch_odbc.h" -#include "misc.h" - -#ifndef WIN32 -#include -#endif /* WIN32 */ - -#include "bind.h" -#include "convert.h" -#include "environ.h" -#include "opensearch_types.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_statement.h" -#include "qresult.h" -#include "statement.h" - -RETCODE SQL_API OPENSEARCHAPI_Prepare(HSTMT hstmt, const SQLCHAR *stmt_str, - SQLINTEGER stmt_sz) { - if (hstmt == NULL) - return SQL_ERROR; - - // We know cursor is not open at this point - StatementClass *stmt = (StatementClass *)hstmt; - - // PrepareStatement deallocates memory if necessary - RETCODE ret = PrepareStatement(stmt, stmt_str, stmt_sz); - if (ret != SQL_SUCCESS) - return ret; - - // 
Execute the statement - ret = ExecuteStatement(stmt, FALSE); - if (ret == SQL_SUCCESS) - stmt->prepared = PREPARED; - - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_Execute(HSTMT hstmt) { - if (hstmt == NULL) - return SQL_ERROR; - - // We know cursor is not open at this point - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE ret = SQL_ERROR; - switch (stmt->prepared) { - case PREPARED: - ret = AssignResult(stmt); - stmt->prepared = EXECUTED; - break; - case EXECUTED: - ret = RePrepareStatement(stmt); - if (ret != SQL_SUCCESS) - break; - ret = ExecuteStatement(stmt, TRUE); - if (ret != SQL_SUCCESS) - break; - stmt->prepared = EXECUTED; - break; - case NOT_PREPARED: - ret = SQL_ERROR; - break; - default: - break; - } - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_ExecDirect(HSTMT hstmt, const SQLCHAR *stmt_str, - SQLINTEGER stmt_sz, BOOL commit) { - if (hstmt == NULL) - return SQL_ERROR; - - // We know cursor is not open at this point - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE ret = PrepareStatement(stmt, stmt_str, stmt_sz); - if (ret != SQL_SUCCESS) - return ret; - - // Execute statement - ret = ExecuteStatement(hstmt, commit); - if (ret != SQL_SUCCESS) - return ret; - stmt->prepared = NOT_PREPARED; - return ret; -} - -/* - * Returns the SQL string as modified by the driver. - * Currently, just copy the input string without modification - * observing buffer limits and truncation. - */ -RETCODE SQL_API OPENSEARCHAPI_NativeSql(HDBC hdbc, const SQLCHAR *szSqlStrIn, - SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, - SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { - CSTR func = "OPENSEARCHAPI_NativeSql"; - size_t len = 0; - char *ptr; - ConnectionClass *conn = (ConnectionClass *)hdbc; - RETCODE result; - - MYLOG(OPENSEARCH_TRACE, "entering...cbSqlStrIn=" FORMAT_INTEGER "\n", cbSqlStrIn); - - ptr = (cbSqlStrIn == 0) ? 
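/*
 * A hedged usage sketch of the truncation contract implemented below,
 * assuming the driver manager routes SQLNativeSql to this function; 'hdbc'
 * is a connected handle and the buffer sizes are illustrative:
 *
 *   SQLCHAR in[] = "SELECT 1";
 *   SQLCHAR out[4];
 *   SQLINTEGER need = 0;
 *   SQLRETURN rc = SQLNativeSql(hdbc, in, SQL_NTS, out, sizeof(out), &need);
 *   // rc == SQL_SUCCESS_WITH_INFO, out holds "SEL" (truncated,
 *   // NUL-terminated), and need == 8, the length of the full
 *   // pass-through text "SELECT 1".
 */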
"" : make_string(szSqlStrIn, cbSqlStrIn, NULL, 0); - if (!ptr) { - CC_set_error(conn, CONN_NO_MEMORY_ERROR, - "No memory available to store native sql string", func); - return SQL_ERROR; - } - - result = SQL_SUCCESS; - len = strlen(ptr); - - if (szSqlStr) { - strncpy_null((char *)szSqlStr, ptr, cbSqlStrMax); - - if (len >= (size_t)cbSqlStrMax) { - result = SQL_SUCCESS_WITH_INFO; - CC_set_error(conn, CONN_TRUNCATED, - "The buffer was too small for the NativeSQL.", func); - } - } - - if (pcbSqlStr) - *pcbSqlStr = (SQLINTEGER)len; - - if (cbSqlStrIn) - free(ptr); - - return result; -} diff --git a/sql-odbc/src/sqlodbc/info.c b/sql-odbc/src/sqlodbc/info.c deleted file mode 100644 index c40f783a55..0000000000 --- a/sql-odbc/src/sqlodbc/info.c +++ /dev/null @@ -1,1897 +0,0 @@ -#include -#include - -#include "opensearch_odbc.h" -#include "unicode_support.h" - -#ifndef WIN32 -#include -#endif - -#include "bind.h" -#include "catfunc.h" -#include "dlg_specific.h" -#include "environ.h" -#include "opensearch_types.h" -#include "misc.h" -#include "multibyte.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_info.h" -#include "qresult.h" -#include "statement.h" -#include "tuple.h" - -/* Trigger related stuff for SQLForeign Keys */ -#define TRIGGER_SHIFT 3 -#define TRIGGER_MASK 0x03 -#define TRIGGER_DELETE 0x01 -#define TRIGGER_UPDATE 0x02 - -RETCODE SQL_API OPENSEARCHAPI_GetInfo(HDBC hdbc, SQLUSMALLINT fInfoType, - PTR rgbInfoValue, SQLSMALLINT cbInfoValueMax, - SQLSMALLINT *pcbInfoValue) { - CSTR func = "OPENSEARCHAPI_GetInfo"; - ConnectionClass *conn = (ConnectionClass *)hdbc; - const char *p = NULL; - char tmp[MAX_INFO_STRING]; - SQLULEN len = 0, value = 0; - RETCODE ret = SQL_ERROR; - char odbcver[16]; - - MYLOG(OPENSEARCH_TRACE, "entering...fInfoType=%d\n", fInfoType); - - if (!conn) { - CC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - switch (fInfoType) { - case SQL_ACCESSIBLE_PROCEDURES: /* ODBC 1.0 */ - p = "N"; - break; - - case SQL_ACCESSIBLE_TABLES: /* ODBC 1.0 */ - p = "N"; - break; - - case SQL_ACTIVE_CONNECTIONS: /* ODBC 1.0 */ - len = 2; - value = 0; - break; - - case SQL_ACTIVE_STATEMENTS: /* ODBC 1.0 */ - len = 2; - value = 0; - break; - - case SQL_ALTER_TABLE: /* ODBC 2.0 */ - len = 4; - value = SQL_AT_ADD_COLUMN | SQL_AT_DROP_COLUMN - | SQL_AT_ADD_COLUMN_SINGLE | SQL_AT_ADD_CONSTRAINT - | SQL_AT_ADD_TABLE_CONSTRAINT - | SQL_AT_CONSTRAINT_INITIALLY_DEFERRED - | SQL_AT_CONSTRAINT_INITIALLY_IMMEDIATE - | SQL_AT_CONSTRAINT_DEFERRABLE - | SQL_AT_DROP_TABLE_CONSTRAINT_RESTRICT - | SQL_AT_DROP_TABLE_CONSTRAINT_CASCADE - | SQL_AT_DROP_COLUMN_RESTRICT | SQL_AT_DROP_COLUMN_CASCADE; - break; - - case SQL_BOOKMARK_PERSISTENCE: /* ODBC 2.0 */ - /* very simple bookmark support */ - len = 4; - value = SQL_BP_SCROLL | SQL_BP_DELETE | SQL_BP_UPDATE - | SQL_BP_TRANSACTION; - break; - - case SQL_COLUMN_ALIAS: /* ODBC 2.0 */ - p = "Y"; - break; - - case SQL_CONCAT_NULL_BEHAVIOR: /* ODBC 1.0 */ - len = 2; - value = SQL_CB_NULL; - break; - - case SQL_CONVERT_GUID: - case SQL_CONVERT_SMALLINT: - case SQL_CONVERT_TINYINT: - case SQL_CONVERT_VARCHAR: - case SQL_CONVERT_DECIMAL: - case SQL_CONVERT_FLOAT: - case SQL_CONVERT_NUMERIC: - case SQL_CONVERT_DATE: - case SQL_CONVERT_TIME: - case SQL_CONVERT_BINARY: - case SQL_CONVERT_LONGVARBINARY: - case SQL_CONVERT_VARBINARY: /* ODBC 1.0 */ - case SQL_CONVERT_CHAR: - case SQL_CONVERT_LONGVARCHAR: -#ifdef UNICODE_SUPPORT - case SQL_CONVERT_WCHAR: - case SQL_CONVERT_WLONGVARCHAR: -#endif /* 
UNICODE_SUPPORT */ - len = sizeof(SQLUINTEGER); - value = 0; /* CONVERT is unavailable */ - break; - - case SQL_CONVERT_INTEGER: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_INTEGER | SQL_CVT_BIT | SQL_CVT_WVARCHAR | SQL_CVT_DOUBLE | SQL_CVT_BIGINT | SQL_CVT_REAL; - break; - - case SQL_CONVERT_BIT: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_BIT | SQL_CVT_INTEGER | SQL_CVT_WVARCHAR | SQL_CVT_DOUBLE | SQL_CVT_BIGINT | SQL_CVT_REAL; - break; - - case SQL_CONVERT_WVARCHAR: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_WVARCHAR | SQL_CVT_INTEGER | SQL_CVT_DOUBLE | SQL_CVT_BIGINT | SQL_CVT_REAL; - break; - - case SQL_CONVERT_DOUBLE: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_DOUBLE | SQL_CVT_INTEGER | SQL_CVT_BIT | SQL_CVT_WVARCHAR | SQL_CVT_BIGINT | SQL_CVT_REAL; - break; - - case SQL_CONVERT_BIGINT: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_BIGINT | SQL_CVT_INTEGER | SQL_CVT_BIT | SQL_CVT_WVARCHAR | SQL_CVT_DOUBLE | SQL_CVT_REAL; - break; - - case SQL_CONVERT_REAL: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_REAL | SQL_CVT_INTEGER | SQL_CVT_BIT | SQL_CVT_WVARCHAR | SQL_CVT_DOUBLE | SQL_CVT_BIGINT; - break; - - case SQL_CONVERT_TIMESTAMP: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_CVT_TIMESTAMP | SQL_CVT_WVARCHAR; - break; - - case SQL_CONVERT_FUNCTIONS: /* ODBC 1.0 */ - len = sizeof(SQLUINTEGER); - value = SQL_FN_CVT_CAST; - MYLOG(OPENSEARCH_DEBUG, "CONVERT_FUNCTIONS=" FORMAT_ULEN "\n", value); - break; - - case SQL_CORRELATION_NAME: /* ODBC 1.0 */ - - /* - * Saying no correlation name makes Query not work right. - * value = SQL_CN_NONE; - */ - len = 2; - value = SQL_CN_ANY; - break; - - case SQL_CURSOR_COMMIT_BEHAVIOR: /* ODBC 1.0 */ - len = 2; - value = SQL_CB_CLOSE; - break; - - case SQL_CURSOR_ROLLBACK_BEHAVIOR: /* ODBC 1.0 */ - len = 2; - value = SQL_CB_PRESERVE; - break; - - case SQL_DATA_SOURCE_NAME: /* ODBC 1.0 */ - p = CC_get_DSN(conn); - break; - - case SQL_DATA_SOURCE_READ_ONLY: /* ODBC 1.0 */ - p = "Y"; - break; - - case SQL_DATABASE_NAME: /* Support for old ODBC 1.0 Apps */ - - /* - * Returning the database name causes problems in MS Query. 
It - * generates query like: "SELECT DISTINCT a FROM byronnbad3 - * bad3" - * - * p = CC_get_database(conn); - */ - p = CurrCatString(conn); - break; - - case SQL_DBMS_NAME: /* ODBC 1.0 */ - p = "OpenSearch"; - break; - - case SQL_DBMS_VER: /* ODBC 1.0 */ - STRCPY_FIXED(tmp, conn->opensearch_version); - p = tmp; - break; - - case SQL_DEFAULT_TXN_ISOLATION: /* ODBC 1.0 */ - len = 4; - if (0 == conn->default_isolation) - conn->isolation = CC_get_isolation(conn); - value = conn->default_isolation; - break; - - case SQL_DRIVER_NAME: /* ODBC 1.0 */ - p = DRIVER_FILE_NAME; - break; - - case SQL_DRIVER_ODBC_VER: - SPRINTF_FIXED(odbcver, "%02x.%02x", ODBCVER / 256, ODBCVER % 256); - /* p = DRIVER_ODBC_VER; */ - p = odbcver; - break; - - case SQL_DRIVER_VER: /* ODBC 1.0 */ - p = OPENSEARCHDRIVERVERSION; - break; - - case SQL_EXPRESSIONS_IN_ORDERBY: /* ODBC 1.0 */ - p = "Y"; - break; - - case SQL_FETCH_DIRECTION: /* ODBC 1.0 */ - len = 4; - value = (SQL_FD_FETCH_NEXT | SQL_FD_FETCH_FIRST | SQL_FD_FETCH_LAST - | SQL_FD_FETCH_PRIOR | SQL_FD_FETCH_ABSOLUTE - | SQL_FD_FETCH_RELATIVE | SQL_FD_FETCH_BOOKMARK); - break; - - case SQL_FILE_USAGE: /* ODBC 2.0 */ - len = 2; - value = SQL_FILE_NOT_SUPPORTED; - break; - - case SQL_GETDATA_EXTENSIONS: /* ODBC 2.0 */ - len = 4; - value = (SQL_GD_ANY_COLUMN | SQL_GD_ANY_ORDER | SQL_GD_BOUND - | SQL_GD_BLOCK); - break; - - case SQL_GROUP_BY: /* ODBC 2.0 */ - len = 2; - value = SQL_GB_GROUP_BY_CONTAINS_SELECT; - break; - - case SQL_IDENTIFIER_CASE: /* ODBC 1.0 */ - - /* - * are identifiers case-sensitive (yes, but only when quoted. - * If not quoted, they default to lowercase) - */ - len = 2; - value = SQL_IC_LOWER; - break; - - case SQL_IDENTIFIER_QUOTE_CHAR: /* ODBC 1.0 */ - /* the character used to quote "identifiers" */ - p = "`"; - break; - - case SQL_KEYWORDS: /* ODBC 2.0 */ - p = NULL_STRING; - break; - - case SQL_LIKE_ESCAPE_CLAUSE: /* ODBC 2.0 */ - p = "Y"; - break; - - case SQL_LOCK_TYPES: /* ODBC 2.0 */ - len = 4; - value = SQL_LCK_NO_CHANGE; - break; - - case SQL_MAX_BINARY_LITERAL_LEN: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_MAX_CHAR_LITERAL_LEN: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_MAX_COLUMN_NAME_LEN: /* ODBC 1.0 */ - len = 2; - value = CC_get_max_idlen(conn); - if (0 == value) - value = NAMEDATALEN_V73 - 1; - break; - - case SQL_MAX_COLUMNS_IN_GROUP_BY: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_COLUMNS_IN_INDEX: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_COLUMNS_IN_ORDER_BY: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_COLUMNS_IN_SELECT: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_COLUMNS_IN_TABLE: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_CURSOR_NAME_LEN: /* ODBC 1.0 */ - len = 2; - value = MAX_CURSOR_LEN; - break; - - case SQL_MAX_INDEX_SIZE: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_MAX_OWNER_NAME_LEN: /* ODBC 1.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_PROCEDURE_NAME_LEN: /* ODBC 1.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_QUALIFIER_NAME_LEN: /* ODBC 1.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_ROW_SIZE: /* ODBC 2.0 */ - len = 4; - /* No limit with tuptoaster in 7.1+ */ - value = 0; - break; - - case SQL_MAX_STATEMENT_LEN: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_MAX_TABLE_NAME_LEN: /* ODBC 1.0 */ - len = 2; - if (OPENSEARCH_VERSION_GT(conn, 7.4)) - value = CC_get_max_idlen(conn); -#ifdef MAX_TABLE_LEN - else - value = 
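/*
 * Note on the conventions in this switch: entries that set p return
 * character data; entries that set len = 2 are written back to the caller
 * as SQLUSMALLINT, and len = 4 entries as SQLUINTEGER (see the numeric-data
 * branch at the end of this function).
 */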
MAX_TABLE_LEN; -#endif /* MAX_TABLE_LEN */ - if (0 == value) - value = NAMEDATALEN_V73 - 1; - break; - - case SQL_MAX_TABLES_IN_SELECT: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_MAX_USER_NAME_LEN: - len = 2; - value = 0; - break; - - case SQL_MULT_RESULT_SETS: /* ODBC 1.0 */ - /* Don't support multiple result sets but say yes anyway? */ - p = "Y"; - break; - - case SQL_MULTIPLE_ACTIVE_TXN: /* ODBC 1.0 */ - p = "Y"; - break; - - case SQL_NEED_LONG_DATA_LEN: /* ODBC 2.0 */ - - /* - * Don't need the length, SQLPutData can handle any size and - * multiple calls - */ - p = "N"; - break; - - case SQL_NON_NULLABLE_COLUMNS: /* ODBC 1.0 */ - len = 2; - value = SQL_NNC_NON_NULL; - break; - - case SQL_NULL_COLLATION: /* ODBC 2.0 */ - /* where are nulls sorted? */ - len = 2; - value = SQL_NC_HIGH; - break; - - case SQL_NUMERIC_FUNCTIONS: /* ODBC 1.0 */ - len = 4; - value = SQL_FN_NUM_ABS | SQL_FN_NUM_ATAN | SQL_FN_NUM_ATAN2 - | SQL_FN_NUM_COS | SQL_FN_NUM_COT | SQL_FN_NUM_DEGREES - | SQL_FN_NUM_FLOOR | SQL_FN_NUM_LOG | SQL_FN_NUM_LOG10 - | SQL_FN_NUM_PI | SQL_FN_NUM_POWER | SQL_FN_NUM_RADIANS - | SQL_FN_NUM_ROUND | SQL_FN_NUM_SIGN | SQL_FN_NUM_SIN - | SQL_FN_NUM_SQRT | SQL_FN_NUM_TAN; - break; - - case SQL_ODBC_API_CONFORMANCE: /* ODBC 1.0 */ - len = 2; - value = SQL_OAC_LEVEL1; - break; - - case SQL_ODBC_SAG_CLI_CONFORMANCE: /* ODBC 1.0 */ - len = 2; - value = SQL_OSCC_NOT_COMPLIANT; - break; - - case SQL_ODBC_SQL_CONFORMANCE: /* ODBC 1.0 */ - len = 2; - value = SQL_OSC_CORE; - break; - - case SQL_ODBC_SQL_OPT_IEF: /* ODBC 1.0 */ - p = "N"; - break; - - case SQL_OJ_CAPABILITIES: /* ODBC 2.01 */ - len = 4; - value = SQL_OJ_LEFT | SQL_OJ_RIGHT | SQL_OJ_NOT_ORDERED - | SQL_OJ_ALL_COMPARISON_OPS; - break; - - case SQL_ORDER_BY_COLUMNS_IN_SELECT: /* ODBC 2.0 */ - p = "Y"; - break; - - case SQL_OUTER_JOINS: /* ODBC 1.0 */ - p = "Y"; - break; - - case SQL_OWNER_TERM: /* ODBC 1.0 */ - p = ""; - break; - - case SQL_OWNER_USAGE: /* ODBC 2.0 */ - // OpenSearch does not support schemas. - // This will disable showing an empty schema box in Tableau. - len = 4; - value = 0; - break; - - case SQL_POS_OPERATIONS: /* ODBC 2.0 */ - len = 4; - value = (SQL_POS_POSITION | SQL_POS_REFRESH); - break; - - case SQL_POSITIONED_STATEMENTS: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_PROCEDURE_TERM: /* ODBC 1.0 */ - p = "procedure"; - break; - - case SQL_PROCEDURES: /* ODBC 1.0 */ - p = "Y"; - break; - - case SQL_QUALIFIER_LOCATION: /* ODBC 2.0 */ - len = 2; - value = 0; - break; - - case SQL_QUALIFIER_NAME_SEPARATOR: /* ODBC 1.0 */ - p = ""; - break; - - case SQL_QUALIFIER_TERM: /* ODBC 1.0 */ - p = ""; - break; - - case SQL_QUALIFIER_USAGE: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_QUOTED_IDENTIFIER_CASE: /* ODBC 2.0 */ - /* are "quoted" identifiers case-sensitive? YES! 
*/ - len = 2; - value = SQL_IC_SENSITIVE; - break; - - case SQL_ROW_UPDATES: /* ODBC 1.0 */ - - /* - * Driver doesn't support keyset-driven or mixed cursors, so - * not much point in saying row updates are supported - */ - p = "N"; - break; - - case SQL_SCROLL_CONCURRENCY: /* ODBC 1.0 */ - len = 4; - value = SQL_SCCO_READ_ONLY; - break; - - case SQL_SCROLL_OPTIONS: /* ODBC 1.0 */ - len = 4; - value = SQL_SO_FORWARD_ONLY | SQL_SO_STATIC; - break; - - case SQL_SEARCH_PATTERN_ESCAPE: /* ODBC 1.0 */ - p = ""; - break; - - case SQL_SERVER_NAME: /* ODBC 1.0 */ - p = CC_get_server(conn); - break; - - case SQL_SPECIAL_CHARACTERS: /* ODBC 2.0 */ - p = "_"; - break; - - case SQL_STATIC_SENSITIVITY: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_STRING_FUNCTIONS: /* ODBC 1.0 */ - len = 4; - value = SQL_FN_STR_ASCII | SQL_FN_STR_LENGTH | SQL_FN_STR_LTRIM - | SQL_FN_STR_REPLACE | SQL_FN_STR_RTRIM - | SQL_FN_STR_SUBSTRING; - break; - - case SQL_SUBQUERIES: /* ODBC 2.0 */ - len = 4; - value = (SQL_SQ_QUANTIFIED | SQL_SQ_IN | SQL_SQ_EXISTS - | SQL_SQ_COMPARISON); - break; - - case SQL_SYSTEM_FUNCTIONS: /* ODBC 1.0 */ - len = 4; - value = SQL_FN_SYS_IFNULL; - break; - - case SQL_TABLE_TERM: /* ODBC 1.0 */ - p = "table"; - break; - - case SQL_TIMEDATE_ADD_INTERVALS: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_TIMEDATE_DIFF_INTERVALS: /* ODBC 2.0 */ - len = 4; - value = 0; - break; - - case SQL_TIMEDATE_FUNCTIONS: /* ODBC 1.0 */ - len = 4; - value = SQL_FN_TD_CURDATE | SQL_FN_TD_DAYOFMONTH | SQL_FN_TD_MONTH - | SQL_FN_TD_MONTHNAME | SQL_FN_TD_NOW | SQL_FN_TD_YEAR; - break; - - case SQL_TXN_CAPABLE: /* ODBC 1.0 */ - /* - * OpenSearch does not support transactions. - */ - len = 2; - value = SQL_TC_NONE; - break; - - case SQL_TXN_ISOLATION_OPTION: /* ODBC 1.0 */ - len = 4; - value = SQL_TXN_READ_UNCOMMITTED | SQL_TXN_READ_COMMITTED - | SQL_TXN_REPEATABLE_READ | SQL_TXN_SERIALIZABLE; - break; - - case SQL_UNION: /* ODBC 2.0 */ - len = 4; - value = (SQL_U_UNION | SQL_U_UNION_ALL); - break; - - case SQL_USER_NAME: /* ODBC 1.0 */ - p = CC_get_username(conn); - break; - - /* Keys for ODBC 3.0 */ - case SQL_DYNAMIC_CURSOR_ATTRIBUTES1: - len = 4; - value = 0; - break; - case SQL_DYNAMIC_CURSOR_ATTRIBUTES2: - len = 4; - value = 0; - break; - case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1: - len = 4; - value = SQL_CA1_NEXT; /* others aren't allowed in ODBC spec */ - break; - case SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2: - len = 4; - value = SQL_CA2_READ_ONLY_CONCURRENCY | SQL_CA2_CRC_EXACT; - break; - case SQL_KEYSET_CURSOR_ATTRIBUTES1: - len = 4; - value = 0; - break; - case SQL_KEYSET_CURSOR_ATTRIBUTES2: - len = 4; - value = 0; - break; - - case SQL_STATIC_CURSOR_ATTRIBUTES1: - len = 4; - value = SQL_CA1_NEXT | SQL_CA1_ABSOLUTE | SQL_CA1_RELATIVE - | SQL_CA1_BOOKMARK | SQL_CA1_LOCK_NO_CHANGE - | SQL_CA1_POS_POSITION | SQL_CA1_POS_REFRESH; - break; - case SQL_STATIC_CURSOR_ATTRIBUTES2: - len = 4; - value = SQL_CA2_READ_ONLY_CONCURRENCY | SQL_CA2_CRC_EXACT; - break; - - case SQL_ODBC_INTERFACE_CONFORMANCE: - len = 4; - value = SQL_OIC_CORE; - break; - case SQL_ACTIVE_ENVIRONMENTS: - len = 2; - value = 0; - break; - case SQL_AGGREGATE_FUNCTIONS: - len = 4; - value = SQL_AF_ALL; - break; - case SQL_ALTER_DOMAIN: - len = 4; - value = 0; - break; - case SQL_ASYNC_MODE: - len = 4; - value = SQL_AM_NONE; - break; - case SQL_BATCH_ROW_COUNT: - len = 4; - value = SQL_BRC_EXPLICIT; - break; - case SQL_BATCH_SUPPORT: - len = 4; - value = SQL_BS_SELECT_EXPLICIT | SQL_BS_ROW_COUNT_EXPLICIT; - break; - case 
SQL_CATALOG_NAME: - p = "N"; - break; - case SQL_COLLATION_SEQ: - p = ""; - break; - case SQL_CREATE_ASSERTION: - len = 4; - value = 0; - break; - case SQL_CREATE_CHARACTER_SET: - len = 4; - value = 0; - break; - case SQL_CREATE_COLLATION: - len = 4; - value = 0; - break; - case SQL_CREATE_DOMAIN: - len = 4; - value = 0; - break; - case SQL_CREATE_SCHEMA: - len = 4; - value = SQL_CS_CREATE_SCHEMA | SQL_CS_AUTHORIZATION; - break; - case SQL_CREATE_TABLE: - len = 4; - value = SQL_CT_CREATE_TABLE | SQL_CT_COLUMN_CONSTRAINT - | SQL_CT_COLUMN_DEFAULT | SQL_CT_GLOBAL_TEMPORARY - | SQL_CT_TABLE_CONSTRAINT - | SQL_CT_CONSTRAINT_NAME_DEFINITION - | SQL_CT_CONSTRAINT_INITIALLY_DEFERRED - | SQL_CT_CONSTRAINT_INITIALLY_IMMEDIATE - | SQL_CT_CONSTRAINT_DEFERRABLE; - break; - case SQL_CREATE_TRANSLATION: - len = 4; - value = 0; - break; - case SQL_CREATE_VIEW: - len = 4; - value = SQL_CV_CREATE_VIEW; - break; - case SQL_DDL_INDEX: - len = 4; - value = SQL_DI_CREATE_INDEX | SQL_DI_DROP_INDEX; - break; - case SQL_DESCRIBE_PARAMETER: - p = "N"; - break; - case SQL_DROP_ASSERTION: - len = 4; - value = 0; - break; - case SQL_DROP_CHARACTER_SET: - len = 4; - value = 0; - break; - case SQL_DROP_COLLATION: - len = 4; - value = 0; - break; - case SQL_DROP_DOMAIN: - len = 4; - value = 0; - break; - case SQL_DROP_SCHEMA: - len = 4; - value = SQL_DS_DROP_SCHEMA | SQL_DS_RESTRICT | SQL_DS_CASCADE; - break; - case SQL_DROP_TABLE: - len = 4; - value = SQL_DT_DROP_TABLE; - value |= (SQL_DT_RESTRICT | SQL_DT_CASCADE); - break; - case SQL_DROP_TRANSLATION: - len = 4; - value = 0; - break; - case SQL_DROP_VIEW: - len = 4; - value = SQL_DV_DROP_VIEW; - value |= (SQL_DV_RESTRICT | SQL_DV_CASCADE); - break; - case SQL_INDEX_KEYWORDS: - len = 4; - value = SQL_IK_NONE; - break; - case SQL_INFO_SCHEMA_VIEWS: - len = 4; - value = 0; - break; - case SQL_INSERT_STATEMENT: - len = 4; - value = SQL_IS_INSERT_LITERALS | SQL_IS_INSERT_SEARCHED - | SQL_IS_SELECT_INTO; - break; - case SQL_MAX_IDENTIFIER_LEN: - len = 2; - value = CC_get_max_idlen(conn); - if (0 == value) - value = NAMEDATALEN_V73 - 1; - break; - case SQL_MAX_ROW_SIZE_INCLUDES_LONG: - p = "Y"; - break; - case SQL_PARAM_ARRAY_ROW_COUNTS: - len = 4; - value = SQL_PARC_BATCH; - break; - case SQL_PARAM_ARRAY_SELECTS: - len = 4; - value = SQL_PAS_BATCH; - break; - case SQL_SQL_CONFORMANCE: - // SQL plugin currently does not support this level, - // but Tableau requires at least Entry level reported for retrieving - // row data - len = 4; - value = SQL_SC_SQL92_ENTRY; - break; - case SQL_SQL92_DATETIME_FUNCTIONS: - len = 4; - value = 0; - break; - case SQL_SQL92_FOREIGN_KEY_DELETE_RULE: - len = 4; - value = SQL_SFKD_CASCADE | SQL_SFKD_NO_ACTION | SQL_SFKD_SET_DEFAULT - | SQL_SFKD_SET_NULL; - break; - case SQL_SQL92_FOREIGN_KEY_UPDATE_RULE: - len = 4; - value = SQL_SFKU_CASCADE | SQL_SFKU_NO_ACTION | SQL_SFKU_SET_DEFAULT - | SQL_SFKU_SET_NULL; - break; - case SQL_SQL92_GRANT: - len = 4; - value = SQL_SG_DELETE_TABLE | SQL_SG_INSERT_TABLE - | SQL_SG_REFERENCES_TABLE | SQL_SG_SELECT_TABLE - | SQL_SG_UPDATE_TABLE; - break; - case SQL_SQL92_NUMERIC_VALUE_FUNCTIONS: - len = 4; - value = 0; - break; - case SQL_SQL92_PREDICATES: - len = 4; - value = SQL_SP_BETWEEN | SQL_SP_COMPARISON | SQL_SP_IN - | SQL_SP_ISNULL | SQL_SP_LIKE; - break; - case SQL_SQL92_RELATIONAL_JOIN_OPERATORS: - len = 4; - value = SQL_SRJO_CROSS_JOIN | SQL_SRJO_INNER_JOIN - | SQL_SRJO_LEFT_OUTER_JOIN | SQL_SRJO_RIGHT_OUTER_JOIN; - break; - case SQL_SQL92_REVOKE: - len = 4; - value = SQL_SR_DELETE_TABLE | 
SQL_SR_INSERT_TABLE - | SQL_SR_REFERENCES_TABLE | SQL_SR_SELECT_TABLE - | SQL_SR_UPDATE_TABLE; - break; - case SQL_SQL92_ROW_VALUE_CONSTRUCTOR: - len = 4; - value = SQL_SRVC_VALUE_EXPRESSION | SQL_SRVC_NULL; - break; - case SQL_SQL92_STRING_FUNCTIONS: - len = 4; - value = SQL_SSF_LOWER | SQL_SSF_UPPER; - break; - case SQL_SQL92_VALUE_EXPRESSIONS: - len = 4; - value = SQL_SVE_CASE | SQL_SVE_CAST; - break; -#ifdef SQL_DTC_TRANSACTION_COST - case SQL_DTC_TRANSACTION_COST: -#else - case 1750: -#endif - len = 4; - break; - case SQL_DATETIME_LITERALS: - case SQL_DRIVER_HDESC: - case SQL_MAX_ASYNC_CONCURRENT_STATEMENTS: - case SQL_STANDARD_CLI_CONFORMANCE: - case SQL_CONVERT_INTERVAL_DAY_TIME: - len = 4; - value = 0; - break; - case SQL_DM_VER: - case SQL_XOPEN_CLI_YEAR: - len = 0; - value = 0; - break; - - default: - /* unrecognized key */ - CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, - "Unrecognized key passed to OPENSEARCHAPI_GetInfo.", NULL); - goto cleanup; - } - - ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_DEBUG, "p='%s', len=" FORMAT_ULEN ", value=" FORMAT_ULEN ", cbMax=%d\n", - p ? p : "", len, value, cbInfoValueMax); - - /* - * NOTE, that if rgbInfoValue is NULL, then no warnings or errors - * should result and just pcbInfoValue is returned, which indicates - * what length would be required if a real buffer had been passed in. - */ - if (p) { - /* char/binary data */ - len = strlen(p); - - if (rgbInfoValue) { -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) { - len = utf8_to_ucs2(p, len, (SQLWCHAR *)rgbInfoValue, - cbInfoValueMax / WCLEN); - len *= WCLEN; - } else -#endif /* UNICODE_SUPPORT */ - strncpy_null((char *)rgbInfoValue, p, (size_t)cbInfoValueMax); - - if (len >= (SQLULEN)cbInfoValueMax) { - ret = SQL_SUCCESS_WITH_INFO; - CC_set_error(conn, CONN_TRUNCATED, - "The buffer was too small for the InfoValue.", - func); - } - } -#ifdef UNICODE_SUPPORT - else if (CC_is_in_unicode_driver(conn)) - len *= WCLEN; -#endif /* UNICODE_SUPPORT */ - } else { - /* numeric data */ - if (rgbInfoValue) { - if (len == sizeof(SQLSMALLINT)) - *((SQLUSMALLINT *)rgbInfoValue) = (SQLUSMALLINT)value; - else if (len == sizeof(SQLINTEGER)) - *((SQLUINTEGER *)rgbInfoValue) = (SQLUINTEGER)value; - } - } - - if (pcbInfoValue) - *pcbInfoValue = (SQLSMALLINT)len; -cleanup: - - return ret; -} - -/* - * macros for opensearchtype_xxxx() calls which have OPENSEARCH_ATP_UNSET parameters - */ -#define OPENSEARCHTYPE_COLUMN_SIZE(conn, openSearchType) \ - opensearchtype_attr_column_size(conn, openSearchType, OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_ADT_UNSET, OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_TO_CONCISE_TYPE(conn, openSearchType) \ - opensearchtype_attr_to_concise_type(conn, openSearchType, OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_TO_SQLDESCTYPE(conn, openSearchType) \ - opensearchtype_attr_to_sqldesctype(conn, openSearchType, OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_BUFFER_LENGTH(conn, openSearchType) \ - opensearchtype_attr_buffer_length(conn, openSearchType, OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_ADT_UNSET, OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_DECIMAL_DIGITS(conn, openSearchType) \ - opensearchtype_attr_decimal_digits(conn, openSearchType, OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_TRANSFER_OCTET_LENGTH(conn, openSearchType) \ - opensearchtype_attr_transfer_octet_length(conn, openSearchType, 
OPENSEARCH_ATP_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_TO_NAME(conn, openSearchType, auto_increment) \ - opensearchtype_attr_to_name(conn, openSearchType, OPENSEARCH_ATP_UNSET, auto_increment) - -RETCODE SQL_API OPENSEARCHAPI_GetFunctions(HDBC hdbc, SQLUSMALLINT fFunction, - SQLUSMALLINT *pfExists) { - UNUSED(hdbc); - MYLOG(OPENSEARCH_TRACE, "entering...%u\n", fFunction); - - if (fFunction == SQL_API_ALL_FUNCTIONS) { - memset(pfExists, 0, sizeof(pfExists[0]) * 100); - - /* ODBC core functions */ - pfExists[SQL_API_SQLALLOCCONNECT] = TRUE; - pfExists[SQL_API_SQLALLOCENV] = TRUE; - pfExists[SQL_API_SQLALLOCSTMT] = TRUE; - pfExists[SQL_API_SQLBINDCOL] = TRUE; - pfExists[SQL_API_SQLCANCEL] = TRUE; - pfExists[SQL_API_SQLCOLATTRIBUTES] = TRUE; - pfExists[SQL_API_SQLCONNECT] = TRUE; - pfExists[SQL_API_SQLDESCRIBECOL] = TRUE; /* partial */ - pfExists[SQL_API_SQLDISCONNECT] = TRUE; - pfExists[SQL_API_SQLERROR] = TRUE; - pfExists[SQL_API_SQLEXECDIRECT] = TRUE; - pfExists[SQL_API_SQLEXECUTE] = TRUE; - pfExists[SQL_API_SQLFETCH] = TRUE; - pfExists[SQL_API_SQLFREECONNECT] = TRUE; - pfExists[SQL_API_SQLFREEENV] = TRUE; - pfExists[SQL_API_SQLFREESTMT] = TRUE; - pfExists[SQL_API_SQLGETCURSORNAME] = TRUE; - pfExists[SQL_API_SQLNUMRESULTCOLS] = TRUE; - pfExists[SQL_API_SQLPREPARE] = TRUE; /* complete? */ - pfExists[SQL_API_SQLROWCOUNT] = TRUE; - pfExists[SQL_API_SQLSETCURSORNAME] = TRUE; - pfExists[SQL_API_SQLSETPARAM] = FALSE; /* odbc 1.0 */ - pfExists[SQL_API_SQLTRANSACT] = TRUE; - - /* ODBC level 1 functions */ - pfExists[SQL_API_SQLBINDPARAMETER] = TRUE; - pfExists[SQL_API_SQLCOLUMNS] = TRUE; - pfExists[SQL_API_SQLDRIVERCONNECT] = TRUE; - pfExists[SQL_API_SQLGETCONNECTOPTION] = TRUE; /* partial */ - pfExists[SQL_API_SQLGETDATA] = TRUE; - pfExists[SQL_API_SQLGETFUNCTIONS] = TRUE; - pfExists[SQL_API_SQLGETINFO] = TRUE; - pfExists[SQL_API_SQLGETSTMTOPTION] = TRUE; /* partial */ - pfExists[SQL_API_SQLGETTYPEINFO] = TRUE; - pfExists[SQL_API_SQLPARAMDATA] = TRUE; - pfExists[SQL_API_SQLPUTDATA] = TRUE; - pfExists[SQL_API_SQLSETCONNECTOPTION] = TRUE; /* partial */ - pfExists[SQL_API_SQLSETSTMTOPTION] = TRUE; - pfExists[SQL_API_SQLSPECIALCOLUMNS] = TRUE; - pfExists[SQL_API_SQLSTATISTICS] = TRUE; - pfExists[SQL_API_SQLTABLES] = TRUE; - - /* ODBC level 2 functions */ - pfExists[SQL_API_SQLBROWSECONNECT] = FALSE; - pfExists[SQL_API_SQLCOLUMNPRIVILEGES] = FALSE; - pfExists[SQL_API_SQLDATASOURCES] = FALSE; /* only implemented by - * DM */ - if (SUPPORT_DESCRIBE_PARAM(ci)) - pfExists[SQL_API_SQLDESCRIBEPARAM] = TRUE; - else - pfExists[SQL_API_SQLDESCRIBEPARAM] = FALSE; /* not properly - * implemented */ - pfExists[SQL_API_SQLDRIVERS] = FALSE; /* only implemented by - * DM */ - pfExists[SQL_API_SQLEXTENDEDFETCH] = TRUE; - pfExists[SQL_API_SQLFOREIGNKEYS] = TRUE; - pfExists[SQL_API_SQLMORERESULTS] = TRUE; - pfExists[SQL_API_SQLNATIVESQL] = TRUE; - pfExists[SQL_API_SQLNUMPARAMS] = TRUE; - pfExists[SQL_API_SQLPARAMOPTIONS] = TRUE; - pfExists[SQL_API_SQLPRIMARYKEYS] = TRUE; - pfExists[SQL_API_SQLPROCEDURECOLUMNS] = TRUE; - pfExists[SQL_API_SQLPROCEDURES] = TRUE; - pfExists[SQL_API_SQLSETPOS] = TRUE; - pfExists[SQL_API_SQLSETSCROLLOPTIONS] = TRUE; /* odbc 1.0 */ - pfExists[SQL_API_SQLTABLEPRIVILEGES] = TRUE; - pfExists[SQL_API_SQLBULKOPERATIONS] = FALSE; - } else { - switch (fFunction) { - case SQL_API_SQLBINDCOL: - *pfExists = TRUE; - break; - case SQL_API_SQLCANCEL: - *pfExists = TRUE; - break; - case SQL_API_SQLCOLATTRIBUTE: - *pfExists = TRUE; - break; - case SQL_API_SQLCONNECT: - *pfExists = TRUE; 
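/*
 * A minimal caller-side sketch of this bitmap (an illustrative snippet, not
 * from the source; 'hdbc' is assumed to be a connected handle obtained
 * through the driver manager):
 *
 *   SQLUSMALLINT supported = SQL_FALSE;
 *   if (SQLGetFunctions(hdbc, SQL_API_SQLFOREIGNKEYS, &supported)
 *           == SQL_SUCCESS
 *       && supported == SQL_TRUE) {
 *       // safe to call SQLForeignKeys against this driver
 *   }
 */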
- break; - case SQL_API_SQLDESCRIBECOL: - *pfExists = TRUE; - break; /* partial */ - case SQL_API_SQLDISCONNECT: - *pfExists = TRUE; - break; - case SQL_API_SQLEXECDIRECT: - *pfExists = TRUE; - break; - case SQL_API_SQLEXECUTE: - *pfExists = TRUE; - break; - case SQL_API_SQLFETCH: - *pfExists = TRUE; - break; - case SQL_API_SQLFREESTMT: - *pfExists = TRUE; - break; - case SQL_API_SQLGETCURSORNAME: - *pfExists = TRUE; - break; - case SQL_API_SQLNUMRESULTCOLS: - *pfExists = TRUE; - break; - case SQL_API_SQLPREPARE: - *pfExists = TRUE; - break; - case SQL_API_SQLROWCOUNT: - *pfExists = TRUE; - break; - case SQL_API_SQLSETCURSORNAME: - *pfExists = TRUE; - break; - - /* ODBC level 1 functions */ - case SQL_API_SQLBINDPARAMETER: - *pfExists = TRUE; - break; - case SQL_API_SQLCOLUMNS: - *pfExists = TRUE; - break; - case SQL_API_SQLDRIVERCONNECT: - *pfExists = TRUE; - break; - case SQL_API_SQLGETDATA: - *pfExists = TRUE; - break; - case SQL_API_SQLGETFUNCTIONS: - *pfExists = TRUE; - break; - case SQL_API_SQLGETINFO: - *pfExists = TRUE; - break; - case SQL_API_SQLGETTYPEINFO: - *pfExists = TRUE; - break; - case SQL_API_SQLPARAMDATA: - *pfExists = TRUE; - break; - case SQL_API_SQLPUTDATA: - *pfExists = TRUE; - break; - case SQL_API_SQLSPECIALCOLUMNS: - *pfExists = TRUE; - break; - case SQL_API_SQLSTATISTICS: - *pfExists = TRUE; - break; - case SQL_API_SQLTABLES: - *pfExists = TRUE; - break; - - /* ODBC level 2 functions */ - case SQL_API_SQLBROWSECONNECT: - *pfExists = FALSE; - break; - case SQL_API_SQLCOLUMNPRIVILEGES: - *pfExists = FALSE; - break; - case SQL_API_SQLDATASOURCES: - *pfExists = FALSE; - break; /* only implemented by DM */ - case SQL_API_SQLDESCRIBEPARAM: - if (SUPPORT_DESCRIBE_PARAM(ci)) - *pfExists = TRUE; - else - *pfExists = FALSE; - break; /* not properly implemented */ - case SQL_API_SQLDRIVERS: - *pfExists = FALSE; - break; /* only implemented by DM */ - case SQL_API_SQLEXTENDEDFETCH: - *pfExists = TRUE; - break; - case SQL_API_SQLFOREIGNKEYS: - *pfExists = TRUE; - break; - case SQL_API_SQLMORERESULTS: - *pfExists = TRUE; - break; - case SQL_API_SQLNATIVESQL: - *pfExists = TRUE; - break; - case SQL_API_SQLNUMPARAMS: - *pfExists = TRUE; - break; - case SQL_API_SQLPRIMARYKEYS: - *pfExists = TRUE; - break; - case SQL_API_SQLPROCEDURECOLUMNS: - *pfExists = TRUE; - break; - case SQL_API_SQLPROCEDURES: - *pfExists = TRUE; - break; - case SQL_API_SQLSETPOS: - *pfExists = TRUE; - break; - case SQL_API_SQLTABLEPRIVILEGES: - *pfExists = TRUE; - break; - case SQL_API_SQLBULKOPERATIONS: /* 24 */ - case SQL_API_SQLALLOCHANDLE: /* 1001 */ - case SQL_API_SQLBINDPARAM: /* 1002 */ - case SQL_API_SQLCLOSECURSOR: /* 1003 */ - case SQL_API_SQLENDTRAN: /* 1005 */ - case SQL_API_SQLFETCHSCROLL: /* 1021 */ - case SQL_API_SQLFREEHANDLE: /* 1006 */ - case SQL_API_SQLGETCONNECTATTR: /* 1007 */ - case SQL_API_SQLGETDESCFIELD: /* 1008 */ - case SQL_API_SQLGETDIAGFIELD: /* 1010 */ - case SQL_API_SQLGETDIAGREC: /* 1011 */ - case SQL_API_SQLGETENVATTR: /* 1012 */ - case SQL_API_SQLGETSTMTATTR: /* 1014 */ - case SQL_API_SQLSETCONNECTATTR: /* 1016 */ - case SQL_API_SQLSETDESCFIELD: /* 1017 */ - case SQL_API_SQLSETENVATTR: /* 1019 */ - case SQL_API_SQLSETSTMTATTR: /* 1020 */ - *pfExists = TRUE; - break; - case SQL_API_SQLGETDESCREC: /* 1009 */ - case SQL_API_SQLSETDESCREC: /* 1018 */ - case SQL_API_SQLCOPYDESC: /* 1004 */ - *pfExists = FALSE; - break; - default: - *pfExists = FALSE; - break; - } - } - return SQL_SUCCESS; -} - -char *identifierEscape(const SQLCHAR *src, SQLLEN srclen, - const ConnectionClass 
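/*
 * What this routine produces, illustratively: with double_quote set and
 * IDENTIFIER_QUOTE being the backtick reported by SQL_IDENTIFIER_QUOTE_CHAR
 * above (an assumption for this example), the input  my`table  becomes
 * `my``table`  : embedded quote and escape characters are doubled, and
 * multibyte characters are copied through untouched.
 */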
*conn, char *buf, size_t bufsize, - BOOL double_quote) { - int i; - size_t outlen; - UCHAR tchar; - char *dest = NULL, escape_ch = CC_get_escape(conn); - encoded_str encstr; - - if (!src || srclen == SQL_NULL_DATA) - return dest; - else if (srclen == SQL_NTS) - srclen = (SQLLEN)strlen((char *)src); - if (srclen <= 0) - return dest; - MYLOG(OPENSEARCH_TRACE, "entering in=%s(" FORMAT_LEN ")\n", src, srclen); - if (NULL != buf && bufsize > 0) - dest = buf; - else { - bufsize = 2 * srclen + 1; - dest = malloc(bufsize); - } - if (!dest) - return NULL; - encoded_str_constr(&encstr, conn->ccsc, (char *)src); - outlen = 0; - if (double_quote) - dest[outlen++] = IDENTIFIER_QUOTE; - for (i = 0, tchar = (UCHAR)encoded_nextchar(&encstr); - i < srclen && outlen < bufsize - 1; - i++, tchar = (UCHAR)encoded_nextchar(&encstr)) { - if (MBCS_NON_ASCII(encstr)) { - dest[outlen++] = tchar; - continue; - } - if (LITERAL_QUOTE == tchar || escape_ch == tchar) - dest[outlen++] = tchar; - else if (double_quote && IDENTIFIER_QUOTE == tchar) - dest[outlen++] = tchar; - dest[outlen++] = tchar; - } - if (double_quote) - dest[outlen++] = IDENTIFIER_QUOTE; - dest[outlen] = '\0'; - MYLOG(OPENSEARCH_TRACE, "leaving output=%s(%d)\n", dest, (int)outlen); - return dest; -} - -#define CSTR_SYS_TABLE "SYSTEM TABLE" -#define CSTR_TABLE "TABLE" -#define CSTR_VIEW "VIEW" -#define CSTR_FOREIGN_TABLE "FOREIGN TABLE" -#define CSTR_MATVIEW "MATVIEW" - -#define IS_VALID_NAME(str) ((str) && (str)[0]) -#define TABLE_IN_RELKIND "('r', 'v', 'm', 'f', 'p')" - -/* - * macros for opensearchtype_attr_xxxx() calls which have - * OPENSEARCH_ADT_UNSET or OPENSEARCH_UNKNOWNS_UNSET parameters - */ -#define OPENSEARCHTYPE_ATTR_COLUMN_SIZE(conn, openSearchType, atttypmod) \ - opensearchtype_attr_column_size(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_TO_CONCISE_TYPE(conn, openSearchType, atttypmod) \ - opensearchtype_attr_to_concise_type(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_TO_SQLDESCTYPE(conn, openSearchType, atttypmod) \ - opensearchtype_attr_to_sqldesctype(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_DISPLAY_SIZE(conn, openSearchType, atttypmod) \ - opensearchtype_attr_display_size(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_BUFFER_LENGTH(conn, openSearchType, atttypmod) \ - opensearchtype_attr_buffer_length(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_DECIMAL_DIGITS(conn, openSearchType, atttypmod) \ - opensearchtype_attr_decimal_digits(conn, openSearchType, atttypmod, OPENSEARCH_ADT_UNSET, \ - OPENSEARCH_UNKNOWNS_UNSET) -#define OPENSEARCHTYPE_ATTR_TRANSFER_OCTET_LENGTH(conn, openSearchType, atttypmod) \ - opensearchtype_attr_transfer_octet_length(conn, openSearchType, atttypmod, \ - OPENSEARCH_UNKNOWNS_UNSET) - -RETCODE SQL_API OPENSEARCHAPI_SpecialColumns( - HSTMT hstmt, SQLUSMALLINT fColType, const SQLCHAR *szTableQualifier, - SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ - SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ - SQLSMALLINT cbTableName, SQLUSMALLINT fScope, SQLUSMALLINT fNullable) { - UNUSED(fColType, szTableQualifier, cbTableQualifier, szTableOwner, - cbTableOwner, szTableName, cbTableName, fScope, fNullable); - CSTR func = 
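/*
 * OPENSEARCHAPI_SpecialColumns and the catalog entry points that follow
 * (Statistics, ColumnPrivileges, PrimaryKeys, ForeignKeys, ProcedureColumns,
 * Procedures) all build a zero-row result set that carries only the
 * ODBC-mandated column layout. A condensed sketch of the shared shape; the
 * helper name empty_catalog_result is hypothetical:
 *
 *   static RETCODE empty_catalog_result(StatementClass *stmt, int nfields) {
 *       QResultClass *res = QR_Constructor();
 *       if (!res)
 *           return SQL_ERROR;    // callers then set STMT_NO_MEMORY_ERROR
 *       QR_set_conn(res, SC_get_conn(stmt));
 *       SC_set_Result(stmt, res);
 *       extend_column_bindings(SC_get_ARDF(stmt), nfields);
 *       stmt->catalog_result = TRUE;
 *       QR_set_num_fields(res, nfields);
 *       // ... per-column QR_set_field_info_v calls go here ...
 *       QR_set_rstatus(res, PORES_FIELDS_OK);
 *       stmt->status = STMT_FINISHED;
 *       stmt->currTuple = -1;
 *       SC_set_rowset_start(stmt, -1, FALSE);
 *       SC_set_current_col(stmt, -1);
 *       return SQL_SUCCESS;
 *   }
 */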
"OPENSEARCHAPI_SpecialColumns"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE result; - if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (!res) { - SC_set_error( - stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_SpecialColumns result.", func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_SPECOLS_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_SPECOLS_FIELDS); - QR_set_field_info_v(res, SPECOLS_SCOPE, "SCOPE", OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, SPECOLS_COLUMN_NAME, "COLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, SPECOLS_DATA_TYPE, "DATA_TYPE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, SPECOLS_TYPE_NAME, "TYPE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, SPECOLS_COLUMN_SIZE, "COLUMN_SIZE", - OPENSEARCH_TYPE_INT4, - 4); - QR_set_field_info_v(res, SPECOLS_BUFFER_LENGTH, "BUFFER_LENGTH", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, SPECOLS_DECIMAL_DIGITS, "DECIMAL_DIGITS", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, SPECOLS_PSEUDO_COLUMN, "PSEUDO_COLUMN", - OPENSEARCH_TYPE_INT2, 2); - - // Set result to okay and adjust fields if keys exist - QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->status = STMT_FINISHED; - stmt->currTuple = -1; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return SQL_SUCCESS; -} - -#define INDOPTION_DESC 0x0001 /* values are in reverse order */ -RETCODE SQL_API OPENSEARCHAPI_Statistics( - HSTMT hstmt, const SQLCHAR *szTableQualifier, /* OA X*/ - SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ - SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ - SQLSMALLINT cbTableName, SQLUSMALLINT fUnique, SQLUSMALLINT fAccuracy) { - UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, - szTableName, cbTableName, fUnique, fAccuracy); - CSTR func = "OPENSEARCHAPI_Statistics"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE result; - if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (!res) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_Statistics result.", - func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_STATS_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_STATS_FIELDS); - QR_set_field_info_v(res, STATS_CATALOG_NAME, "TABLE_QUALIFIER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_SCHEMA_NAME, "TABLE_OWNER", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_TABLE_NAME, "TABLE_NAME", - 
OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_NON_UNIQUE, "NON_UNIQUE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, STATS_INDEX_QUALIFIER, "INDEX_QUALIFIER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_INDEX_NAME, "INDEX_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_TYPE, "TYPE", OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, STATS_SEQ_IN_INDEX, "SEQ_IN_INDEX", - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, STATS_COLUMN_NAME, "COLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, STATS_COLLATION, "COLLATION", OPENSEARCH_TYPE_CHAR, 1); - QR_set_field_info_v(res, STATS_CARDINALITY, "CARDINALITY", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, STATS_PAGES, "PAGES", OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, STATS_FILTER_CONDITION, "FILTER_CONDITION", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - - // Set result to okay and adjust fields if keys exist - QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->status = STMT_FINISHED; - stmt->currTuple = -1; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_ColumnPrivileges( - HSTMT hstmt, const SQLCHAR *szTableQualifier, /* OA X*/ - SQLSMALLINT cbTableQualifier, const SQLCHAR *szTableOwner, /* OA E*/ - SQLSMALLINT cbTableOwner, const SQLCHAR *szTableName, /* OA(R) E*/ - SQLSMALLINT cbTableName, const SQLCHAR *szColumnName, /* PV E*/ - SQLSMALLINT cbColumnName, UWORD flag) { - UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, - szTableName, cbTableName, szColumnName, cbColumnName, flag); - CSTR func = "OPENSEARCHAPI_ColumnPrivileges"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE result; - if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (!res) { - SC_set_error( - stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_ColumnPrivileges result.", - func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_COLPRIV_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_COLPRIV_FIELDS); - QR_set_field_info_v(res, COLPRIV_TABLE_CAT, "TABLE_CAT", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_TABLE_SCHEM, "TABLE_SCHEM", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_TABLE_NAME, "TABLE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_COLUMN_NAME, "COLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_GRANTOR, "GRANTOR", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_GRANTEE, "GRANTEE", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_PRIVILEGE, "PRIVILEGE", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLPRIV_IS_GRANTABLE, "IS_GRANTABLE", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - - // 
Set result to okay and adjust fields if keys exist
-    QR_set_rstatus(res, PORES_FIELDS_OK);
-    res->num_fields = CI_get_num_fields(QR_get_fields(res));
-    if (QR_haskeyset(res))
-        res->num_fields -= res->num_key_fields;
-
-    // Finalize data
-    stmt->status = STMT_FINISHED;
-    stmt->currTuple = -1;
-    SC_set_rowset_start(stmt, -1, FALSE);
-    SC_set_current_col(stmt, -1);
-
-    return SQL_SUCCESS;
-}
-
-/*
- *	SQLPrimaryKeys()
- *
- *	Retrieve the primary key columns for the specified table.
- */
-RETCODE SQL_API OPENSEARCHAPI_PrimaryKeys(HSTMT hstmt,
-                                          const SQLCHAR *szTableQualifier, /* OA X*/
-                                          SQLSMALLINT cbTableQualifier,
-                                          const SQLCHAR *szTableOwner, /* OA E*/
-                                          SQLSMALLINT cbTableOwner,
-                                          const SQLCHAR *szTableName, /* OA(R) E*/
-                                          SQLSMALLINT cbTableName, OID reloid) {
-    UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner,
-           szTableName, cbTableName, reloid);
-    CSTR func = "OPENSEARCHAPI_PrimaryKeys";
-
-    // Initialize Statement
-    StatementClass *stmt = (StatementClass *)hstmt;
-    RETCODE ret = SC_initialize_and_recycle(stmt);
-    if (ret != SQL_SUCCESS)
-        return ret;
-
-    // Initialize QResultClass
-    QResultClass *res = QR_Constructor();
-    if (res == NULL) {
-        SC_set_error(stmt, STMT_NO_MEMORY_ERROR,
-                     "Couldn't allocate memory for OPENSEARCHAPI_PrimaryKeys result.",
-                     func);
-        return SQL_ERROR;
-    }
-
-    // Link QResultClass to statement and connection
-    QR_set_conn(res, SC_get_conn(stmt));
-    SC_set_Result(stmt, res);
-
-    // Set number of fields and declare as catalog result
-    extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PKS_FIELDS);
-    stmt->catalog_result = TRUE;
-
-    // Setup fields
-    QR_set_num_fields(res, NUM_OF_PKS_FIELDS);
-    QR_set_field_info_v(res, PKS_TABLE_CAT, "TABLE_QUALIFIER",
-                        OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING);
-    QR_set_field_info_v(res, PKS_TABLE_SCHEM, "TABLE_OWNER",
-                        OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING);
-    QR_set_field_info_v(res, PKS_TABLE_NAME, "TABLE_NAME",
-                        OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING);
-    QR_set_field_info_v(res, PKS_COLUMN_NAME, "COLUMN_NAME",
-                        OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING);
-    QR_set_field_info_v(res, PKS_KEY_SQ, "KEY_SEQ", OPENSEARCH_TYPE_INT2, 2);
-    QR_set_field_info_v(res, PKS_PK_NAME, "PK_NAME", OPENSEARCH_TYPE_VARCHAR,
-                        MAX_INFO_STRING);
-
-    // Set result to okay and adjust fields if keys exist
-    QR_set_rstatus(res, PORES_FIELDS_OK);
-    res->num_fields = CI_get_num_fields(QR_get_fields(res));
-    if (QR_haskeyset(res))
-        res->num_fields -= res->num_key_fields;
-
-    // Finalize data
-    stmt->currTuple = -1;
-    stmt->status = STMT_FINISHED;
-    SC_set_rowset_start(stmt, -1, FALSE);
-    SC_set_current_col(stmt, -1);
-
-    return ret;
-}
-
-RETCODE SQL_API OPENSEARCHAPI_ForeignKeys(
-    HSTMT hstmt, const SQLCHAR *szPkTableQualifier, /* OA X*/
-    SQLSMALLINT cbPkTableQualifier, const SQLCHAR *szPkTableOwner, /* OA E*/
-    SQLSMALLINT cbPkTableOwner, const SQLCHAR *szPkTableName, /* OA(R) E*/
-    SQLSMALLINT cbPkTableName, const SQLCHAR *szFkTableQualifier, /* OA X*/
-    SQLSMALLINT cbFkTableQualifier, const SQLCHAR *szFkTableOwner, /* OA E*/
-    SQLSMALLINT cbFkTableOwner, const SQLCHAR *szFkTableName, /* OA(R) E*/
-    SQLSMALLINT cbFkTableName) {
-    UNUSED(szPkTableQualifier, cbPkTableQualifier, szPkTableOwner,
-           cbPkTableOwner, szPkTableName, cbPkTableName, szFkTableQualifier,
-           cbFkTableQualifier, szFkTableOwner, cbFkTableOwner, szFkTableName,
-           cbFkTableName);
-    CSTR func = "OPENSEARCHAPI_ForeignKeys";
-
-    // Initialize Statement
-    StatementClass *stmt = (StatementClass *)hstmt;
-    RETCODE result;
-    if (result = 
SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (!res) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_ForeignKeys result.", - func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_FKS_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_FKS_FIELDS); - QR_set_field_info_v(res, FKS_PKTABLE_CAT, "PKTABLE_QUALIFIER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_PKTABLE_SCHEM, "PKTABLE_OWNER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_PKTABLE_NAME, "PKTABLE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_PKCOLUMN_NAME, "PKCOLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_FKTABLE_CAT, "FKTABLE_QUALIFIER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_FKTABLE_SCHEM, "FKTABLE_OWNER", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_FKTABLE_NAME, "FKTABLE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_FKCOLUMN_NAME, "FKCOLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_KEY_SEQ, "KEY_SEQ", OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, FKS_UPDATE_RULE, "UPDATE_RULE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, FKS_DELETE_RULE, "DELETE_RULE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, FKS_FK_NAME, "FK_NAME", OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_PK_NAME, "PK_NAME", OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, FKS_DEFERRABILITY, "DEFERRABILITY", - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, FKS_TRIGGER_NAME, "TRIGGER_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - - // Set result to okay and adjust fields if keys exist - QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->status = STMT_FINISHED; - stmt->currTuple = -1; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return SQL_SUCCESS; -} - -#define PRORET_COUNT -#define DISPLAY_ARGNAME - -RETCODE SQL_API OPENSEARCHAPI_ProcedureColumns( - HSTMT hstmt, const SQLCHAR *szProcQualifier, /* OA X*/ - SQLSMALLINT cbProcQualifier, const SQLCHAR *szProcOwner, /* PV E*/ - SQLSMALLINT cbProcOwner, const SQLCHAR *szProcName, /* PV E*/ - SQLSMALLINT cbProcName, const SQLCHAR *szColumnName, /* PV X*/ - SQLSMALLINT cbColumnName, UWORD flag) { - UNUSED(szProcQualifier, cbProcQualifier, szProcOwner, cbProcOwner, - szProcName, cbProcName, szColumnName, cbColumnName, flag); - CSTR func = "OPENSEARCHAPI_ProcedureColumns"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE ret = SC_initialize_and_recycle(stmt); - if (ret != SQL_SUCCESS) - return ret; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (res == NULL) { - SC_set_error( - stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_ProcedureColumns result.", - func); - return SQL_ERROR; - } - - // Link 
QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PROCOLS_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_PROCOLS_FIELDS); - QR_set_field_info_v(res, PROCOLS_PROCEDURE_CAT, "PROCEDURE_CAT", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_PROCEDURE_SCHEM, "PROCEDURE_SCHEM", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_PROCEDURE_NAME, "PROCEDURE_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_COLUMN_NAME, "COLUMN_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_COLUMN_TYPE, "COLUMN_TYPE", - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, PROCOLS_DATA_TYPE, "DATA_TYPE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_TYPE_NAME, "TYPE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_COLUMN_SIZE, "COLUMN_SIZE", - OPENSEARCH_TYPE_INT4, - 4); - QR_set_field_info_v(res, PROCOLS_BUFFER_LENGTH, "BUFFER_LENGTH", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, PROCOLS_DECIMAL_DIGITS, "DECIMAL_DIGITS", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_NUM_PREC_RADIX, "NUM_PREC_RADIX", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_NULLABLE, "NULLABLE", OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_REMARKS, "REMARKS", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_COLUMN_DEF, "COLUMN_DEF", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, PROCOLS_SQL_DATA_TYPE, "SQL_DATA_TYPE", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_SQL_DATETIME_SUB, "SQL_DATETIME_SUB", - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, PROCOLS_CHAR_OCTET_LENGTH, "CHAR_OCTET_LENGTH", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, PROCOLS_ORDINAL_POSITION, "ORDINAL_POSITION", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, PROCOLS_IS_NULLABLE, "IS_NULLABLE", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - - // Set result to okay and adjust fields if keys exist - QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->currTuple = -1; - stmt->status = STMT_FINISHED; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return ret; -} - -RETCODE SQL_API OPENSEARCHAPI_Procedures(HSTMT hstmt, - const SQLCHAR *szProcQualifier, /* OA X*/ - SQLSMALLINT cbProcQualifier, - const SQLCHAR *szProcOwner, /* PV E*/ - SQLSMALLINT cbProcOwner, - const SQLCHAR *szProcName, /* PV E*/ - SQLSMALLINT cbProcName, UWORD flag) { - UNUSED(szProcQualifier, cbProcQualifier, szProcOwner, cbProcOwner, - szProcName, cbProcName, flag); - CSTR func = "OPENSEARCHAPI_Procedures"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE ret = SC_initialize_and_recycle(stmt); - if (ret != SQL_SUCCESS) - return ret; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (res == NULL) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_Procedures result.", - func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res,
SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_PRO_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_PRO_FIELDS); - QR_set_field_info_v(res, PRO_PROCEDURE_CAT, "PRO_PROCEDURE_CAT", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PRO_PROCEDURE_SCHEM, "PRO_PROCEDURE_SCHEM", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PRO_PROCEDURE_NAME, "PRO_PROCEDURE_NAME", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, PRO_NUM_INPUT_PARAMS, "PRO_NUM_INPUT_PARAMS", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, PRO_NUM_OUTPUT_PARAMS, "PRO_NUM_OUTPUT_PARAMS", - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, PRO_RESULT_SETS, "PRO_RESULT_SETS", - OPENSEARCH_TYPE_INT4, - 4); - QR_set_field_info_v(res, PRO_REMARKS, "PRO_REMARKS", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, PRO_PROCEDURE_TYPE, "PRO_PROCEDURE_TYPE", - OPENSEARCH_TYPE_INT2, 2); - - // Set result to okay and adjust fields if keys exist - QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->currTuple = -1; - stmt->status = STMT_FINISHED; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return ret; -} - -#define ACLMAX 8 -#define ALL_PRIVILEGES "arwdRxt" - -RETCODE SQL_API OPENSEARCHAPI_TablePrivileges(HSTMT hstmt, - const SQLCHAR *szTableQualifier, /* OA X*/ - SQLSMALLINT cbTableQualifier, - const SQLCHAR *szTableOwner, /* PV E*/ - SQLSMALLINT cbTableOwner, - const SQLCHAR *szTableName, /* PV E*/ - SQLSMALLINT cbTableName, UWORD flag) { - UNUSED(szTableQualifier, cbTableQualifier, szTableOwner, cbTableOwner, - szTableName, cbTableName, flag); - CSTR func = "OPENSEARCHAPI_TablePrivileges"; - - // Initialize Statement - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE result; - if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - // Initialize QResultClass - QResultClass *res = QR_Constructor(); - if (!res) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for OPENSEARCHAPI_TablePrivileges result.", - func); - return SQL_ERROR; - } - - // Link QResultClass to statement and connection - QR_set_conn(res, SC_get_conn(stmt)); - SC_set_Result(stmt, res); - - // Set number of fields and declare as catalog result - extend_column_bindings(SC_get_ARDF(stmt), NUM_OF_TABPRIV_FIELDS); - stmt->catalog_result = TRUE; - - // Setup fields - QR_set_num_fields(res, NUM_OF_TABPRIV_FIELDS); - QR_set_field_info_v(res, TABPRIV_TABLE_CAT, "TABLE_CAT", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_TABLE_SCHEM, "TABLE_SCHEM", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_TABLE_NAME, "TABLE_NAME", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_GRANTOR, "GRANTOR", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_GRANTEE, "GRANTEE", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_PRIVILEGE, "PRIVILEGE", - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABPRIV_IS_GRANTABLE, "IS_GRANTABLE", - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - - // Set result to okay and adjust fields if keys exist -
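All of these OPENSEARCHAPI_* catalog stubs follow the same pattern: allocate a QResultClass, declare the ODBC-specified column set, and finish with zero rows, since OpenSearch has no primary keys, foreign keys, procedures, or privilege grants to report. A minimal client-side sketch of what that looks like through the driver manager; it assumes an already-connected SQLHDBC, elides error handling, and "my_index" is a made-up table name:

```c
#include <stdio.h>
#include <sql.h>
#include <sqlext.h>

/* Sketch (not from this patch): observing an intentionally empty
 * catalog result returned by the stubs above. */
static void show_primary_keys(SQLHDBC dbc) {
    SQLHSTMT stmt = SQL_NULL_HSTMT;
    SQLSMALLINT cols = 0;

    SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt);
    if (SQL_SUCCEEDED(SQLPrimaryKeys(stmt, NULL, 0, NULL, 0,
                                     (SQLCHAR *)"my_index", SQL_NTS))) {
        SQLNumResultCols(stmt, &cols); /* 6 columns, as declared above */
        /* No keys exist, so the very first fetch returns SQL_NO_DATA. */
        printf("columns=%d, fetch=%d (SQL_NO_DATA=%d)\n",
               (int)cols, (int)SQLFetch(stmt), (int)SQL_NO_DATA);
    }
    SQLFreeHandle(SQL_HANDLE_STMT, stmt);
}
```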
QR_set_rstatus(res, PORES_FIELDS_OK); - res->num_fields = CI_get_num_fields(QR_get_fields(res)); - if (QR_haskeyset(res)) - res->num_fields -= res->num_key_fields; - - // Finalize data - stmt->status = STMT_FINISHED; - stmt->currTuple = -1; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - return SQL_SUCCESS; -} diff --git a/sql-odbc/src/sqlodbc/loadlib.c b/sql-odbc/src/sqlodbc/loadlib.c deleted file mode 100644 index 78015e63fa..0000000000 --- a/sql-odbc/src/sqlodbc/loadlib.c +++ /dev/null @@ -1,258 +0,0 @@ -#include -#include -#include -#ifndef WIN32 -#include -#endif /* WIN32 */ - -#include "opensearch_enlist.h" -#include "loadlib.h" -#include "misc.h" - -#ifdef WIN32 -#ifdef _MSC_VER -#pragma comment(lib, "Delayimp") -#ifdef _HANDLE_ENLIST_IN_DTC_ -#ifdef UNICODE_SUPPORT -#pragma comment(lib, "opensearch_enlist") -#else -#pragma comment(lib, "opensearch_enlista") -#endif /* UNICODE_SUPPORT */ -#endif /* _HANDLE_ENLIST_IN_DTC_ */ -// The following works under VC++6.0 but doesn't work under VC++7.0. -// Please add the equivalent linker options using command line etc. -#if (_MSC_VER == 1200) && defined(DYNAMIC_LOAD) // VC6.0 -#ifdef UNICODE_SUPPORT -#pragma comment(linker, "/Delayload:opensearch_enlist.dll") -#else -#pragma comment(linker, "/Delayload:opensearch_enlista.dll") -#endif /* UNICODE_SUPPORT */ -#pragma comment(linker, "/Delay:UNLOAD") -#endif /* _MSC_VER */ -#endif /* _MSC_VER */ - -#if defined(DYNAMIC_LOAD) -#define WIN_DYN_LOAD -#ifdef UNICODE_SUPPORT -CSTR opensearch_enlist = "opensearch_enlist"; -CSTR opensearch_enlistdll = "opensearch_enlist.dll"; -CSTR elasticodbc = "sqlodbc35w"; -CSTR elasticodbcdll = "sqlodbc35w.dll"; -#else -CSTR opensearch_enlist = "opensearch_enlista"; -CSTR opensearch_enlistdll = "opensearch_enlista.dll"; -CSTR elasticodbc = "sqlodbc30a"; -CSTR elasticodbcdll = "sqlodbc30a.dll"; -#endif /* UNICODE_SUPPORT */ -#if defined(_MSC_VER) && (_MSC_VER >= 1200) -#define _MSC_DELAY_LOAD_IMPORT -#endif /* MSC_VER */ -#endif /* DYNAMIC_LOAD */ -#endif /* WIN32 */ - -#if defined(_MSC_DELAY_LOAD_IMPORT) -/* - * Error hook function for delay load import. - * Try to load a DLL based on elasticodbc path. - */ -#if (_MSC_VER >= 1900) /* vc14 or later */ -#define TRY_DLI_HOOK __try { -#define RELEASE_NOTIFY_HOOK -#elif (_MSC_VER < 1300) /* vc6 */ -extern PfnDliHook __pfnDliFailureHook; -extern PfnDliHook __pfnDliNotifyHook; -#define TRY_DLI_HOOK \ - __try { \ - __pfnDliFailureHook = DliErrorHook; \ - __pfnDliNotifyHook = DliErrorHook; -#define RELEASE_NOTIFY_HOOK __pfnDliNotifyHook = NULL; -#else /* vc7 ~ 12 */ -extern PfnDliHook __pfnDliFailureHook2; -extern PfnDliHook __pfnDliNotifyHook2; -#define TRY_DLI_HOOK \ - __try { \ - __pfnDliFailureHook2 = DliErrorHook; \ - __pfnDliNotifyHook2 = DliErrorHook; -#define RELEASE_NOTIFY_HOOK __pfnDliNotifyHook2 = NULL; -#endif /* _MSC_VER */ -#else -#define TRY_DLI_HOOK __try { -#define RELEASE_NOTIFY_HOOK -#endif /* _MSC_DELAY_LOAD_IMPORT */ - -#if defined(_MSC_DELAY_LOAD_IMPORT) -static BOOL loaded_opensearch_enlist = FALSE; -static HMODULE enlist_module = NULL; -static BOOL loaded_elasticodbc = FALSE; -/* - * Load a DLL based on elasticodbc path.
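loadlib.c, removed in this patch, existed mainly to manage MSVC delay-loaded DLLs: when opensearch_enlist.dll could not be resolved on the normal search path, a failure hook retried the load from the driver's own directory. A hedged sketch of that hook pattern, with a placeholder DLL name and fallback path; the hook-pointer name assumes VC7+ delayimp.h, and since VS2015 the pointer must be defined as const rather than assigned at runtime:

```c
#include <windows.h>
#include <string.h>
#include <delayimp.h>

/* Sketch: retry a failed delay-load from an explicit directory. */
static FARPROC WINAPI my_dli_hook(unsigned dliNotify, PDelayLoadInfo pdli) {
    if (dliNotify == dliFailLoadLib
        && _stricmp(pdli->szDll, "mydep.dll") == 0) {
        /* First attempt failed: retry from a driver-relative location. */
        return (FARPROC)LoadLibraryExA("C:\\my\\driver\\dir\\mydep.dll",
                                       NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
    }
    return NULL; /* fall back to the default failure handling */
}

/* VS2015 and later: define (not assign) the hook as const. */
const PfnDliHook __pfnDliFailureHook2 = my_dli_hook;
```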
- */ -HMODULE MODULE_load_from_elasticodbc_path(const char *module_name) { - extern HINSTANCE s_hModule; - HMODULE hmodule = NULL; - char szFileName[MAX_PATH]; - - if (GetModuleFileName(s_hModule, szFileName, sizeof(szFileName)) > 0) { - char drive[_MAX_DRIVE], dir[_MAX_DIR], sysdir[MAX_PATH]; - - _splitpath(szFileName, drive, dir, NULL, NULL); - GetSystemDirectory(sysdir, MAX_PATH); - SPRINTF_FIXED(szFileName, "%s%s%s.dll", drive, dir, module_name); - if (_strnicmp(szFileName, sysdir, strlen(sysdir)) != 0) { - hmodule = - LoadLibraryEx(szFileName, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); - MYLOG(OPENSEARCH_DEBUG, "elasticodbc path based %s loaded module=%p\n", - module_name, hmodule); - } - } - return hmodule; -} - -static FARPROC WINAPI DliErrorHook(unsigned dliNotify, PDelayLoadInfo pdli) { - HMODULE hmodule = NULL; - const char *call_module = NULL; - - MYLOG(OPENSEARCH_DEBUG, "Dli%sHook %s Notify=%d\n", - (dliFailLoadLib == dliNotify || dliFailGetProc == dliNotify) - ? "Error" - : "Notify", - NULL != pdli->szDll ? pdli->szDll : pdli->dlp.szProcName, dliNotify); - switch (dliNotify) { - case dliNotePreLoadLibrary: - case dliFailLoadLib: - RELEASE_NOTIFY_HOOK - if (_strnicmp(pdli->szDll, elasticodbc, strlen(elasticodbc)) == 0) - call_module = elasticodbc; - if (call_module) { - if (hmodule = MODULE_load_from_elasticodbc_path(call_module), - NULL == hmodule) - hmodule = LoadLibrary(call_module); - if (NULL != hmodule) { - if (opensearch_enlist == call_module) - loaded_opensearch_enlist = TRUE; - else if (elasticodbc == call_module) - loaded_elasticodbc = TRUE; - } - } - break; - } - return (FARPROC)hmodule; -} - -void AlreadyLoadedOpenSearchodbc(void) { - loaded_elasticodbc = TRUE; -} - -/* - * unload delay loaded libraries. - */ - -typedef BOOL(WINAPI *UnloadFunc)(LPCSTR); -void CleanupDelayLoadedDLLs(void) { - BOOL success; -#if (_MSC_VER < 1300) /* VC6 DELAYLOAD IMPORT */ - UnloadFunc func = __FUnloadDelayLoadedDLL; -#else - UnloadFunc func = __FUnloadDelayLoadedDLL2; -#endif - /* The dll names are case sensitive for the unload helper */ - if (loaded_opensearch_enlist) { - if (enlist_module != NULL) { - MYLOG(OPENSEARCH_DEBUG, "Freeing Library %s\n", opensearch_enlistdll); - FreeLibrary(enlist_module); - } - MYLOG(OPENSEARCH_DEBUG, "%s unloading\n", opensearch_enlistdll); - success = (*func)(opensearch_enlistdll); - MYLOG(OPENSEARCH_DEBUG, "%s unloaded success=%d\n", opensearch_enlistdll, success); - loaded_opensearch_enlist = FALSE; - } - if (loaded_elasticodbc) { - MYLOG(OPENSEARCH_DEBUG, "%s unloading\n", elasticodbcdll); - success = (*func)(elasticodbcdll); - MYLOG(OPENSEARCH_DEBUG, "%s unloaded success=%d\n", elasticodbcdll, success); - loaded_elasticodbc = FALSE; - } - return; -} -#else -void CleanupDelayLoadedDLLs(void) { - return; -} -#endif /* _MSC_DELAY_LOAD_IMPORT */ - -#ifdef _HANDLE_ENLIST_IN_DTC_ -RETCODE CALL_EnlistInDtc(ConnectionClass *conn, void *pTra, int method) { - RETCODE ret; - BOOL loaded = TRUE; - -#if defined(_MSC_DELAY_LOAD_IMPORT) - if (!loaded_opensearch_enlist) { - TRY_DLI_HOOK - ret = EnlistInDtc(conn, pTra, method); - } - __except ((GetExceptionCode() & 0xffff) == ERROR_MOD_NOT_FOUND - ? 
EXCEPTION_EXECUTE_HANDLER - : EXCEPTION_CONTINUE_SEARCH) { - if (enlist_module = MODULE_load_from_elasticodbc_path(opensearch_enlist), - NULL == enlist_module) - loaded = FALSE; - else - ret = EnlistInDtc(conn, pTra, method); - } - if (loaded) - loaded_opensearch_enlist = TRUE; - RELEASE_NOTIFY_HOOK -} -else ret = EnlistInDtc(conn, pTra, method); -#else - ret = EnlistInDtc(conn, pTra, method); - loaded_opensearch_enlist = TRUE; -#endif /* _MSC_DELAY_LOAD_IMPORT */ -return ret; -} -RETCODE CALL_DtcOnDisconnect(ConnectionClass *conn) { - if (loaded_opensearch_enlist) - return DtcOnDisconnect(conn); - return FALSE; -} -RETCODE CALL_IsolateDtcConn(ConnectionClass *conn, BOOL continueConnection) { - if (loaded_opensearch_enlist) - return IsolateDtcConn(conn, continueConnection); - return FALSE; -} - -void *CALL_GetTransactionObject(HRESULT *hres) { - void *ret = NULL; - BOOL loaded = TRUE; - -#if defined(_MSC_DELAY_LOAD_IMPORT) - if (!loaded_opensearch_enlist) { - TRY_DLI_HOOK - ret = GetTransactionObject(hres); - } - __except ((GetExceptionCode() & 0xffff) == ERROR_MOD_NOT_FOUND - ? EXCEPTION_EXECUTE_HANDLER - : EXCEPTION_CONTINUE_SEARCH) { - if (enlist_module = MODULE_load_from_elasticodbc_path(opensearch_enlist), - NULL == enlist_module) - loaded = FALSE; - else - ret = GetTransactionObject(hres); - } - if (loaded) - loaded_opensearch_enlist = TRUE; - RELEASE_NOTIFY_HOOK -} -else ret = GetTransactionObject(hres); -#else - ret = GetTransactionObject(hres); - loaded_opensearch_enlist = TRUE; -#endif /* _MSC_DELAY_LOAD_IMPORT */ -return ret; -} -void CALL_ReleaseTransactionObject(void *pObj) { - if (loaded_opensearch_enlist) - ReleaseTransactionObject(pObj); - return; -} -#endif /* _HANDLE_ENLIST_IN_DTC_ */ diff --git a/sql-odbc/src/sqlodbc/loadlib.h b/sql-odbc/src/sqlodbc/loadlib.h deleted file mode 100644 index 16a6c03937..0000000000 --- a/sql-odbc/src/sqlodbc/loadlib.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __LOADLIB_H__ -#define __LOADLIB_H__ - -#include "opensearch_odbc.h" -#ifdef HAVE_LIBLTDL -#include -#else -#ifdef HAVE_DLFCN_H -#include -#endif /* HAVE_DLFCN_H */ -#endif /* HAVE_LIBLTDL */ - -#include -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _HANDLE_ENLIST_IN_DTC_ -RETCODE CALL_EnlistInDtc(ConnectionClass *conn, void *pTra, int method); -RETCODE CALL_DtcOnDisconnect(ConnectionClass *); -RETCODE CALL_IsolateDtcConn(ConnectionClass *, BOOL); -void *CALL_GetTransactionObject(HRESULT *); -void CALL_ReleaseTransactionObject(void *); -#endif /* _HANDLE_ENLIST_IN_DTC_ */ -/* void UnloadDelayLoadedDLLs(BOOL); */ -void CleanupDelayLoadedDLLs(void); -#ifdef WIN32 -HMODULE MODULE_load_from_elasticodbc_path(const char *module_name); -void AlreadyLoadedOpenSearchodbc(void); -#endif /* WIN32 */ - -#ifdef __cplusplus -} -#endif -#endif /* __LOADLIB_H__ */ diff --git a/sql-odbc/src/sqlodbc/misc.c b/sql-odbc/src/sqlodbc/misc.c deleted file mode 100644 index 867c7744fe..0000000000 --- a/sql-odbc/src/sqlodbc/misc.c +++ /dev/null @@ -1,201 +0,0 @@ -// clang-format off -#include "opensearch_odbc.h" -#include "misc.h" - -#include -#include -#include -#include -#include -// clang-format on - -#ifndef WIN32 -#include -#include -#include -#else -#include /* Byron: is this where Windows keeps def. - * of getpid ? 
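The CALL_* wrappers above guard every call into the delay-loaded enlist DLL with structured exception handling: a failed delay-load surfaces as an SEH exception whose low word is ERROR_MOD_NOT_FOUND. A standalone sketch of that guard, where DelayLoadedCall is a placeholder for EnlistInDtc and friends:

```c
#include <windows.h>

extern int DelayLoadedCall(void); /* placeholder for a delay-loaded import */

/* Sketch: catch only the "module not found" delay-load failure and let
 * every other exception propagate. */
static int guarded_call(void) {
    __try {
        return DelayLoadedCall();
    } __except ((GetExceptionCode() & 0xffff) == ERROR_MOD_NOT_FOUND
                    ? EXCEPTION_EXECUTE_HANDLER
                    : EXCEPTION_CONTINUE_SEARCH) {
        return -1; /* the DLL could not be loaded */
    }
}
```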
*/ -#endif - -/* - * returns STRCPY_FAIL, STRCPY_TRUNCATED, or #bytes copied - * (not including null term) - */ -ssize_t my_strcpy(char *dst, ssize_t dst_len, const char *src, - ssize_t src_len) { - if (dst_len <= 0) - return STRCPY_FAIL; - - if (src_len == SQL_NULL_DATA) { - dst[0] = '\0'; - return STRCPY_NULL; - } else if (src_len == SQL_NTS) - src_len = strlen(src); - - if (src_len <= 0) - return STRCPY_FAIL; - else { - if (src_len < dst_len) { - memcpy(dst, src, src_len); - dst[src_len] = '\0'; - } else { - memcpy(dst, src, dst_len - 1); - dst[dst_len - 1] = '\0'; /* truncated */ - return STRCPY_TRUNCATED; - } - } - - return strlen(dst); -} - -/* - * strncpy copies up to len characters, and doesn't terminate - * the destination string if src has len characters or more. - * instead, I want it to copy up to len-1 characters and always - * terminate the destination string. - */ -size_t strncpy_null(char *dst, const char *src, ssize_t len) { - int i; - - if (NULL != dst && len > 0) { - for (i = 0; src[i] && i < len - 1; i++) - dst[i] = src[i]; - - dst[i] = '\0'; - } else - return 0; - if (src[i]) - return strlen(src); - return i; -} - -/*------ - * Create a null terminated string (handling the SQL_NTS thing): - * 1. If buf is supplied, place the string in there - * (assumes enough space) and return buf. - * 2. If buf is not supplied, malloc space and return this string - *------ - */ -char *make_string(const SQLCHAR *s, SQLINTEGER len, char *buf, size_t bufsize) { - size_t length; - char *str; - - if (!s || SQL_NULL_DATA == len) - return NULL; - if (len >= 0) - length = len; - else if (SQL_NTS == len) - length = strlen((char *)s); - else { - MYLOG(OPENSEARCH_DEBUG, "invalid length=" FORMAT_INTEGER "\n", len); - return NULL; - } - if (buf) { - strncpy_null(buf, (char *)s, bufsize > length ? length + 1 : bufsize); - return buf; - } - - MYLOG(OPENSEARCH_DEBUG, "malloc size=" FORMAT_SIZE_T "\n", length); - str = malloc(length + 1); - MYLOG(OPENSEARCH_DEBUG, "str=%p\n", str); - if (!str) - return NULL; - - strncpy_null(str, (char *)s, length + 1); - return str; -} - -/* - * snprintfcat is an extension to snprintf. - * It appends formatted output to buf at the given position. - */ -#ifdef POSIX_SNPRINTF_REQUIRED -static int posix_vsnprintf(char *str, size_t size, const char *format, va_list ap); -#define vsnprintf posix_vsnprintf -#endif /* POSIX_SNPRINTF_REQUIRED */ - -int snprintfcat(char *buf, size_t size, const char *format, ...) { - int len; - size_t pos = strlen(buf); - va_list arglist; - - va_start(arglist, format); - len = vsnprintf(buf + pos, size - pos, format, arglist); - va_end(arglist); - return len + (int)pos; -} - -/* - * Windows doesn't have snprintf(). It has _snprintf() which is similar, - * but it behaves differently wrt. truncation. This is a compatibility - * function that uses _snprintf() to provide POSIX snprintf() behavior. - * - * Our strategy, if the output doesn't fit, is to create a temporary buffer - * and call _snprintf() on that. If it still doesn't fit, enlarge the buffer - * and repeat.
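The point of strncpy_null, restated: unlike plain strncpy, it always NUL-terminates, even on truncation. A self-contained sketch; copy_null is a simplified stand-in for the helper above, not the driver's code:

```c
#include <stdio.h>
#include <string.h>

/* Copy at most len-1 bytes and always NUL-terminate. */
static void copy_null(char *dst, const char *src, size_t len) {
    size_t i;
    if (dst == NULL || len == 0)
        return;
    for (i = 0; src[i] != '\0' && i < len - 1; i++)
        dst[i] = src[i];
    dst[i] = '\0';
}

int main(void) {
    char a[4], b[4];
    strncpy(a, "opensearch", sizeof(a));   /* a is NOT NUL-terminated */
    copy_null(b, "opensearch", sizeof(b)); /* b == "ope", terminated  */
    printf("%.4s / %s\n", a, b);
    return 0;
}
```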
- */ -#ifdef POSIX_SNPRINTF_REQUIRED -static int posix_vsnprintf(char *str, size_t size, const char *format, - va_list ap) { - int len; - char *tmp; - size_t newsize; - - len = _vsnprintf(str, size, format, ap); - if (len < 0) { - if (size == 0) - newsize = 100; - else - newsize = size; - do { - newsize *= 2; - tmp = malloc(newsize); - if (!tmp) - return -1; - len = _vsnprintf(tmp, newsize, format, ap); - if (len >= 0) - memcpy(str, tmp, size); - free(tmp); - } while (len < 0); - } - if (len >= size && size > 0) { - /* Ensure the buffer is NULL-terminated */ - str[size - 1] = '\0'; - } - return len; -} - -int posix_snprintf(char *buf, size_t size, const char *format, ...) { - int len; - va_list arglist; - - va_start(arglist, format); - len = posix_vsnprintf(buf, size, format, arglist); - va_end(arglist); - return len; -} -#endif /* POSIX_SNPRINTF_REQUIRED */ - -#ifndef HAVE_STRLCAT -size_t strlcat(char *dst, const char *src, size_t size) { - size_t ttllen; - char *pd = dst; - const char *ps = src; - - for (ttllen = 0; ttllen < size; ttllen++, pd++) { - if (0 == *pd) - break; - } - if (ttllen >= size - 1) - return ttllen + strlen(src); - for (; ttllen < size - 1; ttllen++, pd++, ps++) { - if (0 == (*pd = *ps)) - return ttllen; - } - *pd = 0; - for (; *ps; ttllen++, ps++) - ; - return ttllen; -} -#endif /* HAVE_STRLCAT */ diff --git a/sql-odbc/src/sqlodbc/misc.h b/sql-odbc/src/sqlodbc/misc.h deleted file mode 100644 index f589cbcb98..0000000000 --- a/sql-odbc/src/sqlodbc/misc.h +++ /dev/null @@ -1,93 +0,0 @@ -#ifndef __MISC_H__ -#define __MISC_H__ - -#include -#ifndef WIN32 -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -size_t strncpy_null(char *dst, const char *src, ssize_t len); -#ifndef HAVE_STRLCAT -size_t strlcat(char *, const char *, size_t); -#endif /* HAVE_STRLCAT */ -int snprintfcat(char *buf, size_t size, const char *format, ...) - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, 3, 4))); - -char *make_string(const SQLCHAR *s, SQLINTEGER len, char *buf, size_t bufsize); -/* #define GET_SCHEMA_NAME(nspname) (stricmp(nspname, "public") ? nspname : - * "") */ - -#define GET_SCHEMA_NAME(nspname) (nspname) - -/* defines for return value of my_strcpy */ -#define STRCPY_SUCCESS 1 -#define STRCPY_FAIL 0 -#define STRCPY_TRUNCATED (-1) -#define STRCPY_NULL (-2) - -ssize_t my_strcpy(char *dst, ssize_t dst_len, const char *src, ssize_t src_len); - -/* - * Macros to safely strcpy, strcat or sprintf to fixed arrays. - * - */ - -/* - * With GCC, the macro CHECK_NOT_CHAR_P() causes a compilation error - * when the target is pointer not a fixed array. - */ -#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 406 -#define FUNCTION_BEGIN_MACRO ({ -#define FUNCTION_END_MACRO \ - ; \ - }) -#define CHECK_NOT_CHAR_P(t) \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wunused-variable\"") if (0) { \ - typeof(t) dummy_for_check = {}; \ - } \ - _Pragma("GCC diagnostic pop") -#else -#define FUNCTION_BEGIN_MACRO -#define FUNCTION_END_MACRO -#define CHECK_NOT_CHAR_P(t) -#endif - -/* macro to safely strcpy() to fixed arrays. */ -#define STRCPY_FIXED(to, from) \ - FUNCTION_BEGIN_MACRO \ - CHECK_NOT_CHAR_P(to) \ - strncpy_null((to), (from), sizeof(to)) FUNCTION_END_MACRO - -/* macro to safely strcat() to fixed arrays. */ -#define STRCAT_FIXED(to, from) \ - FUNCTION_BEGIN_MACRO \ - CHECK_NOT_CHAR_P(to) \ - strlcat((to), (from), sizeof(to)) FUNCTION_END_MACRO - -/* macro to safely sprintf() to fixed arrays. */ -#define SPRINTF_FIXED(to, ...) 
\ - FUNCTION_BEGIN_MACRO \ - CHECK_NOT_CHAR_P(to) \ - snprintf((to), sizeof(to), __VA_ARGS__) FUNCTION_END_MACRO - -/* macro to safely sprintf() & cat to fixed arrays. */ -#define SPRINTFCAT_FIXED(to, ...) \ - FUNCTION_BEGIN_MACRO \ - CHECK_NOT_CHAR_P(to) \ - snprintfcat((to), sizeof(to), __VA_ARGS__) FUNCTION_END_MACRO - -#define ITOA_FIXED(to, from) \ - FUNCTION_BEGIN_MACRO \ - CHECK_NOT_CHAR_P(to) \ - snprintf((to), sizeof(to), "%d", from) FUNCTION_END_MACRO - -#ifdef __cplusplus -} -#endif - -#endif /* __MISC_H__ */ diff --git a/sql-odbc/src/sqlodbc/multibyte.c b/sql-odbc/src/sqlodbc/multibyte.c deleted file mode 100644 index 4c5fb0d465..0000000000 --- a/sql-odbc/src/sqlodbc/multibyte.c +++ /dev/null @@ -1,357 +0,0 @@ -#include "multibyte.h" - -#include -#include -#include -#include - -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#ifndef WIN32 -#include -#endif -#ifndef TRUE -#define TRUE 1 -#endif - -typedef struct OPENSEARCH_CS { - char *name; - int code; -} OPENSEARCH_CS; - -static OPENSEARCH_CS CS_Table[] = { - {"SQL_ASCII", SQL_ASCII}, - {"EUC_JP", EUC_JP}, - {"EUC_CN", EUC_CN}, - {"EUC_KR", EUC_KR}, - {"EUC_TW", EUC_TW}, - {"JOHAB", JOHAB}, /* since 7.3 */ - {"UTF8", UTF8}, /* since 7.2 */ - {"MULE_INTERNAL", MULE_INTERNAL}, - {"LATIN1", LATIN1}, - {"LATIN2", LATIN2}, - {"LATIN3", LATIN3}, - {"LATIN4", LATIN4}, - {"LATIN5", LATIN5}, - {"LATIN6", LATIN6}, - {"LATIN7", LATIN7}, - {"LATIN8", LATIN8}, - {"LATIN9", LATIN9}, - {"LATIN10", LATIN10}, - {"WIN1256", WIN1256}, /* Arabic since 7.3 */ - {"WIN1258", WIN1258}, /* Vietnamese since 8.1 */ - {"WIN866", WIN866}, /* since 8.1 */ - {"WIN874", WIN874}, /* Thai since 7.3 */ - {"KOI8", KOI8R}, - {"WIN1251", WIN1251}, /* Cyrillic */ - {"WIN1252", WIN1252}, /* Western Europe since 8.1 */ - {"ISO_8859_5", ISO_8859_5}, - {"ISO_8859_6", ISO_8859_6}, - {"ISO_8859_7", ISO_8859_7}, - {"ISO_8859_8", ISO_8859_8}, - {"WIN1250", WIN1250}, /* Central Europe */ - {"WIN1253", WIN1253}, /* Greek since 8.2 */ - {"WIN1254", WIN1254}, /* Turkish since 8.2 */ - {"WIN1255", WIN1255}, /* Hebrew since 8.2 */ - {"WIN1257", WIN1257}, /* Baltic(North Europe) since 8.2 */ - - {"EUC_JIS_2004", - EUC_JIS_2004}, /* EUC for SHIFT-JIS-2004 Japanese, since 8.3 */ - {"SJIS", SJIS}, - {"BIG5", BIG5}, - {"GBK", GBK}, /* since 7.3 */ - {"UHC", UHC}, /* since 7.3 */ - {"GB18030", GB18030}, /* since 7.3 */ - {"SHIFT_JIS_2004", SHIFT_JIS_2004}, /* SHIFT-JIS-2004 Japanese, standard JIS - X 0213, since 8.3 */ - {"OTHER", OTHER}}; - -static OPENSEARCH_CS CS_Alias[] = {{"UNICODE", UTF8}, {"TCVN", WIN1258}, - {"ALT", WIN866}, {"WIN", WIN1251}, - {"KOI8R", KOI8R}, {"OTHER", OTHER}}; - -int opensearch_CS_code(const char *characterset_string) { - int i, c = -1; - - for (i = 0; CS_Table[i].code != OTHER; i++) { - if (0 == stricmp(characterset_string, CS_Table[i].name)) { - c = CS_Table[i].code; - break; - } - } - if (c < 0) { - for (i = 0; CS_Alias[i].code != OTHER; i++) { - if (0 == stricmp(characterset_string, CS_Alias[i].name)) { - c = CS_Alias[i].code; - break; - } - } - } - if (c < 0) - c = OTHER; - return (c); -} - -int opensearch_mb_maxlen(int characterset_code) { - switch (characterset_code) { - case UTF8: - return 4; - case EUC_TW: - return 4; - case EUC_JIS_2004: - case EUC_JP: - case GB18030: - return 3; - case SHIFT_JIS_2004: - case SJIS: - case BIG5: - case GBK: - case UHC: - case EUC_CN: - case EUC_KR: - case JOHAB: - return 2; - default: - return 1; - } -} - -static int opensearch_CS_stat(int stat, unsigned int character, int 
characterset_code) { - if (character == 0) - stat = 0; - switch (characterset_code) { - case UTF8: { - if (stat < 2 && character >= 0x80) { - if (character >= 0xfc) - stat = 6; - else if (character >= 0xf8) - stat = 5; - else if (character >= 0xf0) - stat = 4; - else if (character >= 0xe0) - stat = 3; - else if (character >= 0xc0) - stat = 2; - } else if (stat >= 2 && character > 0x7f) - stat--; - else - stat = 0; - } break; - /* SHIFT_JIS_2004 Support. */ - case SHIFT_JIS_2004: { - if (stat < 2 && character >= 0x81 && character <= 0x9f) - stat = 2; - else if (stat < 2 && character >= 0xe0 && character <= 0xef) - stat = 2; - else if (stat < 2 && character >= 0xf0 && character <= 0xfc) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - /* Shift-JIS Support. */ - case SJIS: { - if (stat < 2 && character > 0x80 - && !(character > 0x9f && character < 0xe0)) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - /* Chinese Big5 Support. */ - case BIG5: { - if (stat < 2 && character > 0xA0) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - /* Chinese GBK Support. */ - case GBK: { - if (stat < 2 && character > 0x7F) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - - /* Korean UHC Support. */ - case UHC: { - if (stat < 2 && character > 0x7F) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - - case EUC_JIS_2004: - /* 0x8f is JIS X 0212 + JIS X 0213(2) 3 byte */ - /* 0x8e is JIS X 0201 2 byte */ - /* 0xa0-0xff is JIS X 0213(1) 2 byte */ - case EUC_JP: - /* 0x8f is JIS X 0212 3 byte */ - /* 0x8e is JIS X 0201 2 byte */ - /* 0xa0-0xff is JIS X 0208 2 byte */ - { - if (stat < 3 && character == 0x8f) /* JIS X 0212 */ - stat = 3; - else if (stat != 2 - && (character == 0x8e - || character > 0xa0)) /* Half Katakana HighByte & - Kanji HighByte */ - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } - break; - - /* EUC_CN, EUC_KR, JOHAB Support */ - case EUC_CN: - case EUC_KR: - case JOHAB: { - if (stat < 2 && character > 0xa0) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - case EUC_TW: { - if (stat < 4 && character == 0x8e) - stat = 4; - else if (stat == 4 && character > 0xa0) - stat = 3; - else if ((stat == 3 || stat < 2) && character > 0xa0) - stat = 2; - else if (stat == 2) - stat = 1; - else - stat = 0; - } break; - /* Chinese GB18030 support. Added by Bill Huang - * */ - case GB18030: { - if (stat < 2 && character > 0x80) - stat = 2; - else if (stat == 2) { - if (character >= 0x30 && character <= 0x39) - stat = 3; - else - stat = 1; - } else if (stat == 3) { - if (character >= 0x30 && character <= 0x39) - stat = 1; - else - stat = 3; - } else - stat = 0; - } break; - default: { - stat = 0; - } break; - } - return stat; -} - -/* - * This function is used to determine the encoding corresponding to - * the current locale.
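opensearch_CS_stat is a byte-at-a-time state machine: the returned state is, roughly, the number of bytes still owed to the current multibyte character (1 marks the final byte of a multibyte sequence, 0 an ASCII byte or a reset). A standalone sketch of just the UTF-8 branch, counting characters in a short string; it drops the legacy 5- and 6-byte cases for brevity:

```c
#include <stdio.h>

/* Simplified UTF-8 branch of the state machine above. */
static int utf8_stat(int stat, unsigned int c) {
    if (stat < 2 && c >= 0x80) {          /* lead byte */
        if (c >= 0xf0)      stat = 4;     /* 4-byte sequence */
        else if (c >= 0xe0) stat = 3;     /* 3-byte sequence */
        else if (c >= 0xc0) stat = 2;     /* 2-byte sequence */
    } else if (stat >= 2 && c > 0x7f) {
        stat--;                           /* consume a continuation byte */
    } else {
        stat = 0;                         /* ASCII, or reset */
    }
    return stat;
}

int main(void) {
    const unsigned char s[] = "a\xc3\xa9b"; /* "aéb" in UTF-8 */
    int stat = 0, chars = 0;
    for (int i = 0; s[i]; i++) {
        stat = utf8_stat(stat, s[i]);
        if (stat <= 1) /* last byte of a character was just consumed */
            chars++;
    }
    printf("%d characters\n", chars); /* prints 3 */
    return 0;
}
```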
- */ -const char *derive_locale_encoding(const char *dbencoding) { - UNUSED(dbencoding); - const char *wenc = NULL; -#ifdef WIN32 - int acp; -#endif /* WIN32 */ - - if (wenc = getenv("ESCLIENTENCODING"), - NULL != wenc) /* environment variable */ - return wenc; -#ifdef WIN32 - acp = GetACP(); - if (acp >= 1251 && acp <= 1258) { - if (stricmp(dbencoding, "SQL_ASCII") == 0) - return wenc; - } - switch (acp) { - case 932: - wenc = "SJIS"; - break; - case 936: - wenc = "GBK"; - break; - case 949: - wenc = "UHC"; - break; - case 950: - wenc = "BIG5"; - break; - case 1250: - wenc = "WIN1250"; - break; - case 1251: - wenc = "WIN1251"; - break; - case 1256: - wenc = "WIN1256"; - break; - case 1252: - if (strnicmp(dbencoding, "LATIN", 5) == 0) - break; - wenc = "WIN1252"; - break; - case 1258: - wenc = "WIN1258"; - break; - case 1253: - wenc = "WIN1253"; - break; - case 1254: - wenc = "WIN1254"; - break; - case 1255: - wenc = "WIN1255"; - break; - case 1257: - wenc = "WIN1257"; - break; - } -#else - // TODO #34 - Investigate locale handling on Mac -#endif /* WIN32 */ - return wenc; -} - -void encoded_str_constr(encoded_str *encstr, int ccsc, const char *str) { - encstr->ccsc = ccsc; - encstr->encstr = (const UCHAR *)str; - encstr->pos = -1; - encstr->ccst = 0; -} -int encoded_nextchar(encoded_str *encstr) { - int chr; - - if (encstr->pos >= 0 && !encstr->encstr[encstr->pos]) - return 0; - chr = encstr->encstr[++encstr->pos]; - encstr->ccst = - opensearch_CS_stat(encstr->ccst, (unsigned int)chr, encstr->ccsc); - return chr; -} - -int encoded_byte_check(encoded_str *encstr, size_t abspos) { - int chr; - - chr = encstr->encstr[encstr->pos = abspos]; - encstr->ccst = - opensearch_CS_stat(encstr->ccst, (unsigned int)chr, encstr->ccsc); - return chr; -} diff --git a/sql-odbc/src/sqlodbc/multibyte.h b/sql-odbc/src/sqlodbc/multibyte.h deleted file mode 100644 index 26bfb6f061..0000000000 --- a/sql-odbc/src/sqlodbc/multibyte.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef __MULTIBUYTE_H__ -#define __MULTIBUYTE_H__ - -/* - * - * Multibyte library header - * - */ -#include "opensearch_odbc.h" -#include "qresult.h" - -/* Elastic client encoding */ -enum { - SQL_ASCII = 0 /* SQL/ASCII */ - , - EUC_JP /* EUC for Japanese */ - , - EUC_CN /* EUC for Chinese */ - , - EUC_KR /* EUC for Korean */ - , - EUC_TW /* EUC for Taiwan */ - , - JOHAB, - UTF8 /* Unicode UTF-8 */ - , - MULE_INTERNAL /* Mule internal code */ - , - LATIN1 /* ISO-8859 Latin 1 */ - , - LATIN2 /* ISO-8859 Latin 2 */ - , - LATIN3 /* ISO-8859 Latin 3 */ - , - LATIN4 /* ISO-8859 Latin 4 */ - , - LATIN5 /* ISO-8859 Latin 5 */ - , - LATIN6 /* ISO-8859 Latin 6 */ - , - LATIN7 /* ISO-8859 Latin 7 */ - , - LATIN8 /* ISO-8859 Latin 8 */ - , - LATIN9 /* ISO-8859 Latin 9 */ - , - LATIN10 /* ISO-8859 Latin 10 */ - , - WIN1256 /* Arabic Windows */ - , - WIN1258 /* Vietnamese Windows */ - , - WIN866 /* Alternativny Variant (MS-DOS CP866) */ - , - WIN874 /* Thai Windows */ - , - KOI8R /* KOI8-R/U */ - , - WIN1251 /* Cyrillic Windows */ - , - WIN1252 /* Western Europe Windows */ - , - ISO_8859_5 /* ISO-8859-5 */ - , - ISO_8859_6 /* ISO-8859-6 */ - , - ISO_8859_7 /* ISO-8859-7 */ - , - ISO_8859_8 /* ISO-8859-8 */ - , - WIN1250 /* Central Europe Windows */ - , - WIN1253 /* Greek Windows */ - , - WIN1254 /* Turkish Windows */ - , - WIN1255 /* Hebrew Windows */ - , - WIN1257 /* Baltic(North Europe) Windows */ - , - EUC_JIS_2004 /* EUC for SHIFT-JIS-2004 Japanese */ - , - SJIS /* Shift JIS */ - , - BIG5 /* Big5 */ - , - GBK /* GBK */ - , - UHC /* UHC */ - , - GB18030 /*
GB18030 */ - , - SHIFT_JIS_2004 /* SHIFT-JIS-2004 Japanese, JIS X 0213 */ - , - OTHER = -1 -}; - -/* Old Type Compatible */ -typedef struct { - int ccsc; - const UCHAR *encstr; - ssize_t pos; - int ccst; -} encoded_str; - -#ifdef __cplusplus -extern "C" { -#endif -int opensearch_CS_code(const char *stat_string); -int encoded_nextchar(encoded_str *encstr); -int encoded_byte_check(encoded_str *encstr, size_t abspos); -const char *derive_locale_encoding(const char *dbencoding); -int opensearch_mb_maxlen(int characterset_code); -#ifdef __cplusplus -} -#endif -#define ENCODE_STATUS(enc) ((enc).ccst) -#define ENCODE_PTR(enc) ((enc).encstr + (enc).pos) -#define MBCS_NON_ASCII(enc) (0 != (enc).ccst || (enc).encstr[(enc).pos] >= 0x80) - -void encoded_str_constr(encoded_str *encstr, int ccsc, const char *str); -#define make_encoded_str(encstr, conn, str) \ - encoded_str_constr(encstr, conn->ccsc, str) -#endif /* __MULTIBUYTE_H__ */ diff --git a/sql-odbc/src/sqlodbc/mylog.c b/sql-odbc/src/sqlodbc/mylog.c deleted file mode 100644 index 2933a1cab0..0000000000 --- a/sql-odbc/src/sqlodbc/mylog.c +++ /dev/null @@ -1,527 +0,0 @@ -#define _MYLOG_FUNCS_IMPLEMENT_ -#include -#include -#include -#include -#include -#include - -#include "dlg_specific.h" -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_helper.h" - -#ifndef WIN32 -#include -#include -#include -#include -#define GENERAL_ERRNO (errno) -#define GENERAL_ERRNO_SET(e) (errno = e) -#else -#define GENERAL_ERRNO (GetLastError()) -#define GENERAL_ERRNO_SET(e) SetLastError(e) -#include /* Byron: is this where Windows keeps def. - * of getpid ? */ -#endif - -#ifdef WIN32 -#define DIRSEPARATOR "\\" -#define OPENSEARCH_BINARY O_BINARY -#define OPENSEARCH_BINARY_R "rb" -#define OPENSEARCH_BINARY_W "wb" -#define OPENSEARCH_BINARY_A "ab" -#else -#define DIRSEPARATOR "/" -#define OPENSEARCH_BINARY 0 -#define OPENSEARCH_BINARY_R "r" -#define OPENSEARCH_BINARY_W "w" -#define OPENSEARCH_BINARY_A "a" -#endif /* WIN32 */ - -static char *logdir = NULL; - -void generate_filename(const char *dirname, const char *prefix, char *filename, - size_t filenamelen) { - const char *exename = GetExeProgramName(); -#ifdef WIN32 - int pid; - - pid = _getpid(); -#else - pid_t pid; - struct passwd *ptr; - - ptr = getpwuid(getuid()); - pid = getpid(); -#endif - if (dirname == 0 || filename == 0) - return; - - snprintf(filename, filenamelen, "%s%s", dirname, DIRSEPARATOR); - if (prefix != 0) - strlcat(filename, prefix, filenamelen); - if (exename[0]) - snprintfcat(filename, filenamelen, "%s_", exename); -#ifndef WIN32 - if (ptr) - strlcat(filename, ptr->pw_name, filenamelen); -#endif - snprintfcat(filename, filenamelen, "%u%s", pid, ".log"); - return; -} - -static void generate_homefile(const char *prefix, char *filename, - size_t filenamelen) { - char dir[PATH_MAX]; -#ifdef WIN32 - const char *ptr; - - dir[0] = '\0'; - if (ptr = getenv("HOMEDRIVE"), NULL != ptr) - strlcat(dir, ptr, filenamelen); - if (ptr = getenv("HOMEPATH"), NULL != ptr) - strlcat(dir, ptr, filenamelen); -#else - STRCPY_FIXED(dir, "~"); -#endif /* WIN32 */ - generate_filename(dir, prefix, filename, filenamelen); - - return; -} - -#ifdef WIN32 -static char exename[_MAX_FNAME]; -#elif defined MAXNAMELEN -static char exename[MAXNAMELEN]; -#else -static char exename[256]; -#endif - -const char *GetExeProgramName() { - static int init = 1; - - if (init) { - UCHAR *p; -#ifdef WIN32 - char pathname[_MAX_PATH]; - - if (GetModuleFileName(NULL, pathname, sizeof(pathname)) > 0) - _splitpath(pathname, NULL, 
NULL, exename, NULL); -#else - CSTR flist[] = {"/proc/self/exe", "/proc/curproc/file", - "/proc/curproc/exe"}; - unsigned long i; - char path_name[256]; - - for (i = 0; i < sizeof(flist) / sizeof(flist[0]); i++) { - if (readlink(flist[i], path_name, sizeof(path_name)) > 0) { - /* fprintf(stderr, "i=%d pathname=%s\n", i, path_name); */ - STRCPY_FIXED(exename, po_basename(path_name)); - break; - } - } -#endif /* WIN32 */ - for (p = (UCHAR *)exename; '\0' != *p; p++) { - if (isalnum(*p)) - continue; - switch (*p) { - case '_': - case '-': - continue; - } - *p = '\0'; /* avoid multi bytes for safety */ - break; - } - init = 0; - } - return exename; -} - -static void *qlog_cs, *mylog_cs; - -static int mylog_on = OPENSEARCH_WARNING, qlog_on = OPENSEARCH_WARNING; - -#define INIT_QLOG_CS XPlatformInitializeCriticalSection(&qlog_cs) -#define ENTER_QLOG_CS XPlatformEnterCriticalSection(qlog_cs) -#define LEAVE_QLOG_CS XPlatformLeaveCriticalSection(qlog_cs) -#define DELETE_QLOG_CS XPlatformDeleteCriticalSection(&qlog_cs) -#define INIT_MYLOG_CS XPlatformInitializeCriticalSection(&mylog_cs) -#define ENTER_MYLOG_CS XPlatformEnterCriticalSection(mylog_cs) -#define LEAVE_MYLOG_CS XPlatformLeaveCriticalSection(mylog_cs) -#define DELETE_MYLOG_CS XPlatformDeleteCriticalSection(&mylog_cs) - -#define MYLOGFILE "mylog_" -#ifndef WIN32 -#define MYLOGDIR "/tmp" -#else -#define MYLOGDIR "c:" -#endif /* WIN32 */ - -#define QLOGFILE "elasticodbc_" -#ifndef WIN32 -#define QLOGDIR "/tmp" -#else -#define QLOGDIR "c:" -#endif /* WIN32 */ - -int get_mylog(void) { - return mylog_on; -} -int get_qlog(void) { - return qlog_on; -} - -const char *po_basename(const char *path) { - char *p; - - if (p = strrchr(path, DIRSEPARATOR[0]), NULL != p) - return p + 1; - return path; -} - -void logs_on_off(int cnopen, int mylog_onoff, int qlog_onoff) { - static int mylog_on_count = 0, mylog_off_count = 0, qlog_on_count = 0, - qlog_off_count = 0; - - ENTER_MYLOG_CS; - if (mylog_onoff) - mylog_on_count += cnopen; - else - mylog_off_count += cnopen; - if (mylog_on_count > 0) { - if (mylog_onoff > mylog_on) - mylog_on = mylog_onoff; - else if (mylog_on < 1) - mylog_on = 1; - } else if (mylog_off_count > 0) - mylog_on = 0; - else if (getGlobalDebug() > 0) - mylog_on = getGlobalDebug(); - LEAVE_MYLOG_CS; - - ENTER_QLOG_CS; - if (qlog_onoff) - qlog_on_count += cnopen; - else - qlog_off_count += cnopen; - if (qlog_on_count > 0) { - if (qlog_onoff > qlog_on) - qlog_on = qlog_onoff; - else if (qlog_on < 1) - qlog_on = 1; - } else if (qlog_off_count > 0) - qlog_on = 0; - else if (getGlobalCommlog() > 0) - qlog_on = getGlobalCommlog(); - LEAVE_QLOG_CS; - MYLOG(OPENSEARCH_DEBUG, "mylog_on=%d qlog_on=%d\n", mylog_on, qlog_on); -} - -#ifdef WIN32 -#define LOGGING_PROCESS_TIME -#include -#endif /* WIN32 */ -#ifdef LOGGING_PROCESS_TIME -#include -static DWORD start_time = 0; -#endif /* LOGGING_PROCESS_TIME */ -static FILE *MLOGFP = NULL; - -static void MLOG_open() { - char filebuf[80], errbuf[160]; - BOOL open_error = FALSE; - - // TODO (#585): Add option to log to stderr stream - // MLOGFP = stderr; - if (MLOGFP) - return; - - generate_filename(logdir ? 
logdir : MYLOGDIR, MYLOGFILE, filebuf, - sizeof(filebuf)); - MLOGFP = fopen(filebuf, OPENSEARCH_BINARY_A); - if (!MLOGFP) { - int lasterror = GENERAL_ERRNO; - - open_error = TRUE; - SPRINTF_FIXED(errbuf, "%s open error %d\n", filebuf, lasterror); - generate_homefile(MYLOGFILE, filebuf, sizeof(filebuf)); - MLOGFP = fopen(filebuf, OPENSEARCH_BINARY_A); - } - if (MLOGFP) { - if (open_error) - fputs(errbuf, MLOGFP); - } -} - -static int mylog_misc(unsigned int option, const char *fmt, va_list args) { - // va_list args; - int gerrno; - BOOL log_threadid = option; - - gerrno = GENERAL_ERRNO; - ENTER_MYLOG_CS; -#ifdef LOGGING_PROCESS_TIME - if (!start_time) - start_time = timeGetTime(); -#endif /* LOGGING_PROCESS_TIME */ - // va_start(args, fmt); - - if (!MLOGFP) { - MLOG_open(); - if (!MLOGFP) - mylog_on = 0; - } - - if (MLOGFP) { - if (log_threadid) { -#ifdef WIN_MULTITHREAD_SUPPORT -#ifdef LOGGING_PROCESS_TIME - DWORD proc_time = timeGetTime() - start_time; - fprintf(MLOGFP, "[%u-%d.%03d]", GetCurrentThreadId(), - proc_time / 1000, proc_time % 1000); -#else - fprintf(MLOGFP, "[%u]", GetCurrentThreadId()); -#endif /* LOGGING_PROCESS_TIME */ -#endif /* WIN_MULTITHREAD_SUPPORT */ -#if defined(POSIX_MULTITHREAD_SUPPORT) - fprintf(MLOGFP, "[%lx]", (unsigned long int)pthread_self()); -#endif /* POSIX_MULTITHREAD_SUPPORT */ - } - vfprintf(MLOGFP, fmt, args); - fflush(MLOGFP); - } - - // va_end(args); - LEAVE_MYLOG_CS; - GENERAL_ERRNO_SET(gerrno); - - return 1; -} - -DLL_DECLARE int mylog(const char *fmt, ...) { - int ret = 0; - unsigned int option = 1; - va_list args; - - if (!mylog_on) - return ret; - - va_start(args, fmt); - ret = mylog_misc(option, fmt, args); - va_end(args); - return ret; -} - -DLL_DECLARE int myprintf(const char *fmt, ...) { - int ret = 0; - va_list args; - - va_start(args, fmt); - ret = mylog_misc(0, fmt, args); - va_end(args); - return ret; -} - -static void mylog_initialize(void) { - INIT_MYLOG_CS; -} -static void mylog_finalize(void) { - mylog_on = 0; - if (MLOGFP) { - fclose(MLOGFP); - MLOGFP = NULL; - } - DELETE_MYLOG_CS; -} - -static FILE *QLOGFP = NULL; - -static int qlog_misc(unsigned int option, const char *fmt, va_list args) { - char filebuf[80]; - int gerrno; - - if (!qlog_on) - return 0; - - gerrno = GENERAL_ERRNO; - ENTER_QLOG_CS; -#ifdef LOGGING_PROCESS_TIME - if (!start_time) - start_time = timeGetTime(); -#endif /* LOGGING_PROCESS_TIME */ - - if (!QLOGFP) { - generate_filename(logdir ? logdir : QLOGDIR, QLOGFILE, filebuf, - sizeof(filebuf)); - QLOGFP = fopen(filebuf, OPENSEARCH_BINARY_A); - if (!QLOGFP) { - generate_homefile(QLOGFILE, filebuf, sizeof(filebuf)); - QLOGFP = fopen(filebuf, OPENSEARCH_BINARY_A); - } - if (!QLOGFP) - qlog_on = 0; - } - - if (QLOGFP) { - if (option) { -#ifdef LOGGING_PROCESS_TIME - DWORD proc_time = timeGetTime() - start_time; - fprintf(QLOGFP, "[%d.%03d]", proc_time / 1000, proc_time % 1000); -#endif /* LOGGING_PROCESS_TIME */ - } - vfprintf(QLOGFP, fmt, args); - fflush(QLOGFP); - } - - LEAVE_QLOG_CS; - GENERAL_ERRNO_SET(gerrno); - - return 1; -} -int qlog(const char *fmt, ...) { - int ret = 0; - unsigned int option = 1; - va_list args; - - if (!qlog_on) - return ret; - - va_start(args, fmt); - ret = qlog_misc(option, fmt, args); - va_end(args); - return ret; -} -int qprintf(char *fmt, ...) 
{ - int ret = 0; - va_list args; - - va_start(args, fmt); - ret = qlog_misc(0, fmt, args); - va_end(args); - return ret; -} - -static void qlog_initialize(void) { - INIT_QLOG_CS; -} -static void qlog_finalize(void) { - qlog_on = 0; - if (QLOGFP) { - fclose(QLOGFP); - QLOGFP = NULL; - } - DELETE_QLOG_CS; -} - -static int globalDebug = -1; -int getGlobalDebug() { - char temp[16]; - - if (globalDebug >= 0) - return globalDebug; - /* Debug is stored in the driver section */ - SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, "", temp, sizeof(temp), - ODBCINST_INI); - if (temp[0]) - globalDebug = atoi(temp); - else - globalDebug = DEFAULT_LOGLEVEL; - - return globalDebug; -} - -int setGlobalDebug(int val) { - return (globalDebug = val); -} - -static int globalCommlog = -1; -int getGlobalCommlog() { - char temp[16]; - - if (globalCommlog >= 0) - return globalCommlog; - /* Commlog is stored in the driver section */ - SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, "", temp, sizeof(temp), - ODBCINST_INI); - if (temp[0]) - globalCommlog = atoi(temp); - else - globalCommlog = DEFAULT_LOGLEVEL; - - return globalCommlog; -} - -int setGlobalCommlog(int val) { - return (globalCommlog = val); -} - -int writeGlobalLogs() { - char temp[10]; - - ITOA_FIXED(temp, globalDebug); - SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, temp, ODBCINST_INI); - ITOA_FIXED(temp, globalCommlog); - SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_LEVEL, temp, ODBCINST_INI); - return 0; -} - -void logInstallerError(int ret, const char *dir) { - DWORD err = (DWORD)ret; - char msg[SQL_MAX_MESSAGE_LENGTH] = ""; - msg[0] = '\0'; - ret = SQLInstallerError(1, &err, msg, sizeof(msg), NULL); - if (msg[0] != '\0') - MYLOG(OPENSEARCH_DEBUG, "Dir= %s ErrorMsg = %s\n", dir, msg); -} - -int getLogDir(char *dir, int dirmax) { - int ret = SQLGetPrivateProfileString(DBMS_NAME, INI_LOG_OUTPUT, "", - dir, dirmax, ODBCINST_INI); - if (!ret) - logInstallerError(ret, dir); - return ret; -} - -int setLogDir(const char *dir) { - int ret = SQLWritePrivateProfileString(DBMS_NAME, INI_LOG_OUTPUT, dir, - ODBCINST_INI); - if (!ret) - logInstallerError(ret, dir); - return ret; -} - -/* - * This function starts logging for connections according to the ODBCINST.INI - * portion of the DBMS_NAME registry. - */ -static void start_logging() { - /* - * GlobalDebug and GlobalCommlog control whether mylog or commlog output - * is captured at connection time; they don't set the default of - * ci->drivers.debug(commlog).
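getGlobalDebug() and getGlobalCommlog() both read the driver section of odbcinst.ini through the ODBC installer API. A minimal standalone sketch of that lookup; the section and key names are placeholders for the driver's DBMS_NAME and INI_LOG_LEVEL macros, and linking against the installer library (odbccp32 on Windows, odbcinst with unixODBC) is assumed:

```c
/* On Windows, include <windows.h> before <odbcinst.h>. */
#include <stdio.h>
#include <stdlib.h>
#include <odbcinst.h>

int main(void) {
    char temp[16] = "";

    /* Read "LogLevel" from the hypothetical driver section. */
    SQLGetPrivateProfileString("OpenSearch SQL ODBC Driver", "LogLevel", "",
                               temp, sizeof(temp), "ODBCINST.INI");
    printf("log level = %d\n", temp[0] ? atoi(temp) : 0);
    return 0;
}
```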
- */ - logs_on_off(0, 0, 0); - mylog("\t%s:Global.debug&commlog=%d&%d\n", __FUNCTION__, getGlobalDebug(), - getGlobalCommlog()); -} - -void InitializeLogging(void) { - char dir[PATH_MAX]; - getLogDir(dir, sizeof(dir)); - if (dir[0]) - logdir = strdup(dir); - mylog_initialize(); - qlog_initialize(); - start_logging(); - MYLOG(OPENSEARCH_DEBUG, "Log Output Dir: %s\n", logdir); -} - -void FinalizeLogging(void) { - mylog_finalize(); - qlog_finalize(); - if (logdir) { - free(logdir); - logdir = NULL; - } -} diff --git a/sql-odbc/src/sqlodbc/mylog.h b/sql-odbc/src/sqlodbc/mylog.h deleted file mode 100644 index c7ee6710c7..0000000000 --- a/sql-odbc/src/sqlodbc/mylog.h +++ /dev/null @@ -1,142 +0,0 @@ -#ifndef __MYLOG_H__ -#define __MYLOG_H__ - -#undef DLL_DECLARE -#ifdef WIN32 -#ifdef _MYLOG_FUNCS_IMPLEMENT_ -#define DLL_DECLARE _declspec(dllexport) -#else -#ifdef _MYLOG_FUNCS_IMPORT_ -#define DLL_DECLARE _declspec(dllimport) -#else -#define DLL_DECLARE -#endif /* _MYLOG_FUNCS_IMPORT_ */ -#endif /* _MYLOG_FUNCS_IMPLEMENT_ */ -#else -#define DLL_DECLARE -#endif /* WIN32 */ - -#include -#ifndef WIN32 -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef __GNUC__ -#define __attribute__(x) -#endif - -DLL_DECLARE int mylog(const char *fmt, ...) - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, 1, 2))); -DLL_DECLARE int myprintf(const char *fmt, ...) - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, 1, 2))); - -extern int qlog(const char *fmt, ...) - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, 1, 2))); -extern int qprintf(char *fmt, ...) - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, 1, 2))); - -const char *po_basename(const char *path); - -#define PREPEND_FMT "%10.10s[%s]%d: " -#define PREPEND_ITEMS , po_basename(__FILE__), __FUNCTION__, __LINE__ -#define QLOG_MARK "[QLOG]" - -#if defined(__GNUC__) && !defined(__APPLE__) -#define MYLOG(level, fmt, ...) \ - (level < get_mylog() ? mylog(PREPEND_FMT fmt PREPEND_ITEMS, ##__VA_ARGS__) \ - : 0) -#define MYPRINTF(level, fmt, ...) \ - (level < get_mylog() ? myprintf((fmt), ##__VA_ARGS__) : 0) -#define QLOG(level, fmt, ...) \ - ((level < get_qlog() ? qlog((fmt), ##__VA_ARGS__) : 0), \ - MYLOG(level, QLOG_MARK fmt, ##__VA_ARGS__)) -#define QPRINTF(level, fmt, ...) \ - ((level < get_qlog() ? qprintf((fmt), ##__VA_ARGS__) : 0), \ - MYPRINTF(level, (fmt), ##__VA_ARGS__)) -#elif defined WIN32 /* && _MSC_VER > 1800 */ -#define MYLOG(level, fmt, ...) \ - ((int)level <= get_mylog() \ - ? mylog(PREPEND_FMT fmt PREPEND_ITEMS, __VA_ARGS__) \ - : (printf || printf((fmt), __VA_ARGS__))) -#define MYPRINTF(level, fmt, ...) \ - ((int)level <= get_mylog() ? myprintf(fmt, __VA_ARGS__) \ - : (printf || printf((fmt), __VA_ARGS__))) -#define QLOG(level, fmt, ...) \ - (((int)level <= get_qlog() ? qlog((fmt), __VA_ARGS__) \ - : (printf || printf(fmt, __VA_ARGS__))), \ - MYLOG(level, QLOG_MARK fmt, __VA_ARGS__)) -#define QPRINTF(level, fmt, ...) \ - (((int)level <= get_qlog() ? qprintf(fmt, __VA_ARGS__) \ - : (printf || printf((fmt), __VA_ARGS__))), \ - MYPRINTF(level, (fmt), __VA_ARGS__)) -#else -#define MYLOG(level, ...) \ - do { \ - _Pragma("clang diagnostic push"); \ - _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ - (level < get_mylog() \ - ? (mylog(PREPEND_FMT PREPEND_ITEMS), myprintf(__VA_ARGS__)) \ - : 0); \ - _Pragma("clang diagnostic pop"); \ - } while (0) -#define MYPRINTF(level, ...) \ - do { \ - _Pragma("clang diagnostic push"); \ - _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ - (level < get_mylog() ? 
myprintf(__VA_ARGS__) : 0); \ - _Pragma("clang diagnostic pop"); \ - } while (0) -#define QLOG(level, ...) \ - do { \ - _Pragma("clang diagnostic push"); \ - _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ - (level < get_qlog() ? qlog(__VA_ARGS__) : 0); \ - MYLOG(level, QLOG_MARK); \ - MYPRINTF(level, __VA_ARGS__); \ - _Pragma("clang diagnostic pop"); \ - } while (0) -#define QPRINTF(level, ...) \ - do { \ - _Pragma("clang diagnostic push"); \ - _Pragma("clang diagnostic ignored \"-Wformat-pedantic\""); \ - (level < get_qlog() ? qprintf(__VA_ARGS__) : 0); \ - MYPRINTF(level, __VA_ARGS__); \ - _Pragma("clang diagnostic pop"); \ - } while (0) -#endif /* __GNUC__ */ - -enum OpenSearchLogLevel { - // Prefixing with OPENSEARCH_ because C does not support namespaces and we may get a - // collision, given how common these names are - OPENSEARCH_OFF, - OPENSEARCH_FATAL, - OPENSEARCH_ERROR, - OPENSEARCH_WARNING, - OPENSEARCH_INFO, - OPENSEARCH_DEBUG, - OPENSEARCH_TRACE, - OPENSEARCH_ALL -}; - -int get_qlog(void); -int get_mylog(void); - -int getGlobalDebug(); -int setGlobalDebug(int val); -int getGlobalCommlog(); -int setGlobalCommlog(int val); -int writeGlobalLogs(); -int getLogDir(char *dir, int dirmax); -int setLogDir(const char *dir); - -void InitializeLogging(void); -void FinalizeLogging(void); - -#ifdef __cplusplus -} -#endif -#endif /* __MYLOG_H__ */ diff --git a/sql-odbc/src/sqlodbc/odbcapi.c b/sql-odbc/src/sqlodbc/odbcapi.c deleted file mode 100644 index 58cd9dff5d..0000000000 --- a/sql-odbc/src/sqlodbc/odbcapi.c +++ /dev/null @@ -1,1383 +0,0 @@ -#include -#include - -#include "environ.h" -#include "opensearch_odbc.h" -#include "loadlib.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_driver_connect.h" -#include "opensearch_info.h" -#include "opensearch_statement.h" -#include "qresult.h" -#include "statement.h" - -BOOL SC_connection_lost_check(StatementClass *stmt, const char *funcname) { - ConnectionClass *conn = SC_get_conn(stmt); - char message[64]; - - if (NULL != conn->opensearchconn) - return FALSE; - SC_clear_error(stmt); - SPRINTF_FIXED(message, "%s unable due to the connection lost", funcname); - SC_set_error(stmt, STMT_COMMUNICATION_ERROR, message, funcname); - return TRUE; -} - -RETCODE SQL_API SQLBindCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, - SQLSMALLINT TargetType, PTR TargetValue, - SQLLEN BufferLength, SQLLEN *StrLen_or_Ind) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_BindCol(StatementHandle, ColumnNumber, TargetType, TargetValue, - BufferLength, StrLen_or_Ind); - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLCancel(HSTMT StatementHandle) { - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (!StatementHandle) - return SQL_INVALID_HANDLE; - if (SC_connection_lost_check((StatementClass *)StatementHandle, - __FUNCTION__)) - return SQL_ERROR; - return OPENSEARCHAPI_Cancel(StatementHandle); -} - -static BOOL theResultIsEmpty(const StatementClass *stmt) { - QResultClass *res = SC_get_Result(stmt); - if (NULL == res) - return FALSE; - return (0 == QR_get_num_total_tuples(res)); -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLColumns(HSTMT StatementHandle, SQLCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLCHAR *SchemaName, - SQLSMALLINT NameLength2, SQLCHAR *TableName, - SQLSMALLINT NameLength3, SQLCHAR *ColumnName, - SQLSMALLINT 
NameLength4) { - CSTR func = "SQLColumns"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName, - *clName = ColumnName; - UWORD flag = PODBC_SEARCH_PUBLIC_SCHEMA; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Columns(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, clName, - NameLength4, flag, 0, 0); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL, *newCl = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = - make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (newCl = make_lstring_ifneeded(conn, ColumnName, NameLength4, - ifallupper), - NULL != newCl) { - clName = newCl; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_Columns(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, clName, - NameLength4, flag, 0, 0); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - if (newCl) - free(newCl); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLConnect(HDBC ConnectionHandle, SQLCHAR *ServerName, - SQLSMALLINT NameLength1, SQLCHAR *UserName, - SQLSMALLINT NameLength2, SQLCHAR *Authentication, - SQLSMALLINT NameLength3) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_Connect(ConnectionHandle, ServerName, NameLength1, UserName, - NameLength2, Authentication, NameLength3); - LEAVE_CONN_CS(conn); - return ret; -} - -RETCODE SQL_API SQLDriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, SQLCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut, - SQLUSMALLINT fDriverCompletion) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = - OPENSEARCHAPI_DriverConnect(hdbc, hwnd, szConnStrIn, cbConnStrIn, szConnStrOut, - cbConnStrOutMax, pcbConnStrOut, fDriverCompletion); - LEAVE_CONN_CS(conn); - return ret; -} -RETCODE SQL_API SQLBrowseConnect(HDBC hdbc, SQLCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, SQLCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_BrowseConnect(hdbc, szConnStrIn, cbConnStrIn, szConnStrOut, - cbConnStrOutMax, pcbConnStrOut); - LEAVE_CONN_CS(conn); - return ret; -} - -RETCODE SQL_API SQLDataSources(HENV EnvironmentHandle, SQLUSMALLINT Direction, - SQLCHAR *ServerName, SQLSMALLINT BufferLength1, - SQLSMALLINT *NameLength1, 
SQLCHAR *Description, - SQLSMALLINT BufferLength2, - SQLSMALLINT *NameLength2) { - UNUSED(EnvironmentHandle, Direction, ServerName, BufferLength1, NameLength1, - Description, BufferLength2, NameLength2); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - return SQL_ERROR; -} - -RETCODE SQL_API SQLDescribeCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, - SQLCHAR *ColumnName, SQLSMALLINT BufferLength, - SQLSMALLINT *NameLength, SQLSMALLINT *DataType, - SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, - SQLSMALLINT *Nullable) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_DescribeCol(StatementHandle, ColumnNumber, ColumnName, - BufferLength, NameLength, DataType, ColumnSize, - DecimalDigits, Nullable); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLDisconnect(HDBC ConnectionHandle) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering for %p\n", ConnectionHandle); -#ifdef _HANDLE_ENLIST_IN_DTC_ - if (CC_is_in_global_trans(conn)) - CALL_DtcOnDisconnect(conn); -#endif /* _HANDLE_ENLIST_IN_DTC_ */ - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_Disconnect(ConnectionHandle); - LEAVE_CONN_CS(conn); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLExecDirect(HSTMT StatementHandle, SQLCHAR *StatementText, - SQLINTEGER TextLength) { - if (StatementHandle == NULL) - return SQL_ERROR; - StatementClass *stmt = (StatementClass *)StatementHandle; - - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - // Enter critical - ENTER_STMT_CS(stmt); - - // Clear error and rollback - SC_clear_error(stmt); - - // Execute statement if statement is ready - RETCODE ret = SQL_ERROR; - if (!SC_opencheck(stmt, "SQLExecDirect")) - ret = OPENSEARCHAPI_ExecDirect(StatementHandle, StatementText, TextLength, 1); - - // Exit critical - LEAVE_STMT_CS(stmt); - - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLExecute(HSTMT StatementHandle) { - if (StatementHandle == NULL) - return SQL_ERROR; - - StatementClass *stmt = (StatementClass *)StatementHandle; - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - // Enter critical - ENTER_STMT_CS(stmt); - - // Clear error and rollback - SC_clear_error(stmt); - RETCODE ret = SQL_ERROR; - if (!SC_opencheck(stmt, "SQLExecute")) - ret = OPENSEARCHAPI_Execute(StatementHandle); - - // Exit critical - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLFetch(HSTMT StatementHandle) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - IRDFields *irdopts = SC_get_IRDF(stmt); - ARDFields *ardopts = SC_get_ARDF(stmt); - SQLUSMALLINT *rowStatusArray = irdopts->rowStatusArray; - SQLULEN *pcRow = irdopts->rowsFetched; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_ExtendedFetch(StatementHandle, SQL_FETCH_NEXT, 0, pcRow, - rowStatusArray, 0, ardopts->size_of_rowset); - stmt->transition_status = STMT_TRANSITION_FETCH_SCROLL; - - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLFreeStmt(HSTMT StatementHandle, SQLUSMALLINT Option) { - RETCODE ret; - StatementClass 
*stmt = (StatementClass *)StatementHandle; - ConnectionClass *conn = NULL; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - if (stmt) { - if (Option == SQL_DROP) { - conn = stmt->hdbc; - if (conn) - ENTER_CONN_CS(conn); - } else - ENTER_STMT_CS(stmt); - } - - ret = OPENSEARCHAPI_FreeStmt(StatementHandle, Option); - - if (stmt) { - if (Option == SQL_DROP) { - if (conn) - LEAVE_CONN_CS(conn); - } else - LEAVE_STMT_CS(stmt); - } - - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLGetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, - SQLSMALLINT BufferLength, - SQLSMALLINT *NameLength) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_GetCursorName(StatementHandle, CursorName, BufferLength, - NameLength); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLGetData(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, - SQLSMALLINT TargetType, PTR TargetValue, - SQLLEN BufferLength, SQLLEN *StrLen_or_Ind) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_GetData(StatementHandle, ColumnNumber, TargetType, TargetValue, - BufferLength, StrLen_or_Ind); - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLGetFunctions(HDBC ConnectionHandle, SQLUSMALLINT FunctionId, - SQLUSMALLINT *Supported) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - if (FunctionId == SQL_API_ODBC3_ALL_FUNCTIONS) - ret = OPENSEARCHAPI_GetFunctions30(ConnectionHandle, FunctionId, Supported); - else - ret = OPENSEARCHAPI_GetFunctions(ConnectionHandle, FunctionId, Supported); - - LEAVE_CONN_CS(conn); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLGetInfo(HDBC ConnectionHandle, SQLUSMALLINT InfoType, - PTR InfoValue, SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - ENTER_CONN_CS(conn); - CC_clear_error(conn); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if ((ret = OPENSEARCHAPI_GetInfo(ConnectionHandle, InfoType, InfoValue, - BufferLength, StringLength)) - == SQL_ERROR) - CC_log_error("SQLGetInfo(30)", "", conn); - LEAVE_CONN_CS(conn); - return ret; -} - -RETCODE SQL_API SQLGetTypeInfo(HSTMT StatementHandle, SQLSMALLINT DataType) { - CSTR func = "SQLGetTypeInfo"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check((StatementClass *)StatementHandle, - __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_GetTypeInfo(StatementHandle, DataType); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLNumResultCols(HSTMT StatementHandle, - SQLSMALLINT *ColumnCount) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_NumResultCols(StatementHandle, 
ColumnCount); - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLParamData(HSTMT StatementHandle, PTR *Value) { - UNUSED(Value); - StatementClass *stmt = (StatementClass *)StatementHandle; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", "SQLParamData"); - return SQL_ERROR; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLPrepare(HSTMT StatementHandle, SQLCHAR *StatementText, - SQLINTEGER TextLength) { - if (StatementHandle == NULL) - return SQL_ERROR; - - CSTR func = "SQLPrepare"; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - // Enter critical - ENTER_STMT_CS(stmt); - - // Clear error and rollback - SC_clear_error(stmt); - - // Prepare statement if statement is ready - RETCODE ret = SQL_ERROR; - if (!SC_opencheck(stmt, func)) - ret = OPENSEARCHAPI_Prepare(StatementHandle, StatementText, TextLength); - - // Exit critical - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLPutData(HSTMT StatementHandle, PTR Data, - SQLLEN StrLen_or_Ind) { - UNUSED(Data, StrLen_or_Ind); - StatementClass *stmt = (StatementClass *)StatementHandle; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", "SQLPutData"); - return SQL_ERROR; -} - -RETCODE SQL_API SQLRowCount(HSTMT StatementHandle, SQLLEN *RowCount) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_RowCount(StatementHandle, RowCount); - LEAVE_STMT_CS(stmt); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLSetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, - SQLSMALLINT NameLength) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_SetCursorName(StatementHandle, CursorName, NameLength); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLSetParam(HSTMT StatementHandle, SQLUSMALLINT ParameterNumber, - SQLSMALLINT ValueType, SQLSMALLINT ParameterType, - SQLULEN LengthPrecision, SQLSMALLINT ParameterScale, - PTR ParameterValue, SQLLEN *StrLen_or_Ind) { - UNUSED(ParameterNumber, ValueType, ParameterType, LengthPrecision, - ParameterScale, ParameterValue, StrLen_or_Ind); - StatementClass *stmt = (StatementClass *)StatementHandle; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", "SQLSetParam"); - return SQL_ERROR; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLSpecialColumns(HSTMT StatementHandle, - SQLUSMALLINT IdentifierType, - SQLCHAR *CatalogName, SQLSMALLINT NameLength1, - SQLCHAR *SchemaName, SQLSMALLINT NameLength2, - SQLCHAR *TableName, SQLSMALLINT NameLength3, - SQLUSMALLINT Scope, SQLUSMALLINT Nullable) { - CSTR func = "SQLSpecialColumns"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - 
if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_SpecialColumns(StatementHandle, IdentifierType, ctName, - NameLength1, scName, NameLength2, tbName, - NameLength3, Scope, Nullable); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = - make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_SpecialColumns(StatementHandle, IdentifierType, ctName, - NameLength1, scName, NameLength2, tbName, - NameLength3, Scope, Nullable); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLStatistics(HSTMT StatementHandle, SQLCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLCHAR *SchemaName, - SQLSMALLINT NameLength2, SQLCHAR *TableName, - SQLSMALLINT NameLength3, SQLUSMALLINT Unique, - SQLUSMALLINT Reserved) { - CSTR func = "SQLStatistics"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Statistics(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, Unique, - Reserved); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = - make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_Statistics(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, Unique, - Reserved); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLTables(HSTMT StatementHandle, SQLCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLCHAR *SchemaName, - SQLSMALLINT NameLength2, SQLCHAR *TableName, - SQLSMALLINT NameLength3, SQLCHAR *TableType, - SQLSMALLINT NameLength4) { - CSTR func = "SQLTables"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - SQLCHAR *ctName = CatalogName, *scName = SchemaName, *tbName = TableName; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if 
(stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Tables(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, TableType, - NameLength4, flag); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, CatalogName, NameLength1, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, SchemaName, NameLength2, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = - make_lstring_ifneeded(conn, TableName, NameLength3, ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_Tables(StatementHandle, ctName, NameLength1, scName, - NameLength2, tbName, NameLength3, TableType, - NameLength4, flag); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLColumnPrivileges( - HSTMT hstmt, SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLCHAR *szTableName, - SQLSMALLINT cbTableName, SQLCHAR *szColumnName, SQLSMALLINT cbColumnName) { - CSTR func = "SQLColumnPrivileges"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, - *tbName = szTableName, *clName = szColumnName; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ColumnPrivileges(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, clName, - cbColumnName, flag); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL, *newCl = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName, - ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (newCl = make_lstring_ifneeded(conn, szColumnName, cbColumnName, - ifallupper), - NULL != newCl) { - clName = newCl; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_ColumnPrivileges(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, - clName, cbColumnName, flag); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - if (newCl) - free(newCl); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ -
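Every catalog entry point in this deleted file (SQLColumns, SQLTables, SQLSpecialColumns, SQLStatistics, SQLColumnPrivileges, and the key/procedure variants further down) follows the same fallback: run the metadata query once and, if it succeeds but returns an empty result set, rebuild the identifier arguments in upper case via make_lstring_ifneeded and re-execute, freeing any converted copies afterwards. A minimal sketch of that shape, where run_catalog_query() and to_upper_copy() are hypothetical stand-ins for the OPENSEARCHAPI_* call and make_lstring_ifneeded():

/* Sketch only: retry a catalog query with upper-cased identifiers. */
static RETCODE catalog_with_retry(StatementClass *stmt, SQLCHAR *table,
                                  SQLSMALLINT len) {
    SQLCHAR *upper;
    RETCODE ret = run_catalog_query(stmt, table, len);   /* first attempt */

    if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) {
        upper = to_upper_copy(table, len);  /* NULL if nothing to convert */
        if (NULL != upper) {
            ret = run_catalog_query(stmt, upper, len);   /* retry once */
            free(upper);                                 /* drop the copy */
        }
    }
    return ret;
}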
-RETCODE SQL_API SQLDescribeParam(HSTMT hstmt, SQLUSMALLINT ipar, - SQLSMALLINT *pfSqlType, SQLULEN *pcbParamDef, - SQLSMALLINT *pibScale, - SQLSMALLINT *pfNullable) { - UNUSED(ipar, pfSqlType, pcbParamDef, pibScale, pfNullable); - StatementClass *stmt = (StatementClass *)hstmt; - SC_clear_error(stmt); - - // COLNUM_ERROR translates to 'invalid descriptor index' - SC_set_error(stmt, STMT_COLNUM_ERROR, - "OpenSearch does not support parameters.", "SQLDescribeParam"); - return SQL_ERROR; -} - -RETCODE SQL_API SQLExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, - SQLLEN irow, -#if defined(WITH_UNIXODBC) && (SIZEOF_LONG_INT != 8) - SQLROWSETSIZE *pcrow, -#else - SQLULEN *pcrow, -#endif /* WITH_UNIXODBC */ - SQLUSMALLINT *rgfRowStatus) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); -#ifdef WITH_UNIXODBC - { - SQLULEN retrieved; - - ret = OPENSEARCHAPI_ExtendedFetch(hstmt, fFetchType, irow, &retrieved, - rgfRowStatus, 0, - SC_get_ARDF(stmt)->size_of_rowset_odbc2); - if (pcrow) - *pcrow = retrieved; - } -#else - ret = OPENSEARCHAPI_ExtendedFetch(hstmt, fFetchType, irow, pcrow, rgfRowStatus, 0, - SC_get_ARDF(stmt)->size_of_rowset_odbc2); -#endif /* WITH_UNIXODBC */ - stmt->transition_status = STMT_TRANSITION_EXTENDED_FETCH; - LEAVE_STMT_CS(stmt); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLForeignKeys( - HSTMT hstmt, SQLCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, - SQLCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, SQLCHAR *szPkTableName, - SQLSMALLINT cbPkTableName, SQLCHAR *szFkCatalogName, - SQLSMALLINT cbFkCatalogName, SQLCHAR *szFkSchemaName, - SQLSMALLINT cbFkSchemaName, SQLCHAR *szFkTableName, - SQLSMALLINT cbFkTableName) { - CSTR func = "SQLForeignKeys"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *pkctName = szPkCatalogName, *pkscName = szPkSchemaName, - *pktbName = szPkTableName, *fkctName = szFkCatalogName, - *fkscName = szFkSchemaName, *fktbName = szFkTableName; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ForeignKeys(hstmt, pkctName, cbPkCatalogName, pkscName, - cbPkSchemaName, pktbName, cbPkTableName, - fkctName, cbFkCatalogName, fkscName, - cbFkSchemaName, fktbName, cbFkTableName); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newPkct = NULL, *newPksc = NULL, *newPktb = NULL, - *newFkct = NULL, *newFksc = NULL, *newFktb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newPkct = make_lstring_ifneeded(conn, szPkCatalogName, - cbPkCatalogName, ifallupper), - NULL != newPkct) { - pkctName = newPkct; - reexec = TRUE; - } - if (newPksc = make_lstring_ifneeded(conn, szPkSchemaName, - cbPkSchemaName, ifallupper), - NULL != newPksc) { - pkscName = newPksc; - reexec = TRUE; - } - if (newPktb = make_lstring_ifneeded(conn, szPkTableName, cbPkTableName, - ifallupper), - NULL != newPktb) { - pktbName = newPktb; - reexec = TRUE; - } - if (newFkct = make_lstring_ifneeded(conn, szFkCatalogName, - cbFkCatalogName, ifallupper), - NULL != newFkct) { - fkctName = newFkct; - reexec = TRUE; - } - if (newFksc = make_lstring_ifneeded(conn, szFkSchemaName, - cbFkSchemaName, ifallupper), - NULL != newFksc) { - fkscName = newFksc; - reexec = TRUE; - } - if (newFktb = make_lstring_ifneeded(conn, szFkTableName, cbFkTableName, - ifallupper), - NULL != newFktb) { - fktbName = newFktb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_ForeignKeys(hstmt, pkctName, cbPkCatalogName, pkscName, -
cbPkSchemaName, pktbName, cbPkTableName, - fkctName, cbFkCatalogName, fkscName, - cbFkSchemaName, fktbName, cbFkTableName); - if (newPkct) - free(newPkct); - if (newPksc) - free(newPksc); - if (newPktb) - free(newPktb); - if (newFkct) - free(newFkct); - if (newFksc) - free(newFksc); - if (newFktb) - free(newFktb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLMoreResults(HSTMT hstmt) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_MoreResults(hstmt); - LEAVE_STMT_CS(stmt); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLNativeSql(HDBC hdbc, SQLCHAR *szSqlStrIn, - SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, - SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_NativeSql(hdbc, szSqlStrIn, cbSqlStrIn, szSqlStr, cbSqlStrMax, - pcbSqlStr); - LEAVE_CONN_CS(conn); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLNumParams(HSTMT hstmt, SQLSMALLINT *pcpar) { - if (pcpar != NULL) - *pcpar = 0; - - StatementClass *stmt = (StatementClass *)hstmt; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", "SQLNumParams"); - return SQL_SUCCESS_WITH_INFO; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLPrimaryKeys(HSTMT hstmt, SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, SQLCHAR *szTableName, - SQLSMALLINT cbTableName) { - CSTR func = "SQLPrimaryKeys"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, - *tbName = szTableName; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_PrimaryKeys(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, 0); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName, - ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_PrimaryKeys(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, 0); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLProcedureColumns( - HSTMT hstmt, SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLCHAR *szProcName, - SQLSMALLINT cbProcName, SQLCHAR *szColumnName, SQLSMALLINT cbColumnName) { - CSTR func = 
"SQLProcedureColumns"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, - *prName = szProcName, *clName = szColumnName; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ProcedureColumns(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, prName, cbProcName, clName, - cbColumnName, flag); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newPr = NULL, *newCl = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newPr = - make_lstring_ifneeded(conn, szProcName, cbProcName, ifallupper), - NULL != newPr) { - prName = newPr; - reexec = TRUE; - } - if (newCl = make_lstring_ifneeded(conn, szColumnName, cbColumnName, - ifallupper), - NULL != newCl) { - clName = newCl; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_ProcedureColumns(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, prName, cbProcName, - clName, cbColumnName, flag); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newPr) - free(newPr); - if (newCl) - free(newCl); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLProcedures(HSTMT hstmt, SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, SQLCHAR *szProcName, - SQLSMALLINT cbProcName) { - CSTR func = "SQLProcedures"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, - *prName = szProcName; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Procedures(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, prName, cbProcName, flag); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newPr = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newPr = - make_lstring_ifneeded(conn, szProcName, cbProcName, ifallupper), - NULL != newPr) { - prName = newPr; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_Procedures(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, prName, cbProcName, flag); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newPr) - free(newPr); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLSetPos(HSTMT hstmt, SQLSETPOSIROW irow, SQLUSMALLINT fOption, 
- SQLUSMALLINT fLock) { - UNUSED(irow, fOption, fLock); - StatementClass *stmt = (StatementClass *)hstmt; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "SQLSetPos is not supported.", "SQLSetPos"); - return SQL_ERROR; -} - -#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLTablePrivileges(HSTMT hstmt, SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, - SQLCHAR *szTableName, - SQLSMALLINT cbTableName) { - CSTR func = "SQLTablePrivileges"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLCHAR *ctName = szCatalogName, *scName = szSchemaName, - *tbName = szTableName; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_TablePrivileges(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, flag); - if (SQL_SUCCESS == ret && theResultIsEmpty(stmt)) { - BOOL ifallupper = TRUE, reexec = FALSE; - SQLCHAR *newCt = NULL, *newSc = NULL, *newTb = NULL; - ConnectionClass *conn = SC_get_conn(stmt); - - if (newCt = make_lstring_ifneeded(conn, szCatalogName, cbCatalogName, - ifallupper), - NULL != newCt) { - ctName = newCt; - reexec = TRUE; - } - if (newSc = make_lstring_ifneeded(conn, szSchemaName, cbSchemaName, - ifallupper), - NULL != newSc) { - scName = newSc; - reexec = TRUE; - } - if (newTb = make_lstring_ifneeded(conn, szTableName, cbTableName, - ifallupper), - NULL != newTb) { - tbName = newTb; - reexec = TRUE; - } - if (reexec) { - ret = OPENSEARCHAPI_TablePrivileges(hstmt, ctName, cbCatalogName, scName, - cbSchemaName, tbName, cbTableName, flag); - if (newCt) - free(newCt); - if (newSc) - free(newSc); - if (newTb) - free(newTb); - } - } - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -RETCODE SQL_API SQLBindParameter(HSTMT hstmt, SQLUSMALLINT ipar, - SQLSMALLINT fParamType, SQLSMALLINT fCType, - SQLSMALLINT fSqlType, SQLULEN cbColDef, - SQLSMALLINT ibScale, PTR rgbValue, - SQLLEN cbValueMax, SQLLEN *pcbValue) { - UNUSED(ipar, fParamType, fCType, fSqlType, cbColDef, ibScale, rgbValue, - cbValueMax, pcbValue); - StatementClass *stmt = (StatementClass *)hstmt; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", - "SQLBindParameter"); - return SQL_ERROR; -} - -/* ODBC 2.x-specific functions */ -// TODO (#590): Add implementations for remaining ODBC 2.x functions - -RETCODE SQL_API SQLAllocStmt(SQLHDBC InputHandle, SQLHSTMT *OutputHandle) { - RETCODE ret; - ConnectionClass *conn; - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - conn = (ConnectionClass *)InputHandle; - ENTER_CONN_CS(conn); - ret = OPENSEARCHAPI_AllocStmt( - InputHandle, OutputHandle, - PODBC_EXTERNAL_STATEMENT | PODBC_INHERIT_CONNECT_OPTIONS); - if (*OutputHandle) - ((StatementClass *)(*OutputHandle))->external = 1; - LEAVE_CONN_CS(conn); - - return ret; -} -
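SQLAllocStmt above survives only for ODBC 2.x applications; ODBC 3.x code reaches the driver through SQLAllocHandle in odbcapi30.c (deleted further down), and both paths converge on OPENSEARCHAPI_AllocStmt with the same PODBC_ flags. From the application's point of view the two spellings are interchangeable (illustrative only):

/* Illustrative: the deprecated 2.x call and its 3.x replacement both
 * end up in OPENSEARCHAPI_AllocStmt() inside this driver. */
SQLHSTMT hstmt;
SQLAllocStmt(hdbc, &hstmt);                     /* ODBC 2.x, deprecated */
SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt);  /* ODBC 3.x equivalent  */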
-#ifndef UNICODE_SUPPORTXX -RETCODE SQL_API SQLGetConnectOption(HDBC ConnectionHandle, SQLUSMALLINT Option, - PTR Value) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_UINTEGER "\n", Option); - ENTER_CONN_CS((ConnectionClass *)ConnectionHandle); - CC_clear_error((ConnectionClass *)ConnectionHandle); - ret = OPENSEARCHAPI_GetConnectOption(ConnectionHandle, Option, Value, NULL, 0); - LEAVE_CONN_CS((ConnectionClass *)ConnectionHandle); - return ret; -} - -/* SQLSetConnectOption -> SQLSetConnectAttr */ -RETCODE SQL_API SQLSetConnectOption(HDBC ConnectionHandle, SQLUSMALLINT Option, - SQLULEN Value) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_INTEGER "\n", Option); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_SetConnectOption(ConnectionHandle, Option, Value); - LEAVE_CONN_CS(conn); - return ret; -} - -/* SQLColAttributes -> SQLColAttribute */ -SQLRETURN SQL_API SQLColAttributes(SQLHSTMT StatementHandle, - SQLUSMALLINT ColumnNumber, - SQLUSMALLINT FieldIdentifier, - SQLPOINTER CharacterAttribute, - SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength, -#if defined(_WIN64) || defined(_WIN32) || defined(SQLCOLATTRIBUTE_SQLLEN) - SQLLEN *NumericAttribute -#else - SQLPOINTER NumericAttribute -#endif -) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_ColAttributes(StatementHandle, ColumnNumber, FieldIdentifier, - CharacterAttribute, BufferLength, StringLength, - NumericAttribute); - LEAVE_STMT_CS(stmt); - return ret; -} - -/* SQLError -> SQLDiagRec */ -RETCODE SQL_API SQLError(SQLHENV EnvironmentHandle, SQLHDBC ConnectionHandle, - SQLHSTMT StatementHandle, SQLCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLCHAR *MessageText, - SQLSMALLINT BufferLength, SQLSMALLINT *TextLength) { - RETCODE ret; - SQLSMALLINT RecNumber = 1; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - if (StatementHandle) { - ret = - OPENSEARCHAPI_StmtError(StatementHandle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - } else if (ConnectionHandle) { - ret = OPENSEARCHAPI_ConnectError(ConnectionHandle, RecNumber, Sqlstate, - NativeError, MessageText, BufferLength, - TextLength, 0); - } else if (EnvironmentHandle) { - ret = OPENSEARCHAPI_EnvError(EnvironmentHandle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - } else { - ret = SQL_ERROR; - } - - MYLOG(OPENSEARCH_TRACE, "leaving %d\n", ret); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ diff --git a/sql-odbc/src/sqlodbc/odbcapi30.c b/sql-odbc/src/sqlodbc/odbcapi30.c deleted file mode 100644 index 83b249015d..0000000000 --- a/sql-odbc/src/sqlodbc/odbcapi30.c +++ /dev/null @@ -1,589 +0,0 @@ -#include <stdio.h> -#include <string.h> - -#include "environ.h" -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "statement.h" - -/* SQLAllocConnect/SQLAllocEnv/SQLAllocStmt -> SQLAllocHandle */ -RETCODE SQL_API SQLAllocHandle(SQLSMALLINT HandleType, SQLHANDLE InputHandle, - SQLHANDLE *OutputHandle) { - RETCODE ret; - ConnectionClass *conn; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - switch (HandleType) { - case SQL_HANDLE_ENV: - ret = OPENSEARCHAPI_AllocEnv(OutputHandle); - break; - case SQL_HANDLE_DBC: - ENTER_ENV_CS((EnvironmentClass *)InputHandle); - ret = OPENSEARCHAPI_AllocConnect(InputHandle, OutputHandle); - LEAVE_ENV_CS((EnvironmentClass *)InputHandle); - break; - case SQL_HANDLE_STMT: - conn = (ConnectionClass *)InputHandle; - ENTER_CONN_CS(conn); - ret = OPENSEARCHAPI_AllocStmt( - InputHandle, OutputHandle, - PODBC_EXTERNAL_STATEMENT | 
PODBC_INHERIT_CONNECT_OPTIONS); - if (*OutputHandle) - ((StatementClass *)(*OutputHandle))->external = 1; - LEAVE_CONN_CS(conn); - break; - case SQL_HANDLE_DESC: - conn = (ConnectionClass *)InputHandle; - ENTER_CONN_CS(conn); - ret = OPENSEARCHAPI_AllocDesc(InputHandle, OutputHandle); - LEAVE_CONN_CS(conn); - MYLOG(OPENSEARCH_DEBUG, "OutputHandle=%p\n", *OutputHandle); - break; - default: - ret = SQL_ERROR; - break; - } - return ret; -} - -/* SQLBindParameter/SQLSetParam -> SQLBindParam */ -RETCODE SQL_API SQLBindParam(HSTMT StatementHandle, - SQLUSMALLINT ParameterNumber, - SQLSMALLINT ValueType, SQLSMALLINT ParameterType, - SQLULEN LengthPrecision, - SQLSMALLINT ParameterScale, PTR ParameterValue, - SQLLEN *StrLen_or_Ind) { - UNUSED(ParameterNumber, ValueType, ParameterType, LengthPrecision, - ParameterScale, ParameterValue, StrLen_or_Ind); - StatementClass *stmt = (StatementClass *)StatementHandle; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "OpenSearch does not support parameters.", "SQLBindParam"); - return SQL_ERROR; -} - -/* New function */ -RETCODE SQL_API SQLCloseCursor(HSTMT StatementHandle) { - StatementClass *stmt = (StatementClass *)StatementHandle; - if(stmt == NULL) - return SQL_ERROR; - - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_FreeStmt(StatementHandle, SQL_CLOSE); - LEAVE_STMT_CS(stmt); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -/* SQLColAttributes -> SQLColAttribute */ -SQLRETURN SQL_API SQLColAttribute(SQLHSTMT StatementHandle, - SQLUSMALLINT ColumnNumber, - SQLUSMALLINT FieldIdentifier, - SQLPOINTER CharacterAttribute, - SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength, -#if defined(_WIN64) || defined(SQLCOLATTRIBUTE_SQLLEN) - SQLLEN *NumericAttribute -#else - SQLPOINTER NumericAttribute -#endif -) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_ColAttributes(StatementHandle, ColumnNumber, FieldIdentifier, - CharacterAttribute, BufferLength, StringLength, - NumericAttribute); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -/* new function */ -RETCODE SQL_API SQLCopyDesc(SQLHDESC SourceDescHandle, - SQLHDESC TargetDescHandle) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ret = OPENSEARCHAPI_CopyDesc(SourceDescHandle, TargetDescHandle); - return ret; -} - -/* SQLTransact -> SQLEndTran */ -RETCODE SQL_API SQLEndTran(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT CompletionType) { - UNUSED(CompletionType); - if (HandleType == SQL_HANDLE_STMT) { - StatementClass *stmt = (StatementClass *)Handle; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "Transactions are not supported.", "SQLEndTran"); - } else if (HandleType == SQL_HANDLE_DBC) { - ConnectionClass *conn = (ConnectionClass *)Handle; - if (conn == NULL) - return SQL_ERROR; - CC_clear_error(conn); - CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, - "Transactions are not supported.", "SQLEndTran"); - } - return SQL_ERROR; -} - -/* SQLExtendedFetch -> SQLFetchScroll */ -RETCODE SQL_API SQLFetchScroll(HSTMT StatementHandle, - SQLSMALLINT FetchOrientation, - SQLLEN 
FetchOffset) { - CSTR func = "SQLFetchScroll"; - StatementClass *stmt = (StatementClass *)StatementHandle; - RETCODE ret = SQL_SUCCESS; - IRDFields *irdopts = SC_get_IRDF(stmt); - SQLUSMALLINT *rowStatusArray = irdopts->rowStatusArray; - SQLULEN *pcRow = irdopts->rowsFetched; - SQLLEN bkmarkoff = 0; - - MYLOG(OPENSEARCH_TRACE, "entering %d," FORMAT_LEN "\n", FetchOrientation, - FetchOffset); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (FetchOrientation == SQL_FETCH_BOOKMARK) { - if (stmt->options.bookmark_ptr) { - bkmarkoff = FetchOffset; - FetchOffset = *((Int4 *)stmt->options.bookmark_ptr); - MYLOG(OPENSEARCH_DEBUG, - "bookmark=" FORMAT_LEN " FetchOffset = " FORMAT_LEN "\n", - FetchOffset, bkmarkoff); - } else { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Bookmark isn't specified yet", func); - ret = SQL_ERROR; - } - } - if (SQL_SUCCESS == ret) { - ARDFields *opts = SC_get_ARDF(stmt); - - ret = OPENSEARCHAPI_ExtendedFetch(StatementHandle, FetchOrientation, - FetchOffset, pcRow, rowStatusArray, bkmarkoff, - opts->size_of_rowset); - stmt->transition_status = STMT_TRANSITION_FETCH_SCROLL; - } - LEAVE_STMT_CS(stmt); - if (ret != SQL_SUCCESS) - MYLOG(OPENSEARCH_TRACE, "leaving return = %d\n", ret); - return ret; -} - -/* SQLFree(Connect/Env/Stmt) -> SQLFreeHandle */ -RETCODE SQL_API SQLFreeHandle(SQLSMALLINT HandleType, SQLHANDLE Handle) { - RETCODE ret; - StatementClass *stmt; - ConnectionClass *conn = NULL; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - switch (HandleType) { - case SQL_HANDLE_ENV: - ret = OPENSEARCHAPI_FreeEnv(Handle); - break; - case SQL_HANDLE_DBC: - ret = OPENSEARCHAPI_FreeConnect(Handle); - break; - case SQL_HANDLE_STMT: - stmt = (StatementClass *)Handle; - - if (stmt) { - conn = stmt->hdbc; - if (conn) - ENTER_CONN_CS(conn); - } - - ret = OPENSEARCHAPI_FreeStmt(Handle, SQL_DROP); - - if (conn) - LEAVE_CONN_CS(conn); - - break; - case SQL_HANDLE_DESC: - ret = OPENSEARCHAPI_FreeDesc(Handle); - break; - default: - ret = SQL_ERROR; - break; - } - return ret; -} - -#ifndef UNICODE_SUPPORTXX -/* new function */ -RETCODE SQL_API SQLGetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ret = OPENSEARCHAPI_GetDescField(DescriptorHandle, RecNumber, FieldIdentifier, - Value, BufferLength, StringLength); - return ret; -} - -/* new function */ -RETCODE SQL_API SQLGetDescRec(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, - SQLCHAR *Name, SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength, SQLSMALLINT *Type, - SQLSMALLINT *SubType, SQLLEN *Length, - SQLSMALLINT *Precision, SQLSMALLINT *Scale, - SQLSMALLINT *Nullable) { - UNUSED(DescriptorHandle, RecNumber, Name, BufferLength, StringLength, Type, - SubType, Length, Precision, Scale, Nullable); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - MYLOG(OPENSEARCH_DEBUG, "Error not implemented\n"); - return SQL_ERROR; -} -
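SQLGetDiagField and SQLGetDiagRec below are thin pass-throughs to the OPENSEARCHAPI_ diagnostic routines. On the application side, the records are usually drained after any failed call, roughly like this (illustrative only; hstmt is assumed to be an allocated statement handle):

/* Illustrative: read every diagnostic record attached to a statement. */
SQLCHAR state[6], msg[SQL_MAX_MESSAGE_LENGTH];
SQLINTEGER native;
SQLSMALLINT len, rec = 1;
while (SQL_SUCCEEDED(SQLGetDiagRec(SQL_HANDLE_STMT, hstmt, rec++, state,
                                   &native, msg, sizeof(msg), &len)))
    fprintf(stderr, "[%s] (%d) %s\n", state, (int)native, msg);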
-/* new function */ -RETCODE SQL_API SQLGetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, - SQLSMALLINT DiagIdentifier, PTR DiagInfo, - SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering Handle=(%u,%p) Rec=%d Id=%d info=(%p,%d)\n", - HandleType, Handle, RecNumber, DiagIdentifier, DiagInfo, - BufferLength); - ret = OPENSEARCHAPI_GetDiagField(HandleType, Handle, RecNumber, DiagIdentifier, - DiagInfo, BufferLength, StringLength); - return ret; -} - -/* SQLError -> SQLDiagRec */ -RETCODE SQL_API SQLGetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLCHAR *MessageText, - SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ret = OPENSEARCHAPI_GetDiagRec(HandleType, Handle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -/* new function */ -RETCODE SQL_API SQLGetEnvAttr(HENV EnvironmentHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - UNUSED(BufferLength, StringLength); - RETCODE ret; - EnvironmentClass *env = (EnvironmentClass *)EnvironmentHandle; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); - ENTER_ENV_CS(env); - ret = SQL_SUCCESS; - switch (Attribute) { - case SQL_ATTR_CONNECTION_POOLING: - *((unsigned int *)Value) = - EN_is_pooling(env) ? SQL_CP_ONE_PER_DRIVER : SQL_CP_OFF; - break; - case SQL_ATTR_CP_MATCH: - *((unsigned int *)Value) = SQL_CP_RELAXED_MATCH; - break; - case SQL_ATTR_ODBC_VERSION: - *((unsigned int *)Value) = - EN_is_odbc2(env) ? SQL_OV_ODBC2 : SQL_OV_ODBC3; - break; - case SQL_ATTR_OUTPUT_NTS: - *((unsigned int *)Value) = SQL_TRUE; - break; - default: - env->errornumber = CONN_INVALID_ARGUMENT_NO; - ret = SQL_ERROR; - } - LEAVE_ENV_CS(env); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -/* SQLGetConnectOption -> SQLGetConnectAttr */ -RETCODE SQL_API SQLGetConnectAttr(HDBC ConnectionHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_UINTEGER "\n", Attribute); - ENTER_CONN_CS((ConnectionClass *)ConnectionHandle); - CC_clear_error((ConnectionClass *)ConnectionHandle); - ret = OPENSEARCHAPI_GetConnectAttr(ConnectionHandle, Attribute, Value, BufferLength, - StringLength); - LEAVE_CONN_CS((ConnectionClass *)ConnectionHandle); - return ret; -} - -/* SQLGetStmtOption -> SQLGetStmtAttr */ -RETCODE SQL_API SQLGetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering Handle=%p " FORMAT_INTEGER "\n", StatementHandle, - Attribute); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_GetStmtAttr(StatementHandle, Attribute, Value, BufferLength, - StringLength); - LEAVE_STMT_CS(stmt); - return ret; -} - -/* SQLSetConnectOption -> SQLSetConnectAttr */ -RETCODE SQL_API SQLSetConnectAttr(HDBC ConnectionHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER StringLength) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = - OPENSEARCHAPI_SetConnectAttr(ConnectionHandle, Attribute, Value, StringLength); - LEAVE_CONN_CS(conn); - return ret; -} -
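SQLGetEnvAttr above answers only the four environment attributes this driver tracks (connection pooling, CP match, ODBC version, output NTS); its setter counterpart SQLSetEnvAttr appears a little further down. The standard application handshake that exercises the SQL_ATTR_ODBC_VERSION case looks like this (illustrative):

/* Illustrative: declare ODBC 3.x behavior before allocating a connection. */
SQLHENV henv;
SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);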
-/* new function */ -RETCODE SQL_API SQLSetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering h=%p rec=%d field=%d val=%p\n", DescriptorHandle, - RecNumber, FieldIdentifier, Value); - ret = OPENSEARCHAPI_SetDescField(DescriptorHandle, RecNumber, FieldIdentifier, - Value, BufferLength); - return ret; -} - -/* new function */ -RETCODE SQL_API SQLSetDescRec(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber, - SQLSMALLINT Type, SQLSMALLINT SubType, - SQLLEN Length, SQLSMALLINT Precision, - SQLSMALLINT Scale, PTR Data, SQLLEN *StringLength, - SQLLEN *Indicator) { - UNUSED(DescriptorHandle, RecNumber, Type, SubType, Length, Precision, Scale, - Data, StringLength, Indicator); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - MYLOG(OPENSEARCH_DEBUG, "Error not implemented\n"); - return SQL_ERROR; -} -#endif /* UNICODE_SUPPORTXX */ - -/* new function */ -RETCODE SQL_API SQLSetEnvAttr(HENV EnvironmentHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER StringLength) { - UNUSED(StringLength); - RETCODE ret; - EnvironmentClass *env = (EnvironmentClass *)EnvironmentHandle; - - MYLOG(OPENSEARCH_TRACE, "entering att=" FORMAT_INTEGER "," FORMAT_ULEN "\n", - Attribute, (SQLULEN)Value); - ENTER_ENV_CS(env); - switch (Attribute) { - case SQL_ATTR_CONNECTION_POOLING: - switch ((ULONG_PTR)Value) { - case SQL_CP_OFF: - EN_unset_pooling(env); - ret = SQL_SUCCESS; - break; - case SQL_CP_ONE_PER_DRIVER: - EN_set_pooling(env); - ret = SQL_SUCCESS; - break; - default: - ret = SQL_SUCCESS_WITH_INFO; - } - break; - case SQL_ATTR_CP_MATCH: - /* *((unsigned int *) Value) = SQL_CP_RELAXED_MATCH; */ - ret = SQL_SUCCESS; - break; - case SQL_ATTR_ODBC_VERSION: - if (SQL_OV_ODBC2 == CAST_UPTR(SQLUINTEGER, Value)) - EN_set_odbc2(env); - else - EN_set_odbc3(env); - ret = SQL_SUCCESS; - break; - case SQL_ATTR_OUTPUT_NTS: - if (SQL_TRUE == CAST_UPTR(SQLUINTEGER, Value)) - ret = SQL_SUCCESS; - else - ret = SQL_SUCCESS_WITH_INFO; - break; - default: - env->errornumber = CONN_INVALID_ARGUMENT_NO; - ret = SQL_ERROR; - } - if (SQL_SUCCESS_WITH_INFO == ret) { - env->errornumber = CONN_OPTION_VALUE_CHANGED; - env->errormsg = "SetEnv changed to "; - } - LEAVE_ENV_CS(env); - return ret; -} - -#ifndef UNICODE_SUPPORTXX -/* SQLSet(Param/Scroll/Stmt)Option -> SQLSetStmtAttr */ -RETCODE SQL_API SQLSetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER StringLength) { - StatementClass *stmt = (StatementClass *)StatementHandle; - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering Handle=%p " FORMAT_INTEGER "," FORMAT_ULEN "\n", - StatementHandle, Attribute, (SQLULEN)Value); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_SetStmtAttr(StatementHandle, Attribute, Value, StringLength); - LEAVE_STMT_CS(stmt); - return ret; -} -#endif /* UNICODE_SUPPORTXX */ - -#define SQL_FUNC_ESET(pfExists, uwAPI) \ - (*(((UWORD *)(pfExists)) + ((uwAPI) >> 4)) |= (1 << ((uwAPI)&0x000F))) -RETCODE SQL_API OPENSEARCHAPI_GetFunctions30(HDBC hdbc, SQLUSMALLINT fFunction, - SQLUSMALLINT FAR *pfExists) { - ConnectionClass *conn = (ConnectionClass *)hdbc; - CC_clear_error(conn); - if (fFunction != SQL_API_ODBC3_ALL_FUNCTIONS) - return SQL_ERROR; - memset(pfExists, 0, sizeof(UWORD) * SQL_API_ODBC3_ALL_FUNCTIONS_SIZE); - - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCCONNECT); 1 deprecated */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCENV); 2 deprecated */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCSTMT); 3 deprecated */ - - /* - * for (i = SQL_API_SQLBINDCOL; i <= 23; i++) SQL_FUNC_ESET(pfExists, - * i); - */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDCOL); /* 4 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCANCEL); /* 5 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCOLATTRIBUTE); /* 6 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCONNECT); /* 7 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLDESCRIBECOL); /* 8 */ - 
SQL_FUNC_ESET(pfExists, SQL_API_SQLDISCONNECT); /* 9 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLERROR); 10 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLEXECDIRECT); /* 11 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLEXECUTE); /* 12 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLFETCH); /* 13 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLFREECONNECT); 14 deprecated */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLFREEENV); 15 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLFREESTMT); /* 16 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCURSORNAME); /* 17 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLNUMRESULTCOLS); /* 18 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPREPARE); /* 19 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLROWCOUNT); /* 20 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCURSORNAME); /* 21 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETPARAM); 22 deprecated */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLTRANSACT); 23 deprecated */ - - /* - * for (i = 40; i < SQL_API_SQLEXTENDEDFETCH; i++) - * SQL_FUNC_ESET(pfExists, i); - */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCOLUMNS); /* 40 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLDRIVERCONNECT); /* 41 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCONNECTOPTION); 42 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDATA); /* 43 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETFUNCTIONS); /* 44 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETINFO); /* 45 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLGETSTMTOPTION); 46 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETTYPEINFO); /* 47 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPARAMDATA); /* 48 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPUTDATA); /* 49 */ - - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCONNECTIONOPTION); 50 deprecated */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSTMTOPTION); 51 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSPECIALCOLUMNS); /* 52 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSTATISTICS); /* 53 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLTABLES); /* 54 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLDATASOURCES); /* 57 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLDESCRIBEPARAM); /* 58 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLEXTENDEDFETCH); /* 59 deprecated ? 
*/ - - /* - * for (++i; i < SQL_API_SQLBINDPARAMETER; i++) - * SQL_FUNC_ESET(pfExists, i); - */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLFOREIGNKEYS); /* 60 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLMORERESULTS); /* 61 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLNATIVESQL); /* 62 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLNUMPARAMS); /* 63 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLPARAMOPTIONS); 64 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPRIMARYKEYS); /* 65 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPROCEDURECOLUMNS); /* 66 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLPROCEDURES); /* 67 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETPOS); /* 68 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSCROLLOPTIONS); 69 deprecated */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLTABLEPRIVILEGES); /* 70 */ - /* SQL_FUNC_ESET(pfExists, SQL_API_SQLDRIVERS); */ /* 71 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDPARAMETER); /* 72 */ - - SQL_FUNC_ESET(pfExists, SQL_API_SQLALLOCHANDLE); /* 1001 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLBINDPARAM); /* 1002 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCLOSECURSOR); /* 1003 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLCOPYDESC); /* 1004 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLENDTRAN); /* 1005 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLFREEHANDLE); /* 1006 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETCONNECTATTR); /* 1007 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDESCFIELD); /* 1008 */ - SQL_FUNC_ESET(pfExists, - SQL_API_SQLGETDIAGFIELD); /* 1010 minimal implementation */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETDIAGREC); /* 1011 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETENVATTR); /* 1012 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLGETSTMTATTR); /* 1014 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETCONNECTATTR); /* 1016 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETDESCFIELD); /* 1017 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETENVATTR); /* 1019 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLSETSTMTATTR); /* 1020 */ - SQL_FUNC_ESET(pfExists, SQL_API_SQLFETCHSCROLL); /* 1021 */ - return SQL_SUCCESS; -} - -RETCODE SQL_API SQLBulkOperations(HSTMT hstmt, SQLSMALLINT operation) { - UNUSED(operation); - StatementClass *stmt = (StatementClass *)hstmt; - if (stmt == NULL) - return SQL_ERROR; - SC_clear_error(stmt); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "Bulk operations are not supported.", "SQLBulkOperations"); - return SQL_ERROR; -} diff --git a/sql-odbc/src/sqlodbc/odbcapi30w.c b/sql-odbc/src/sqlodbc/odbcapi30w.c deleted file mode 100644 index dd8cf0d3a0..0000000000 --- a/sql-odbc/src/sqlodbc/odbcapi30w.c +++ /dev/null @@ -1,387 +0,0 @@ -#include <stdio.h> -#include <string.h> - -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "statement.h" -#include "unicode_support.h" - -RETCODE SQL_API SQLGetStmtAttrW(SQLHSTMT hstmt, SQLINTEGER fAttribute, - PTR rgbValue, SQLINTEGER cbValueMax, - SQLINTEGER *pcbValue) { - UNUSED(hstmt, fAttribute, rgbValue, cbValueMax, pcbValue); - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_STMT_CS((StatementClass *)hstmt); - SC_clear_error((StatementClass *)hstmt); - ret = OPENSEARCHAPI_GetStmtAttr(hstmt, fAttribute, rgbValue, cbValueMax, pcbValue); - LEAVE_STMT_CS((StatementClass *)hstmt); - return ret; -} - -RETCODE SQL_API SQLSetStmtAttrW(SQLHSTMT hstmt, SQLINTEGER fAttribute, - PTR rgbValue, SQLINTEGER cbValueMax) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_SetStmtAttr(hstmt, 
fAttribute, rgbValue, cbValueMax); - LEAVE_STMT_CS(stmt); - return ret; -} - -RETCODE SQL_API SQLGetConnectAttrW(HDBC hdbc, SQLINTEGER fAttribute, - PTR rgbValue, SQLINTEGER cbValueMax, - SQLINTEGER *pcbValue) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS((ConnectionClass *)hdbc); - CC_clear_error((ConnectionClass *)hdbc); - ret = - OPENSEARCHAPI_GetConnectAttr(hdbc, fAttribute, rgbValue, cbValueMax, pcbValue); - LEAVE_CONN_CS((ConnectionClass *)hdbc); - return ret; -} - -RETCODE SQL_API SQLSetConnectAttrW(HDBC hdbc, SQLINTEGER fAttribute, - PTR rgbValue, SQLINTEGER cbValue) { - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - CC_set_in_unicode_driver(conn); - ret = OPENSEARCHAPI_SetConnectAttr(hdbc, fAttribute, rgbValue, cbValue); - LEAVE_CONN_CS(conn); - return ret; -} - -/* new function */ -RETCODE SQL_API SQLSetDescFieldW(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength) { - RETCODE ret; - SQLLEN vallen; - char *uval = NULL; - BOOL val_alloced = FALSE; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (BufferLength > 0 || SQL_NTS == BufferLength) { - switch (FieldIdentifier) { - case SQL_DESC_BASE_COLUMN_NAME: - case SQL_DESC_BASE_TABLE_NAME: - case SQL_DESC_CATALOG_NAME: - case SQL_DESC_LABEL: - case SQL_DESC_LITERAL_PREFIX: - case SQL_DESC_LITERAL_SUFFIX: - case SQL_DESC_LOCAL_TYPE_NAME: - case SQL_DESC_NAME: - case SQL_DESC_SCHEMA_NAME: - case SQL_DESC_TABLE_NAME: - case SQL_DESC_TYPE_NAME: - uval = ucs2_to_utf8( - Value, - BufferLength > 0 ? BufferLength / WCLEN : BufferLength, - &vallen, FALSE); - val_alloced = TRUE; - break; - default: - vallen = BufferLength; - uval = Value; - break; - } - } else { - vallen = BufferLength; - uval = Value; - } - ret = OPENSEARCHAPI_SetDescField(DescriptorHandle, RecNumber, FieldIdentifier, uval, - (SQLINTEGER)vallen); - if (val_alloced) - free(uval); - return ret; -} - -RETCODE SQL_API SQLGetDescFieldW(SQLHDESC hdesc, SQLSMALLINT iRecord, - SQLSMALLINT iField, PTR rgbValue, - SQLINTEGER cbValueMax, SQLINTEGER *pcbValue) { - RETCODE ret; - SQLINTEGER blen = 0, bMax, *pcbV; - char *rgbV = NULL, *rgbVt; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - switch (iField) { - case SQL_DESC_BASE_COLUMN_NAME: - case SQL_DESC_BASE_TABLE_NAME: - case SQL_DESC_CATALOG_NAME: - case SQL_DESC_LABEL: - case SQL_DESC_LITERAL_PREFIX: - case SQL_DESC_LITERAL_SUFFIX: - case SQL_DESC_LOCAL_TYPE_NAME: - case SQL_DESC_NAME: - case SQL_DESC_SCHEMA_NAME: - case SQL_DESC_TABLE_NAME: - case SQL_DESC_TYPE_NAME: - bMax = cbValueMax * 3 / WCLEN; - rgbV = malloc(bMax + 1); - pcbV = &blen; - for (rgbVt = rgbV;; bMax = blen + 1, rgbVt = realloc(rgbV, bMax)) { - if (!rgbVt) { - ret = SQL_ERROR; - break; - } - rgbV = rgbVt; - ret = OPENSEARCHAPI_GetDescField(hdesc, iRecord, iField, rgbV, bMax, - pcbV); - if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax) - break; - } - if (SQL_SUCCEEDED(ret)) { - blen = (SQLINTEGER)utf8_to_ucs2( - rgbV, blen, (SQLWCHAR *)rgbValue, cbValueMax / WCLEN); - if (SQL_SUCCESS == ret - && blen * WCLEN >= (unsigned long)cbValueMax) { - ret = SQL_SUCCESS_WITH_INFO; - DC_set_error(hdesc, STMT_TRUNCATED, - "The buffer was too small for the rgbDesc."); - } - if (pcbValue) - *pcbValue = blen * WCLEN; - } - if (rgbV) - free(rgbV); - break; - default: - rgbV = rgbValue; - bMax = cbValueMax; - pcbV = pcbValue; - ret = OPENSEARCHAPI_GetDescField(hdesc, iRecord, iField, 
rgbV, bMax, pcbV); - break; - } - - return ret; -} - -RETCODE SQL_API SQLGetDiagRecW(SQLSMALLINT fHandleType, SQLHANDLE handle, - SQLSMALLINT iRecord, SQLWCHAR *szSqlState, - SQLINTEGER *pfNativeError, SQLWCHAR *szErrorMsg, - SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg) { - RETCODE ret; - SQLSMALLINT buflen, tlen; - char qstr_ansi[8], *mtxt = NULL; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - buflen = 0; - if (szErrorMsg && cbErrorMsgMax > 0) { - buflen = cbErrorMsgMax; - mtxt = malloc(buflen); - } - ret = OPENSEARCHAPI_GetDiagRec(fHandleType, handle, iRecord, (SQLCHAR *)qstr_ansi, - pfNativeError, (SQLCHAR *)mtxt, buflen, &tlen); - if (SQL_SUCCEEDED(ret)) { - if (szSqlState) - utf8_to_ucs2(qstr_ansi, -1, szSqlState, 6); - if (mtxt && tlen <= cbErrorMsgMax) { - SQLULEN ulen = utf8_to_ucs2_lf(mtxt, tlen, FALSE, szErrorMsg, - cbErrorMsgMax, TRUE); - if (ulen == (SQLULEN)-1) - tlen = (SQLSMALLINT)locale_to_sqlwchar( - (SQLWCHAR *)szErrorMsg, mtxt, cbErrorMsgMax, FALSE); - else - tlen = (SQLSMALLINT)ulen; - if (tlen >= cbErrorMsgMax) - ret = SQL_SUCCESS_WITH_INFO; - else if (tlen < 0) { - char errc[32]; - - SPRINTF_FIXED(errc, "Error: SqlState=%s", qstr_ansi); - tlen = (SQLSMALLINT)utf8_to_ucs2(errc, -1, szErrorMsg, - cbErrorMsgMax); - } - } - if (pcbErrorMsg) - *pcbErrorMsg = tlen; - } - if (mtxt) - free(mtxt); - return ret; -} - -SQLRETURN SQL_API SQLColAttributeW(SQLHSTMT hstmt, SQLUSMALLINT iCol, - SQLUSMALLINT iField, SQLPOINTER pCharAttr, - SQLSMALLINT cbCharAttrMax, - SQLSMALLINT *pcbCharAttr, -#if defined(_WIN64) || defined(SQLCOLATTRIBUTE_SQLLEN) - SQLLEN *pNumAttr -#else - SQLPOINTER pNumAttr -#endif -) { - CSTR func = "SQLColAttributeW"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)hstmt; - SQLSMALLINT *rgbL, blen = 0, bMax; - char *rgbD = NULL, *rgbDt; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - switch (iField) { - case SQL_DESC_BASE_COLUMN_NAME: - case SQL_DESC_BASE_TABLE_NAME: - case SQL_DESC_CATALOG_NAME: - case SQL_DESC_LABEL: - case SQL_DESC_LITERAL_PREFIX: - case SQL_DESC_LITERAL_SUFFIX: - case SQL_DESC_LOCAL_TYPE_NAME: - case SQL_DESC_NAME: - case SQL_DESC_SCHEMA_NAME: - case SQL_DESC_TABLE_NAME: - case SQL_DESC_TYPE_NAME: - case SQL_COLUMN_NAME: - bMax = cbCharAttrMax * 3 / WCLEN; - rgbD = malloc(bMax); - rgbL = &blen; - for (rgbDt = rgbD;; bMax = blen + 1, rgbDt = realloc(rgbD, bMax)) { - if (!rgbDt) { - ret = SQL_ERROR; - break; - } - rgbD = rgbDt; - ret = OPENSEARCHAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL, - pNumAttr); - if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax) - break; - } - if (SQL_SUCCEEDED(ret)) { - blen = (SQLSMALLINT)utf8_to_ucs2( - rgbD, blen, (SQLWCHAR *)pCharAttr, cbCharAttrMax / WCLEN); - if (SQL_SUCCESS == ret - && blen * WCLEN >= (unsigned long)cbCharAttrMax) { - ret = SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the pCharAttr.", - func); - } - if (pcbCharAttr) - *pcbCharAttr = blen * WCLEN; - } - if (rgbD) - free(rgbD); - break; - default: - rgbD = pCharAttr; - bMax = cbCharAttrMax; - rgbL = pcbCharAttr; - ret = OPENSEARCHAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL, - pNumAttr); - break; - } - LEAVE_STMT_CS(stmt); - - return ret; -} - -RETCODE SQL_API SQLGetDiagFieldW(SQLSMALLINT fHandleType, SQLHANDLE handle, - SQLSMALLINT iRecord, SQLSMALLINT fDiagField, - SQLPOINTER rgbDiagInfo, - SQLSMALLINT cbDiagInfoMax, - SQLSMALLINT 
                                                 *pcbDiagInfo) {
-    RETCODE ret;
-    SQLSMALLINT *rgbL, blen = 0, bMax;
-    char *rgbD = NULL, *rgbDt;
-
-    MYLOG(OPENSEARCH_TRACE, "entering Handle=(%u,%p) Rec=%d Id=%d info=(%p,%d)\n", fHandleType,
-          handle, iRecord, fDiagField, rgbDiagInfo, cbDiagInfoMax);
-    switch (fDiagField) {
-        case SQL_DIAG_DYNAMIC_FUNCTION:
-        case SQL_DIAG_CLASS_ORIGIN:
-        case SQL_DIAG_CONNECTION_NAME:
-        case SQL_DIAG_MESSAGE_TEXT:
-        case SQL_DIAG_SERVER_NAME:
-        case SQL_DIAG_SQLSTATE:
-        case SQL_DIAG_SUBCLASS_ORIGIN:
-            bMax = cbDiagInfoMax * 3 / WCLEN + 1;
-            if (rgbD = malloc(bMax), !rgbD)
-                return SQL_ERROR;
-            rgbL = &blen;
-            for (rgbDt = rgbD;; bMax = blen + 1, rgbDt = realloc(rgbD, bMax)) {
-                if (!rgbDt) {
-                    free(rgbD);
-                    return SQL_ERROR;
-                }
-                rgbD = rgbDt;
-                ret = OPENSEARCHAPI_GetDiagField(fHandleType, handle, iRecord,
-                                                 fDiagField, rgbD, bMax, rgbL);
-                if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax)
-                    break;
-            }
-            if (SQL_SUCCEEDED(ret)) {
-                SQLULEN ulen = (SQLSMALLINT)utf8_to_ucs2_lf(
-                    rgbD, blen, FALSE, (SQLWCHAR *)rgbDiagInfo,
-                    cbDiagInfoMax / WCLEN, TRUE);
-                if (ulen == (SQLULEN)-1)
-                    blen = (SQLSMALLINT)locale_to_sqlwchar(
-                        (SQLWCHAR *)rgbDiagInfo, rgbD, cbDiagInfoMax / WCLEN,
-                        FALSE);
-                else
-                    blen = (SQLSMALLINT)ulen;
-                if (SQL_SUCCESS == ret
-                    && blen * WCLEN >= (unsigned long)cbDiagInfoMax)
-                    ret = SQL_SUCCESS_WITH_INFO;
-                if (pcbDiagInfo) {
-                    *pcbDiagInfo = blen * WCLEN;
-                }
-            }
-            if (rgbD)
-                free(rgbD);
-            break;
-        default:
-            rgbD = rgbDiagInfo;
-            bMax = cbDiagInfoMax;
-            rgbL = pcbDiagInfo;
-            ret = OPENSEARCHAPI_GetDiagField(fHandleType, handle, iRecord, fDiagField,
-                                             rgbD, bMax, rgbL);
-            break;
-    }
-
-    return ret;
-}
-
-/* new function */
-RETCODE SQL_API SQLGetDescRecW(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber,
-                               SQLWCHAR *Name, SQLSMALLINT BufferLength,
-                               SQLSMALLINT *StringLength, SQLSMALLINT *Type,
-                               SQLSMALLINT *SubType, SQLLEN *Length,
-                               SQLSMALLINT *Precision, SQLSMALLINT *Scale,
-                               SQLSMALLINT *Nullable) {
-    UNUSED(DescriptorHandle, RecNumber, Name, BufferLength, StringLength, Type,
-           SubType, Length, Precision, Scale, Nullable);
-    MYLOG(OPENSEARCH_TRACE, "entering\n");
-    MYLOG(OPENSEARCH_DEBUG, "Error not implemented\n");
-    return SQL_ERROR;
-}
-
-/* new function */
-RETCODE SQL_API SQLSetDescRecW(SQLHDESC DescriptorHandle, SQLSMALLINT RecNumber,
-                               SQLSMALLINT Type, SQLSMALLINT SubType,
-                               SQLLEN Length, SQLSMALLINT Precision,
-                               SQLSMALLINT Scale, PTR Data,
-                               SQLLEN *StringLength, SQLLEN *Indicator) {
-    UNUSED(DescriptorHandle, RecNumber, Type, SubType, Length, Precision, Scale,
-           Data, StringLength, Indicator);
-    MYLOG(OPENSEARCH_TRACE, "entering\n");
-    MYLOG(OPENSEARCH_DEBUG, "Error not implemented\n");
-    return SQL_ERROR;
-}
diff --git a/sql-odbc/src/sqlodbc/odbcapiw.c b/sql-odbc/src/sqlodbc/odbcapiw.c
deleted file mode 100644
index 7577e0577c..0000000000
--- a/sql-odbc/src/sqlodbc/odbcapiw.c
+++ /dev/null
@@ -1,1031 +0,0 @@
-#include 
-#include 
-
-#include "opensearch_odbc.h"
-#include "opensearch_apifunc.h"
-#include "opensearch_connection.h"
-#include "opensearch_driver_connect.h"
-#include "opensearch_info.h"
-#include "statement.h"
-#include "unicode_support.h"
-
-RETCODE SQL_API SQLColumnsW(HSTMT StatementHandle, SQLWCHAR *CatalogName,
-                            SQLSMALLINT NameLength1, SQLWCHAR *SchemaName,
-                            SQLSMALLINT NameLength2, SQLWCHAR *TableName,
-                            SQLSMALLINT NameLength3, SQLWCHAR *ColumnName,
-                            SQLSMALLINT NameLength4) {
-    CSTR func = "SQLColumnsW";
-    RETCODE ret;
-    char *ctName, *scName, *tbName, *clName;
-    SQLLEN nmlen1, nmlen2, nmlen3, nmlen4;
-    StatementClass *stmt = (StatementClass
*)StatementHandle; - BOOL lower_id; - UWORD flag = PODBC_SEARCH_PUBLIC_SCHEMA; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); - scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); - tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); - clName = ucs2_to_utf8(ColumnName, NameLength4, &nmlen4, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Columns(StatementHandle, (SQLCHAR *)ctName, - (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, - (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, - (SQLSMALLINT)nmlen3, (SQLCHAR *)clName, - (SQLSMALLINT)nmlen4, flag, 0, 0); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - if (clName) - free(clName); - return ret; -} - -RETCODE SQL_API SQLConnectW(HDBC ConnectionHandle, SQLWCHAR *ServerName, - SQLSMALLINT NameLength1, SQLWCHAR *UserName, - SQLSMALLINT NameLength2, SQLWCHAR *Authentication, - SQLSMALLINT NameLength3) { - char *svName, *usName, *auth; - SQLLEN nmlen1, nmlen2, nmlen3; - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - CC_set_in_unicode_driver(conn); - svName = ucs2_to_utf8(ServerName, NameLength1, &nmlen1, FALSE); - usName = ucs2_to_utf8(UserName, NameLength2, &nmlen2, FALSE); - auth = ucs2_to_utf8(Authentication, NameLength3, &nmlen3, FALSE); - ret = - OPENSEARCHAPI_Connect(ConnectionHandle, (SQLCHAR *)svName, (SQLSMALLINT)nmlen1, - (SQLCHAR *)usName, (SQLSMALLINT)nmlen2, (SQLCHAR *)auth, - (SQLSMALLINT)nmlen3); - LEAVE_CONN_CS(conn); - if (svName) - free(svName); - if (usName) - free(usName); - if (auth) - free(auth); - return ret; -} - -RETCODE SQL_API SQLDriverConnectW(HDBC hdbc, HWND hwnd, SQLWCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, - SQLWCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut, - SQLUSMALLINT fDriverCompletion) { - CSTR func = "SQLDriverConnectW"; - char *szIn, *szOut = NULL; - SQLSMALLINT maxlen, obuflen = 0; - SQLLEN inlen; - SQLSMALLINT olen, *pCSO; - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - CC_set_in_unicode_driver(conn); - szIn = ucs2_to_utf8(szConnStrIn, cbConnStrIn, &inlen, FALSE); - maxlen = cbConnStrOutMax; - pCSO = NULL; - olen = 0; - if (maxlen > 0) { - obuflen = maxlen + 1; - szOut = malloc(obuflen); - if (!szOut) { - CC_set_error(conn, CONN_NO_MEMORY_ERROR, - "Could not allocate memory for output buffer", func); - ret = SQL_ERROR; - goto cleanup; - } - pCSO = &olen; - } else if (pcbConnStrOut) - pCSO = &olen; - ret = - OPENSEARCHAPI_DriverConnect(hdbc, hwnd, (SQLCHAR *)szIn, (SQLSMALLINT)inlen, - (SQLCHAR *)szOut, maxlen, pCSO, fDriverCompletion); - if (ret != SQL_ERROR && NULL != pCSO) { - SQLLEN outlen = olen; - - if (olen < obuflen) - outlen = utf8_to_ucs2(szOut, olen, szConnStrOut, cbConnStrOutMax); - else - utf8_to_ucs2(szOut, maxlen, szConnStrOut, cbConnStrOutMax); - if (outlen >= cbConnStrOutMax && NULL != szConnStrOut - && NULL != pcbConnStrOut) { - MYLOG(OPENSEARCH_ALL, "cbConnstrOutMax=%d pcb=%p\n", cbConnStrOutMax, - 
pcbConnStrOut); - if (SQL_SUCCESS == ret) { - CC_set_error(conn, CONN_TRUNCATED, - "the ConnStrOut is too small", func); - ret = SQL_SUCCESS_WITH_INFO; - } - } - if (pcbConnStrOut) - *pcbConnStrOut = (SQLSMALLINT)outlen; - } -cleanup: - LEAVE_CONN_CS(conn); - if (szOut) - free(szOut); - if (szIn) - free(szIn); - return ret; -} -RETCODE SQL_API SQLBrowseConnectW(HDBC hdbc, SQLWCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, - SQLWCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut) { - CSTR func = "SQLBrowseConnectW"; - char *szIn, *szOut; - SQLLEN inlen; - SQLUSMALLINT obuflen; - SQLSMALLINT olen = 0; - RETCODE ret; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - CC_set_in_unicode_driver(conn); - szIn = ucs2_to_utf8(szConnStrIn, cbConnStrIn, &inlen, FALSE); - obuflen = cbConnStrOutMax + 1; - szOut = malloc(obuflen); - if (szOut) - ret = OPENSEARCHAPI_BrowseConnect(hdbc, (SQLCHAR *)szIn, (SQLSMALLINT)inlen, - (SQLCHAR *)szOut, cbConnStrOutMax, &olen); - else { - CC_set_error(conn, CONN_NO_MEMORY_ERROR, - "Could not allocate memory for output buffer", func); - ret = SQL_ERROR; - } - LEAVE_CONN_CS(conn); - if (ret != SQL_ERROR) { - SQLLEN outlen = - utf8_to_ucs2(szOut, olen, szConnStrOut, cbConnStrOutMax); - if (pcbConnStrOut) - *pcbConnStrOut = (SQLSMALLINT)outlen; - } - free(szOut); - if (szIn) - free(szIn); - return ret; -} - -RETCODE SQL_API SQLDataSourcesW(HENV EnvironmentHandle, SQLUSMALLINT Direction, - SQLWCHAR *ServerName, SQLSMALLINT BufferLength1, - SQLSMALLINT *NameLength1, SQLWCHAR *Description, - SQLSMALLINT BufferLength2, - SQLSMALLINT *NameLength2) { - UNUSED(EnvironmentHandle, Direction, ServerName, BufferLength1, NameLength1, - Description, BufferLength2, NameLength2); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - return SQL_ERROR; -} - -RETCODE SQL_API SQLDescribeColW(HSTMT StatementHandle, - SQLUSMALLINT ColumnNumber, SQLWCHAR *ColumnName, - SQLSMALLINT BufferLength, - SQLSMALLINT *NameLength, SQLSMALLINT *DataType, - SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, - SQLSMALLINT *Nullable) { - CSTR func = "SQLDescribeColW"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - SQLSMALLINT buflen, nmlen = 0; - char *clName = NULL, *clNamet = NULL; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - buflen = 0; - if (BufferLength > 0) - buflen = BufferLength * 3; - else if (NameLength) - buflen = 32; - if (buflen > 0) - clNamet = malloc(buflen); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - for (;; buflen = nmlen + 1, clNamet = realloc(clName, buflen)) { - if (!clNamet) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Could not allocate memory for column name", func); - ret = SQL_ERROR; - break; - } - clName = clNamet; - ret = OPENSEARCHAPI_DescribeCol(StatementHandle, ColumnNumber, - (SQLCHAR *)clName, buflen, &nmlen, DataType, - ColumnSize, DecimalDigits, Nullable); - if (SQL_SUCCESS_WITH_INFO != ret || nmlen < buflen) - break; - } - if (SQL_SUCCEEDED(ret)) { - SQLLEN nmcount = nmlen; - - if (nmlen < buflen) - nmcount = utf8_to_ucs2(clName, nmlen, ColumnName, BufferLength); - if (SQL_SUCCESS == ret && BufferLength > 0 && nmcount > BufferLength) { - ret = SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, "Column name too large", func); - } - if (NameLength) - *NameLength = (SQLSMALLINT)nmcount; - } - LEAVE_STMT_CS(stmt); - if (clName) - free(clName); - 
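/*
 * [Editor's note: illustrative sketch, not part of the deleted file.]
 * The for(;;)/realloc loop used above (and again in SQLGetCursorNameW,
 * SQLNativeSqlW, and SQLColAttributeW) is the driver's standard
 * "guess, then grow" retry: start from a worst-case UTF-8 size, and as
 * long as the callee answers SQL_SUCCESS_WITH_INFO with a reported
 * length that does not fit, grow the buffer to length + 1 and call
 * again.  Generalized below; get_utf8() is a hypothetical stand-in for
 * any OPENSEARCHAPI_* call that reports the length it actually needed,
 * and <stdlib.h> plus the ODBC headers are assumed.
 */
static RETCODE fetch_growing(char **bufp, SQLSMALLINT guess,
                             RETCODE (*get_utf8)(char *buf, SQLSMALLINT max,
                                                 SQLSMALLINT *len)) {
    SQLSMALLINT buflen = guess, len = 0;
    char *buf = malloc(buflen), *tmp;
    RETCODE ret;

    for (tmp = buf;; buflen = len + 1, tmp = realloc(buf, buflen)) {
        if (!tmp) {           /* allocation failed; realloc left the old block alive */
            free(buf);
            *bufp = NULL;
            return SQL_ERROR;
        }
        buf = tmp;
        ret = (*get_utf8)(buf, buflen, &len);
        /* Stop unless the callee truncated and needs a bigger buffer. */
        if (SQL_SUCCESS_WITH_INFO != ret || len < buflen)
            break;
    }
    *bufp = buf;              /* caller converts to UCS-2, then frees */
    return ret;
}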
return ret; -} - -RETCODE SQL_API SQLExecDirectW(HSTMT StatementHandle, SQLWCHAR *StatementText, - SQLINTEGER TextLength) { - if (StatementHandle == NULL) - return SQL_ERROR; - - StatementClass *stmt = (StatementClass *)StatementHandle; - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - // Get query string - SQLLEN slen = 0; - char *stxt = ucs2_to_utf8(StatementText, TextLength, &slen, FALSE); - - // Enter critical - ENTER_STMT_CS(stmt); - - // Clear error and rollback - SC_clear_error(stmt); - - // Execute statement if statement is ready - RETCODE ret = SQL_ERROR; - if (!SC_opencheck(stmt, "SQLExecDirectW")) - ret = OPENSEARCHAPI_ExecDirect(StatementHandle, (const SQLCHAR *)stxt, - (SQLINTEGER)slen, 1); - - // Exit critical - LEAVE_STMT_CS(stmt); - - if (stxt) - free(stxt); - return ret; -} - -RETCODE SQL_API SQLGetCursorNameW(HSTMT StatementHandle, SQLWCHAR *CursorName, - SQLSMALLINT BufferLength, - SQLSMALLINT *NameLength) { - CSTR func = "SQLGetCursorNameW"; - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - char *crName = NULL, *crNamet; - SQLSMALLINT clen = 0, buflen; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (BufferLength > 0) - buflen = BufferLength * 3; - else - buflen = 32; - crNamet = malloc(buflen); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - for (;; buflen = clen + 1, crNamet = realloc(crName, buflen)) { - if (!crNamet) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Could not allocate memory for cursor name", func); - ret = SQL_ERROR; - break; - } - crName = crNamet; - ret = OPENSEARCHAPI_GetCursorName(StatementHandle, (SQLCHAR *)crName, buflen, - &clen); - if (SQL_SUCCESS_WITH_INFO != ret || clen < buflen) - break; - } - if (SQL_SUCCEEDED(ret)) { - SQLLEN nmcount = clen; - - if (clen < buflen) - nmcount = utf8_to_ucs2(crName, clen, CursorName, BufferLength); - if (SQL_SUCCESS == ret && nmcount > BufferLength) { - ret = SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, "Cursor name too large", func); - } - if (NameLength) - *NameLength = (SQLSMALLINT)nmcount; - } - LEAVE_STMT_CS(stmt); - free(crName); - return ret; -} - -RETCODE SQL_API SQLGetInfoW(HDBC ConnectionHandle, SQLUSMALLINT InfoType, - PTR InfoValue, SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength) { - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - RETCODE ret; - - ENTER_CONN_CS(conn); - CC_set_in_unicode_driver(conn); - CC_clear_error(conn); - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if ((ret = OPENSEARCHAPI_GetInfo(ConnectionHandle, InfoType, InfoValue, - BufferLength, StringLength)) - == SQL_ERROR) - CC_log_error("SQLGetInfoW", "", conn); - LEAVE_CONN_CS(conn); - return ret; -} - -RETCODE SQL_API SQLPrepareW(HSTMT StatementHandle, SQLWCHAR *StatementText, - SQLINTEGER TextLength) { - if (StatementHandle == NULL) - return SQL_ERROR; - - CSTR func = "SQLPrepareW"; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - SQLLEN slen; - char *stxt = ucs2_to_utf8(StatementText, TextLength, &slen, FALSE); - - // Enter critical - ENTER_STMT_CS(stmt); - - // Clear error and rollback - SC_clear_error(stmt); - - // Prepare statement if statement is ready - RETCODE ret = SQL_ERROR; - if (!SC_opencheck(stmt, func)) - ret = OPENSEARCHAPI_Prepare(StatementHandle, (const SQLCHAR *)stxt, - (SQLINTEGER)slen); - - // Exit critical - LEAVE_STMT_CS(stmt); - - // Release memory - if (stxt) - free(stxt); - return ret; -} - 
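/*
 * [Editor's note: illustrative sketch, not part of the deleted file.]
 * Every SQL...W entry point in this file follows the same shape: narrow
 * the UTF-16 arguments with ucs2_to_utf8(), take the handle's critical
 * section, clear stale diagnostics, delegate to the ANSI-internal
 * OPENSEARCHAPI_* routine, then release the lock and the temporaries.
 * Skeleton form, with do_work() as a hypothetical callee:
 */
RETCODE SQL_API ExampleW(HSTMT StatementHandle, SQLWCHAR *Name,
                         SQLSMALLINT NameLength) {
    StatementClass *stmt = (StatementClass *)StatementHandle;
    SQLLEN nlen;
    RETCODE ret;

    /* 1. Convert; the helper allocates, and the result can be NULL
     *    (e.g. for a NULL input), hence the guarded free below. */
    char *name = ucs2_to_utf8(Name, NameLength, &nlen, FALSE);

    /* 2. Serialize all work on the statement handle. */
    ENTER_STMT_CS(stmt);
    SC_clear_error(stmt);
    ret = do_work(StatementHandle, (SQLCHAR *)name, (SQLSMALLINT)nlen);
    LEAVE_STMT_CS(stmt);

    /* 3. Free the UTF-8 copy outside the critical section. */
    if (name)
        free(name);
    return ret;
}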
-RETCODE SQL_API SQLSetCursorNameW(HSTMT StatementHandle, SQLWCHAR *CursorName, - SQLSMALLINT NameLength) { - RETCODE ret; - StatementClass *stmt = (StatementClass *)StatementHandle; - char *crName; - SQLLEN nlen; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - crName = ucs2_to_utf8(CursorName, NameLength, &nlen, FALSE); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - ret = OPENSEARCHAPI_SetCursorName(StatementHandle, (SQLCHAR *)crName, - (SQLSMALLINT)nlen); - LEAVE_STMT_CS(stmt); - if (crName) - free(crName); - return ret; -} - -RETCODE SQL_API SQLSpecialColumnsW( - HSTMT StatementHandle, SQLUSMALLINT IdentifierType, SQLWCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, SQLSMALLINT NameLength2, - SQLWCHAR *TableName, SQLSMALLINT NameLength3, SQLUSMALLINT Scope, - SQLUSMALLINT Nullable) { - CSTR func = "SQLSpecialColumnsW"; - RETCODE ret; - char *ctName, *scName, *tbName; - SQLLEN nmlen1, nmlen2, nmlen3; - StatementClass *stmt = (StatementClass *)StatementHandle; - BOOL lower_id; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); - scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); - tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_SpecialColumns( - StatementHandle, IdentifierType, (SQLCHAR *)ctName, - (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, - (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, Scope, Nullable); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - return ret; -} - -RETCODE SQL_API SQLStatisticsW(HSTMT StatementHandle, SQLWCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, - SQLSMALLINT NameLength2, SQLWCHAR *TableName, - SQLSMALLINT NameLength3, SQLUSMALLINT Unique, - SQLUSMALLINT Reserved) { - CSTR func = "SQLStatisticsW"; - RETCODE ret; - char *ctName, *scName, *tbName; - SQLLEN nmlen1, nmlen2, nmlen3; - StatementClass *stmt = (StatementClass *)StatementHandle; - BOOL lower_id; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); - scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); - tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Statistics(StatementHandle, (SQLCHAR *)ctName, - (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, - (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, - (SQLSMALLINT)nmlen3, Unique, Reserved); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - return ret; -} - -RETCODE SQL_API SQLTablesW(HSTMT StatementHandle, SQLWCHAR *CatalogName, - SQLSMALLINT NameLength1, SQLWCHAR *SchemaName, - SQLSMALLINT NameLength2, SQLWCHAR *TableName, - SQLSMALLINT NameLength3, SQLWCHAR *TableType, - SQLSMALLINT NameLength4) { - CSTR func = "SQLTablesW"; - RETCODE ret; - char *ctName, *scName, *tbName, *tbType; - SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; - StatementClass *stmt = (StatementClass *)StatementHandle; - BOOL lower_id; - UWORD flag = 0; - - 
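/*
 * [Editor's note: illustrative sketch, not part of the deleted file.]
 * The PODBC_NOT_SEARCH_PATTERN flag set below is how the driver honors
 * SQL_ATTR_METADATA_ID: when that attribute is SQL_TRUE, catalog
 * arguments are identifiers, so '_' and '%' lose their LIKE-pattern
 * meaning.  From the application side it looks like this (standard ODBC
 * calls only; assumes a platform where SQLWCHAR and wchar_t match, e.g.
 * Windows, and "my_table" is a stand-in name):
 */
static void list_one_table(SQLHSTMT stmt) {
    /* Treat "my_table" literally instead of as a pattern in which '_'
     * would match any single character. */
    SQLSetStmtAttr(stmt, SQL_ATTR_METADATA_ID, (SQLPOINTER)SQL_TRUE, 0);
    SQLTablesW(stmt, NULL, 0, NULL, 0, (SQLWCHAR *)L"my_table", SQL_NTS,
               NULL, 0);
}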
MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(CatalogName, NameLength1, &nmlen1, lower_id); - scName = ucs2_to_utf8(SchemaName, NameLength2, &nmlen2, lower_id); - tbName = ucs2_to_utf8(TableName, NameLength3, &nmlen3, lower_id); - tbType = ucs2_to_utf8(TableType, NameLength4, &nmlen4, FALSE); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Tables( - StatementHandle, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, - (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, - (SQLSMALLINT)nmlen3, (SQLCHAR *)tbType, (SQLSMALLINT)nmlen4, flag); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - if (tbType) - free(tbType); - return ret; -} - -RETCODE SQL_API SQLColumnPrivilegesW( - HSTMT hstmt, SQLWCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - SQLWCHAR *szSchemaName, SQLSMALLINT cbSchemaName, SQLWCHAR *szTableName, - SQLSMALLINT cbTableName, SQLWCHAR *szColumnName, SQLSMALLINT cbColumnName) { - CSTR func = "SQLColumnPrivilegesW"; - RETCODE ret; - char *ctName, *scName, *tbName, *clName; - SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); - tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); - clName = ucs2_to_utf8(szColumnName, cbColumnName, &nmlen4, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ColumnPrivileges( - hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, - (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, - (SQLCHAR *)clName, (SQLSMALLINT)nmlen4, flag); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - if (clName) - free(clName); - return ret; -} - -RETCODE SQL_API SQLForeignKeysW( - HSTMT hstmt, SQLWCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, - SQLWCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, - SQLWCHAR *szPkTableName, SQLSMALLINT cbPkTableName, - SQLWCHAR *szFkCatalogName, SQLSMALLINT cbFkCatalogName, - SQLWCHAR *szFkSchemaName, SQLSMALLINT cbFkSchemaName, - SQLWCHAR *szFkTableName, SQLSMALLINT cbFkTableName) { - CSTR func = "SQLForeignKeysW"; - RETCODE ret; - char *ctName, *scName, *tbName, *fkctName, *fkscName, *fktbName; - SQLLEN nmlen1, nmlen2, nmlen3, nmlen4, nmlen5, nmlen6; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szPkCatalogName, cbPkCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szPkSchemaName, cbPkSchemaName, &nmlen2, lower_id); - tbName = ucs2_to_utf8(szPkTableName, cbPkTableName, &nmlen3, lower_id); - fkctName = - ucs2_to_utf8(szFkCatalogName, cbFkCatalogName, &nmlen4, 
lower_id); - fkscName = ucs2_to_utf8(szFkSchemaName, cbFkSchemaName, &nmlen5, lower_id); - fktbName = ucs2_to_utf8(szFkTableName, cbFkTableName, &nmlen6, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ForeignKeys( - hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, - (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, - (SQLCHAR *)fkctName, (SQLSMALLINT)nmlen4, (SQLCHAR *)fkscName, - (SQLSMALLINT)nmlen5, (SQLCHAR *)fktbName, (SQLSMALLINT)nmlen6); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - if (fkctName) - free(fkctName); - if (fkscName) - free(fkscName); - if (fktbName) - free(fktbName); - return ret; -} - -RETCODE SQL_API SQLNativeSqlW(HDBC hdbc, SQLWCHAR *szSqlStrIn, - SQLINTEGER cbSqlStrIn, SQLWCHAR *szSqlStr, - SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr) { - CSTR func = "SQLNativeSqlW"; - RETCODE ret; - char *szIn, *szOut = NULL, *szOutt = NULL; - SQLLEN slen; - SQLINTEGER buflen, olen = 0; - ConnectionClass *conn = (ConnectionClass *)hdbc; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - CC_set_in_unicode_driver(conn); - szIn = ucs2_to_utf8(szSqlStrIn, cbSqlStrIn, &slen, FALSE); - buflen = 3 * cbSqlStrMax; - if (buflen > 0) - szOutt = malloc(buflen); - for (;; buflen = olen + 1, szOutt = realloc(szOut, buflen)) { - if (!szOutt) { - CC_set_error(conn, CONN_NO_MEMORY_ERROR, - "Could not allocate memory for output buffer", func); - ret = SQL_ERROR; - break; - } - szOut = szOutt; - ret = OPENSEARCHAPI_NativeSql(hdbc, (SQLCHAR *)szIn, (SQLINTEGER)slen, - (SQLCHAR *)szOut, buflen, &olen); - if (SQL_SUCCESS_WITH_INFO != ret || olen < buflen) - break; - } - if (szIn) - free(szIn); - if (SQL_SUCCEEDED(ret)) { - SQLLEN szcount = olen; - - if (olen < buflen) - szcount = utf8_to_ucs2(szOut, olen, szSqlStr, cbSqlStrMax); - if (SQL_SUCCESS == ret && szcount > cbSqlStrMax) { - ret = SQL_SUCCESS_WITH_INFO; - CC_set_error(conn, CONN_TRUNCATED, "Sql string too large", func); - } - if (pcbSqlStr) - *pcbSqlStr = (SQLINTEGER)szcount; - } - LEAVE_CONN_CS(conn); - free(szOut); - return ret; -} - -RETCODE SQL_API SQLPrimaryKeysW(HSTMT hstmt, SQLWCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - SQLWCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, SQLWCHAR *szTableName, - SQLSMALLINT cbTableName) { - CSTR func = "SQLPrimaryKeysW"; - RETCODE ret; - char *ctName, *scName, *tbName; - SQLLEN nmlen1, nmlen2, nmlen3; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); - tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_PrimaryKeys(hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, - (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, - (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, 0); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (tbName) - free(tbName); - return ret; -} - -RETCODE SQL_API SQLProcedureColumnsW( - HSTMT hstmt, SQLWCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - SQLWCHAR *szSchemaName, 
SQLSMALLINT cbSchemaName, SQLWCHAR *szProcName, - SQLSMALLINT cbProcName, SQLWCHAR *szColumnName, SQLSMALLINT cbColumnName) { - CSTR func = "SQLProcedureColumnsW"; - RETCODE ret; - char *ctName, *scName, *prName, *clName; - SQLLEN nmlen1, nmlen2, nmlen3, nmlen4; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); - prName = ucs2_to_utf8(szProcName, cbProcName, &nmlen3, lower_id); - clName = ucs2_to_utf8(szColumnName, cbColumnName, &nmlen4, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_ProcedureColumns( - hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName, - (SQLSMALLINT)nmlen2, (SQLCHAR *)prName, (SQLSMALLINT)nmlen3, - (SQLCHAR *)clName, (SQLSMALLINT)nmlen4, flag); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (prName) - free(prName); - if (clName) - free(clName); - return ret; -} - -RETCODE SQL_API SQLProceduresW(HSTMT hstmt, SQLWCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - SQLWCHAR *szSchemaName, SQLSMALLINT cbSchemaName, - SQLWCHAR *szProcName, SQLSMALLINT cbProcName) { - CSTR func = "SQLProceduresW"; - RETCODE ret; - char *ctName, *scName, *prName; - SQLLEN nmlen1, nmlen2, nmlen3; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); - prName = ucs2_to_utf8(szProcName, cbProcName, &nmlen3, lower_id); - ENTER_STMT_CS(stmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_Procedures(hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, - (SQLCHAR *)scName, (SQLSMALLINT)nmlen2, - (SQLCHAR *)prName, (SQLSMALLINT)nmlen3, flag); - LEAVE_STMT_CS(stmt); - if (ctName) - free(ctName); - if (scName) - free(scName); - if (prName) - free(prName); - return ret; -} - -RETCODE SQL_API SQLTablePrivilegesW(HSTMT hstmt, SQLWCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - SQLWCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, - SQLWCHAR *szTableName, - SQLSMALLINT cbTableName) { - CSTR func = "SQLTablePrivilegesW"; - RETCODE ret; - char *ctName, *scName, *tbName; - SQLLEN nmlen1, nmlen2, nmlen3; - StatementClass *stmt = (StatementClass *)hstmt; - BOOL lower_id; - UWORD flag = 0; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (SC_connection_lost_check(stmt, __FUNCTION__)) - return SQL_ERROR; - - lower_id = DEFAULT_LOWERCASEIDENTIFIER; - ctName = ucs2_to_utf8(szCatalogName, cbCatalogName, &nmlen1, lower_id); - scName = ucs2_to_utf8(szSchemaName, cbSchemaName, &nmlen2, lower_id); - tbName = ucs2_to_utf8(szTableName, cbTableName, &nmlen3, lower_id); - ENTER_STMT_CS((StatementClass *)hstmt); - SC_clear_error(stmt); - if (stmt->options.metadata_id) - flag |= PODBC_NOT_SEARCH_PATTERN; - if (SC_opencheck(stmt, func)) - ret = SQL_ERROR; - else - ret = OPENSEARCHAPI_TablePrivileges( - 
            hstmt, (SQLCHAR *)ctName, (SQLSMALLINT)nmlen1, (SQLCHAR *)scName,
-            (SQLSMALLINT)nmlen2, (SQLCHAR *)tbName, (SQLSMALLINT)nmlen3, flag);
-    LEAVE_STMT_CS((StatementClass *)hstmt);
-    if (ctName)
-        free(ctName);
-    if (scName)
-        free(scName);
-    if (tbName)
-        free(tbName);
-    return ret;
-}
-
-RETCODE SQL_API SQLGetTypeInfoW(SQLHSTMT StatementHandle,
-                                SQLSMALLINT DataType) {
-    CSTR func = "SQLGetTypeInfoW";
-    RETCODE ret;
-    StatementClass *stmt = (StatementClass *)StatementHandle;
-
-    MYLOG(OPENSEARCH_TRACE, "entering\n");
-    if (SC_connection_lost_check(stmt, __FUNCTION__))
-        return SQL_ERROR;
-
-    ENTER_STMT_CS(stmt);
-    SC_clear_error(stmt);
-    if (SC_opencheck(stmt, func))
-        ret = SQL_ERROR;
-    else
-        ret = OPENSEARCHAPI_GetTypeInfo(StatementHandle, DataType);
-    LEAVE_STMT_CS(stmt);
-    return ret;
-}
-
-/* ODBC 2.x-specific functions */
-// TODO (#590): Add implementations for remaining ODBC 2.x functions
-
-SQLRETURN SQL_API SQLColAttributesW(SQLHSTMT hstmt, SQLUSMALLINT iCol,
-                                    SQLUSMALLINT iField, SQLPOINTER pCharAttr,
-                                    SQLSMALLINT cbCharAttrMax,
-                                    SQLSMALLINT *pcbCharAttr,
-#if defined(_WIN64) || defined(_WIN32) || defined(SQLCOLATTRIBUTE_SQLLEN)
-                                    SQLLEN *pNumAttr
-#else
-                                    SQLPOINTER pNumAttr
-#endif
-) {
-    CSTR func = "SQLColAttributesW";
-    RETCODE ret;
-    StatementClass *stmt = (StatementClass *)hstmt;
-    SQLSMALLINT *rgbL, blen = 0, bMax;
-    char *rgbD = NULL, *rgbDt;
-
-    MYLOG(OPENSEARCH_TRACE, "entering\n");
-    if (SC_connection_lost_check(stmt, __FUNCTION__))
-        return SQL_ERROR;
-
-    ENTER_STMT_CS(stmt);
-    SC_clear_error(stmt);
-    switch (iField) {
-        case SQL_DESC_BASE_COLUMN_NAME:
-        case SQL_DESC_BASE_TABLE_NAME:
-        case SQL_DESC_CATALOG_NAME:
-        case SQL_DESC_LABEL:
-        case SQL_DESC_LITERAL_PREFIX:
-        case SQL_DESC_LITERAL_SUFFIX:
-        case SQL_DESC_LOCAL_TYPE_NAME:
-        case SQL_DESC_NAME:
-        case SQL_DESC_SCHEMA_NAME:
-        case SQL_DESC_TABLE_NAME:
-        case SQL_DESC_TYPE_NAME:
-        case SQL_COLUMN_NAME:
-            bMax = cbCharAttrMax * 3 / WCLEN;
-            rgbD = malloc(bMax);
-            rgbL = &blen;
-            for (rgbDt = rgbD;; bMax = blen + 1, rgbDt = realloc(rgbD, bMax)) {
-                if (!rgbDt) {
-                    ret = SQL_ERROR;
-                    break;
-                }
-                rgbD = rgbDt;
-                ret = OPENSEARCHAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL,
-                                                  pNumAttr);
-                if (SQL_SUCCESS_WITH_INFO != ret || blen < bMax)
-                    break;
-            }
-            if (SQL_SUCCEEDED(ret)) {
-                blen = (SQLSMALLINT)utf8_to_ucs2(
-                    rgbD, blen, (SQLWCHAR *)pCharAttr, cbCharAttrMax / WCLEN);
-                if (SQL_SUCCESS == ret
-                    && blen * WCLEN >= (unsigned long)cbCharAttrMax) {
-                    ret = SQL_SUCCESS_WITH_INFO;
-                    SC_set_error(stmt, STMT_TRUNCATED,
-                                 "The buffer was too small for the pCharAttr.",
-                                 func);
-                }
-                if (pcbCharAttr)
-                    *pcbCharAttr = blen * WCLEN;
-            }
-            if (rgbD)
-                free(rgbD);
-            break;
-        default:
-            rgbD = pCharAttr;
-            bMax = cbCharAttrMax;
-            rgbL = pcbCharAttr;
-            ret = OPENSEARCHAPI_ColAttributes(hstmt, iCol, iField, rgbD, bMax, rgbL,
-                                              pNumAttr);
-            break;
-    }
-    LEAVE_STMT_CS(stmt);
-
-    return ret;
-}
-
-RETCODE SQL_API SQLGetConnectOptionW(HDBC ConnectionHandle, SQLUSMALLINT Option,
-                                     PTR Value) {
-    ConnectionClass *conn = (ConnectionClass *)ConnectionHandle;
-    RETCODE ret;
-
-    ENTER_CONN_CS(conn);
-    CC_clear_error(conn);
-    MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_UINTEGER "\n", Option);
-    ret = OPENSEARCHAPI_GetConnectOption(ConnectionHandle, Option, Value, NULL, 0);
-    LEAVE_CONN_CS(conn);
-    return ret;
-}
-
-RETCODE SQL_API SQLSetConnectOptionW(HDBC ConnectionHandle, SQLUSMALLINT Option,
-                                     SQLULEN Value) {
-    ConnectionClass *conn = (ConnectionClass *)ConnectionHandle;
-    RETCODE ret;
-
-    MYLOG(OPENSEARCH_TRACE,
"entering " FORMAT_INTEGER "\n", Option); - ENTER_CONN_CS(conn); - CC_clear_error(conn); - ret = OPENSEARCHAPI_SetConnectOption(ConnectionHandle, Option, Value); - LEAVE_CONN_CS(conn); - return ret; -} - -RETCODE SQL_API SQLErrorW(SQLHENV EnvironmentHandle, SQLHDBC ConnectionHandle, - SQLHSTMT StatementHandle, SQLWCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLWCHAR *MessageText, - SQLSMALLINT BufferLength, SQLSMALLINT *TextLength) { - RETCODE ret; - SQLSMALLINT buflen; - SQLSMALLINT tlen = 0; - SQLSMALLINT RecNumber = 1; - char qstr_ansi[8], *mtxt = NULL; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - buflen = 0; - if (MessageText && BufferLength > 0) { - buflen = BufferLength; - mtxt = malloc(buflen); - } - - if (StatementHandle) { - ret = OPENSEARCHAPI_StmtError(StatementHandle, RecNumber, (SQLCHAR *)qstr_ansi, - NativeError, (SQLCHAR *)mtxt, buflen, &tlen, 0); - } else if (ConnectionHandle) { - ret = OPENSEARCHAPI_ConnectError(ConnectionHandle, RecNumber, - (SQLCHAR *)qstr_ansi, NativeError, - (SQLCHAR *)mtxt, buflen, &tlen, 0); - } else if (EnvironmentHandle) { - ret = OPENSEARCHAPI_EnvError(EnvironmentHandle, RecNumber, (SQLCHAR *)qstr_ansi, - NativeError, (SQLCHAR *)mtxt, buflen, &tlen, 0); - } else { - ret = SQL_ERROR; - } - - if (SQL_SUCCEEDED(ret)) { - if (Sqlstate) - utf8_to_ucs2(qstr_ansi, -1, Sqlstate, 6); - if (mtxt && tlen <= BufferLength) { - // TODO (#612): Verify wide character conversion - SQLULEN ulen = utf8_to_ucs2_lf(mtxt, tlen, FALSE, MessageText, - BufferLength, TRUE); - if (ulen == (SQLULEN)-1) - tlen = (SQLSMALLINT)locale_to_sqlwchar( - (SQLWCHAR *)MessageText, mtxt, BufferLength, FALSE); - else - tlen = (SQLSMALLINT)ulen; - if (tlen >= BufferLength) - ret = SQL_SUCCESS_WITH_INFO; - else if (tlen < 0) { - char errc[32]; - - SPRINTF_FIXED(errc, "Error: SqlState=%s", qstr_ansi); - tlen = (SQLSMALLINT)utf8_to_ucs2(errc, -1, MessageText, - BufferLength); - } - } - if (TextLength) - *TextLength = tlen; - } - if (mtxt) - free(mtxt); - return ret; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_api30.c b/sql-odbc/src/sqlodbc/opensearch_api30.c deleted file mode 100644 index 25baeb7915..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_api30.c +++ /dev/null @@ -1,1842 +0,0 @@ -#include -#include - -#include "descriptor.h" -#include "dlg_specific.h" -#include "environ.h" -#include "opensearch_odbc.h" -#include "loadlib.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "qresult.h" -#include "statement.h" - -/* SQLError -> SQLDiagRec */ -RETCODE SQL_API OPENSEARCHAPI_GetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLCHAR *MessageText, - SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength) { - RETCODE ret; - - MYLOG(OPENSEARCH_TRACE, "entering type=%d rec=%d\n", HandleType, RecNumber); - switch (HandleType) { - case SQL_HANDLE_ENV: - ret = OPENSEARCHAPI_EnvError(Handle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - break; - case SQL_HANDLE_DBC: - ret = OPENSEARCHAPI_ConnectError(Handle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - break; - case SQL_HANDLE_STMT: - ret = OPENSEARCHAPI_StmtError(Handle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - break; - case SQL_HANDLE_DESC: - ret = OPENSEARCHAPI_DescError(Handle, RecNumber, Sqlstate, NativeError, - MessageText, BufferLength, TextLength, 0); - break; - default: - ret = SQL_ERROR; - } - 
MYLOG(OPENSEARCH_TRACE, "leaving %d\n", ret); - return ret; -} - -/* - * Minimal implementation. - * - */ -RETCODE SQL_API OPENSEARCHAPI_GetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, - SQLSMALLINT DiagIdentifier, PTR DiagInfoPtr, - SQLSMALLINT BufferLength, - SQLSMALLINT *StringLengthPtr) { - RETCODE ret = SQL_ERROR, rtn; - ConnectionClass *conn; - StatementClass *stmt; - SQLLEN rc; - SQLSMALLINT pcbErrm; - ssize_t rtnlen = -1; - int rtnctype = SQL_C_CHAR; - - MYLOG(OPENSEARCH_TRACE, "entering rec=%d\n", RecNumber); - switch (HandleType) { - case SQL_HANDLE_ENV: - switch (DiagIdentifier) { - case SQL_DIAG_CLASS_ORIGIN: - case SQL_DIAG_SUBCLASS_ORIGIN: - case SQL_DIAG_CONNECTION_NAME: - case SQL_DIAG_SERVER_NAME: - rtnlen = 0; - if (DiagInfoPtr && BufferLength > rtnlen) { - ret = SQL_SUCCESS; - *((char *)DiagInfoPtr) = '\0'; - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_MESSAGE_TEXT: - ret = OPENSEARCHAPI_EnvError(Handle, RecNumber, NULL, NULL, - DiagInfoPtr, BufferLength, - StringLengthPtr, 0); - break; - case SQL_DIAG_NATIVE: - rtnctype = SQL_C_LONG; - ret = OPENSEARCHAPI_EnvError(Handle, RecNumber, NULL, - (SQLINTEGER *)DiagInfoPtr, NULL, 0, - NULL, 0); - break; - case SQL_DIAG_NUMBER: - rtnctype = SQL_C_LONG; - ret = OPENSEARCHAPI_EnvError(Handle, RecNumber, NULL, NULL, NULL, 0, - NULL, 0); - if (SQL_SUCCEEDED(ret)) { - *((SQLINTEGER *)DiagInfoPtr) = 1; - } - break; - case SQL_DIAG_SQLSTATE: - rtnlen = 5; - ret = OPENSEARCHAPI_EnvError(Handle, RecNumber, DiagInfoPtr, NULL, - NULL, 0, NULL, 0); - if (SQL_SUCCESS_WITH_INFO == ret) - ret = SQL_SUCCESS; - break; - case SQL_DIAG_RETURNCODE: /* driver manager returns */ - break; - case SQL_DIAG_CURSOR_ROW_COUNT: - case SQL_DIAG_ROW_COUNT: - case SQL_DIAG_DYNAMIC_FUNCTION: - case SQL_DIAG_DYNAMIC_FUNCTION_CODE: - /* options for statement type only */ - break; - } - break; - case SQL_HANDLE_DBC: - conn = (ConnectionClass *)Handle; - switch (DiagIdentifier) { - case SQL_DIAG_CLASS_ORIGIN: - case SQL_DIAG_SUBCLASS_ORIGIN: - case SQL_DIAG_CONNECTION_NAME: - rtnlen = 0; - if (DiagInfoPtr && BufferLength > rtnlen) { - ret = SQL_SUCCESS; - *((char *)DiagInfoPtr) = '\0'; - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_SERVER_NAME: - rtnlen = strlen(CC_get_DSN(conn)); - if (DiagInfoPtr) { - strncpy_null(DiagInfoPtr, CC_get_DSN(conn), - BufferLength); - ret = (BufferLength > rtnlen ? 
SQL_SUCCESS - : SQL_SUCCESS_WITH_INFO); - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_MESSAGE_TEXT: - ret = OPENSEARCHAPI_ConnectError(Handle, RecNumber, NULL, NULL, - DiagInfoPtr, BufferLength, - StringLengthPtr, 0); - break; - case SQL_DIAG_NATIVE: - rtnctype = SQL_C_LONG; - ret = OPENSEARCHAPI_ConnectError(Handle, RecNumber, NULL, - (SQLINTEGER *)DiagInfoPtr, NULL, 0, - NULL, 0); - break; - case SQL_DIAG_NUMBER: - rtnctype = SQL_C_LONG; - ret = OPENSEARCHAPI_ConnectError(Handle, RecNumber, NULL, NULL, - NULL, 0, NULL, 0); - if (SQL_SUCCEEDED(ret)) { - *((SQLINTEGER *)DiagInfoPtr) = 1; - } - break; - case SQL_DIAG_SQLSTATE: - rtnlen = 5; - ret = OPENSEARCHAPI_ConnectError(Handle, RecNumber, DiagInfoPtr, - NULL, NULL, 0, NULL, 0); - if (SQL_SUCCESS_WITH_INFO == ret) - ret = SQL_SUCCESS; - break; - case SQL_DIAG_RETURNCODE: /* driver manager returns */ - break; - case SQL_DIAG_CURSOR_ROW_COUNT: - case SQL_DIAG_ROW_COUNT: - case SQL_DIAG_DYNAMIC_FUNCTION: - case SQL_DIAG_DYNAMIC_FUNCTION_CODE: - /* options for statement type only */ - break; - } - break; - case SQL_HANDLE_STMT: - conn = (ConnectionClass *)SC_get_conn(((StatementClass *)Handle)); - switch (DiagIdentifier) { - case SQL_DIAG_CLASS_ORIGIN: - case SQL_DIAG_SUBCLASS_ORIGIN: - case SQL_DIAG_CONNECTION_NAME: - rtnlen = 0; - if (DiagInfoPtr && BufferLength > rtnlen) { - ret = SQL_SUCCESS; - *((char *)DiagInfoPtr) = '\0'; - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_SERVER_NAME: - rtnlen = strlen(CC_get_DSN(conn)); - if (DiagInfoPtr) { - strncpy_null(DiagInfoPtr, CC_get_DSN(conn), - BufferLength); - ret = (BufferLength > rtnlen ? SQL_SUCCESS - : SQL_SUCCESS_WITH_INFO); - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_MESSAGE_TEXT: - ret = OPENSEARCHAPI_StmtError(Handle, RecNumber, NULL, NULL, - DiagInfoPtr, BufferLength, - StringLengthPtr, 0); - break; - case SQL_DIAG_NATIVE: - rtnctype = SQL_C_LONG; - ret = OPENSEARCHAPI_StmtError(Handle, RecNumber, NULL, - (SQLINTEGER *)DiagInfoPtr, NULL, 0, - NULL, 0); - break; - case SQL_DIAG_NUMBER: - rtnctype = SQL_C_LONG; - *((SQLINTEGER *)DiagInfoPtr) = 0; - ret = SQL_NO_DATA_FOUND; - stmt = (StatementClass *)Handle; - rtn = OPENSEARCHAPI_StmtError(Handle, -1, NULL, NULL, NULL, 0, - &pcbErrm, 0); - switch (rtn) { - case SQL_SUCCESS: - case SQL_SUCCESS_WITH_INFO: - ret = SQL_SUCCESS; - if (pcbErrm > 0 && stmt->opensearch_error) - - *((SQLINTEGER *)DiagInfoPtr) = - (pcbErrm - 1) / stmt->opensearch_error->recsize + 1; - break; - default: - break; - } - break; - case SQL_DIAG_SQLSTATE: - rtnlen = 5; - ret = OPENSEARCHAPI_StmtError(Handle, RecNumber, DiagInfoPtr, NULL, - NULL, 0, NULL, 0); - if (SQL_SUCCESS_WITH_INFO == ret) - ret = SQL_SUCCESS; - break; - case SQL_DIAG_CURSOR_ROW_COUNT: - rtnctype = SQL_C_LONG; - stmt = (StatementClass *)Handle; - rc = -1; - if (stmt->status == STMT_FINISHED) { - QResultClass *res = SC_get_Curres(stmt); - - /*if (!res) - return SQL_ERROR;*/ - if (stmt->proc_return > 0) - rc = 0; - else if (res && QR_NumResultCols(res) > 0 - && !SC_is_fetchcursor(stmt)) - rc = QR_get_num_total_tuples(res) - res->dl_count; - } - *((SQLLEN *)DiagInfoPtr) = rc; - MYLOG(OPENSEARCH_ALL, "rc=" FORMAT_LEN "\n", rc); - ret = SQL_SUCCESS; - break; - case SQL_DIAG_ROW_COUNT: - rtnctype = SQL_C_LONG; - stmt = (StatementClass *)Handle; - *((SQLLEN *)DiagInfoPtr) = stmt->diag_row_count; - ret = SQL_SUCCESS; - break; - case SQL_DIAG_ROW_NUMBER: - rtnctype = SQL_C_LONG; - *((SQLLEN *)DiagInfoPtr) = SQL_ROW_NUMBER_UNKNOWN; - ret = 
SQL_SUCCESS; - break; - case SQL_DIAG_COLUMN_NUMBER: - rtnctype = SQL_C_LONG; - *((SQLINTEGER *)DiagInfoPtr) = SQL_COLUMN_NUMBER_UNKNOWN; - ret = SQL_SUCCESS; - break; - case SQL_DIAG_RETURNCODE: /* driver manager returns */ - break; - } - break; - case SQL_HANDLE_DESC: - conn = DC_get_conn(((DescriptorClass *)Handle)); - switch (DiagIdentifier) { - case SQL_DIAG_CLASS_ORIGIN: - case SQL_DIAG_SUBCLASS_ORIGIN: - case SQL_DIAG_CONNECTION_NAME: - rtnlen = 0; - if (DiagInfoPtr && BufferLength > rtnlen) { - ret = SQL_SUCCESS; - *((char *)DiagInfoPtr) = '\0'; - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_SERVER_NAME: - rtnlen = strlen(CC_get_DSN(conn)); - if (DiagInfoPtr) { - strncpy_null(DiagInfoPtr, CC_get_DSN(conn), - BufferLength); - ret = (BufferLength > rtnlen ? SQL_SUCCESS - : SQL_SUCCESS_WITH_INFO); - } else - ret = SQL_SUCCESS_WITH_INFO; - break; - case SQL_DIAG_MESSAGE_TEXT: - case SQL_DIAG_NATIVE: - case SQL_DIAG_NUMBER: - break; - case SQL_DIAG_SQLSTATE: - rtnlen = 5; - ret = OPENSEARCHAPI_DescError(Handle, RecNumber, DiagInfoPtr, NULL, - NULL, 0, NULL, 0); - if (SQL_SUCCESS_WITH_INFO == ret) - ret = SQL_SUCCESS; - break; - case SQL_DIAG_RETURNCODE: /* driver manager returns */ - break; - case SQL_DIAG_CURSOR_ROW_COUNT: - case SQL_DIAG_ROW_COUNT: - case SQL_DIAG_DYNAMIC_FUNCTION: - case SQL_DIAG_DYNAMIC_FUNCTION_CODE: - rtnctype = SQL_C_LONG; - /* options for statement type only */ - break; - } - break; - default: - ret = SQL_ERROR; - } - if (SQL_C_LONG == rtnctype) { - if (SQL_SUCCESS_WITH_INFO == ret) - ret = SQL_SUCCESS; - if (StringLengthPtr) - *StringLengthPtr = sizeof(SQLINTEGER); - } else if (rtnlen >= 0) { - if (rtnlen >= BufferLength) { - if (SQL_SUCCESS == ret) - ret = SQL_SUCCESS_WITH_INFO; - if (BufferLength > 0) - ((char *)DiagInfoPtr)[BufferLength - 1] = '\0'; - } - if (StringLengthPtr) - *StringLengthPtr = (SQLSMALLINT)rtnlen; - } - MYLOG(OPENSEARCH_TRACE, "leaving %d\n", ret); - return ret; -} - -/* SQLGetConnectOption -> SQLGetconnectAttr */ -RETCODE SQL_API OPENSEARCHAPI_GetConnectAttr(HDBC ConnectionHandle, - SQLINTEGER Attribute, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - RETCODE ret = SQL_SUCCESS; - SQLINTEGER len = 4; - - MYLOG(OPENSEARCH_TRACE, "entering " FORMAT_INTEGER "\n", Attribute); - switch (Attribute) { - case SQL_ATTR_ASYNC_ENABLE: - *((SQLINTEGER *)Value) = SQL_ASYNC_ENABLE_OFF; - break; - case SQL_ATTR_AUTO_IPD: - *((SQLINTEGER *)Value) = SQL_FALSE; - break; - case SQL_ATTR_CONNECTION_DEAD: - *((SQLUINTEGER *)Value) = CC_not_connected(conn); - break; - case SQL_ATTR_CONNECTION_TIMEOUT: - *((SQLUINTEGER *)Value) = 0; - break; - case SQL_ATTR_METADATA_ID: - *((SQLUINTEGER *)Value) = conn->stmtOptions.metadata_id; - break; - case SQL_ATTR_ESOPT_DEBUG: - *((SQLINTEGER *)Value) = conn->connInfo.drivers.loglevel; - break; - case SQL_ATTR_ESOPT_COMMLOG: - *((SQLINTEGER *)Value) = conn->connInfo.drivers.loglevel; - break; - default: - ret = OPENSEARCHAPI_GetConnectOption(ConnectionHandle, (UWORD)Attribute, - Value, &len, BufferLength); - } - if (StringLength) - *StringLength = len; - return ret; -} - -static SQLHDESC descHandleFromStatementHandle(HSTMT StatementHandle, - SQLINTEGER descType) { - StatementClass *stmt = (StatementClass *)StatementHandle; - - switch (descType) { - case SQL_ATTR_APP_ROW_DESC: /* 10010 */ - return (HSTMT)stmt->ard; - case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ - return (HSTMT)stmt->apd; - case SQL_ATTR_IMP_ROW_DESC: /* 10012 
*/ - return (HSTMT)stmt->ird; - case SQL_ATTR_IMP_PARAM_DESC: /* 10013 */ - return (HSTMT)stmt->ipd; - } - return (HSTMT)0; -} - -static void column_bindings_set(ARDFields *opts, SQLSMALLINT cols, - BOOL maxset) { - int i; - - if (cols == opts->allocated) - return; - if (cols > opts->allocated) { - extend_column_bindings(opts, cols); - return; - } - if (maxset) - return; - - for (i = opts->allocated; i > cols; i--) - reset_a_column_binding(opts, i); - opts->allocated = cols; - if (0 == cols) { - free(opts->bindings); - opts->bindings = NULL; - } -} - -static RETCODE SQL_API ARDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength) { - UNUSED(BufferLength); - RETCODE ret = SQL_SUCCESS; - ARDFields *opts = &(desc->ardf); - SQLSMALLINT row_idx; - BOOL unbind = TRUE; - - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - opts->size_of_rowset = CAST_UPTR(SQLULEN, Value); - return ret; - case SQL_DESC_ARRAY_STATUS_PTR: - opts->row_operation_ptr = Value; - return ret; - case SQL_DESC_BIND_OFFSET_PTR: - opts->row_offset_ptr = Value; - return ret; - case SQL_DESC_BIND_TYPE: - opts->bind_size = CAST_UPTR(SQLUINTEGER, Value); - return ret; - case SQL_DESC_COUNT: - column_bindings_set(opts, CAST_PTR(SQLSMALLINT, Value), FALSE); - return ret; - - case SQL_DESC_TYPE: - case SQL_DESC_DATETIME_INTERVAL_CODE: - case SQL_DESC_CONCISE_TYPE: - column_bindings_set(opts, RecNumber, TRUE); - break; - } - if (RecNumber < 0 || RecNumber > opts->allocated) { - DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, - "invalid column number"); - return SQL_ERROR; - } - if (0 == RecNumber) /* bookmark column */ - { - BindInfoClass *bookmark = ARD_AllocBookmark(opts); - - switch (FieldIdentifier) { - case SQL_DESC_DATA_PTR: - bookmark->buffer = Value; - break; - case SQL_DESC_INDICATOR_PTR: - bookmark->indicator = Value; - break; - case SQL_DESC_OCTET_LENGTH_PTR: - bookmark->used = Value; - break; - default: - DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, - "invalid column number"); - ret = SQL_ERROR; - } - return ret; - } - row_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_TYPE: - opts->bindings[row_idx].returntype = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - switch (opts->bindings[row_idx].returntype) { - case SQL_DATETIME: - case SQL_C_TYPE_DATE: - case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: - switch ((LONG_PTR)Value) { - case SQL_CODE_DATE: - opts->bindings[row_idx].returntype = - SQL_C_TYPE_DATE; - break; - case SQL_CODE_TIME: - opts->bindings[row_idx].returntype = - SQL_C_TYPE_TIME; - break; - case SQL_CODE_TIMESTAMP: - opts->bindings[row_idx].returntype = - SQL_C_TYPE_TIMESTAMP; - break; - } - break; - } - break; - case SQL_DESC_CONCISE_TYPE: - opts->bindings[row_idx].returntype = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_DATA_PTR: - unbind = FALSE; - opts->bindings[row_idx].buffer = Value; - break; - case SQL_DESC_INDICATOR_PTR: - unbind = FALSE; - opts->bindings[row_idx].indicator = Value; - break; - case SQL_DESC_OCTET_LENGTH_PTR: - unbind = FALSE; - opts->bindings[row_idx].used = Value; - break; - case SQL_DESC_OCTET_LENGTH: - opts->bindings[row_idx].buflen = CAST_PTR(SQLLEN, Value); - break; - case SQL_DESC_PRECISION: - opts->bindings[row_idx].precision = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_SCALE: - opts->bindings[row_idx].scale = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - case 
SQL_DESC_DATETIME_INTERVAL_PRECISION: - case SQL_DESC_LENGTH: - case SQL_DESC_NUM_PREC_RADIX: - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - if (unbind) - opts->bindings[row_idx].buffer = NULL; - return ret; -} - -static void parameter_bindings_set(APDFields *opts, SQLSMALLINT params, - BOOL maxset) { - int i; - - if (params == opts->allocated) - return; - if (params > opts->allocated) { - extend_parameter_bindings(opts, params); - return; - } - if (maxset) - return; - - for (i = opts->allocated; i > params; i--) - reset_a_parameter_binding(opts, i); - opts->allocated = params; - if (0 == params) { - free(opts->parameters); - opts->parameters = NULL; - } -} - -static void parameter_ibindings_set(IPDFields *opts, SQLSMALLINT params, - BOOL maxset) { - int i; - - if (params == opts->allocated) - return; - if (params > opts->allocated) { - extend_iparameter_bindings(opts, params); - return; - } - if (maxset) - return; - - for (i = opts->allocated; i > params; i--) - reset_a_iparameter_binding(opts, i); - opts->allocated = params; - if (0 == params) { - free(opts->parameters); - opts->parameters = NULL; - } -} - -static RETCODE SQL_API APDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength) { - UNUSED(BufferLength); - RETCODE ret = SQL_SUCCESS; - APDFields *opts = &(desc->apdf); - SQLSMALLINT para_idx; - BOOL unbind = TRUE; - - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - opts->paramset_size = CAST_UPTR(SQLUINTEGER, Value); - return ret; - case SQL_DESC_ARRAY_STATUS_PTR: - opts->param_operation_ptr = Value; - return ret; - case SQL_DESC_BIND_OFFSET_PTR: - opts->param_offset_ptr = Value; - return ret; - case SQL_DESC_BIND_TYPE: - opts->param_bind_type = CAST_UPTR(SQLUINTEGER, Value); - return ret; - case SQL_DESC_COUNT: - parameter_bindings_set(opts, CAST_PTR(SQLSMALLINT, Value), FALSE); - return ret; - - case SQL_DESC_TYPE: - case SQL_DESC_DATETIME_INTERVAL_CODE: - case SQL_DESC_CONCISE_TYPE: - parameter_bindings_set(opts, RecNumber, TRUE); - break; - } - if (RecNumber <= 0) { - MYLOG(OPENSEARCH_ALL, "RecN=%d allocated=%d\n", RecNumber, opts->allocated); - DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, - "bad parameter number"); - return SQL_ERROR; - } - if (RecNumber > opts->allocated) { - MYLOG(OPENSEARCH_ALL, "RecN=%d allocated=%d\n", RecNumber, opts->allocated); - parameter_bindings_set(opts, RecNumber, TRUE); - /* DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, - "bad parameter number"); - return SQL_ERROR;*/ - } - para_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_TYPE: - opts->parameters[para_idx].CType = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - switch (opts->parameters[para_idx].CType) { - case SQL_DATETIME: - case SQL_C_TYPE_DATE: - case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: - switch ((LONG_PTR)Value) { - case SQL_CODE_DATE: - opts->parameters[para_idx].CType = SQL_C_TYPE_DATE; - break; - case SQL_CODE_TIME: - opts->parameters[para_idx].CType = SQL_C_TYPE_TIME; - break; - case SQL_CODE_TIMESTAMP: - opts->parameters[para_idx].CType = - SQL_C_TYPE_TIMESTAMP; - break; - } - break; - } - break; - case SQL_DESC_CONCISE_TYPE: - opts->parameters[para_idx].CType = CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_DATA_PTR: - unbind = FALSE; - opts->parameters[para_idx].buffer = Value; - break; - case SQL_DESC_INDICATOR_PTR: - unbind = FALSE; - 
            opts->parameters[para_idx].indicator = Value;
-            break;
-        case SQL_DESC_OCTET_LENGTH:
-            opts->parameters[para_idx].buflen = CAST_PTR(Int4, Value);
-            break;
-        case SQL_DESC_OCTET_LENGTH_PTR:
-            unbind = FALSE;
-            opts->parameters[para_idx].used = Value;
-            break;
-        case SQL_DESC_PRECISION:
-            opts->parameters[para_idx].precision = CAST_PTR(SQLSMALLINT, Value);
-            break;
-        case SQL_DESC_SCALE:
-            opts->parameters[para_idx].scale = CAST_PTR(SQLSMALLINT, Value);
-            break;
-        case SQL_DESC_ALLOC_TYPE: /* read-only */
-        case SQL_DESC_DATETIME_INTERVAL_PRECISION:
-        case SQL_DESC_LENGTH:
-        case SQL_DESC_NUM_PREC_RADIX:
-        default:
-            ret = SQL_ERROR;
-            DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER,
-                         "invalid descriptor identifier");
-    }
-    if (unbind)
-        opts->parameters[para_idx].buffer = NULL;
-
-    return ret;
-}
-
-static RETCODE SQL_API IRDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber,
-                                   SQLSMALLINT FieldIdentifier, PTR Value,
-                                   SQLINTEGER BufferLength) {
-    UNUSED(BufferLength, RecNumber);
-    RETCODE ret = SQL_SUCCESS;
-    IRDFields *opts = &(desc->irdf);
-
-    switch (FieldIdentifier) {
-        case SQL_DESC_ARRAY_STATUS_PTR:
-            opts->rowStatusArray = (SQLUSMALLINT *)Value;
-            break;
-        case SQL_DESC_ROWS_PROCESSED_PTR:
-            opts->rowsFetched = (SQLULEN *)Value;
-            break;
-        case SQL_DESC_ALLOC_TYPE: /* read-only */
-        case SQL_DESC_COUNT: /* read-only */
-        case SQL_DESC_AUTO_UNIQUE_VALUE: /* read-only */
-        case SQL_DESC_BASE_COLUMN_NAME: /* read-only */
-        case SQL_DESC_BASE_TABLE_NAME: /* read-only */
-        case SQL_DESC_CASE_SENSITIVE: /* read-only */
-        case SQL_DESC_CATALOG_NAME: /* read-only */
-        case SQL_DESC_CONCISE_TYPE: /* read-only */
-        case SQL_DESC_DATETIME_INTERVAL_CODE: /* read-only */
-        case SQL_DESC_DATETIME_INTERVAL_PRECISION: /* read-only */
-        case SQL_DESC_DISPLAY_SIZE: /* read-only */
-        case SQL_DESC_FIXED_PREC_SCALE: /* read-only */
-        case SQL_DESC_LABEL: /* read-only */
-        case SQL_DESC_LENGTH: /* read-only */
-        case SQL_DESC_LITERAL_PREFIX: /* read-only */
-        case SQL_DESC_LITERAL_SUFFIX: /* read-only */
-        case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */
-        case SQL_DESC_NAME: /* read-only */
-        case SQL_DESC_NULLABLE: /* read-only */
-        case SQL_DESC_NUM_PREC_RADIX: /* read-only */
-        case SQL_DESC_OCTET_LENGTH: /* read-only */
-        case SQL_DESC_PRECISION: /* read-only */
-        case SQL_DESC_ROWVER: /* read-only */
-        case SQL_DESC_SCALE: /* read-only */
-        case SQL_DESC_SCHEMA_NAME: /* read-only */
-        case SQL_DESC_SEARCHABLE: /* read-only */
-        case SQL_DESC_TABLE_NAME: /* read-only */
-        case SQL_DESC_TYPE: /* read-only */
-        case SQL_DESC_TYPE_NAME: /* read-only */
-        case SQL_DESC_UNNAMED: /* read-only */
-        case SQL_DESC_UNSIGNED: /* read-only */
-        case SQL_DESC_UPDATABLE: /* read-only */
-        default:
-            ret = SQL_ERROR;
-            DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER,
-                         "invalid descriptor identifier");
-    }
-    return ret;
-}
-
-static RETCODE SQL_API IPDSetField(DescriptorClass *desc, SQLSMALLINT RecNumber,
-                                   SQLSMALLINT FieldIdentifier, PTR Value,
-                                   SQLINTEGER BufferLength) {
-    UNUSED(BufferLength);
-    RETCODE ret = SQL_SUCCESS;
-    IPDFields *ipdopts = &(desc->ipdf);
-    SQLSMALLINT para_idx;
-
-    switch (FieldIdentifier) {
-        case SQL_DESC_ARRAY_STATUS_PTR:
-            ipdopts->param_status_ptr = (SQLUSMALLINT *)Value;
-            return ret;
-        case SQL_DESC_ROWS_PROCESSED_PTR:
-            ipdopts->param_processed_ptr = (SQLULEN *)Value;
-            return ret;
-        case SQL_DESC_COUNT:
-            parameter_ibindings_set(ipdopts, CAST_PTR(SQLSMALLINT, Value),
-                                    FALSE);
-            return ret;
-        case SQL_DESC_UNNAMED: /* only SQL_UNNAMED is allowed */
-            if
(SQL_UNNAMED != CAST_PTR(SQLSMALLINT, Value)) { - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - return ret; - } - case SQL_DESC_NAME: - case SQL_DESC_TYPE: - case SQL_DESC_DATETIME_INTERVAL_CODE: - case SQL_DESC_CONCISE_TYPE: - parameter_ibindings_set(ipdopts, RecNumber, TRUE); - break; - } - if (RecNumber <= 0 || RecNumber > ipdopts->allocated) { - MYLOG(OPENSEARCH_ALL, "RecN=%d allocated=%d\n", RecNumber, ipdopts->allocated); - DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, - "bad parameter number"); - return SQL_ERROR; - } - para_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_TYPE: - if (ipdopts->parameters[para_idx].SQLType - != CAST_PTR(SQLSMALLINT, Value)) { - reset_a_iparameter_binding(ipdopts, RecNumber); - ipdopts->parameters[para_idx].SQLType = - CAST_PTR(SQLSMALLINT, Value); - } - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - switch (ipdopts->parameters[para_idx].SQLType) { - case SQL_DATETIME: - case SQL_TYPE_DATE: - case SQL_TYPE_TIME: - case SQL_TYPE_TIMESTAMP: - switch ((LONG_PTR)Value) { - case SQL_CODE_DATE: - ipdopts->parameters[para_idx].SQLType = - SQL_TYPE_DATE; - break; - case SQL_CODE_TIME: - ipdopts->parameters[para_idx].SQLType = - SQL_TYPE_TIME; - break; - case SQL_CODE_TIMESTAMP: - ipdopts->parameters[para_idx].SQLType = - SQL_TYPE_TIMESTAMP; - break; - } - break; - } - break; - case SQL_DESC_CONCISE_TYPE: - ipdopts->parameters[para_idx].SQLType = - CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_NAME: - if (Value) - STR_TO_NAME(ipdopts->parameters[para_idx].paramName, Value); - else - NULL_THE_NAME(ipdopts->parameters[para_idx].paramName); - break; - case SQL_DESC_PARAMETER_TYPE: - ipdopts->parameters[para_idx].paramType = - CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_SCALE: - ipdopts->parameters[para_idx].decimal_digits = - CAST_PTR(SQLSMALLINT, Value); - break; - case SQL_DESC_UNNAMED: /* only SQL_UNNAMED is allowed */ - if (SQL_UNNAMED != CAST_PTR(SQLSMALLINT, Value)) { - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } else - NULL_THE_NAME(ipdopts->parameters[para_idx].paramName); - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - case SQL_DESC_CASE_SENSITIVE: /* read-only */ - case SQL_DESC_DATETIME_INTERVAL_PRECISION: - case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ - case SQL_DESC_LENGTH: - case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ - case SQL_DESC_NULLABLE: /* read-only */ - case SQL_DESC_NUM_PREC_RADIX: - case SQL_DESC_OCTET_LENGTH: - case SQL_DESC_PRECISION: - case SQL_DESC_ROWVER: /* read-only */ - case SQL_DESC_TYPE_NAME: /* read-only */ - case SQL_DESC_UNSIGNED: /* read-only */ - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - return ret; -} - -static RETCODE SQL_API ARDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - UNUSED(BufferLength); - RETCODE ret = SQL_SUCCESS; - SQLLEN ival = 0; - SQLINTEGER len, rettype = 0; - PTR ptr = NULL; - const ARDFields *opts = &(desc->ardf); - SQLSMALLINT row_idx; - - len = sizeof(SQLINTEGER); - if (0 == RecNumber) /* bookmark */ - { - BindInfoClass *bookmark = opts->bookmark; - switch (FieldIdentifier) { - case SQL_DESC_DATA_PTR: - rettype = SQL_IS_POINTER; - ptr = bookmark ? 
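The SQL_DESC_TYPE / SQL_DESC_DATETIME_INTERVAL_CODE cases above implement ODBC's "verbose type" protocol: setting the verbose type SQL_DATETIME and then a datetime code is folded back into a single concise type constant. A short sketch of the equivalence, assuming standard ODBC headers:

```cpp
// Sketch only: the two-step verbose form that IPDSetField above folds into
// a concise type, shown next to the equivalent one-step form.
#include <sql.h>
#include <sqlext.h>

void set_timestamp_type(SQLHDESC ipd) {
    // Two-step verbose form: SQL_DATETIME plus a datetime code...
    SQLSetDescField(ipd, 1, SQL_DESC_TYPE,
                    (SQLPOINTER)(SQLLEN)SQL_DATETIME, 0);
    SQLSetDescField(ipd, 1, SQL_DESC_DATETIME_INTERVAL_CODE,
                    (SQLPOINTER)(SQLLEN)SQL_CODE_TIMESTAMP, 0);

    // ...is recorded exactly like the one-step concise form:
    SQLSetDescField(ipd, 1, SQL_DESC_CONCISE_TYPE,
                    (SQLPOINTER)(SQLLEN)SQL_TYPE_TIMESTAMP, 0);
}
```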
bookmark->buffer : NULL; - break; - case SQL_DESC_INDICATOR_PTR: - rettype = SQL_IS_POINTER; - ptr = bookmark ? bookmark->indicator : NULL; - break; - case SQL_DESC_OCTET_LENGTH_PTR: - rettype = SQL_IS_POINTER; - ptr = bookmark ? bookmark->used : NULL; - break; - } - if (ptr) { - *((void **)Value) = ptr; - if (StringLength) - *StringLength = len; - return ret; - } - } - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - case SQL_DESC_ARRAY_STATUS_PTR: - case SQL_DESC_BIND_OFFSET_PTR: - case SQL_DESC_BIND_TYPE: - case SQL_DESC_COUNT: - break; - default: - if (RecNumber <= 0 || RecNumber > opts->allocated) { - DC_set_error(desc, DESC_INVALID_COLUMN_NUMBER_ERROR, - "invalid column number"); - return SQL_ERROR; - } - } - row_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - ival = opts->size_of_rowset; - break; - case SQL_DESC_ARRAY_STATUS_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->row_operation_ptr; - break; - case SQL_DESC_BIND_OFFSET_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->row_offset_ptr; - break; - case SQL_DESC_BIND_TYPE: - ival = opts->bind_size; - break; - case SQL_DESC_TYPE: - rettype = SQL_IS_SMALLINT; - switch (opts->bindings[row_idx].returntype) { - case SQL_C_TYPE_DATE: - case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: - ival = SQL_DATETIME; - break; - default: - ival = opts->bindings[row_idx].returntype; - } - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - rettype = SQL_IS_SMALLINT; - switch (opts->bindings[row_idx].returntype) { - case SQL_C_TYPE_DATE: - ival = SQL_CODE_DATE; - break; - case SQL_C_TYPE_TIME: - ival = SQL_CODE_TIME; - break; - case SQL_C_TYPE_TIMESTAMP: - ival = SQL_CODE_TIMESTAMP; - break; - default: - ival = 0; - break; - } - break; - case SQL_DESC_CONCISE_TYPE: - rettype = SQL_IS_SMALLINT; - ival = opts->bindings[row_idx].returntype; - break; - case SQL_DESC_DATA_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->bindings[row_idx].buffer; - break; - case SQL_DESC_INDICATOR_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->bindings[row_idx].indicator; - break; - case SQL_DESC_OCTET_LENGTH_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->bindings[row_idx].used; - break; - case SQL_DESC_COUNT: - rettype = SQL_IS_SMALLINT; - ival = opts->allocated; - break; - case SQL_DESC_OCTET_LENGTH: - ival = opts->bindings[row_idx].buflen; - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - rettype = SQL_IS_SMALLINT; - if (DC_get_embedded(desc)) - ival = SQL_DESC_ALLOC_AUTO; - else - ival = SQL_DESC_ALLOC_USER; - break; - case SQL_DESC_PRECISION: - rettype = SQL_IS_SMALLINT; - ival = opts->bindings[row_idx].precision; - break; - case SQL_DESC_SCALE: - rettype = SQL_IS_SMALLINT; - ival = opts->bindings[row_idx].scale; - break; - case SQL_DESC_NUM_PREC_RADIX: - ival = 10; - break; - case SQL_DESC_DATETIME_INTERVAL_PRECISION: - case SQL_DESC_LENGTH: - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - switch (rettype) { - case 0: - case SQL_IS_INTEGER: - len = sizeof(SQLINTEGER); - *((SQLINTEGER *)Value) = (SQLINTEGER)ival; - break; - case SQL_IS_SMALLINT: - len = sizeof(SQLSMALLINT); - *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; - break; - case SQL_IS_POINTER: - len = sizeof(SQLPOINTER); - *((void **)Value) = ptr; - break; - } - - if (StringLength) - *StringLength = len; - return ret; -} - -static RETCODE SQL_API APDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER 
*StringLength) { - UNUSED(BufferLength); - RETCODE ret = SQL_SUCCESS; - SQLLEN ival = 0; - SQLINTEGER len, rettype = 0; - PTR ptr = NULL; - const APDFields *opts = (const APDFields *)&(desc->apdf); - SQLSMALLINT para_idx; - - len = sizeof(SQLINTEGER); - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - case SQL_DESC_ARRAY_STATUS_PTR: - case SQL_DESC_BIND_OFFSET_PTR: - case SQL_DESC_BIND_TYPE: - case SQL_DESC_COUNT: - break; - default: - if (RecNumber <= 0 || RecNumber > opts->allocated) { - MYLOG(OPENSEARCH_ALL, "RecN=%d allocated=%d\n", RecNumber, - opts->allocated); - DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, - "bad parameter number"); - return SQL_ERROR; - } - } - para_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_SIZE: - rettype = SQL_IS_LEN; - ival = opts->paramset_size; - break; - case SQL_DESC_ARRAY_STATUS_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->param_operation_ptr; - break; - case SQL_DESC_BIND_OFFSET_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->param_offset_ptr; - break; - case SQL_DESC_BIND_TYPE: - ival = opts->param_bind_type; - break; - - case SQL_DESC_TYPE: - rettype = SQL_IS_SMALLINT; - switch (opts->parameters[para_idx].CType) { - case SQL_C_TYPE_DATE: - case SQL_C_TYPE_TIME: - case SQL_C_TYPE_TIMESTAMP: - ival = SQL_DATETIME; - break; - default: - ival = opts->parameters[para_idx].CType; - } - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - rettype = SQL_IS_SMALLINT; - switch (opts->parameters[para_idx].CType) { - case SQL_C_TYPE_DATE: - ival = SQL_CODE_DATE; - break; - case SQL_C_TYPE_TIME: - ival = SQL_CODE_TIME; - break; - case SQL_C_TYPE_TIMESTAMP: - ival = SQL_CODE_TIMESTAMP; - break; - default: - ival = 0; - break; - } - break; - case SQL_DESC_CONCISE_TYPE: - rettype = SQL_IS_SMALLINT; - ival = opts->parameters[para_idx].CType; - break; - case SQL_DESC_DATA_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->parameters[para_idx].buffer; - break; - case SQL_DESC_INDICATOR_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->parameters[para_idx].indicator; - break; - case SQL_DESC_OCTET_LENGTH: - ival = opts->parameters[para_idx].buflen; - break; - case SQL_DESC_OCTET_LENGTH_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->parameters[para_idx].used; - break; - case SQL_DESC_COUNT: - rettype = SQL_IS_SMALLINT; - ival = opts->allocated; - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - rettype = SQL_IS_SMALLINT; - if (DC_get_embedded(desc)) - ival = SQL_DESC_ALLOC_AUTO; - else - ival = SQL_DESC_ALLOC_USER; - break; - case SQL_DESC_NUM_PREC_RADIX: - ival = 10; - break; - case SQL_DESC_PRECISION: - rettype = SQL_IS_SMALLINT; - ival = opts->parameters[para_idx].precision; - break; - case SQL_DESC_SCALE: - rettype = SQL_IS_SMALLINT; - ival = opts->parameters[para_idx].scale; - break; - case SQL_DESC_DATETIME_INTERVAL_PRECISION: - case SQL_DESC_LENGTH: - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - switch (rettype) { - case SQL_IS_LEN: - len = sizeof(SQLLEN); - *((SQLLEN *)Value) = ival; - break; - case 0: - case SQL_IS_INTEGER: - len = sizeof(SQLINTEGER); - *((SQLINTEGER *)Value) = (SQLINTEGER)ival; - break; - case SQL_IS_SMALLINT: - len = sizeof(SQLSMALLINT); - *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; - break; - case SQL_IS_POINTER: - len = sizeof(SQLPOINTER); - *((void **)Value) = ptr; - break; - } - - if (StringLength) - *StringLength = len; - return ret; -} - -static RETCODE SQL_API IRDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - 
SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - RETCODE ret = SQL_SUCCESS; - SQLLEN ival = 0; - SQLINTEGER len = 0, rettype = 0; - PTR ptr = NULL; - BOOL bCallColAtt = FALSE; - const IRDFields *opts = &(desc->irdf); - - switch (FieldIdentifier) { - case SQL_DESC_ROWVER: /* read-only */ - // Database is read-only, and does not support transactions - rettype = SQL_IS_SMALLINT; - ival = SQL_FALSE; - break; - case SQL_DESC_ARRAY_STATUS_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->rowStatusArray; - break; - case SQL_DESC_ROWS_PROCESSED_PTR: - rettype = SQL_IS_POINTER; - ptr = opts->rowsFetched; - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - rettype = SQL_IS_SMALLINT; - ival = SQL_DESC_ALLOC_AUTO; - break; - case SQL_DESC_AUTO_UNIQUE_VALUE: /* read-only */ - case SQL_DESC_CASE_SENSITIVE: /* read-only */ - case SQL_DESC_DATETIME_INTERVAL_PRECISION: /* read-only */ - case SQL_DESC_NUM_PREC_RADIX: /* read-only */ - rettype = SQL_IS_INTEGER; - bCallColAtt = TRUE; - break; - case SQL_DESC_DISPLAY_SIZE: /* read-only */ - case SQL_DESC_LENGTH: /* read-only */ - case SQL_DESC_OCTET_LENGTH: /* read-only */ - rettype = SQL_IS_LEN; - bCallColAtt = TRUE; - break; - case SQL_DESC_NULLABLE: /* read-only */ - case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ - case SQL_DESC_DATETIME_INTERVAL_CODE: /* read-only */ - case SQL_DESC_CONCISE_TYPE: /* read-only */ - case SQL_DESC_COUNT: /* read-only */ - case SQL_DESC_PRECISION: /* read-only */ - case SQL_DESC_SCALE: /* read-only */ - case SQL_DESC_SEARCHABLE: /* read-only */ - case SQL_DESC_TYPE: /* read-only */ - case SQL_DESC_UNNAMED: /* read-only */ - case SQL_DESC_UNSIGNED: /* read-only */ - case SQL_DESC_UPDATABLE: /* read-only */ - rettype = SQL_IS_SMALLINT; - bCallColAtt = TRUE; - break; - case SQL_DESC_BASE_COLUMN_NAME: /* read-only */ - case SQL_DESC_BASE_TABLE_NAME: /* read-only */ - case SQL_DESC_CATALOG_NAME: /* read-only */ - case SQL_DESC_LABEL: /* read-only */ - case SQL_DESC_LITERAL_PREFIX: /* read-only */ - case SQL_DESC_LITERAL_SUFFIX: /* read-only */ - case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ - case SQL_DESC_NAME: /* read-only */ - case SQL_DESC_SCHEMA_NAME: /* read-only */ - case SQL_DESC_TABLE_NAME: /* read-only */ - case SQL_DESC_TYPE_NAME: /* read-only */ - rettype = SQL_NTS; - bCallColAtt = TRUE; - break; - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - if (bCallColAtt) { - SQLSMALLINT pcbL; - StatementClass *stmt; - - stmt = opts->stmt; - ret = OPENSEARCHAPI_ColAttributes(stmt, RecNumber, FieldIdentifier, Value, - (SQLSMALLINT)BufferLength, &pcbL, &ival); - len = pcbL; - } - switch (rettype) { - case 0: - case SQL_IS_INTEGER: - len = sizeof(SQLINTEGER); - *((SQLINTEGER *)Value) = (SQLINTEGER)ival; - break; - case SQL_IS_UINTEGER: - len = sizeof(SQLUINTEGER); - *((SQLUINTEGER *)Value) = (SQLUINTEGER)ival; - break; - case SQL_IS_SMALLINT: - len = sizeof(SQLSMALLINT); - *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; - break; - case SQL_IS_POINTER: - len = sizeof(SQLPOINTER); - *((void **)Value) = ptr; - break; - case SQL_NTS: - break; - } - - if (StringLength) - *StringLength = len; - return ret; -} - -static RETCODE SQL_API IPDGetField(DescriptorClass *desc, SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - UNUSED(BufferLength); - RETCODE ret = SQL_SUCCESS; - SQLINTEGER ival = 0, len = 0, rettype = 0; - PTR ptr = NULL; - const 
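IRDGetField above does not store most column metadata itself; for anything flagged `bCallColAtt` it forwards to OPENSEARCHAPI_ColAttributes, so SQLGetDescField on the IRD and SQLColAttribute on the statement are two routes to the same data. A small sketch of that equivalence, with invented buffer sizes and no error checking:

```cpp
// Sketch only: both calls below funnel into the same column-attribute code
// in this driver, so they should report the same label for column 1.
#include <sql.h>
#include <sqlext.h>

void compare_label(SQLHSTMT stmt) {
    SQLHDESC ird = SQL_NULL_HDESC;
    SQLGetStmtAttr(stmt, SQL_ATTR_IMP_ROW_DESC, &ird, 0, NULL);

    SQLCHAR via_desc[64], via_colattr[64];
    SQLINTEGER desc_len = 0;
    SQLSMALLINT attr_len = 0;

    SQLGetDescField(ird, 1, SQL_DESC_LABEL, via_desc,
                    (SQLINTEGER)sizeof(via_desc), &desc_len);
    SQLColAttribute(stmt, 1, SQL_DESC_LABEL, via_colattr,
                    (SQLSMALLINT)sizeof(via_colattr), &attr_len, NULL);
}
```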
IPDFields *ipdopts = (const IPDFields *)&(desc->ipdf); - SQLSMALLINT para_idx; - - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_STATUS_PTR: - case SQL_DESC_ROWS_PROCESSED_PTR: - case SQL_DESC_COUNT: - break; - default: - if (RecNumber <= 0 || RecNumber > ipdopts->allocated) { - MYLOG(OPENSEARCH_ALL, "RecN=%d allocated=%d\n", RecNumber, - ipdopts->allocated); - DC_set_error(desc, DESC_BAD_PARAMETER_NUMBER_ERROR, - "bad parameter number"); - return SQL_ERROR; - } - } - para_idx = RecNumber - 1; - switch (FieldIdentifier) { - case SQL_DESC_ARRAY_STATUS_PTR: - rettype = SQL_IS_POINTER; - ptr = ipdopts->param_status_ptr; - break; - case SQL_DESC_ROWS_PROCESSED_PTR: - rettype = SQL_IS_POINTER; - ptr = ipdopts->param_processed_ptr; - break; - case SQL_DESC_UNNAMED: - rettype = SQL_IS_SMALLINT; - ival = NAME_IS_NULL(ipdopts->parameters[para_idx].paramName) - ? SQL_UNNAMED - : SQL_NAMED; - break; - case SQL_DESC_TYPE: - rettype = SQL_IS_SMALLINT; - switch (ipdopts->parameters[para_idx].SQLType) { - case SQL_TYPE_DATE: - case SQL_TYPE_TIME: - case SQL_TYPE_TIMESTAMP: - ival = SQL_DATETIME; - break; - default: - ival = ipdopts->parameters[para_idx].SQLType; - } - break; - case SQL_DESC_DATETIME_INTERVAL_CODE: - rettype = SQL_IS_SMALLINT; - switch (ipdopts->parameters[para_idx].SQLType) { - case SQL_TYPE_DATE: - ival = SQL_CODE_DATE; - break; - case SQL_TYPE_TIME: - ival = SQL_CODE_TIME; - break; - case SQL_TYPE_TIMESTAMP: - ival = SQL_CODE_TIMESTAMP; - break; - default: - ival = 0; - } - break; - case SQL_DESC_CONCISE_TYPE: - rettype = SQL_IS_SMALLINT; - ival = ipdopts->parameters[para_idx].SQLType; - break; - case SQL_DESC_COUNT: - rettype = SQL_IS_SMALLINT; - ival = ipdopts->allocated; - break; - case SQL_DESC_PARAMETER_TYPE: - rettype = SQL_IS_SMALLINT; - ival = ipdopts->parameters[para_idx].paramType; - break; - case SQL_DESC_PRECISION: - rettype = SQL_IS_SMALLINT; - switch (ipdopts->parameters[para_idx].SQLType) { - case SQL_TYPE_DATE: - case SQL_TYPE_TIME: - case SQL_TYPE_TIMESTAMP: - case SQL_DATETIME: - ival = ipdopts->parameters[para_idx].decimal_digits; - break; - } - break; - case SQL_DESC_SCALE: - rettype = SQL_IS_SMALLINT; - switch (ipdopts->parameters[para_idx].SQLType) { - case SQL_NUMERIC: - ival = ipdopts->parameters[para_idx].decimal_digits; - break; - } - break; - case SQL_DESC_ALLOC_TYPE: /* read-only */ - rettype = SQL_IS_SMALLINT; - ival = SQL_DESC_ALLOC_AUTO; - break; - case SQL_DESC_CASE_SENSITIVE: /* read-only */ - case SQL_DESC_DATETIME_INTERVAL_PRECISION: - case SQL_DESC_FIXED_PREC_SCALE: /* read-only */ - case SQL_DESC_LENGTH: - case SQL_DESC_LOCAL_TYPE_NAME: /* read-only */ - case SQL_DESC_NAME: - case SQL_DESC_NULLABLE: /* read-only */ - case SQL_DESC_NUM_PREC_RADIX: - case SQL_DESC_OCTET_LENGTH: - case SQL_DESC_ROWVER: /* read-only */ - case SQL_DESC_TYPE_NAME: /* read-only */ - case SQL_DESC_UNSIGNED: /* read-only */ - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INVALID_DESCRIPTOR_IDENTIFIER, - "invalid descriptor identifier"); - } - switch (rettype) { - case 0: - case SQL_IS_INTEGER: - len = sizeof(SQLINTEGER); - *((SQLINTEGER *)Value) = ival; - break; - case SQL_IS_SMALLINT: - len = sizeof(SQLSMALLINT); - *((SQLSMALLINT *)Value) = (SQLSMALLINT)ival; - break; - case SQL_IS_POINTER: - len = sizeof(SQLPOINTER); - *((void **)Value) = ptr; - break; - } - - if (StringLength) - *StringLength = len; - return ret; -} - -/* SQLGetStmtOption -> SQLGetStmtAttr */ -RETCODE SQL_API OPENSEARCHAPI_GetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, 
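The IPDGetField cases above expose driver-populated parameter metadata, for example whether a record carries a name (SQL_DESC_UNNAMED reports SQL_UNNAMED when `paramName` is null) and the parameter's input/output role. A hedged sketch of reading those fields back, assuming standard ODBC headers:

```cpp
// Sketch only: reading back IPD fields served by IPDGetField above. Both
// fields are written as SQLSMALLINT by the rettype dispatch at the end of
// that function.
#include <sql.h>
#include <sqlext.h>

void inspect_param(SQLHSTMT stmt) {
    SQLHDESC ipd = SQL_NULL_HDESC;
    SQLGetStmtAttr(stmt, SQL_ATTR_IMP_PARAM_DESC, &ipd, 0, NULL);

    SQLSMALLINT unnamed = 0, ptype = 0;
    SQLGetDescField(ipd, 1, SQL_DESC_UNNAMED, &unnamed, 0, NULL);
    SQLGetDescField(ipd, 1, SQL_DESC_PARAMETER_TYPE, &ptype, 0, NULL);
    // unnamed == SQL_UNNAMED when no name was recorded; ptype is typically
    // SQL_PARAM_INPUT for parameters bound through SQLBindParameter.
}
```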
SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - CSTR func = "OPENSEARCHAPI_GetStmtAttr"; - StatementClass *stmt = (StatementClass *)StatementHandle; - RETCODE ret = SQL_SUCCESS; - SQLINTEGER len = 0; - - MYLOG(OPENSEARCH_TRACE, "entering Handle=%p " FORMAT_INTEGER "\n", StatementHandle, - Attribute); - switch (Attribute) { - case SQL_ATTR_FETCH_BOOKMARK_PTR: /* 16 */ - *((void **)Value) = stmt->options.bookmark_ptr; - len = sizeof(SQLPOINTER); - break; - case SQL_ATTR_PARAM_BIND_OFFSET_PTR: /* 17 */ - *((SQLULEN **)Value) = SC_get_APDF(stmt)->param_offset_ptr; - len = sizeof(SQLPOINTER); - break; - case SQL_ATTR_PARAM_BIND_TYPE: /* 18 */ - *((SQLUINTEGER *)Value) = SC_get_APDF(stmt)->param_bind_type; - len = sizeof(SQLUINTEGER); - break; - case SQL_ATTR_PARAM_OPERATION_PTR: /* 19 */ - *((SQLUSMALLINT **)Value) = SC_get_APDF(stmt)->param_operation_ptr; - len = sizeof(SQLPOINTER); - break; - case SQL_ATTR_PARAM_STATUS_PTR: /* 20 */ - *((SQLUSMALLINT **)Value) = SC_get_IPDF(stmt)->param_status_ptr; - len = sizeof(SQLPOINTER); - break; - case SQL_ATTR_PARAMS_PROCESSED_PTR: /* 21 */ - *((SQLULEN **)Value) = SC_get_IPDF(stmt)->param_processed_ptr; - len = sizeof(SQLPOINTER); - break; - case SQL_ATTR_PARAMSET_SIZE: /* 22 */ - *((SQLULEN *)Value) = SC_get_APDF(stmt)->paramset_size; - len = sizeof(SQLUINTEGER); - break; - case SQL_ATTR_ROW_BIND_OFFSET_PTR: /* 23 */ - *((SQLULEN **)Value) = SC_get_ARDF(stmt)->row_offset_ptr; - len = 4; - break; - case SQL_ATTR_ROW_OPERATION_PTR: /* 24 */ - *((SQLUSMALLINT **)Value) = SC_get_ARDF(stmt)->row_operation_ptr; - len = 4; - break; - case SQL_ATTR_ROW_STATUS_PTR: /* 25 */ - *((SQLUSMALLINT **)Value) = SC_get_IRDF(stmt)->rowStatusArray; - len = 4; - break; - case SQL_ATTR_ROWS_FETCHED_PTR: /* 26 */ - *((SQLULEN **)Value) = SC_get_IRDF(stmt)->rowsFetched; - len = 4; - break; - case SQL_ATTR_ROW_ARRAY_SIZE: /* 27 */ - *((SQLULEN *)Value) = SC_get_ARDF(stmt)->size_of_rowset; - len = 4; - break; - case SQL_ATTR_APP_ROW_DESC: /* 10010 */ - case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ - case SQL_ATTR_IMP_ROW_DESC: /* 10012 */ - case SQL_ATTR_IMP_PARAM_DESC: /* 10013 */ - len = 4; - *((HSTMT *)Value) = - descHandleFromStatementHandle(StatementHandle, Attribute); - break; - - case SQL_ATTR_CURSOR_SCROLLABLE: /* -1 */ - len = 4; - if (SQL_CURSOR_FORWARD_ONLY == stmt->options.cursor_type) - *((SQLUINTEGER *)Value) = SQL_NONSCROLLABLE; - else - *((SQLUINTEGER *)Value) = SQL_SCROLLABLE; - break; - case SQL_ATTR_CURSOR_SENSITIVITY: /* -2 */ - len = 4; - if (SQL_CONCUR_READ_ONLY == stmt->options.scroll_concurrency) - *((SQLUINTEGER *)Value) = SQL_INSENSITIVE; - else - *((SQLUINTEGER *)Value) = SQL_UNSPECIFIED; - break; - case SQL_ATTR_METADATA_ID: /* 10014 */ - *((SQLUINTEGER *)Value) = stmt->options.metadata_id; - break; - case SQL_ATTR_ENABLE_AUTO_IPD: /* 15 */ - *((SQLUINTEGER *)Value) = SQL_FALSE; - break; - case SQL_ATTR_AUTO_IPD: /* 10001 */ - /* case SQL_ATTR_ROW_BIND_TYPE: ** == SQL_BIND_TYPE(ODBC2.0) */ - SC_set_error(stmt, DESC_INVALID_OPTION_IDENTIFIER, - "Unsupported statement option (Get)", func); - return SQL_ERROR; - default: - ret = OPENSEARCHAPI_GetStmtOption(StatementHandle, (SQLSMALLINT)Attribute, - Value, &len, BufferLength); - } - if (ret == SQL_SUCCESS && StringLength) - *StringLength = len; - return ret; -} - -/* SQLSetConnectOption -> SQLSetConnectAttr */ -RETCODE SQL_API OPENSEARCHAPI_SetConnectAttr(HDBC ConnectionHandle, - SQLINTEGER Attribute, PTR Value, - SQLINTEGER StringLength) { - UNUSED(StringLength); - CSTR func = 
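As OPENSEARCHAPI_GetStmtAttr above shows, many statement attributes are just views onto descriptor fields: SQL_ATTR_ROW_ARRAY_SIZE, for instance, reads `size_of_rowset` from the ARD. A small sketch of the two equivalent read paths, without error checking:

```cpp
// Sketch only: the statement-attribute path and the descriptor-field path
// read the same ARD storage. Note ARDGetField writes SQL_DESC_ARRAY_SIZE
// through its default (SQLINTEGER) case, hence the type below.
#include <sql.h>
#include <sqlext.h>

void read_rowset_size(SQLHSTMT stmt) {
    SQLULEN via_attr = 0;
    SQLGetStmtAttr(stmt, SQL_ATTR_ROW_ARRAY_SIZE, &via_attr, 0, NULL);

    SQLHDESC ard = SQL_NULL_HDESC;
    SQLGetStmtAttr(stmt, SQL_ATTR_APP_ROW_DESC, &ard, 0, NULL);
    SQLINTEGER via_desc = 0;
    SQLGetDescField(ard, 0, SQL_DESC_ARRAY_SIZE, &via_desc, 0, NULL);
}
```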
"OPENSEARCHAPI_SetConnectAttr"; - ConnectionClass *conn = (ConnectionClass *)ConnectionHandle; - RETCODE ret = SQL_SUCCESS; - BOOL unsupported = FALSE; - int newValue; - - MYLOG(OPENSEARCH_TRACE, "entering for %p: " FORMAT_INTEGER " %p\n", - ConnectionHandle, Attribute, Value); - switch (Attribute) { - case SQL_ATTR_METADATA_ID: - conn->stmtOptions.metadata_id = CAST_UPTR(SQLUINTEGER, Value); - break; - case SQL_ATTR_ANSI_APP: - if (SQL_AA_FALSE != CAST_PTR(SQLINTEGER, Value)) { - MYLOG(OPENSEARCH_DEBUG, "the application is ansi\n"); - if (CC_is_in_unicode_driver(conn)) /* the driver is unicode */ - CC_set_in_ansi_app(conn); /* but the app is ansi */ - } else { - MYLOG(OPENSEARCH_DEBUG, "the application is unicode\n"); - } - /*return SQL_ERROR;*/ - return SQL_SUCCESS; - case SQL_ATTR_ENLIST_IN_DTC: - unsupported = TRUE; - break; - case SQL_ATTR_AUTO_IPD: - if (SQL_FALSE != Value) - unsupported = TRUE; - break; - case SQL_ATTR_ASYNC_ENABLE: - case SQL_ATTR_CONNECTION_DEAD: - case SQL_ATTR_CONNECTION_TIMEOUT: - unsupported = TRUE; - break; - case SQL_ATTR_ESOPT_DEBUG: - newValue = CAST_UPTR(SQLCHAR, Value); - if (newValue > 0) { - logs_on_off(-1, conn->connInfo.drivers.loglevel, 0); - conn->connInfo.drivers.loglevel = (char)newValue; - logs_on_off(1, conn->connInfo.drivers.loglevel, 0); - MYLOG(OPENSEARCH_DEBUG, "debug => %d\n", - conn->connInfo.drivers.loglevel); - } else if (newValue == 0 && conn->connInfo.drivers.loglevel > 0) { - MYLOG(OPENSEARCH_DEBUG, "debug => %d\n", newValue); - logs_on_off(-1, conn->connInfo.drivers.loglevel, 0); - conn->connInfo.drivers.loglevel = (char)newValue; - logs_on_off(1, 0, 0); - } - break; - case SQL_ATTR_ESOPT_COMMLOG: - newValue = CAST_UPTR(SQLCHAR, Value); - if (newValue > 0) { - logs_on_off(-1, 0, conn->connInfo.drivers.loglevel); - conn->connInfo.drivers.loglevel = (char)newValue; - logs_on_off(1, 0, conn->connInfo.drivers.loglevel); - MYLOG(OPENSEARCH_DEBUG, "commlog => %d\n", - conn->connInfo.drivers.loglevel); - } else if (newValue == 0 && conn->connInfo.drivers.loglevel > 0) { - MYLOG(OPENSEARCH_DEBUG, "commlog => %d\n", newValue); - logs_on_off(-1, 0, conn->connInfo.drivers.loglevel); - conn->connInfo.drivers.loglevel = (char)newValue; - logs_on_off(1, 0, 0); - } - break; - default: - if (Attribute < 65536) - ret = OPENSEARCHAPI_SetConnectOption( - ConnectionHandle, (SQLUSMALLINT)Attribute, (SQLLEN)Value); - else - unsupported = TRUE; - } - if (unsupported) { - char msg[64]; - SPRINTF_FIXED( - msg, "Couldn't set unsupported connect attribute " FORMAT_INTEGER, - Attribute); - CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, msg, func); - return SQL_ERROR; - } - return ret; -} - -/* new function */ -RETCODE SQL_API OPENSEARCHAPI_GetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength) { - CSTR func = "OPENSEARCHAPI_GetDescField"; - RETCODE ret = SQL_SUCCESS; - DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; - - MYLOG(OPENSEARCH_TRACE, - "entering h=%p rec=" FORMAT_SMALLI " field=" FORMAT_SMALLI - " blen=" FORMAT_INTEGER "\n", - DescriptorHandle, RecNumber, FieldIdentifier, BufferLength); - switch (DC_get_desc_type(desc)) { - case SQL_ATTR_APP_ROW_DESC: - ret = ARDGetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength, StringLength); - break; - case SQL_ATTR_APP_PARAM_DESC: - ret = APDGetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength, StringLength); - break; - case SQL_ATTR_IMP_ROW_DESC: - ret = 
IRDGetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength, StringLength); - break; - case SQL_ATTR_IMP_PARAM_DESC: - ret = IPDGetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength, StringLength); - break; - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INTERNAL_ERROR, "Error not implemented"); - } - if (ret == SQL_ERROR) { - if (!DC_get_errormsg(desc)) { - switch (DC_get_errornumber(desc)) { - case DESC_INVALID_DESCRIPTOR_IDENTIFIER: - DC_set_errormsg( - desc, - "can't SQLGetDescField for this descriptor identifier"); - break; - case DESC_INVALID_COLUMN_NUMBER_ERROR: - DC_set_errormsg( - desc, "can't SQLGetDescField for this column number"); - break; - case DESC_BAD_PARAMETER_NUMBER_ERROR: - DC_set_errormsg( - desc, - "can't SQLGetDescField for this parameter number"); - break; - } - } - DC_log_error(func, "", desc); - } - return ret; -} - -/* new function */ -RETCODE SQL_API OPENSEARCHAPI_SetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength) { - CSTR func = "OPENSEARCHAPI_SetDescField"; - RETCODE ret = SQL_SUCCESS; - DescriptorClass *desc = (DescriptorClass *)DescriptorHandle; - - MYLOG(OPENSEARCH_TRACE, - "entering h=%p(%d) rec=" FORMAT_SMALLI " field=" FORMAT_SMALLI - " val=%p," FORMAT_INTEGER "\n", - DescriptorHandle, DC_get_desc_type(desc), RecNumber, FieldIdentifier, - Value, BufferLength); - switch (DC_get_desc_type(desc)) { - case SQL_ATTR_APP_ROW_DESC: - ret = ARDSetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength); - break; - case SQL_ATTR_APP_PARAM_DESC: - ret = APDSetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength); - break; - case SQL_ATTR_IMP_ROW_DESC: - ret = IRDSetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength); - break; - case SQL_ATTR_IMP_PARAM_DESC: - ret = IPDSetField(desc, RecNumber, FieldIdentifier, Value, - BufferLength); - break; - default: - ret = SQL_ERROR; - DC_set_error(desc, DESC_INTERNAL_ERROR, "Error not implemented"); - } - if (ret == SQL_ERROR) { - if (!DC_get_errormsg(desc)) { - switch (DC_get_errornumber(desc)) { - case DESC_INVALID_DESCRIPTOR_IDENTIFIER: - DC_set_errormsg( - desc, - "can't SQLSetDescField for this descriptor identifier"); - break; - case DESC_INVALID_COLUMN_NUMBER_ERROR: - DC_set_errormsg( - desc, "can't SQLSetDescField for this column number"); - break; - case DESC_BAD_PARAMETER_NUMBER_ERROR: - DC_set_errormsg( - desc, - "can't SQLSetDescField for this parameter number"); - break; - } - } - DC_log_error(func, "", desc); - } - return ret; -} - -/* SQLSet(Param/Scroll/Stmt)Option -> SQLSetStmtAttr */ -RETCODE SQL_API OPENSEARCHAPI_SetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER StringLength) { - UNUSED(StringLength); - RETCODE ret = SQL_SUCCESS; - CSTR func = "OPENSEARCHAPI_SetStmtAttr"; - StatementClass *stmt = (StatementClass *)StatementHandle; - - MYLOG(OPENSEARCH_TRACE, - "entering Handle=%p " FORMAT_INTEGER "," FORMAT_ULEN "(%p)\n", - StatementHandle, Attribute, (SQLULEN)Value, Value); - switch (Attribute) { - case SQL_ATTR_ENABLE_AUTO_IPD: /* 15 */ - if (SQL_FALSE == Value) - break; - case SQL_ATTR_CURSOR_SCROLLABLE: /* -1 */ - case SQL_ATTR_CURSOR_SENSITIVITY: /* -2 */ - case SQL_ATTR_AUTO_IPD: /* 10001 */ - SC_set_error(stmt, DESC_OPTION_NOT_FOR_THE_DRIVER, - "Unsupported statement option (Set)", func); - return SQL_ERROR; - /* case SQL_ATTR_ROW_BIND_TYPE: ** == SQL_BIND_TYPE(ODBC2.0) */ - case SQL_ATTR_IMP_ROW_DESC: /* 10012 (read-only) */ 
- case SQL_ATTR_IMP_PARAM_DESC: /* 10013 (read-only) */ - - /* - * case SQL_ATTR_PREDICATE_PTR: case - * SQL_ATTR_PREDICATE_OCTET_LENGTH_PTR: - */ - SC_set_error(stmt, DESC_INVALID_OPTION_IDENTIFIER, - "Unsupported statement option (Set)", func); - return SQL_ERROR; - - case SQL_ATTR_METADATA_ID: /* 10014 */ - stmt->options.metadata_id = CAST_UPTR(SQLUINTEGER, Value); - break; - case SQL_ATTR_APP_ROW_DESC: /* 10010 */ - if (SQL_NULL_HDESC == Value) { - stmt->ard = &(stmt->ardi); - } else { - stmt->ard = (DescriptorClass *)Value; - MYLOG(OPENSEARCH_ALL, "set ard=%p\n", stmt->ard); - } - break; - case SQL_ATTR_APP_PARAM_DESC: /* 10011 */ - if (SQL_NULL_HDESC == Value) { - stmt->apd = &(stmt->apdi); - } else { - stmt->apd = (DescriptorClass *)Value; - } - break; - case SQL_ATTR_FETCH_BOOKMARK_PTR: /* 16 */ - stmt->options.bookmark_ptr = Value; - break; - case SQL_ATTR_PARAM_BIND_OFFSET_PTR: /* 17 */ - SC_get_APDF(stmt)->param_offset_ptr = (SQLULEN *)Value; - break; - case SQL_ATTR_PARAM_BIND_TYPE: /* 18 */ - SC_get_APDF(stmt)->param_bind_type = CAST_UPTR(SQLUINTEGER, Value); - break; - case SQL_ATTR_PARAM_OPERATION_PTR: /* 19 */ - SC_get_APDF(stmt)->param_operation_ptr = Value; - break; - case SQL_ATTR_PARAM_STATUS_PTR: /* 20 */ - SC_get_IPDF(stmt)->param_status_ptr = (SQLUSMALLINT *)Value; - break; - case SQL_ATTR_PARAMS_PROCESSED_PTR: /* 21 */ - SC_get_IPDF(stmt)->param_processed_ptr = (SQLULEN *)Value; - break; - case SQL_ATTR_PARAMSET_SIZE: /* 22 */ - SC_get_APDF(stmt)->paramset_size = CAST_UPTR(SQLULEN, Value); - break; - case SQL_ATTR_ROW_BIND_OFFSET_PTR: /* 23 */ - SC_get_ARDF(stmt)->row_offset_ptr = (SQLULEN *)Value; - break; - case SQL_ATTR_ROW_OPERATION_PTR: /* 24 */ - SC_get_ARDF(stmt)->row_operation_ptr = Value; - break; - case SQL_ATTR_ROW_STATUS_PTR: /* 25 */ - SC_get_IRDF(stmt)->rowStatusArray = (SQLUSMALLINT *)Value; - break; - case SQL_ATTR_ROWS_FETCHED_PTR: /* 26 */ - SC_get_IRDF(stmt)->rowsFetched = (SQLULEN *)Value; - break; - case SQL_ATTR_ROW_ARRAY_SIZE: /* 27 */ - SC_get_ARDF(stmt)->size_of_rowset = CAST_UPTR(SQLULEN, Value); - break; - default: - return OPENSEARCHAPI_SetStmtOption(StatementHandle, (SQLUSMALLINT)Attribute, - (SQLULEN)Value); - } - return ret; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_apifunc.h b/sql-odbc/src/sqlodbc/opensearch_apifunc.h deleted file mode 100644 index 1901fe8cd0..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_apifunc.h +++ /dev/null @@ -1,228 +0,0 @@ -#ifndef _OPENSEARCH_API_FUNC_H__ -#define _OPENSEARCH_API_FUNC_H__ - -#include -#include - -#include "opensearch_odbc.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* Internal flags for catalog functions */ -#define PODBC_NOT_SEARCH_PATTERN 1L -#define PODBC_SEARCH_PUBLIC_SCHEMA (1L << 1) -#define PODBC_SEARCH_BY_IDS (1L << 2) -#define PODBC_SHOW_OID_COLUMN (1L << 3) -#define PODBC_ROW_VERSIONING (1L << 4) -/* Internal flags for OPENSEARCHAPI_AllocStmt functions */ -#define PODBC_EXTERNAL_STATEMENT 1L /* visible to the driver manager */ -#define PODBC_INHERIT_CONNECT_OPTIONS (1L << 1) -/* Internal flags for OPENSEARCHAPI_Exec... 
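The SQL_ATTR_APP_ROW_DESC / SQL_ATTR_APP_PARAM_DESC branches of OPENSEARCHAPI_SetStmtAttr above let an application swap in an explicitly allocated descriptor and later revert to the implicit one by passing SQL_NULL_HDESC (which restores `stmt->ardi` / `stmt->apdi`). A minimal sketch of that lifecycle, without error checking:

```cpp
// Sketch only: sharing one explicitly allocated ARD between two statements,
// then reverting both to their implicit descriptors before freeing it,
// matching the SQL_NULL_HDESC handling above.
#include <sql.h>
#include <sqlext.h>

void share_ard(SQLHDBC dbc, SQLHSTMT s1, SQLHSTMT s2) {
    SQLHDESC ard = SQL_NULL_HDESC;
    SQLAllocHandle(SQL_HANDLE_DESC, dbc, &ard);

    SQLSetStmtAttr(s1, SQL_ATTR_APP_ROW_DESC, ard, 0);
    SQLSetStmtAttr(s2, SQL_ATTR_APP_ROW_DESC, ard, 0);

    // Revert to the implicit ARDs, then release the shared descriptor.
    SQLSetStmtAttr(s1, SQL_ATTR_APP_ROW_DESC, SQL_NULL_HDESC, 0);
    SQLSetStmtAttr(s2, SQL_ATTR_APP_ROW_DESC, SQL_NULL_HDESC, 0);
    SQLFreeHandle(SQL_HANDLE_DESC, ard);
}
```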
functions */ -/* Flags for the error handling */ -#define PODBC_ALLOW_PARTIAL_EXTRACT 1L -/* #define PODBC_ERROR_CLEAR (1L << 1) no longer used */ - -RETCODE SQL_API OPENSEARCHAPI_AllocConnect(HENV EnvironmentHandle, - HDBC *ConnectionHandle); -RETCODE SQL_API OPENSEARCHAPI_AllocEnv(HENV *EnvironmentHandle); -RETCODE SQL_API OPENSEARCHAPI_AllocStmt(HDBC ConnectionHandle, HSTMT *StatementHandle, - UDWORD flag); -RETCODE SQL_API OPENSEARCHAPI_BindCol(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, - SQLSMALLINT TargetType, PTR TargetValue, - SQLLEN BufferLength, SQLLEN *StrLen_or_Ind); -RETCODE SQL_API OPENSEARCHAPI_Connect(HDBC ConnectionHandle, const SQLCHAR *ServerName, - SQLSMALLINT NameLength1, const SQLCHAR *UserName, - SQLSMALLINT NameLength2, - const SQLCHAR *Authentication, - SQLSMALLINT NameLength3); -RETCODE SQL_API OPENSEARCHAPI_BrowseConnect(HDBC hdbc, const SQLCHAR *szConnStrIn, - SQLSMALLINT cbConnStrIn, - SQLCHAR *szConnStrOut, - SQLSMALLINT cbConnStrOutMax, - SQLSMALLINT *pcbConnStrOut); -RETCODE SQL_API OPENSEARCHAPI_DescribeCol( - HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, SQLCHAR *ColumnName, - SQLSMALLINT BufferLength, SQLSMALLINT *NameLength, SQLSMALLINT *DataType, - SQLULEN *ColumnSize, SQLSMALLINT *DecimalDigits, SQLSMALLINT *Nullable); -RETCODE SQL_API OPENSEARCHAPI_Disconnect(HDBC ConnectionHandle); -/* Helper functions for Error handling */ -RETCODE SQL_API OPENSEARCHAPI_EnvError(HENV EnvironmentHandle, SQLSMALLINT RecNumber, - SQLCHAR *Sqlstate, SQLINTEGER *NativeError, - SQLCHAR *MessageText, SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_ConnectError(HDBC ConnectionHandle, SQLSMALLINT RecNumber, - SQLCHAR *Sqlstate, SQLINTEGER *NativeError, - SQLCHAR *MessageText, - SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_StmtError(HSTMT StatementHandle, SQLSMALLINT RecNumber, - SQLCHAR *Sqlstate, SQLINTEGER *NativeError, - SQLCHAR *MessageText, SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_ExecDirect(HSTMT StatementHandle, - const SQLCHAR *StatementText, - SQLINTEGER TextLength, BOOL commit); -RETCODE SQL_API OPENSEARCHAPI_Execute(HSTMT StatementHandle); -RETCODE SQL_API OPENSEARCHAPI_Fetch(HSTMT StatementHandle); -RETCODE SQL_API OPENSEARCHAPI_FreeConnect(HDBC ConnectionHandle); -RETCODE SQL_API OPENSEARCHAPI_FreeEnv(HENV EnvironmentHandle); -RETCODE SQL_API OPENSEARCHAPI_FreeStmt(HSTMT StatementHandle, SQLUSMALLINT Option); -RETCODE SQL_API OPENSEARCHAPI_GetConnectOption(HDBC ConnectionHandle, - SQLUSMALLINT Option, PTR Value, - SQLINTEGER *StringLength, - SQLINTEGER BufferLength); -RETCODE SQL_API OPENSEARCHAPI_GetCursorName(HSTMT StatementHandle, SQLCHAR *CursorName, - SQLSMALLINT BufferLength, - SQLSMALLINT *NameLength); -RETCODE SQL_API OPENSEARCHAPI_GetData(HSTMT StatementHandle, SQLUSMALLINT ColumnNumber, - SQLSMALLINT TargetType, PTR TargetValue, - SQLLEN BufferLength, SQLLEN *StrLen_or_Ind); -RETCODE SQL_API OPENSEARCHAPI_GetFunctions(HDBC ConnectionHandle, - SQLUSMALLINT FunctionId, - SQLUSMALLINT *Supported); -RETCODE SQL_API OPENSEARCHAPI_GetFunctions30(HDBC ConnectionHandle, - SQLUSMALLINT FunctionId, - SQLUSMALLINT *Supported); -RETCODE SQL_API OPENSEARCHAPI_GetInfo(HDBC ConnectionHandle, SQLUSMALLINT InfoType, - PTR InfoValue, SQLSMALLINT BufferLength, - SQLSMALLINT *StringLength); -RETCODE SQL_API OPENSEARCHAPI_GetStmtOption(HSTMT StatementHandle, SQLUSMALLINT Option, - PTR Value, SQLINTEGER 
*StringLength, - SQLINTEGER BufferLength); -RETCODE SQL_API OPENSEARCHAPI_NumResultCols(HSTMT StatementHandle, - SQLSMALLINT *ColumnCount); -RETCODE SQL_API OPENSEARCHAPI_RowCount(HSTMT StatementHandle, SQLLEN *RowCount); -RETCODE SQL_API OPENSEARCHAPI_SetConnectOption(HDBC ConnectionHandle, - SQLUSMALLINT Option, SQLULEN Value); -RETCODE SQL_API OPENSEARCHAPI_SetCursorName(HSTMT StatementHandle, - const SQLCHAR *CursorName, - SQLSMALLINT NameLength); -RETCODE SQL_API OPENSEARCHAPI_SetStmtOption(HSTMT StatementHandle, SQLUSMALLINT Option, - SQLULEN Value); -RETCODE SQL_API -OPENSEARCHAPI_SpecialColumns(HSTMT StatementHandle, SQLUSMALLINT IdentifierType, - const SQLCHAR *CatalogName, SQLSMALLINT NameLength1, - const SQLCHAR *SchemaName, SQLSMALLINT NameLength2, - const SQLCHAR *TableName, SQLSMALLINT NameLength3, - SQLUSMALLINT Scope, SQLUSMALLINT Nullable); -RETCODE SQL_API OPENSEARCHAPI_Statistics( - HSTMT StatementHandle, const SQLCHAR *CatalogName, SQLSMALLINT NameLength1, - const SQLCHAR *SchemaName, SQLSMALLINT NameLength2, - const SQLCHAR *TableName, SQLSMALLINT NameLength3, SQLUSMALLINT Unique, - SQLUSMALLINT Reserved); -RETCODE SQL_API OPENSEARCHAPI_ColAttributes(HSTMT hstmt, SQLUSMALLINT icol, - SQLUSMALLINT fDescType, PTR rgbDesc, - SQLSMALLINT cbDescMax, SQLSMALLINT *pcbDesc, - SQLLEN *pfDesc); -RETCODE SQL_API OPENSEARCHAPI_Prepare(HSTMT hstmt, const SQLCHAR *szSqlStr, - SQLINTEGER cbSqlStr); -RETCODE SQL_API OPENSEARCHAPI_ColumnPrivileges( - HSTMT hstmt, const SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - const SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, - const SQLCHAR *szTableName, SQLSMALLINT cbTableName, - const SQLCHAR *szColumnName, SQLSMALLINT cbColumnName, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_ExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, - SQLLEN irow, SQLULEN *pcrow, - SQLUSMALLINT *rgfRowStatus, - SQLLEN FetchOffset, SQLLEN rowsetSize); -RETCODE SQL_API OPENSEARCHAPI_ForeignKeys( - HSTMT hstmt, const SQLCHAR *szPkCatalogName, SQLSMALLINT cbPkCatalogName, - const SQLCHAR *szPkSchemaName, SQLSMALLINT cbPkSchemaName, - const SQLCHAR *szPkTableName, SQLSMALLINT cbPkTableName, - const SQLCHAR *szFkCatalogName, SQLSMALLINT cbFkCatalogName, - const SQLCHAR *szFkSchemaName, SQLSMALLINT cbFkSchemaName, - const SQLCHAR *szFkTableName, SQLSMALLINT cbFkTableName); -RETCODE SQL_API OPENSEARCHAPI_MoreResults(HSTMT hstmt); -RETCODE SQL_API OPENSEARCHAPI_NativeSql(HDBC hdbc, const SQLCHAR *szSqlStrIn, - SQLINTEGER cbSqlStrIn, SQLCHAR *szSqlStr, - SQLINTEGER cbSqlStrMax, SQLINTEGER *pcbSqlStr); -RETCODE SQL_API OPENSEARCHAPI_NumParams(HSTMT hstmt, SQLSMALLINT *pcpar); -RETCODE SQL_API OPENSEARCHAPI_PrimaryKeys(HSTMT hstmt, const SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - const SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, - const SQLCHAR *szTableName, - SQLSMALLINT cbTableName, OID reloid); -RETCODE SQL_API OPENSEARCHAPI_ProcedureColumns( - HSTMT hstmt, const SQLCHAR *szCatalogName, SQLSMALLINT cbCatalogName, - const SQLCHAR *szSchemaName, SQLSMALLINT cbSchemaName, - const SQLCHAR *szProcName, SQLSMALLINT cbProcName, - const SQLCHAR *szColumnName, SQLSMALLINT cbColumnName, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_Procedures(HSTMT hstmt, const SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - const SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, - const SQLCHAR *szProcName, - SQLSMALLINT cbProcName, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_TablePrivileges(HSTMT hstmt, const SQLCHAR *szCatalogName, - SQLSMALLINT cbCatalogName, - 
const SQLCHAR *szSchemaName, - SQLSMALLINT cbSchemaName, - const SQLCHAR *szTableName, - SQLSMALLINT cbTableName, UWORD flag); -RETCODE SQL_API OPENSEARCHAPI_GetDiagRec(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLCHAR *MessageText, - SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength); -RETCODE SQL_API OPENSEARCHAPI_GetDiagField(SQLSMALLINT HandleType, SQLHANDLE Handle, - SQLSMALLINT RecNumber, - SQLSMALLINT DiagIdentifier, PTR DiagInfoPtr, - SQLSMALLINT BufferLength, - SQLSMALLINT *StringLengthPtr); -RETCODE SQL_API OPENSEARCHAPI_GetConnectAttr(HDBC ConnectionHandle, - SQLINTEGER Attribute, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength); -RETCODE SQL_API OPENSEARCHAPI_GetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER BufferLength, - SQLINTEGER *StringLength); - -/* Driver-specific connection attributes, for SQLSet/GetConnectAttr() */ -enum { - SQL_ATTR_ESOPT_DEBUG = 65536, - SQL_ATTR_ESOPT_COMMLOG = 65537, - SQL_ATTR_ESOPT_PARSE = 65538, - SQL_ATTR_ESOPT_USE_DECLAREFETCH = 65539, - SQL_ATTR_ESOPT_SERVER_SIDE_PREPARE = 65540, - SQL_ATTR_ESOPT_FETCH = 65541, - SQL_ATTR_ESOPT_UNKNOWNSIZES = 65542, - SQL_ATTR_ESOPT_TEXTASLONGVARCHAR = 65543, - SQL_ATTR_ESOPT_UNKNOWNSASLONGVARCHAR = 65544, - SQL_ATTR_ESOPT_BOOLSASCHAR = 65545, - SQL_ATTR_ESOPT_MAXVARCHARSIZE = 65546, - SQL_ATTR_ESOPT_MAXLONGVARCHARSIZE = 65547, - SQL_ATTR_ESOPT_WCSDEBUG = 65548, - SQL_ATTR_ESOPT_MSJET = 65549 -}; -RETCODE SQL_API OPENSEARCHAPI_SetConnectAttr(HDBC ConnectionHandle, - SQLINTEGER Attribute, PTR Value, - SQLINTEGER StringLength); -RETCODE SQL_API OPENSEARCHAPI_SetStmtAttr(HSTMT StatementHandle, SQLINTEGER Attribute, - PTR Value, SQLINTEGER StringLength); -RETCODE SQL_API OPENSEARCHAPI_AllocDesc(HDBC ConnectionHandle, - SQLHDESC *DescriptorHandle); -RETCODE SQL_API OPENSEARCHAPI_FreeDesc(SQLHDESC DescriptorHandle); -RETCODE SQL_API OPENSEARCHAPI_CopyDesc(SQLHDESC SourceDescHandle, - SQLHDESC TargetDescHandle); -RETCODE SQL_API OPENSEARCHAPI_SetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength); -RETCODE SQL_API OPENSEARCHAPI_GetDescField(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, - SQLSMALLINT FieldIdentifier, PTR Value, - SQLINTEGER BufferLength, - SQLINTEGER *StringLength); -RETCODE SQL_API OPENSEARCHAPI_DescError(SQLHDESC DescriptorHandle, - SQLSMALLINT RecNumber, SQLCHAR *Sqlstate, - SQLINTEGER *NativeError, SQLCHAR *MessageText, - SQLSMALLINT BufferLength, - SQLSMALLINT *TextLength, UWORD flag); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* define_OPENSEARCH_API_FUNC_H__ */ diff --git a/sql-odbc/src/sqlodbc/opensearch_communication.cpp b/sql-odbc/src/sqlodbc/opensearch_communication.cpp deleted file mode 100644 index dab46cb1fa..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_communication.cpp +++ /dev/null @@ -1,1039 +0,0 @@ -#include "opensearch_communication.h" - -// sqlodbc needs to be included before mylog, otherwise mylog will generate -// compiler warnings -// clang-format off -#include "opensearch_odbc.h" -#include "mylog.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -// clang-format on - -#define SQL_ENDPOINT_ERROR_STR "Error" - -static const std::string ctype = "application/json"; -static const std::string ALLOCATION_TAG = "AWS_SIGV4_AUTH"; -static const std::string SERVICE_NAME = "es"; -static const std::string 
ESODBC_PROFILE_NAME = "opensearchodbc"; -static const std::string ERROR_MSG_PREFIX = - "[OpenSearch][SQL ODBC Driver][SQL Plugin] "; -static const std::string JSON_SCHEMA = - "{" // This was generated from the example OpenSearch data - "\"type\": \"object\"," - "\"properties\": {" - "\"schema\": {" - "\"type\": \"array\"," - "\"items\": [{" - "\"type\": \"object\"," - "\"properties\": {" - "\"name\": { \"type\": \"string\" }," - "\"type\": { \"type\": \"string\" }" - "}," - "\"required\": [ \"name\", \"type\" ]" - "}]" - "}," - "\"cursor\": { \"type\": \"string\" }," - "\"total\": { \"type\": \"integer\" }," - "\"datarows\": {" - "\"type\": \"array\"," - "\"items\": {}" - "}," - "\"size\": { \"type\": \"integer\" }," - "\"status\": { \"type\": \"integer\" }" - "}," - "\"required\": [\"schema\", \"total\", \"datarows\", \"size\", \"status\"]" - "}"; -static const std::string CURSOR_JSON_SCHEMA = - "{" // This was generated from the example OpenSearch data - "\"type\": \"object\"," - "\"properties\": {" - "\"cursor\": { \"type\": \"string\" }," - "\"datarows\": {" - "\"type\": \"array\"," - "\"items\": {}" - "}," - "\"status\": { \"type\": \"integer\" }" - "}," - "\"required\": [\"datarows\"]" - "}"; -static const std::string ERROR_RESPONSE_SCHEMA = R"EOF( -{ - "type": "object", - "properties": { - "error": { - "type": "object", - "properties": { - "reason": { "type": "string" }, - "details": { "type": "string" }, - "type": { "type": "string" } - }, - "required": [ - "reason", - "details", - "type" - ] - }, - "status": { - "type": "integer" - } - }, - "required": [ - "error", - "status" - ] -} -)EOF"; - -namespace { - /** - * A helper class to initialize/shutdown AWS API once per DLL load/unload. - */ - class AwsSdkHelper { - public: - AwsSdkHelper() : - m_reference_count(0) { - } - - AwsSdkHelper& operator++() { - if (1 == ++m_reference_count) { - std::scoped_lock lock(m_mutex); - Aws::InitAPI(m_sdk_options); - } - return *this; - } - - AwsSdkHelper& operator--() { - if (0 == --m_reference_count) { - std::scoped_lock lock(m_mutex); - Aws::ShutdownAPI(m_sdk_options); - } - return *this; - } - - Aws::SDKOptions m_sdk_options; - std::atomic m_reference_count; - std::mutex m_mutex; - }; - - AwsSdkHelper AWS_SDK_HELPER; -} - -void OpenSearchCommunication::AwsHttpResponseToString( - std::shared_ptr< Aws::Http::HttpResponse > response, std::string& output) { - // This function has some unconventional stream operations because we need - // performance over readability here. 
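For orientation, this is the shape of response document that JSON_SCHEMA above accepts; the field names come from the schema, while the values here are invented for illustration:

```cpp
// Illustrative only: a document matching JSON_SCHEMA (names from the schema,
// values made up). CURSOR_JSON_SCHEMA accepts the same document with only
// "datarows" required.
static const char SAMPLE_SQL_RESPONSE[] = R"({
  "schema": [
    { "name": "firstname", "type": "text" },
    { "name": "age",       "type": "long" }
  ],
  "cursor": "d:example-cursor-token",
  "total": 2,
  "datarows": [ ["Amber", 32], ["Hattie", 36] ],
  "size": 2,
  "status": 200
})";
```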
Equivalent code done in conventional - // ways (using stringstream operators) takes ~30x longer than this code - // below and bottlenecks our query performance - - // Get streambuffer from response and set position to start - std::streambuf* stream_buffer = response->GetResponseBody().rdbuf(); - stream_buffer->pubseekpos(0); - - // Get size of streambuffer and reserve that much space in the output - size_t avail = static_cast< size_t >(stream_buffer->in_avail()); - std::vector< char > buf(avail, '\0'); - output.clear(); - output.reserve(avail); - - // Directly copy memory from buffer into our string buffer - stream_buffer->sgetn(buf.data(), avail); - output.assign(buf.data(), avail); -} - -void OpenSearchCommunication::PrepareCursorResult(OpenSearchResult& opensearch_result) { - // Prepare document and validate result - try { - LogMsg(OPENSEARCH_DEBUG, "Parsing result JSON with cursor."); - opensearch_result.opensearch_result_doc.parse( - opensearch_result.result_json, - CURSOR_JSON_SCHEMA); - } catch (const rabbit::parse_error& e) { - // The exception rabbit gives is quite useless - providing the json - // will aid debugging for users - std::string str = "Exception obtained '" + std::string(e.what()) - + "' when parsing json string '" - + opensearch_result.result_json + "'."; - throw std::runtime_error(str.c_str()); - } -} - -std::shared_ptr< ErrorDetails > OpenSearchCommunication::ParseErrorResponse( - OpenSearchResult& opensearch_result) { - // Prepare document and validate schema - try { - LogMsg(OPENSEARCH_DEBUG, "Parsing error response (with schema validation)"); - opensearch_result.opensearch_result_doc.parse( - opensearch_result.result_json, - ERROR_RESPONSE_SCHEMA); - - auto error_details = std::make_shared< ErrorDetails >(); - error_details->reason = - opensearch_result.opensearch_result_doc["error"]["reason"].as_string(); - error_details->details = - opensearch_result.opensearch_result_doc["error"]["details"].as_string(); - error_details->source_type = - opensearch_result.opensearch_result_doc["error"]["type"].as_string(); - return error_details; - } catch (const rabbit::parse_error& e) { - // The exception rabbit gives is quite useless - providing the json - // will aid debugging for users - std::string str = "Exception obtained '" + std::string(e.what()) - + "' when parsing json string '" - + opensearch_result.result_json + "'."; - throw std::runtime_error(str.c_str()); - } -} - -void OpenSearchCommunication::SetErrorDetails(std::string reason, std::string message, - ConnErrorType error_type) { - // Populate the error details reported to the user - auto error_details = std::make_shared< ErrorDetails >(); - error_details->reason = reason; - error_details->details = message; - error_details->source_type = "Dummy type"; - error_details->type = error_type; - m_error_details = error_details; -} - -void OpenSearchCommunication::SetErrorDetails(ErrorDetails details) { - // Populate the error details reported to the user - auto error_details = std::make_shared< ErrorDetails >(details); - m_error_details = error_details; -} - -void OpenSearchCommunication::GetJsonSchema(OpenSearchResult& opensearch_result) { - // Prepare document and validate schema - try { - LogMsg(OPENSEARCH_DEBUG, "Parsing result JSON with schema."); - opensearch_result.opensearch_result_doc.parse( - opensearch_result.result_json, JSON_SCHEMA); - } catch (const rabbit::parse_error& e) { - // The exception rabbit gives is quite useless - providing the json - // will aid debugging for users - std::string str = "Exception obtained '" + 
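The comment in AwsHttpResponseToString above claims a ~30x gap between one bulk `sgetn()` copy and conventional stream extraction. That figure is the driver authors' measurement, not reproduced here, but the trade-off is easy to observe with a self-contained sketch:

```cpp
// Sketch only: bulk streambuf copy (the pattern used above) versus
// character-at-a-time extraction (the pattern being avoided). Absolute
// timings will vary by platform and buffer size.
#include <chrono>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
    std::stringstream body(std::string(8 * 1024 * 1024, 'x'));

    auto t0 = std::chrono::steady_clock::now();
    std::streambuf* sb = body.rdbuf();
    sb->pubseekpos(0);
    size_t avail = static_cast<size_t>(sb->in_avail());
    std::vector<char> buf(avail, '\0');
    sb->sgetn(buf.data(), avail);        // single bulk copy
    std::string fast(buf.data(), avail);
    auto t1 = std::chrono::steady_clock::now();

    body.clear();
    body.seekg(0);
    std::string slow;
    char c;
    while (body.get(c))                  // per-character extraction
        slow += c;
    auto t2 = std::chrono::steady_clock::now();

    using ms = std::chrono::duration<double, std::milli>;
    std::cout << "bulk: " << ms(t1 - t0).count() << " ms, "
              << "char-by-char: " << ms(t2 - t1).count() << " ms\n";
    return fast.size() == slow.size() ? 0 : 1;
}
```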
std::string(e.what()) - + "' when parsing json string '" - + opensearch_result.result_json + "'."; - throw std::runtime_error(str.c_str()); - } -} - -OpenSearchCommunication::OpenSearchCommunication() -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wreorder" -#endif // __APPLE__ - : m_status(ConnStatusType::CONNECTION_BAD), - m_error_type(ConnErrorType::CONN_ERROR_SUCCESS), - m_valid_connection_options(false), - m_is_retrieving(false), - m_error_message(""), - m_result_queue(2), - m_client_encoding(m_supported_client_encodings[0]), - m_error_message_to_user("") -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -{ - ++AWS_SDK_HELPER; -} - -OpenSearchCommunication::~OpenSearchCommunication() { - // Release the HTTP client instance to free its resources before releasing AWS SDK. - // Changing order of these actions would cause crash on disconnect. - m_http_client.reset(); - --AWS_SDK_HELPER; -} - -std::string OpenSearchCommunication::GetErrorMessage() { - // TODO #35 - Check if they expect NULL or "" when there is no error. - if (m_error_details) { - m_error_details->details = std::regex_replace( - m_error_details->details, std::regex("\\n"), "\\\\n"); - return ERROR_MSG_PREFIX + m_error_details->reason + ": " - + m_error_details->details; - } else { - return ERROR_MSG_PREFIX - + "No error details available; check the driver logs."; - } -} - -ConnErrorType OpenSearchCommunication::GetErrorType() { - return m_error_type; -} - -bool OpenSearchCommunication::ConnectionOptions(runtime_options& rt_opts, - bool use_defaults, int expand_dbname, - unsigned int option_count) { - (void)(expand_dbname); - (void)(option_count); - (void)(use_defaults); - m_rt_opts = rt_opts; - return CheckConnectionOptions(); -} - -bool OpenSearchCommunication::ConnectionOptions2() { - return true; -} - -bool OpenSearchCommunication::ConnectDBStart() { - LogMsg(OPENSEARCH_ALL, "Starting DB connection."); - m_status = ConnStatusType::CONNECTION_BAD; - if (!m_valid_connection_options) { - // TODO: get error message from CheckConnectionOptions - m_error_message = - "Invalid connection options, unable to connect to DB."; - SetErrorDetails("Invalid connection options", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - DropDBConnection(); - return false; - } - - m_status = ConnStatusType::CONNECTION_NEEDED; - if (!EstablishConnection()) { - m_error_message = m_error_message_to_user.empty() - ? "Failed to establish connection to DB." 
- : m_error_message_to_user; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - DropDBConnection(); - return false; - } - - LogMsg(OPENSEARCH_DEBUG, "Connection established."); - m_status = ConnStatusType::CONNECTION_OK; - return true; -} - -ConnStatusType OpenSearchCommunication::GetConnectionStatus() { - return m_status; -} - -void OpenSearchCommunication::DropDBConnection() { - LogMsg(OPENSEARCH_ALL, "Dropping DB connection."); - if (m_http_client) { - m_http_client.reset(); - } - - m_status = ConnStatusType::CONNECTION_BAD; - StopResultRetrieval(); -} - -bool OpenSearchCommunication::CheckConnectionOptions() { - LogMsg(OPENSEARCH_ALL, "Verifying connection options."); - m_error_message = ""; - if (m_rt_opts.auth.auth_type != AUTHTYPE_NONE - && m_rt_opts.auth.auth_type != AUTHTYPE_IAM) { - if (m_rt_opts.auth.auth_type == AUTHTYPE_BASIC) { - if (m_rt_opts.auth.username.empty() - || m_rt_opts.auth.password.empty()) { - m_error_message = AUTHTYPE_BASIC - " authentication requires a username and password."; - SetErrorDetails("Auth error", m_error_message, - ConnErrorType::CONN_ERROR_INVALID_AUTH); - } - } else { - m_error_message = "Unknown authentication type: '" - + m_rt_opts.auth.auth_type + "'"; - SetErrorDetails("Auth error", m_error_message, - ConnErrorType::CONN_ERROR_INVALID_AUTH); - } - } else if (m_rt_opts.conn.server == "") { - m_error_message = "Host connection option was not specified."; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_UNABLE_TO_ESTABLISH); - } - - if (m_error_message != "") { - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - m_valid_connection_options = false; - return false; - } else { - LogMsg(OPENSEARCH_DEBUG, "Required connection options are valid."); - m_valid_connection_options = true; - } - return m_valid_connection_options; -} - -void OpenSearchCommunication::InitializeConnection() { - Aws::Client::ClientConfiguration config; - config.scheme = (m_rt_opts.crypt.use_ssl ? Aws::Http::Scheme::HTTPS - : Aws::Http::Scheme::HTTP); - config.verifySSL = m_rt_opts.crypt.verify_server; - long response_timeout = - static_cast< long >(DEFAULT_RESPONSE_TIMEOUT) * 1000L; - try { - response_timeout = - std::stol(m_rt_opts.conn.timeout, nullptr, 10) * 1000L; - } catch (...) { - } - config.connectTimeoutMs = response_timeout; - config.httpRequestTimeoutMs = response_timeout; - config.requestTimeoutMs = response_timeout; - m_http_client = Aws::Http::CreateHttpClient(config); -} - -std::shared_ptr< Aws::Http::HttpResponse > -OpenSearchCommunication::IssueRequest( - const std::string& endpoint, const Aws::Http::HttpMethod request_type, - const std::string& content_type, const std::string& query, - const std::string& fetch_size, const std::string& cursor) { - // Generate http request - std::shared_ptr< Aws::Http::HttpRequest > request = - Aws::Http::CreateHttpRequest( - Aws::String( - m_rt_opts.conn.server - + (m_rt_opts.conn.port.empty() ? 
"" : ":" + m_rt_opts.conn.port) - + endpoint), - request_type, - Aws::Utils::Stream::DefaultResponseStreamFactoryMethod); - - // Set header type - if (!content_type.empty()) - request->SetHeaderValue(Aws::Http::CONTENT_TYPE_HEADER, Aws::String(ctype.c_str(), ctype.size())); - - // Set body - if (!query.empty() || !cursor.empty()) { - rabbit::object body; - if (!query.empty()) { - body["query"] = query; - if (!fetch_size.empty() && fetch_size != "-1") - body["fetch_size"] = fetch_size; - } else if (!cursor.empty()) { - body["cursor"] = cursor; - } - std::shared_ptr< Aws::StringStream > aws_ss = - Aws::MakeShared< Aws::StringStream >("RabbitStream"); - *aws_ss << std::string(body.str()); - request->AddContentBody(aws_ss); - request->SetContentLength(Aws::Utils::StringUtils::to_string(body.str().size())); - } - - // Handle authentication - if (m_rt_opts.auth.auth_type == AUTHTYPE_BASIC) { - std::string userpw_str = - m_rt_opts.auth.username + ":" + m_rt_opts.auth.password; - Aws::Utils::Array< unsigned char > userpw_arr( - reinterpret_cast< const unsigned char* >(userpw_str.c_str()), - userpw_str.length()); - Aws::String hashed_userpw = - Aws::Utils::HashingUtils::Base64Encode(userpw_arr); - request->SetAuthorization("Basic " + hashed_userpw); - } else if (m_rt_opts.auth.auth_type == AUTHTYPE_IAM) { - std::shared_ptr< Aws::Auth::ProfileConfigFileAWSCredentialsProvider > - credential_provider = Aws::MakeShared< - Aws::Auth::ProfileConfigFileAWSCredentialsProvider >( - ALLOCATION_TAG.c_str(), ESODBC_PROFILE_NAME.c_str()); - Aws::Client::AWSAuthV4Signer signer(credential_provider, - SERVICE_NAME.c_str(), - m_rt_opts.auth.region.c_str()); - signer.SignRequest(*request); - } - - // Issue request and return response - return m_http_client->MakeRequest(request); -} - -bool OpenSearchCommunication::IsSQLPluginEnabled(std::shared_ptr< ErrorDetails > error_details) { - std::string error_type = error_details->source_type; - if (error_type =="SQLFeatureDisabledException") { - return false; - } - return true; -} - -/** - * @brief Queries server to determine SQL plugin availability. - * - * @return true : Successfully queried server for SQL plugin - * @return false : Failed to query server, no plugin available, exception was caught - */ -bool OpenSearchCommunication::CheckSQLPluginAvailability() { - LogMsg(OPENSEARCH_ALL, "Checking for SQL plugin status."); - std::string test_query = "SHOW TABLES LIKE %"; - try { - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest(sql_endpoint, - Aws::Http::HttpMethod::HTTP_POST, ctype, test_query); - if (response == nullptr) { - m_error_message = - "Failed to receive response." - "Received NULL response."; - SetErrorDetails("Execution error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return false; - } - - AwsHttpResponseToString(response, m_response_str); - if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { - return true; - } - else { - std::unique_ptr< OpenSearchResult > result = - std::make_unique< OpenSearchResult >(); - AwsHttpResponseToString(response, result->result_json); - std::shared_ptr< ErrorDetails > error_details = - ParseErrorResponse(*result); - - if(!IsSQLPluginEnabled(error_details)) { - m_error_message_to_user = - "SQL plugin is disabled, please enable the plugin " - "to use this driver."; - m_error_message += - "The SQL plugin is disabled. The SQL plugin must be " - "enabled in order to use this driver. 
Response body: '" - + m_response_str + "'"; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - } - - if (response->HasClientError()) { - m_error_message += " Client error: '" - + response->GetClientErrorMessage() + "'."; - SetErrorDetails("HTTP client error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - } - - if (!m_response_str.empty()) { - m_error_message += " Response error: '" + m_response_str + "'."; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - } - } - } catch (...) { - m_error_message_to_user = - "SQL plugin is not available at url: " + - (m_rt_opts.conn.server + (m_rt_opts.conn.port.empty() ? - "" : ":" + m_rt_opts.conn.port)) + - ", please install the SQL plugin to use this driver."; - m_error_message += - "Unexpected exception thrown from the server, " - "the SQL plugin is not installed or in unhealthy status."; - SetErrorDetails("Server error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - } - return false; -} - -bool OpenSearchCommunication::EstablishConnection() { - // Generate HttpClient Connection class if it does not exist - LogMsg(OPENSEARCH_ALL, "Attempting to establish DB connection."); - if (!m_http_client) { - InitializeConnection(); - } - - // check if the endpoint is initialized - if (sql_endpoint.empty()) { - SetSqlEndpoint(); - } - - // Check whether SQL plugin has been installed and enabled in the - // OpenSearch server since the SQL plugin is a prerequisite to - // use this driver. - if((sql_endpoint != SQL_ENDPOINT_ERROR_STR) && CheckSQLPluginAvailability()) { - return true; - } - - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return false; -} - -std::vector< std::string > OpenSearchCommunication::GetColumnsWithSelectQuery( - const std::string table_name) { - std::vector< std::string > list_of_column; - if (table_name.empty()) { - m_error_type = ConnErrorType::CONN_ERROR_INVALID_NULL_PTR; - m_error_message = "Query is NULL"; - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return list_of_column; - } - - // Prepare query - std::string query = "SELECT * FROM " + table_name + " LIMIT 0"; - std::string msg = "Attempting to execute a query \"" + query + "\""; - LogMsg(OPENSEARCH_DEBUG, msg.c_str()); - - // Issue request - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest(sql_endpoint, Aws::Http::HttpMethod::HTTP_POST, - ctype, query); - - // Validate response - if (response == nullptr) { - m_error_message = - "Failed to receive response from query. " - "Received NULL response."; - SetErrorDetails("HTTP client error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return list_of_column; - } - - // Convert body from Aws IOStream to string - std::unique_ptr< OpenSearchResult > result = std::make_unique< OpenSearchResult >(); - AwsHttpResponseToString(response, result->result_json); - - // If response was not valid, set error - if (response->GetResponseCode() != Aws::Http::HttpResponseCode::OK) { - m_error_type = ConnErrorType::CONN_ERROR_QUERY_SYNTAX; - m_error_message = - "Http response code was not OK. 
Code received: " - + std::to_string(static_cast< long >(response->GetResponseCode())) - + "."; - if (response->HasClientError()) - m_error_message += - " Client error: '" + response->GetClientErrorMessage() + "'."; - if (!result->result_json.empty()) { - m_error_message += - " Response error: '" + result->result_json + "'."; - } - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return list_of_column; - } - - GetJsonSchema(*result); - - rabbit::array schema_array = result->opensearch_result_doc["schema"]; - for (rabbit::array::iterator it = schema_array.begin(); - it != schema_array.end(); ++it) { - std::string column_name = it->at("name").as_string(); - list_of_column.push_back(column_name); - } - - return list_of_column; -} - -int OpenSearchCommunication::ExecDirect(const char* query, const char* fetch_size_) { - m_error_details.reset(); - if (!query) { - m_error_message = "Query is NULL"; - SetErrorDetails("Execution error", m_error_message, - ConnErrorType::CONN_ERROR_INVALID_NULL_PTR); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return -1; - } else if (!m_http_client) { - m_error_message = "Unable to connect. Please try connecting again."; - SetErrorDetails("Execution error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return -1; - } - - // Prepare statement - std::string statement(query); - std::string fetch_size(fetch_size_); - std::string msg = "Attempting to execute a query \"" + statement + "\""; - LogMsg(OPENSEARCH_DEBUG, msg.c_str()); - - // Issue request - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest(sql_endpoint, Aws::Http::HttpMethod::HTTP_POST, - ctype, statement, fetch_size); - - // Validate response - if (response == nullptr) { - m_error_message = - "Failed to receive response from query. " - "Received NULL response."; - SetErrorDetails("Execution error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return -1; - } - - // Convert body from Aws IOStream to string - std::unique_ptr< OpenSearchResult > result = std::make_unique< OpenSearchResult >(); - AwsHttpResponseToString(response, result->result_json); - - // If response was not valid, set error - if (response->GetResponseCode() != Aws::Http::HttpResponseCode::OK) { - m_error_type = ConnErrorType::CONN_ERROR_QUERY_SYNTAX; - m_error_message = - "Http response code was not OK. 
Code received: " - + std::to_string(static_cast< long >(response->GetResponseCode())) - + "."; - if (response->HasClientError()) - m_error_message += - " Client error: '" + response->GetClientErrorMessage() + "'."; - if (!result->result_json.empty()) { - m_error_details = ParseErrorResponse(*result.get()); - m_error_message += - " Response error: '" + result->result_json + "'."; - } - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return -1; - } - - // Add to result queue and return - try { - ConstructOpenSearchResult(*result); - } catch (std::runtime_error& e) { - m_error_message = - "Received runtime exception: " + std::string(e.what()); - if (!result->result_json.empty()) { - m_error_message += " Result body: " + result->result_json; - } - SetErrorDetails("Execution error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return -1; - } - - const std::string cursor = result->cursor; - while (!m_result_queue.push(QUEUE_TIMEOUT, result.get())) { - if (ConnStatusType::CONNECTION_OK == m_status) { - return -1; - } - } - - result.release(); - - if (!cursor.empty()) { - // If the response has a cursor, this thread will retrieve more result - // pages asynchronously. - std::thread([&, cursor]() { SendCursorQueries(cursor); }).detach(); - } - - return 0; -} - -void OpenSearchCommunication::SendCursorQueries(std::string cursor) { - if (cursor.empty()) { - return; - } - m_is_retrieving = true; - - try { - while (!cursor.empty() && m_is_retrieving) { - std::shared_ptr< Aws::Http::HttpResponse > response = IssueRequest( - sql_endpoint, Aws::Http::HttpMethod::HTTP_POST, - ctype, "", "", cursor); - if (response == nullptr) { - m_error_message = - "Failed to receive response from cursor. " - "Received NULL response."; - SetErrorDetails("Cursor error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return; - } - - std::unique_ptr< OpenSearchResult > result = std::make_unique< OpenSearchResult >(); - AwsHttpResponseToString(response, result->result_json); - PrepareCursorResult(*result); - - if (result->opensearch_result_doc.has("cursor")) { - cursor = result->opensearch_result_doc["cursor"].as_string(); - result->cursor = result->opensearch_result_doc["cursor"].as_string(); - } else { - SendCloseCursorRequest(cursor); - cursor.clear(); - } - - while (m_is_retrieving - && !m_result_queue.push(QUEUE_TIMEOUT, result.get())) { - } - - // Don't release when attempting to push to the queue as it may take - // multiple tries. - result.release(); - } - } catch (std::runtime_error& e) { - m_error_message = - "Received runtime exception: " + std::string(e.what()); - SetErrorDetails("Cursor error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } - - if (!m_is_retrieving) { - m_result_queue.clear(); - } else { - m_is_retrieving = false; - } -} - -void OpenSearchCommunication::SendCloseCursorRequest(const std::string& cursor) { - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest(sql_endpoint + "/close", - Aws::Http::HttpMethod::HTTP_POST, ctype, "", "", cursor); - if (response == nullptr) { - m_error_message = - "Failed to receive response from cursor close request. 
" - "Received NULL response."; - SetErrorDetails("Cursor error", m_error_message, - ConnErrorType::CONN_ERROR_QUERY_SYNTAX); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } -} - -void OpenSearchCommunication::StopResultRetrieval() { - m_is_retrieving = false; - m_result_queue.clear(); -} - -void OpenSearchCommunication::ConstructOpenSearchResult(OpenSearchResult& result) { - GetJsonSchema(result); - rabbit::array schema_array = result.opensearch_result_doc["schema"]; - for (rabbit::array::iterator it = schema_array.begin(); - it != schema_array.end(); ++it) { - std::string column_name = it->at("name").as_string(); - - ColumnInfo col_info; - col_info.field_name = column_name; - col_info.type_oid = KEYWORD_TYPE_OID; - col_info.type_size = KEYWORD_TYPE_SIZE; - col_info.display_size = KEYWORD_DISPLAY_SIZE; - col_info.length_of_str = KEYWORD_TYPE_SIZE; - col_info.relation_id = 0; - col_info.attribute_number = 0; - - result.column_info.push_back(col_info); - } - if (result.opensearch_result_doc.has("cursor")) { - result.cursor = result.opensearch_result_doc["cursor"].as_string(); - } - result.command_type = "SELECT"; - result.num_fields = (uint16_t)schema_array.size(); -} - -inline void OpenSearchCommunication::LogMsg(OpenSearchLogLevel level, const char* msg) { -#if WIN32 -#pragma warning(push) -#pragma warning(disable : 4551) -#endif // WIN32 - // cppcheck outputs an erroneous missing argument error which breaks build. - // Disable for this function call - MYLOG(level, "%s\n", msg); -#if WIN32 -#pragma warning(pop) -#endif // WIN32 -} - -OpenSearchResult* OpenSearchCommunication::PopResult() { - OpenSearchResult* result = NULL; - while (!m_result_queue.pop(QUEUE_TIMEOUT, result) && m_is_retrieving) { - } - - return result; -} - -// TODO #36 - Send query to database to get encoding -std::string OpenSearchCommunication::GetClientEncoding() { - return m_client_encoding; -} - -// TODO #36 - Send query to database to set encoding -bool OpenSearchCommunication::SetClientEncoding(std::string& encoding) { - if (std::find(m_supported_client_encodings.begin(), - m_supported_client_encodings.end(), encoding) - != m_supported_client_encodings.end()) { - m_client_encoding = encoding; - return true; - } - LogMsg(OPENSEARCH_ERROR, - std::string("Failed to find encoding " + encoding).c_str()); - return false; -} - -std::string OpenSearchCommunication::GetServerVersion() { - if (!m_http_client) { - InitializeConnection(); - } - - // Issue request - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest("", Aws::Http::HttpMethod::HTTP_GET, "", "", ""); - if (response == nullptr) { - m_error_message = - "Failed to receive response from server version query. 
" - "Received NULL response."; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return ""; - } - - // Parse server version - if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { - try { - AwsHttpResponseToString(response, m_response_str); - rabbit::document doc; - doc.parse(m_response_str); - if (doc.has("version") && doc["version"].has("number")) { - return doc["version"]["number"].as_string(); - } - - } catch (const rabbit::type_mismatch& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const rabbit::parse_error& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const std::exception& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (...) { - LogMsg(OPENSEARCH_ERROR, - "Unknown exception thrown when parsing main endpoint " - "response."); - } - } - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return ""; -} - -/** - * @brief Queries supplied URL to validate Server Distribution. Maintains - * backwards compatibility with opendistro distribution. - * - * @return std::string : Server distribution name, returns "" on error - */ -std::string OpenSearchCommunication::GetServerDistribution() { - if (!m_http_client) { - InitializeConnection(); - } - - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest("", Aws::Http::HttpMethod::HTTP_GET, "", "", ""); - if (response == nullptr || response->GetResponseCode() == Aws::Http::HttpResponseCode::REQUEST_NOT_MADE) { - m_error_message = - "Failed to receive response from server version query. " - "Received no response from url: " - + (m_rt_opts.conn.server + (m_rt_opts.conn.port.empty() ? 
- "" : ":" + m_rt_opts.conn.port)); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - m_error_message_to_user = m_error_message; - return ""; - } - - // Parse server version distribution - if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { - try { - AwsHttpResponseToString(response, m_response_str); - rabbit::document doc; - doc.parse(m_response_str); - if (doc.has("version") && doc["version"].has("distribution")) { - return doc["version"]["distribution"].as_string(); - } - } catch (const rabbit::type_mismatch& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const rabbit::parse_error& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const std::exception& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (...) { - LogMsg(OPENSEARCH_ERROR, - "Unknown exception thrown when parsing main endpoint " - "response."); - } - } - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return ""; -} - -std::string OpenSearchCommunication::GetClusterName() { - if (!m_http_client) { - InitializeConnection(); - } - - // Issue request - std::shared_ptr< Aws::Http::HttpResponse > response = - IssueRequest("", Aws::Http::HttpMethod::HTTP_GET, "", "", ""); - if (response == nullptr) { - m_error_message = - "Failed to receive response from cluster name query. " - "Received NULL response."; - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return ""; - } - - // Parse cluster name - if (response->GetResponseCode() == Aws::Http::HttpResponseCode::OK) { - try { - AwsHttpResponseToString(response, m_response_str); - rabbit::document doc; - doc.parse(m_response_str); - if (doc.has("cluster_name")) { - return doc["cluster_name"].as_string(); - } - - } catch (const rabbit::type_mismatch& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const rabbit::parse_error& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (const std::exception& e) { - m_error_message = "Error parsing main endpoint response: " - + std::string(e.what()); - SetErrorDetails("Connection error", m_error_message, - ConnErrorType::CONN_ERROR_COMM_LINK_FAILURE); - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - } catch (...) 
{ - LogMsg(OPENSEARCH_ERROR, - "Unknown exception thrown when parsing main endpoint " - "response."); - } - } - LogMsg(OPENSEARCH_ERROR, m_error_message.c_str()); - return ""; -} - -/** - * @brief Sets URL endpoint for SQL plugin. On failure to - * determine appropriate endpoint, value is set to SQL_ENDPOINT_ERROR_STR - * - */ -void OpenSearchCommunication::SetSqlEndpoint() { - std::string distribution = GetServerDistribution(); - if (distribution.empty()) { - sql_endpoint = SQL_ENDPOINT_ERROR_STR; - } else if (distribution.compare("opensearch") == 0) { - sql_endpoint = "/_plugins/_sql"; - } else { - sql_endpoint = "/_opendistro/_sql"; - } -} diff --git a/sql-odbc/src/sqlodbc/opensearch_communication.h b/sql-odbc/src/sqlodbc/opensearch_communication.h deleted file mode 100644 index b37ccc3535..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_communication.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef OPENSEARCH_COMMUNICATION -#define OPENSEARCH_COMMUNICATION - -// clang-format off -#include -#include -#include -#include -#include "opensearch_types.h" -#include "opensearch_result_queue.h" - -//Keep rabbit at top otherwise it gives build error because of some variable names like max, min -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -#include "rabbit.hpp" -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -#include -#include -#include -#include -#include -#include -#include -#include -// clang-format on - -class OpenSearchCommunication { - public: - OpenSearchCommunication(); - ~OpenSearchCommunication(); - - // Create function for factory - std::string GetErrorMessage(); - ConnErrorType GetErrorType(); - bool ConnectionOptions(runtime_options& rt_opts, bool use_defaults, - int expand_dbname, unsigned int option_count); - bool ConnectionOptions2(); - bool ConnectDBStart(); - ConnStatusType GetConnectionStatus(); - void DropDBConnection(); - void LogMsg(OpenSearchLogLevel level, const char* msg); - int ExecDirect(const char* query, const char* fetch_size_); - void SendCursorQueries(std::string cursor); - OpenSearchResult* PopResult(); - std::string GetClientEncoding(); - bool SetClientEncoding(std::string& encoding); - static bool IsSQLPluginEnabled(std::shared_ptr< ErrorDetails > error_details); - bool CheckSQLPluginAvailability(); - std::string GetServerVersion(); - std::string GetServerDistribution(); - std::string GetClusterName(); - std::shared_ptr< Aws::Http::HttpResponse > IssueRequest( - const std::string& endpoint, const Aws::Http::HttpMethod request_type, - const std::string& content_type, const std::string& query, - const std::string& fetch_size = "", const std::string& cursor = ""); - void AwsHttpResponseToString( - std::shared_ptr< Aws::Http::HttpResponse > response, - std::string& output); - void SendCloseCursorRequest(const std::string& cursor); - void StopResultRetrieval(); - std::vector< std::string > GetColumnsWithSelectQuery( - const std::string table_name); - void SetSqlEndpoint(); - - // the endpoint is set according to distribution (ES/OpenSearch) - std::string sql_endpoint; - - private: - void InitializeConnection(); - bool CheckConnectionOptions(); - bool EstablishConnection(); - void ConstructOpenSearchResult(OpenSearchResult& result); - void GetJsonSchema(OpenSearchResult& opensearch_result); - void PrepareCursorResult(OpenSearchResult& opensearch_result); - std::shared_ptr< ErrorDetails > ParseErrorResponse( - OpenSearchResult& opensearch_result); - void 
SetErrorDetails(std::string reason, std::string message,
-                         ConnErrorType error_type);
-    void SetErrorDetails(ErrorDetails details);
-
-    // TODO #35 - Go through and add error messages on exit conditions
-    std::string m_error_message;
-    const std::vector< std::string > m_supported_client_encodings = {"UTF8"};
-
-    ConnStatusType m_status;
-    ConnErrorType m_error_type;
-    std::shared_ptr< ErrorDetails > m_error_details;
-    bool m_valid_connection_options;
-    bool m_is_retrieving;
-    OpenSearchResultQueue m_result_queue;
-    runtime_options m_rt_opts;
-    std::string m_client_encoding;
-    std::string m_response_str;
-    std::shared_ptr< Aws::Http::HttpClient > m_http_client;
-    std::string m_error_message_to_user;
-};
-
-#endif
diff --git a/sql-odbc/src/sqlodbc/opensearch_connection.cpp b/sql-odbc/src/sqlodbc/opensearch_connection.cpp
deleted file mode 100644
index de9bb5038a..0000000000
--- a/sql-odbc/src/sqlodbc/opensearch_connection.cpp
+++ /dev/null
@@ -1,223 +0,0 @@
-/* TryEnterCriticalSection needs the following #define */
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0400
-#endif /* _WIN32_WINNT */
-
-#include "opensearch_connection.h"
-
-#include
-#include
-#include
-
-#include "misc.h"
-
-/* for htonl */
-#ifdef WIN32
-#include
-#else
-#include
-#endif
-
-#include
-#include
-#include
-
-#include "dlg_specific.h"
-#include "environ.h"
-#include "loadlib.h"
-#include "multibyte.h"
-#include "opensearch_apifunc.h"
-#include "opensearch_helper.h"
-#include "qresult.h"
-#include "statement.h"
-
-#define PROTOCOL3_OPTS_MAX 30
-#define ERROR_BUFF_SIZE 200
-#define OPTION_COUNT 4
-#if OPTION_COUNT > PROTOCOL3_OPTS_MAX
-#error("Option count (OPTION_COUNT) is greater than the max option count allowed (PROTOCOL3_OPTS_MAX).")
-#endif
-
-void CC_determine_locale_encoding(ConnectionClass *self);
-
-char CC_connect(ConnectionClass *self) {
-    if (self == NULL)
-        return 0;
-
-    // Attempt to connect to OpenSearch
-    int conn_code = LIBOPENSEARCH_connect(self);
-    if (conn_code <= 0)
-        return static_cast< char >(conn_code);
-
-    // Set encodings
-    CC_determine_locale_encoding(self);
-#ifdef UNICODE_SUPPORT
-    if (CC_is_in_unicode_driver(self)) {
-        if (!SQL_SUCCEEDED(CC_send_client_encoding(self, "UTF8"))) {
-            return 0;
-        }
-    } else
-#endif
-    {
-        if (!SQL_SUCCEEDED(
-                CC_send_client_encoding(self, self->locale_encoding))) {
-            return 0;
-        }
-    }
-
-    // Set cursor parameters based on connection info
-    self->status = CONN_CONNECTED;
-    if ((CC_is_in_unicode_driver(self)) && (CC_is_in_ansi_app(self)))
-        self->unicode |= CONN_DISALLOW_WCHAR;
-
-    // 1 is SQL_SUCCESS and 2 is SQL_SUCCESS_WITH_INFO
-    return 1;
-}
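// Illustrative sketch (not part of the original file): a standalone equivalent
// of the scheme-prepending rule implemented by generateValidServerUrl below.
#include <iostream>
#include <string>

static std::string withScheme(const std::string& url, bool use_ssl) {
    // Leave an explicit "http://" or "https://" untouched.
    if (url.rfind("http://", 0) == 0 || url.rfind("https://", 0) == 0)
        return url;
    return (use_ssl ? "https://" : "http://") + url;
}

int main() {
    std::cout << withScheme("localhost:9200", false) << "\n";      // http://localhost:9200
    std::cout << withScheme("localhost:9200", true) << "\n";       // https://localhost:9200
    std::cout << withScheme("https://example.com", true) << "\n";  // unchanged
    return 0;
}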
-
-/**
- * @brief Prepends the appropriate protocol to the user supplied server url
- * when necessary. With no protocol specified, the use_ssl flag determines the
- * returned value.
- *
- * @param self : Supplied connection input data
- * @return std::string : Valid server URL with prepended protocol
- */
-std::string generateValidServerUrl(ConnectionClass *self) {
-    std::string valid_server_url = "";
-    std::string_view server_url = self->connInfo.server;
-    std::string_view http_prefix = "http://";
-    std::string_view https_prefix = "https://";
-    if (server_url.empty()) {
-        return valid_server_url;
-    }
-
-    bool http_prefix_prepended = (server_url.size() > http_prefix.size() && (server_url.substr(0, http_prefix.size()) == http_prefix));
-    bool https_prefix_prepended = (server_url.size() > https_prefix.size() && (server_url.substr(0, https_prefix.size()) == https_prefix));
-    if (!http_prefix_prepended && !https_prefix_prepended) {
-        valid_server_url = self->connInfo.use_ssl ?
-            std::string(https_prefix) + std::string(server_url) :
-            std::string(http_prefix) + std::string(server_url);
-    } else {
-        valid_server_url = server_url;
-    }
-    return valid_server_url;
-}
-
-int LIBOPENSEARCH_connect(ConnectionClass *self) {
-    if (self == NULL)
-        return 0;
-
-    // Setup options
-    runtime_options rt_opts;
-
-    // Connection
-    rt_opts.conn.server.assign(generateValidServerUrl(self));
-    if (rt_opts.conn.server.empty()) {
-        return 0;
-    }
-    rt_opts.conn.port.assign(self->connInfo.port);
-    rt_opts.conn.timeout.assign(self->connInfo.response_timeout);
-
-    // Authentication
-    rt_opts.auth.auth_type.assign(self->connInfo.authtype);
-    rt_opts.auth.username.assign(self->connInfo.username);
-    rt_opts.auth.password.assign(SAFE_NAME(self->connInfo.password));
-    rt_opts.auth.region.assign(self->connInfo.region);
-
-    // Encryption
-    rt_opts.crypt.verify_server = (self->connInfo.verify_server == 1);
-    rt_opts.crypt.use_ssl = (self->connInfo.use_ssl == 1);
-
-    void *opensearchconn = OpenSearchConnectDBParams(rt_opts, FALSE, OPTION_COUNT);
-    if (opensearchconn == NULL) {
-        std::string err = GetErrorMsg(opensearchconn);
-        CC_set_error(self, CONN_OPENDB_ERROR,
-                     (err.empty()) ? "OpenSearchConnectDBParams error" : err.c_str(),
-                     "LIBOPENSEARCH_connect");
-        return 0;
-    }
-
-    // Check connection status
-    if (OpenSearchStatus(opensearchconn) != CONNECTION_OK) {
-        std::string msg = GetErrorMsg(opensearchconn);
-        char error_message_out[ERROR_BUFF_SIZE] = "";
-        if (!msg.empty())
-            SPRINTF_FIXED(error_message_out, "Connection error: %s",
-                          msg.c_str());
-        else
-            STRCPY_FIXED(error_message_out,
-                         "Connection error: No message available.");
-        CC_set_error(self, CONN_OPENDB_ERROR, error_message_out,
-                     "LIBOPENSEARCH_connect");
-        OpenSearchDisconnect(opensearchconn);
-        return 0;
-    }
-
-    // Set server version
-    std::string server_version = GetServerVersion(opensearchconn);
-    STRCPY_FIXED(self->opensearch_version, server_version.c_str());
-
-    std::string cluster_name = GetClusterName(opensearchconn);
-    STRCPY_FIXED(self->cluster_name, cluster_name.c_str());
-
-    self->opensearchconn = (void *)opensearchconn;
-    return 1;
-}
-
-// TODO #36 - When we fix encoding, we should look into returning a code here.
-// This is called in connection.c and the return code isn't checked
-void CC_set_locale_encoding(ConnectionClass *self, const char *encoding) {
-    if (self == NULL)
-        return;
-
-    // Set encoding
-    char *prev_encoding = self->locale_encoding;
-    self->locale_encoding = (encoding == NULL) ? NULL : strdup(encoding);
-    if (prev_encoding)
-        free(prev_encoding);
-}
-
-// TODO #36 - Add return code - see above function comment
-void CC_determine_locale_encoding(ConnectionClass *self) {
-    // Don't update if it's already set
-    if ((self == NULL) || (self->locale_encoding != NULL))
-        return;
-
-    // Get current db encoding and derive the locale encoding
-    // TODO #34 - Investigate locale
-    CC_set_locale_encoding(self, "SQL_ASCII");
-}
-
-int CC_send_client_encoding(ConnectionClass *self, const char *encoding) {
-    if ((self == NULL) || (encoding == NULL))
-        return SQL_ERROR;
-
-    // Update client encoding
-    std::string des_db_encoding(encoding);
-    std::string cur_db_encoding = OpenSearchGetClientEncoding(self->opensearchconn);
-    if (des_db_encoding != cur_db_encoding) {
-        if (!OpenSearchSetClientEncoding(self->opensearchconn, des_db_encoding)) {
-            return SQL_ERROR;
-        }
-    }
-
-    // Update connection class to reflect updated client encoding
-    char *prev_encoding = self->original_client_encoding;
-    self->original_client_encoding = strdup(des_db_encoding.c_str());
-    self->ccsc = static_cast< short >(opensearch_CS_code(des_db_encoding.c_str()));
-    self->mb_maxbyte_per_char = static_cast< short >(opensearch_mb_maxlen(self->ccsc));
-    if (prev_encoding != NULL)
-        free(prev_encoding);
-
-    return SQL_SUCCESS;
-}
-
-void CC_initialize_opensearch_version(ConnectionClass *self) {
-    STRCPY_FIXED(self->opensearch_version, "7.4");
-    self->opensearch_version_major = 7;
-    self->opensearch_version_minor = 4;
-}
-
-void LIBOPENSEARCH_disconnect(void *conn) {
-    OpenSearchDisconnect(conn);
-}
diff --git a/sql-odbc/src/sqlodbc/opensearch_connection.h b/sql-odbc/src/sqlodbc/opensearch_connection.h
deleted file mode 100644
index bc4a699c97..0000000000
--- a/sql-odbc/src/sqlodbc/opensearch_connection.h
+++ /dev/null
@@ -1,456 +0,0 @@
-#ifndef __OPENSEARCHCONNECTION_H__
-#define __OPENSEARCHCONNECTION_H__
-
-#include
-#include
-#include
-
-#include "descriptor.h"
-#include "opensearch_helper.h"
-#include "opensearch_odbc.h"
-#include "opensearch_utility.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-typedef enum {
-    CONN_NOT_CONNECTED, /* Connection has not been established */
-    CONN_CONNECTED,     /* Connection is up and has been established */
-    CONN_DOWN,          /* Connection is broken */
-    CONN_EXECUTING      /* the connection is currently executing a
-                         * statement */
-} CONN_Status;
-
-/* These errors have general sql error state */
-#define CONNECTION_SERVER_NOT_REACHED 101
-#define CONNECTION_MSG_TOO_LONG 103
-#define CONNECTION_COULD_NOT_SEND 104
-#define CONNECTION_NO_SUCH_DATABASE 105
-#define CONNECTION_BACKEND_CRAZY 106
-#define CONNECTION_NO_RESPONSE 107
-#define CONNECTION_SERVER_REPORTED_SEVERITY_FATAL 108
-#define CONNECTION_COULD_NOT_RECEIVE 109
-#define CONNECTION_SERVER_REPORTED_SEVERITY_ERROR 110
-#define CONNECTION_NEED_PASSWORD 112
-#define CONNECTION_COMMUNICATION_ERROR 113
-
-#define CONN_ERROR_IGNORED (-3)
-#define CONN_TRUNCATED (-2)
-#define CONN_OPTION_VALUE_CHANGED (-1)
-/* These errors correspond to specific SQL states */
-#define CONN_INIREAD_ERROR 201
-#define CONN_OPENDB_ERROR 202
-#define CONN_STMT_ALLOC_ERROR 203
-#define CONN_IN_USE 204
-#define CONN_UNSUPPORTED_OPTION 205
-/* Used by SetConnectOption to indicate unsupported options */
-#define CONN_INVALID_ARGUMENT_NO 206
-/* SetConnectOption: corresponds to ODBC--"S1009" */
-#define CONN_TRANSACT_IN_PROGRES 207
-#define CONN_NO_MEMORY_ERROR 208
-#define CONN_NOT_IMPLEMENTED_ERROR 209
-#define CONN_INVALID_AUTHENTICATION 210
-#define 
CONN_AUTH_TYPE_UNSUPPORTED 211 -#define CONN_UNABLE_TO_LOAD_DLL 212 -#define CONN_ILLEGAL_TRANSACT_STATE 213 -#define CONN_VALUE_OUT_OF_RANGE 214 - -#define CONN_OPTION_NOT_FOR_THE_DRIVER 216 -#define CONN_EXEC_ERROR 217 - -/* Conn_status defines */ -#define CONN_IN_AUTOCOMMIT 1L -#define CONN_IN_TRANSACTION (1L << 1) -#define CONN_IN_MANUAL_TRANSACTION (1L << 2) -#define CONN_IN_ERROR_BEFORE_IDLE (1L << 3) - -/* not connected yet || already disconnected */ -#define CC_not_connected(x) \ - (!(x) || CONN_DOWN == (x)->status || CONN_NOT_CONNECTED == (x)->status) - -/* AutoCommit functions */ -#define CC_is_in_autocommit(x) (x->transact_status & CONN_IN_AUTOCOMMIT) -#define CC_does_autocommit(x) \ - (CONN_IN_AUTOCOMMIT \ - == ((x)->transact_status \ - & (CONN_IN_AUTOCOMMIT | CONN_IN_MANUAL_TRANSACTION))) -#define CC_loves_visible_trans(x) \ - ((0 == ((x)->transact_status & CONN_IN_AUTOCOMMIT)) \ - || (0 != ((x)->transact_status & CONN_IN_MANUAL_TRANSACTION))) - -/* Transaction in/not functions */ -#define CC_set_in_trans(x) (x->transact_status |= CONN_IN_TRANSACTION) -#define CC_set_no_trans(x) \ - (x->transact_status &= ~(CONN_IN_TRANSACTION | CONN_IN_ERROR_BEFORE_IDLE)) -#define CC_is_in_trans(x) (0 != (x->transact_status & CONN_IN_TRANSACTION)) - -/* Manual transaction in/not functions */ -#define CC_set_in_manual_trans(x) \ - (x->transact_status |= CONN_IN_MANUAL_TRANSACTION) -#define CC_set_no_manual_trans(x) \ - (x->transact_status &= ~CONN_IN_MANUAL_TRANSACTION) -#define CC_is_in_manual_trans(x) \ - (0 != (x->transact_status & CONN_IN_MANUAL_TRANSACTION)) - -/* Error waiting for ROLLBACK */ -#define CC_set_in_error_trans(x) \ - (x->transact_status |= CONN_IN_ERROR_BEFORE_IDLE) -#define CC_set_no_error_trans(x) \ - (x->transact_status &= ~CONN_IN_ERROR_BEFORE_IDLE) -#define CC_is_in_error_trans(x) (x->transact_status & CONN_IN_ERROR_BEFORE_IDLE) - -#define CC_get_errornumber(x) (x->__error_number) -#define CC_get_errormsg(x) (x->__error_message) -#define CC_set_errornumber(x, n) (x->__error_number = n) - -/* Unicode handling */ -#define CONN_UNICODE_DRIVER (1L) -#define CONN_ANSI_APP (1L << 1) -#define CONN_DISALLOW_WCHAR (1L << 2) -#define CC_set_in_unicode_driver(x) (x->unicode |= CONN_UNICODE_DRIVER) -#define CC_set_in_ansi_app(x) (x->unicode |= CONN_ANSI_APP) -#define CC_is_in_unicode_driver(x) (0 != (x->unicode & CONN_UNICODE_DRIVER)) -#define CC_is_in_ansi_app(x) (0 != (x->unicode & CONN_ANSI_APP)) -#define CC_is_in_global_trans(x) (NULL != (x)->asdum) -#define ALLOW_WCHAR(x) \ - (0 != (x->unicode & CONN_UNICODE_DRIVER) \ - && 0 == (x->unicode & CONN_DISALLOW_WCHAR)) - -#define CC_MALLOC_return_with_error(t, tp, s, x, m, ret) \ - do { \ - if (t = malloc(s), NULL == t) { \ - CC_set_error(x, CONN_NO_MEMORY_ERROR, m, ""); \ - return ret; \ - } \ - } while (0) -#define CC_REALLOC_return_with_error(t, tp, s, x, m, ret) \ - do { \ - tp *tmp; \ - if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ - CC_set_error(x, CONN_NO_MEMORY_ERROR, m, ""); \ - return ret; \ - } \ - t = tmp; \ - } while (0) - -/* For Multi-thread */ -#define INIT_CONN_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) -#define INIT_CONNLOCK(x) XPlatformInitializeCriticalSection(&((x)->slock)) -#define ENTER_CONN_CS(x) XPlatformEnterCriticalSection(((x)->cs)) -#define CONNLOCK_ACQUIRE(x) XPlatformEnterCriticalSection(((x)->slock)) -#define LEAVE_CONN_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) -#define CONNLOCK_RELEASE(x) XPlatformLeaveCriticalSection(((x)->slock)) -#define DELETE_CONN_CS(x) 
XPlatformDeleteCriticalSection(&((x)->cs)) -#define DELETE_CONNLOCK(x) XPlatformDeleteCriticalSection(&((x)->slock)) - -#define ENTER_INNER_CONN_CS(conn, entered) \ - do { \ - ENTER_CONN_CS(conn); \ - entered++; \ - } while (0) - -#define LEAVE_INNER_CONN_CS(entered, conn) \ - do { \ - if (entered > 0) { \ - LEAVE_CONN_CS(conn); \ - entered--; \ - } \ - } while (0) - -#define CLEANUP_FUNC_CONN_CS(entered, conn) \ - do { \ - while (entered > 0) { \ - LEAVE_CONN_CS(conn); \ - entered--; \ - } \ - } while (0) - -/* - * Macros to compare the server's version with a specified version - * 1st parameter: pointer to a ConnectionClass object - * 2nd parameter: major version number - * 3rd parameter: minor version number - */ -#define SERVER_VERSION_GT(conn, major, minor) \ - ((conn)->opensearch_version_major > major \ - || ((conn)->opensearch_version_major == major \ - && (conn)->opensearch_version_minor > minor)) -#define SERVER_VERSION_GE(conn, major, minor) \ - ((conn)->opensearch_version_major > major \ - || ((conn)->opensearch_version_major == major \ - && (conn)->opensearch_version_minor >= minor)) -#define SERVER_VERSION_EQ(conn, major, minor) \ - ((conn)->opensearch_version_major == major && (conn)->opensearch_version_minor == minor) -#define STRING_AFTER_DOT(string) (strchr(#string, '.') + 1) - -/* - * Simplified macros to compare the server's version with a - * specified version - * Note: Never pass a variable as the second parameter. - * It must be a decimal constant of the form %d.%d . - */ -#define OPENSEARCH_VERSION_GT(conn, ver) \ - (SERVER_VERSION_GT(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) -#define OPENSEARCH_VERSION_GE(conn, ver) \ - (SERVER_VERSION_GE(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) -#define OPENSEARCH_VERSION_EQ(conn, ver) \ - (SERVER_VERSION_EQ(conn, (int)ver, atoi(STRING_AFTER_DOT(ver)))) -#define OPENSEARCH_VERSION_LE(conn, ver) (!OPENSEARCH_VERSION_GT(conn, ver)) -#define OPENSEARCH_VERSION_LT(conn, ver) (!OPENSEARCH_VERSION_GE(conn, ver)) - -/* This is used to store cached table information in the connection */ -struct col_info { - Int2 refcnt; - QResultClass *result; - opensearchNAME schema_name; - opensearchNAME table_name; - OID table_oid; - int table_info; - time_t acc_time; -}; -enum { TBINFO_HASOIDS = 1L, TBINFO_HASSUBCLASS = (1L << 1) }; -#define free_col_info_contents(coli) \ - { \ - if (NULL != coli->result) \ - QR_Destructor(coli->result); \ - coli->result = NULL; \ - NULL_THE_NAME(coli->schema_name); \ - NULL_THE_NAME(coli->table_name); \ - coli->table_oid = 0; \ - coli->refcnt = 0; \ - coli->acc_time = 0; \ - } -#define col_info_initialize(coli) (memset(coli, 0, sizeof(COL_INFO))) - -/* Translation DLL entry points */ -#ifdef WIN32 -#define DLLHANDLE HINSTANCE -#else -#define WINAPI CALLBACK -#define DLLHANDLE void * -#define HINSTANCE void * -#endif - -typedef BOOL(WINAPI *DataSourceToDriverProc)(UDWORD, SWORD, PTR, SDWORD, PTR, - SDWORD, SDWORD *, UCHAR *, SWORD, - SWORD *); -typedef BOOL(WINAPI *DriverToDataSourceProc)(UDWORD, SWORD, PTR, SDWORD, PTR, - SDWORD, SDWORD *, UCHAR *, SWORD, - SWORD *); - -/******* The Connection handle ************/ -struct ConnectionClass_ { - HENV henv; /* environment this connection was - * created on */ - SQLUINTEGER login_timeout; - signed char autocommit_public; - StatementOptions stmtOptions; - ARDFields ardOptions; - APDFields apdOptions; - char *__error_message; - int __error_number; - char sqlstate[8]; - CONN_Status status; - ConnInfo connInfo; - StatementClass **stmts; - Int2 num_stmts; - Int2 
ncursors;
-    void *opensearchconn;
-    Int4 lobj_type;
-    Int2 coli_allocated;
-    Int2 ntables;
-    COL_INFO **col_info;
-    long translation_option;
-    HINSTANCE translation_handle;
-    DataSourceToDriverProc DataSourceToDriver;
-    DriverToDataSourceProc DriverToDataSource;
-    char transact_status; /* Is a transaction currently
-                           * in progress */
-    char cluster_name[MAX_INFO_STRING];
-    char opensearch_version[MAX_INFO_STRING]; /* Version of OpenSearch driver
-                                               * we're connected to -
-                                               * DJP 25-1-2001 */
-    Int2 opensearch_version_major;
-    Int2 opensearch_version_minor;
-    char ms_jet;
-    char unicode;
-    char result_uncommitted;
-    char lo_is_domain;
-    char current_schema_valid; /* is current_schema valid? TRUE when
-                                * current_schema == NULL means it's
-                                * really NULL, while FALSE means it's
-                                * unknown */
-    unsigned char on_commit_in_progress;
-    /* for per statement rollback */
-    char internal_svp; /* is set? */
-    char internal_op;  /* operation being executed as to internal savepoint */
-    unsigned char rbonerr;
-    unsigned char opt_in_progress;
-    unsigned char opt_previous;
-
-    char *original_client_encoding;
-    char *locale_encoding;
-    char *server_encoding;
-    Int2 ccsc;
-    Int2 mb_maxbyte_per_char;
-    SQLUINTEGER isolation;        /* isolation level initially unknown */
-    SQLUINTEGER server_isolation; /* isolation at server initially unknown */
-    char *current_schema;
-    StatementClass *unnamed_prepared_stmt;
-    Int2 max_identifier_length;
-    Int2 num_discardp;
-    char **discardp;
-    int num_descs;
-    SQLUINTEGER
-        default_isolation; /* server's default isolation initially unknown */
-    DescriptorClass **descs;
-    opensearchNAME schemaIns;
-    opensearchNAME tableIns;
-    SQLULEN stmt_timeout_in_effect;
-    void *cs;
-    void *slock;
-#ifdef _HANDLE_ENLIST_IN_DTC_
-    UInt4 gTranInfo;
-    void *asdum;
-#endif /* _HANDLE_ENLIST_IN_DTC_ */
-};
-
-/* Accessor functions */
-#define CC_get_env(x) ((x)->henv)
-#define CC_get_database(x) (x->connInfo.database)
-#define CC_get_server(x) (x->connInfo.server)
-#define CC_get_DSN(x) (x->connInfo.dsn)
-#define CC_get_username(x) (x->connInfo.username)
-#define CC_is_onlyread(x) (x->connInfo.onlyread[0] == '1')
-#define CC_fake_mss(x) (/* 0 != (x)->ms_jet && */ 0 < (x)->connInfo.fake_mss)
-#define CC_accessible_only(x) (0 < (x)->connInfo.accessible_only)
-#define CC_default_is_c(x) \
-    (CC_is_in_ansi_app(x) \
-     || x->ms_jet /* not only */ || TRUE /* but for any other ? 
*/) - -#ifdef _HANDLE_ENLIST_IN_DTC_ -enum { - DTC_IN_PROGRESS = 1L, - DTC_ENLISTED = (1L << 1), - DTC_REQUEST_EXECUTING = (1L << 2), - DTC_ISOLATED = (1L << 3), - DTC_PREPARE_REQUESTED = (1L << 4) -}; -#define CC_set_dtc_clear(x) ((x)->gTranInfo = 0) -#define CC_set_dtc_enlisted(x) \ - ((x)->gTranInfo |= (DTC_IN_PROGRESS | DTC_ENLISTED)) -#define CC_no_dtc_enlisted(x) ((x)->gTranInfo &= (~DTC_ENLISTED)) -#define CC_is_dtc_enlisted(x) (0 != ((x)->gTranInfo & DTC_ENLISTED)) -#define CC_set_dtc_executing(x) ((x)->gTranInfo |= DTC_REQUEST_EXECUTING) -#define CC_no_dtc_executing(x) ((x)->gTranInfo &= (~DTC_REQUEST_EXECUTING)) -#define CC_is_dtc_executing(x) (0 != ((x)->gTranInfo & DTC_REQUEST_EXECUTING)) -#define CC_set_dtc_prepareRequested(x) \ - ((x)->gTranInfo |= (DTC_PREPARE_REQUESTED)) -#define CC_no_dtc_prepareRequested(x) \ - ((x)->gTranInfo &= (~DTC_PREPARE_REQUESTED)) -#define CC_is_dtc_prepareRequested(x) \ - (0 != ((x)->gTranInfo & DTC_PREPARE_REQUESTED)) -#define CC_is_dtc_executing(x) (0 != ((x)->gTranInfo & DTC_REQUEST_EXECUTING)) -#define CC_set_dtc_isolated(x) ((x)->gTranInfo |= DTC_ISOLATED) -#define CC_is_idle_in_global_transaction(x) \ - (0 != ((x)->gTranInfo & DTC_PREPARE_REQUESTED) \ - || (x)->gTranInfo == DTC_IN_PROGRESS) -#endif /* _HANDLE_ENLIST_IN_DTC_ */ -/* statement callback */ -#define CC_start_stmt(a) ((a)->rbonerr = 0) -#define CC_start_tc_stmt(a) ((a)->rbonerr = (1L << 1)) -#define CC_is_tc_stmt(a) (((a)->rbonerr & (1L << 1)) != 0) -#define CC_start_rb_stmt(a) ((a)->rbonerr = (1L << 2)) -#define CC_is_rb_stmt(a) (((a)->rbonerr & (1L << 2)) != 0) -#define CC_set_accessed_db(a) ((a)->rbonerr |= (1L << 3)) -#define CC_accessed_db(a) (((a)->rbonerr & (1L << 3)) != 0) -#define CC_start_rbpoint(a) ((a)->rbonerr |= (1L << 4), (a)->internal_svp = 1) -#define CC_started_rbpoint(a) (((a)->rbonerr & (1L << 4)) != 0) - -/* prototypes */ -ConnectionClass *CC_Constructor(void); -char CC_Destructor(ConnectionClass *self); -RETCODE CC_cleanup(ConnectionClass *self, BOOL keepCommunication); -BOOL CC_set_autocommit(ConnectionClass *self, BOOL on); -char CC_add_statement(ConnectionClass *self, StatementClass *stmt); -char CC_remove_statement(ConnectionClass *self, StatementClass *stmt); -char CC_add_descriptor(ConnectionClass *self, DescriptorClass *desc); -void CC_set_error(ConnectionClass *self, int number, const char *message, - const char *func); -void CC_set_errormsg(ConnectionClass *self, const char *message); -int CC_get_error(ConnectionClass *self, int *number, char **message); -void CC_clear_error(ConnectionClass *self); -void CC_log_error(const char *func, const char *desc, - const ConnectionClass *self); - -int CC_get_max_idlen(ConnectionClass *self); -char CC_get_escape(const ConnectionClass *self); -char *identifierEscape(const SQLCHAR *src, SQLLEN srclen, - const ConnectionClass *conn, char *buf, size_t bufsize, - BOOL double_quote); -int findIdentifier(const UCHAR *str, int ccsc, const UCHAR **next_token); -int eatTableIdentifiers(const UCHAR *str, int ccsc, opensearchNAME *table, - opensearchNAME *schema); - -char CC_connect(ConnectionClass *self); -int LIBOPENSEARCH_connect(ConnectionClass *self); -void LIBOPENSEARCH_disconnect(void *conn); -int CC_send_client_encoding(ConnectionClass *self, const char *encoding); -void CC_set_locale_encoding(ConnectionClass *self, const char *encoding); -void CC_initialize_opensearch_version(ConnectionClass *self); - -const char *CurrCat(const ConnectionClass *self); -const char *CurrCatString(const ConnectionClass *self); 
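/* Illustrative note (not part of the original header): the version-comparison
 * macros above expand a decimal literal into a (major, minor) pair. For
 * example,
 *     if (OPENSEARCH_VERSION_GE(conn, 7.4)) { ... }
 * expands via STRING_AFTER_DOT(7.4) -> atoi("4") to
 *     SERVER_VERSION_GE(conn, (int)7.4, 4)
 * i.e. major > 7, or major == 7 and minor >= 4. That stringizing trick is why
 * the second argument must be a decimal literal of the form %d.%d and never a
 * variable. */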
-SQLUINTEGER CC_get_isolation(ConnectionClass *self); - -SQLCHAR *make_lstring_ifneeded(ConnectionClass *, const SQLCHAR *s, ssize_t len, - BOOL); - -#define TABLE_IS_VALID(tbname, tblen) \ - ((tbname) && (tblen > 0 || SQL_NTS == tblen)) - -/* CC_send_query options */ -enum { - IGNORE_ABORT_ON_CONN = 1L /* not set the error result even when */ - , - CREATE_KEYSET = (1L << 1) /* create keyset for updatable cursors */ - , - GO_INTO_TRANSACTION = (1L << 2) /* issue BEGIN in advance */ - , - ROLLBACK_ON_ERROR = (1L << 3) /* rollback the query when an error occurs */ - , - END_WITH_COMMIT = (1L << 4) /* the query ends with COMMIT command */ - , - READ_ONLY_QUERY = (1L << 5) /* the query is read-only */ -}; -/* CC_on_abort options */ -#define NO_TRANS 1L -#define CONN_DEAD (1L << 1) /* connection is no longer valid */ - -/* - * internal savepoint related - */ - -#define _RELEASE_INTERNAL_SAVEPOINT - -/* Internal rollback */ -enum { PER_STATEMENT_ROLLBACK = 1, PER_QUERY_ROLLBACK }; - -/* Commands generated */ -enum { INTERNAL_SAVEPOINT_OPERATION = 1, INTERNAL_ROLLBACK_OPERATION }; - -/* Operations in progress */ -enum { SAVEPOINT_IN_PROGRESS = 1, PREPEND_IN_PROGRESS }; -/* StatementSvp entry option */ -enum { SVPOPT_RDONLY = 1L, SVPOPT_REDUCE_ROUNDTRIP = (1L << 1) }; -#define INIT_SVPOPT (SVPOPT_RDONLY) -#define CC_svp_init(a) \ - ((a)->internal_svp = (a)->internal_op = 0, \ - (a)->opt_in_progress = (a)->opt_previous = INIT_SVPOPT) -#define CC_init_opt_in_progress(a) ((a)->opt_in_progress = INIT_SVPOPT) -#define CC_init_opt_previous(a) ((a)->opt_previous = INIT_SVPOPT) - -#ifdef __cplusplus -} -#endif -#endif /* __OPENSEARCHCONNECTION_H__ */ diff --git a/sql-odbc/src/sqlodbc/opensearch_driver_connect.cpp b/sql-odbc/src/sqlodbc/opensearch_driver_connect.cpp deleted file mode 100644 index 29e3f3c012..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_driver_connect.cpp +++ /dev/null @@ -1,246 +0,0 @@ -#include "opensearch_driver_connect.h" - -#include -#include - -#include "opensearch_odbc.h" -#include "misc.h" - -#ifndef WIN32 -#include -#include -#else -#include -#endif - -#include - -#ifdef WIN32 -#include - -#include "resource.h" -#endif -#include -#include - -#include "dlg_specific.h" -#include "drvconn.h" -#include "opensearch_apifunc.h" - -static RETCODE CheckDriverComplete(const SQLUSMALLINT driver_completion, - const HWND hwnd, ConnInfo *ci, - const int reqs) { - (void)(ci); - (void)(reqs); - if (hwnd == NULL) - return SQL_SUCCESS; - switch (driver_completion) { -#ifdef WIN32 - case SQL_DRIVER_COMPLETE_REQUIRED: - case SQL_DRIVER_COMPLETE: - if (!paramRequired(ci, reqs)) - break; - case SQL_DRIVER_PROMPT: { - const RETCODE dialog_result = dconn_DoDialog(hwnd, ci); - if (dialog_result != SQL_SUCCESS) - return dialog_result; - break; - } -#endif // WIN32 - default: - break; - } - return SQL_SUCCESS; -} - -static RETCODE GetRequirementsAndConnect(const SQLUSMALLINT driver_completion, - const HWND hwnd, ConnInfo *ci, - int &reqs, ConnectionClass *conn, - int &ret_val) { - const RETCODE res = CheckDriverComplete(driver_completion, hwnd, ci, reqs); - if (res != SQL_SUCCESS) - return res; - - // Password is not a required parameter unless authentication asks for it. 
- // Let the application ask over and over until a password is entered (the - // user can always hit Cancel to get out) - if (paramRequired(ci, reqs)) { - CC_set_error(conn, CONN_OPENDB_ERROR, "Please supply password", - "OPENSEARCHAPI_DriverConnect->GetRequirements"); - return SQL_ERROR; - } - ret_val = CC_connect(conn); - return SQL_SUCCESS; -} - -static RETCODE CreateOutputConnectionString(ssize_t &len, ConnectionClass *conn, - const ConnInfo *ci, - const SQLSMALLINT conn_str_out_len, - SQLCHAR *conn_str_out, - const int retval) { - // Create the output connection string - SQLSMALLINT len_str_out = conn_str_out_len; - if (conn->ms_jet && len_str_out > 255) - len_str_out = 255; - char conn_str[MAX_CONNECT_STRING]; - makeConnectString(conn_str, ci, len_str_out); - - // Set result and check connection string - RETCODE result = ((retval == 1) ? SQL_SUCCESS : SQL_SUCCESS_WITH_INFO); - len = strlen(conn_str); - if (conn_str_out) { - // Return the completed string to the caller. The correct method is to - // only construct the connect string if a dialog was put up, otherwise, - // it should just copy the connection input string to the output. - // However, it seems ok to just always construct an output string. - // There are possible bad side effects on working applications (Access) - // by implementing the correct behavior - strncpy((char *)conn_str_out, conn_str, conn_str_out_len); - if (len >= conn_str_out_len) { - for (int clen = conn_str_out_len - 1; - clen >= 0 && conn_str_out[clen] != ';'; clen--) - conn_str_out[clen] = '\0'; - result = SQL_SUCCESS_WITH_INFO; - CC_set_error(conn, CONN_TRUNCATED, - "Buffer is too small for output conn str.", - "CreateOutputConnectionString"); - } - } - return result; -} - -static std::string CheckRetVal(const int retval, const HWND hwnd, - const SQLUSMALLINT driver_completion, - const int reqs, const ConnInfo *ci) { - (void)(ci); - (void)(reqs); - (void)(hwnd); - if (retval > 0) - return ""; - // Error attempting to connect - else if (retval == 0) - return "Error from CC_Connect"; - // More info is required - else if (retval < 0) { - // Not allowed to prompt, but PW is required - Error - if (driver_completion == SQL_DRIVER_NOPROMPT) { - return "Need password but Driver_NoPrompt is set"; - } else { -#ifdef WIN32 - if (!(hwnd && paramRequired(ci, reqs))) - return "Unable to prompt for required parameter"; -#else - return "Driver prompt only supported on Windows"; -#endif - } - } - return ""; -} - -static SQLRETURN SetupConnString(const SQLCHAR *conn_str_in, - const SQLSMALLINT conn_str_in_len, - ConnInfo *ci, ConnectionClass *conn) { - CSTR func = "SetupConnString"; - - // make_string uses malloc, need to overwrite delete operator to use free - // for unique_ptr - struct free_delete { - void operator()(void *x) { - if (x != NULL) { - free(x); - x = NULL; - } - } - }; - - // Make connection string and get DSN - std::unique_ptr< char, free_delete > conn_str( - make_string(conn_str_in, conn_str_in_len, NULL, 0)); - - if (!dconn_get_DSN_or_Driver(conn_str.get(), ci)) { - CC_set_error(conn, CONN_OPENDB_ERROR, "Connection string parse error", - func); - return SQL_ERROR; - } - - //This will be used to restore the log output dir fetched from connection string - //Since getDSNinfo overrides all available connection attributes - std::string conn_string_log_dir(ci->drivers.output_dir); - - // If the ConnInfo in the hdbc is missing anything, this function will fill - // them in from the registry (assuming of course there is a DSN given -- if - // not, it does 
nothing!)
-    getDSNinfo(ci, NULL);
-
-    // Parse the connect string and fill in conninfo
-    if (!dconn_get_connect_attributes(conn_str.get(), ci)) {
-        CC_set_error(conn, CONN_OPENDB_ERROR, "Connection string parse error",
-                     func);
-        return SQL_ERROR;
-    }
-    logs_on_off(1, ci->drivers.loglevel, ci->drivers.loglevel);
-
-    // Sets the log output dir to the path retrieved from the connection string.
-    // If the connection string doesn't have a log path, the value from the DSN
-    // is used; if neither includes a log path, the default value is used.
-    if (!conn_string_log_dir.empty()) {
-        setLogDir(conn_string_log_dir.c_str());
-        conn_string_log_dir.clear();
-    } else {
-        setLogDir(ci->drivers.output_dir);
-    }
-    InitializeLogging();
-    return SQL_SUCCESS;
-}
-
-RETCODE OPENSEARCHAPI_DriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *conn_str_in,
-                                    SQLSMALLINT conn_str_in_len, SQLCHAR *conn_str_out,
-                                    SQLSMALLINT conn_str_out_len,
-                                    SQLSMALLINT *pcb_conn_str_out,
-                                    SQLUSMALLINT driver_completion) {
-    CSTR func = "OPENSEARCHAPI_DriverConnect";
-    ConnectionClass *conn = (ConnectionClass *)hdbc;
-
-    if (!conn) {
-        CC_log_error(func, "ConnectionClass handle is NULL", NULL);
-        return SQL_INVALID_HANDLE;
-    }
-    ConnInfo *ci = &(conn->connInfo);
-
-    // Setup connection string
-    {
-        const SQLRETURN return_code =
-            SetupConnString(conn_str_in, conn_str_in_len, ci, conn);
-        if (return_code != SQL_SUCCESS)
-            return return_code;
-    }
-
-    // Initialize opensearch_version
-    CC_initialize_opensearch_version(conn);
-
-    int reqs = 0;
-    int retval = 0;
-    do {
-        const SQLRETURN return_code = GetRequirementsAndConnect(
-            driver_completion, hwnd, ci, reqs, conn, retval);
-        if (return_code != SQL_SUCCESS)
-            return return_code;
-
-        // Check for errors
-        const std::string error_msg =
-            CheckRetVal(retval, hwnd, driver_completion, reqs, ci);
-
-        // If we have an error, log it and exit
-        if (error_msg != "") {
-            CC_log_error(func, error_msg.c_str(), conn);
-            return SQL_ERROR;
-        }
-    } while (retval <= 0);
-
-    ssize_t len = 0;
-    const RETCODE result = CreateOutputConnectionString(
-        len, conn, ci, conn_str_out_len, conn_str_out, retval);
-    if (pcb_conn_str_out)
-        *pcb_conn_str_out = static_cast< SQLSMALLINT >(len);
-    return result;
-}
diff --git a/sql-odbc/src/sqlodbc/opensearch_driver_connect.h b/sql-odbc/src/sqlodbc/opensearch_driver_connect.h
deleted file mode 100644
index f5683e057b..0000000000
--- a/sql-odbc/src/sqlodbc/opensearch_driver_connect.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __OPENSEARCH_DRIVER_CONNECT_H__
-#define __OPENSEARCH_DRIVER_CONNECT_H__
-#include "opensearch_connection.h"
-
-// C Interface
-#ifdef __cplusplus
-extern "C" {
-#endif
-RETCODE OPENSEARCHAPI_DriverConnect(HDBC hdbc, HWND hwnd, SQLCHAR *conn_str_in,
-                                    SQLSMALLINT conn_str_in_len, SQLCHAR *conn_str_out,
-                                    SQLSMALLINT conn_str_out_len,
-                                    SQLSMALLINT *pcb_conn_str_out,
-                                    SQLUSMALLINT driver_completion);
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __OPENSEARCH_DRIVER_CONNECT_H__ */
diff --git a/sql-odbc/src/sqlodbc/opensearch_helper.cpp b/sql-odbc/src/sqlodbc/opensearch_helper.cpp
deleted file mode 100644
index 9b50aff6cc..0000000000
--- a/sql-odbc/src/sqlodbc/opensearch_helper.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-#include "opensearch_helper.h"
-
-#include
-#include
-#include
-
-#include "opensearch_communication.h"
-
-void* OpenSearchConnectDBParams(runtime_options& rt_opts, int expand_dbname,
-                                unsigned int option_count) {
-    // Initialize Connection
-    OpenSearchCommunication* conn = static_cast< OpenSearchCommunication* >(InitializeOpenSearchConn());
-    if (!conn)
-        return NULL;
-
-    // Set user defined connection options
-    if (!conn->ConnectionOptions(rt_opts, true, expand_dbname, option_count))
-        return conn;
-
-    // Set user derived connection options
-    if (!conn->ConnectionOptions2())
-        return conn;
-
-    // Connect to DB
-    if (!conn->ConnectDBStart())
-        return conn;
-
-    // Technically this is always the result, so we could remove the above or
-    // make 1 large if statement, but I think this is more legible
-    return conn;
-}
-
-ConnStatusType OpenSearchStatus(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetConnectionStatus()
-               : ConnStatusType::CONNECTION_BAD;
-}
-
-std::string GetErrorMsg(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetErrorMessage()
-               : "";
-}
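// Illustrative sketch (not part of the original file): the typical call
// sequence for this void*-based helper API, mirroring LIBOPENSEARCH_connect in
// opensearch_connection.cpp: connect, check the status, surface the error
// message, and tear down on failure.
#include <iostream>
#include "opensearch_helper.h"

static void* connectOrReport(runtime_options& opts) {
    // 0 and 4 mirror the expand_dbname (FALSE) and OPTION_COUNT arguments the
    // real caller passes; both are illustrative here.
    void* conn = OpenSearchConnectDBParams(opts, 0, 4);
    if (conn == NULL)
        return NULL;
    if (OpenSearchStatus(conn) != CONNECTION_OK) {
        std::cerr << "Connection error: " << GetErrorMsg(conn) << "\n";
        OpenSearchDisconnect(conn);  // frees the OpenSearchCommunication object
        return NULL;
    }
    return conn;  // caller eventually calls OpenSearchDisconnect(conn)
}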
-
-ConnErrorType GetErrorType(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetErrorType()
-               : ConnErrorType::CONN_ERROR_SUCCESS;
-}
-
-std::string GetServerVersion(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetServerVersion()
-               : "";
-}
-
-std::string GetClusterName(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetClusterName()
-               : "";
-}
-
-void* InitializeOpenSearchConn() {
-    return new OpenSearchCommunication();
-}
-
-int OpenSearchExecDirect(void* opensearch_conn, const char* statement, const char* fetch_size) {
-    return (opensearch_conn && statement)
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->ExecDirect(
-                   statement, fetch_size)
-               : -1;
-}
-
-void OpenSearchSendCursorQueries(void* opensearch_conn, const char* cursor) {
-    static_cast< OpenSearchCommunication* >(opensearch_conn)->SendCursorQueries(cursor);
-}
-
-OpenSearchResult* OpenSearchGetResult(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->PopResult()
-               : NULL;
-}
-
-std::string OpenSearchGetClientEncoding(void* opensearch_conn) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->GetClientEncoding()
-               : "";
-}
-
-bool OpenSearchSetClientEncoding(void* opensearch_conn, std::string& encoding) {
-    return opensearch_conn
-               ? static_cast< OpenSearchCommunication* >(opensearch_conn)->SetClientEncoding(
-                   encoding)
-               : false;
-}
-
-void OpenSearchDisconnect(void* opensearch_conn) {
-    delete static_cast< OpenSearchCommunication* >(opensearch_conn);
-}
-
-void OpenSearchClearResult(OpenSearchResult* opensearch_result) {
-    delete opensearch_result;
-}
-
-void OpenSearchStopRetrieval(void* opensearch_conn) {
-    static_cast< OpenSearchCommunication* >(opensearch_conn)->StopResultRetrieval();
-}
-
-std::vector< std::string > OpenSearchGetColumnsWithSelectQuery(
-    void* opensearch_conn, const std::string table_name) {
-    return static_cast< OpenSearchCommunication* >(opensearch_conn)->GetColumnsWithSelectQuery(
-        table_name);
-}
-
-// This class provides a cross platform way of entering critical sections
-class CriticalSectionHelper {
-  public:
-    // Don't need to initialize lock owner because default constructor sets it
-    // to thread id 0, which is invalid
-    CriticalSectionHelper() : m_lock_count(0) {
-    }
-    ~CriticalSectionHelper() {
-    }
-
-    void EnterCritical() {
-        // Get current thread id, if it's the lock owner, increment lock count,
-        // otherwise lock and take ownership
-        std::thread::id current_thread = std::this_thread::get_id();
-        if (m_lock_owner == current_thread) {
-            m_lock_count++;
-        } else {
-            m_lock.lock();
-            m_lock_owner = current_thread;
-            m_lock_count = 1;
-        }
-    }
-
-    void ExitCritical() {
-        // Get current thread id; if it's the owner, decrement, and unlock when
-        // the lock count reaches 0. Otherwise, log critical warning because we
-        // should only allow the lock owner to unlock
-        std::thread::id current_thread = std::this_thread::get_id();
-        if (m_lock_owner == current_thread) {
-            if (m_lock_count == 0) {
-// This should never happen. Log critical warning
-#ifdef WIN32
-#pragma warning(push)
-#pragma warning(disable : 4551)  // MYLOG complains 'function call missing
                                  // argument list' on Windows, which it isn't
-#endif
-                MYLOG(OPENSEARCH_ERROR, "%s\n",
-                      "CRITICAL WARNING: ExitCritical section called when lock "
-                      "count was already 0!");
-#ifdef WIN32
-#pragma warning(pop)
-#endif
-            } else if (--m_lock_count == 0) {
-                // Reset lock owner to invalid thread id (0)
-                m_lock_owner = std::thread::id();
-                m_lock.unlock();
-            }
-        } else {
-// This should never happen. Log critical warning
-#ifdef WIN32
-#pragma warning(push)
-#pragma warning(disable : 4551)  // MYLOG complains 'function call missing
                                  // argument list' on Windows, which it isn't
-#endif
-            MYLOG(OPENSEARCH_ERROR, "%s\n",
-                  "CRITICAL WARNING: ExitCritical section called by thread "
-                  "that does not own the lock!");
-#ifdef WIN32
-#pragma warning(pop)
-#endif
-        }
-    }
-
-  private:
-    size_t m_lock_count;
-    std::atomic< std::thread::id > m_lock_owner;
-    std::mutex m_lock;
-};
-
-// Initialize pointer to point to our helper class
-void XPlatformInitializeCriticalSection(void** critical_section_helper) {
-    if (critical_section_helper != NULL) {
-        try {
-            *critical_section_helper = new CriticalSectionHelper();
-        } catch (...) 
{ - *critical_section_helper = NULL; - } - } -} - -// Call enter critical section -void XPlatformEnterCriticalSection(void* critical_section_helper) { - if (critical_section_helper != NULL) { - static_cast< CriticalSectionHelper* >(critical_section_helper) - ->EnterCritical(); - } -} - -// Call exit critical section -void XPlatformLeaveCriticalSection(void* critical_section_helper) { - if (critical_section_helper != NULL) { - static_cast< CriticalSectionHelper* >(critical_section_helper) - ->ExitCritical(); - } -} - -// Delete our helper class -void XPlatformDeleteCriticalSection(void** critical_section_helper) { - if (critical_section_helper != NULL) { - delete static_cast< CriticalSectionHelper* >(*critical_section_helper); - *critical_section_helper = NULL; - } -} diff --git a/sql-odbc/src/sqlodbc/opensearch_helper.h b/sql-odbc/src/sqlodbc/opensearch_helper.h deleted file mode 100644 index 0433c0a7b8..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_helper.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __OPENSEARCH_HELPER_H__ -#define __OPENSEARCH_HELPER_H__ - -#include "opensearch_types.h" - -#ifdef __cplusplus -// C++ interface -std::string OpenSearchGetClientEncoding(void* opensearch_conn); -bool OpenSearchSetClientEncoding(void* opensearch_conn, std::string& encoding); -OpenSearchResult* OpenSearchGetResult(void* opensearch_conn); -void OpenSearchClearResult(OpenSearchResult* opensearch_result); -void* OpenSearchConnectDBParams(runtime_options& rt_opts, int expand_dbname, - unsigned int option_count); -std::string GetServerVersion(void* opensearch_conn); -std::string GetClusterName(void* opensearch_conn); -std::string GetErrorMsg(void* opensearch_conn); -ConnErrorType GetErrorType(void* opensearch_conn); -std::vector< std::string > OpenSearchGetColumnsWithSelectQuery( - void* opensearch_conn, const std::string table_name); - -// C Interface -extern "C" { -#endif -void XPlatformInitializeCriticalSection(void** critical_section_helper); -void XPlatformEnterCriticalSection(void* critical_section_helper); -void XPlatformLeaveCriticalSection(void* critical_section_helper); -void XPlatformDeleteCriticalSection(void** critical_section_helper); -ConnStatusType OpenSearchStatus(void* opensearch_conn); -int OpenSearchExecDirect(void* opensearch_conn, const char* statement, const char* fetch_size); -void OpenSearchSendCursorQueries(void* opensearch_conn, const char* cursor); -void OpenSearchDisconnect(void* opensearch_conn); -void OpenSearchStopRetrieval(void* opensearch_conn); -#ifdef __cplusplus -} -#endif - -void* InitializeOpenSearchConn(); - -#endif // __OPENSEARCH_HELPER_H__ diff --git a/sql-odbc/src/sqlodbc/opensearch_info.cpp b/sql-odbc/src/sqlodbc/opensearch_info.cpp deleted file mode 100644 index ab3700eb1b..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_info.cpp +++ /dev/null @@ -1,1046 +0,0 @@ -#include "opensearch_info.h" - -#include -#include - -#include -#include -#include -#include -#include -#include - -// TODO #324 (SQL Plugin)- Update if OpenSearch extends support for multiple -// tables -#define DEFAULT_TYPE_STR \ - { 'k', 'e', 'y', 'w', 'o', 'r', 'd', '\0' } -#define DEFAULT_TYPE_INT (SQL_WVARCHAR) -#define EMPTY_VARCHAR \ - { '\0' } -#define OPENSEARCH_UNINITIALIZED (-2) -#define COLUMN_TEMPLATE_COUNT 18 -#define TABLE_TEMPLATE_COUNT 5 - -#define TABLE_CAT "TABLE_CAT" -#define TABLE_SCHEM "TABLE_SCHEM" -#define TABLE_NAME "TABLE_NAME" -#define COLUMN_NAME "COLUMN_NAME" -#define DATA_TYPE "DATA_TYPE" -#define TYPE_NAME "TYPE_NAME" -#define COLUMN_SIZE "COLUMN_SIZE" -#define 
BUFFER_LENGTH "BUFFER_LENGTH" -#define DECIMAL_DIGITS "DECIMAL_DIGITS" -#define NUM_PREC_RADIX "NUM_PREC_RADIX" -#define NULLABLE "NULLABLE" -#define REMARKS "REMARKS" -#define COLUMN_DEF "COLUMN_DEF" -#define SQL_DATA_TYPE "SQL_DATA_TYPE" -#define SQL_DATETIME_SUB "SQL_DATETIME_SUB" -#define CHAR_OCTET_LENGTH "CHAR_OCTET_LENGTH" -#define ORDINAL_POSITION "ORDINAL_POSITION" -#define IS_NULLABLE "IS_NULLABLE" -#define TABLE_QUALIFIER "TABLE_QUALIFIER" -#define TABLE_OWNER "TABLE_OWNER" -#define TABLE_TYPE "TABLE_TYPE" -#define PRECISION "PRECISION" -#define LITERAL_PREFIX "LITERAL_PREFIX" -#define LITERAL_SUFFIX "LITERAL_SUFFIX" -#define CREATE_PARAMS "CREATE_PARAMS" -#define CASE_SENSITIVE "CASE_SENSITIVE" -#define SEARCHABLE "SEARCHABLE" -#define UNSIGNED_ATTRIBUTE "UNSIGNED_ATTRIBUTE" -#define FIXED_PREC_SCALE "FIXED_PREC_SCALE" -#define AUTO_INCREMENT "AUTO_INCREMENT" -#define LOCAL_TYPE_NAME "LOCAL_TYPE_NAME" -#define MINIMUM_SCALE "MINIMUM_SCALE" -#define MAXIMUM_SCALE "MAXIMUM_SCALE" -#define INTERVAL_PRECISION "INTERVAL_PRECISION" - -const std::unordered_map< int, std::vector< int > > sql_opensearch_type_map = { - {SQL_BIT, {OPENSEARCH_TYPE_BOOL}}, - {SQL_TINYINT, {OPENSEARCH_TYPE_INT1}}, - {SQL_SMALLINT, {OPENSEARCH_TYPE_INT2}}, - {SQL_INTEGER, {OPENSEARCH_TYPE_INT4}}, - {SQL_BIGINT, {OPENSEARCH_TYPE_INT8}}, - {SQL_REAL, {OPENSEARCH_TYPE_HALF_FLOAT, OPENSEARCH_TYPE_FLOAT4}}, - {SQL_DOUBLE, {OPENSEARCH_TYPE_FLOAT8, OPENSEARCH_TYPE_SCALED_FLOAT}}, - {SQL_WVARCHAR, - {OPENSEARCH_TYPE_KEYWORD, OPENSEARCH_TYPE_TEXT, OPENSEARCH_TYPE_NESTED, - OPENSEARCH_TYPE_OBJECT}}, - {SQL_TYPE_TIMESTAMP, {OPENSEARCH_TYPE_DATETIME, OPENSEARCH_TYPE_TIMESTAMP}}}; - -const std::unordered_map< std::string, int > data_name_data_type_map = { - {OPENSEARCH_TYPE_NAME_BOOLEAN, SQL_BIT}, - {OPENSEARCH_TYPE_NAME_BYTE, SQL_TINYINT}, - {OPENSEARCH_TYPE_NAME_SHORT, SQL_SMALLINT}, - {OPENSEARCH_TYPE_NAME_INTEGER, SQL_INTEGER}, - {OPENSEARCH_TYPE_NAME_LONG, SQL_BIGINT}, - {OPENSEARCH_TYPE_NAME_HALF_FLOAT, SQL_REAL}, - {OPENSEARCH_TYPE_NAME_FLOAT, SQL_REAL}, - {OPENSEARCH_TYPE_NAME_DOUBLE, SQL_DOUBLE}, - {OPENSEARCH_TYPE_NAME_SCALED_FLOAT, SQL_DOUBLE}, - {OPENSEARCH_TYPE_NAME_KEYWORD, SQL_WVARCHAR}, - {OPENSEARCH_TYPE_NAME_TEXT, SQL_WVARCHAR}, - {OPENSEARCH_TYPE_NAME_DATE, SQL_TYPE_TIMESTAMP}, - {OPENSEARCH_TYPE_NAME_TIMESTAMP, SQL_TYPE_TIMESTAMP}, - {OPENSEARCH_TYPE_NAME_OBJECT, SQL_WVARCHAR}, - {OPENSEARCH_TYPE_NAME_NESTED, SQL_WVARCHAR}}; - -// Boilerplate code for easy column bind handling -class BindTemplate { - public: - BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal) - : m_len(OPENSEARCH_UNINITIALIZED), m_ordinal(ordinal) { - if (!can_be_null) - throw std::runtime_error( - "Do not use this constructor for values that can be NULL. 
A " - "constructor with " - "supplied default value must be used if value can be NULL."); - } - BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, const Int2) - : m_len(OPENSEARCH_UNINITIALIZED), m_ordinal(ordinal) { - (void)(can_be_null); - } - BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, const Int4) - : m_len(OPENSEARCH_UNINITIALIZED), m_ordinal(ordinal) { - (void)(can_be_null); - } - BindTemplate(const bool can_be_null, const SQLUSMALLINT ordinal, - const std::vector< SQLCHAR > &) - : m_len(OPENSEARCH_UNINITIALIZED), m_ordinal(ordinal) { - (void)(can_be_null); - } - virtual ~BindTemplate() { - } - - SQLPOINTER GetData() { - if (m_len == OPENSEARCH_UNINITIALIZED) - throw std::runtime_error( - "Length is uninitialized - Fetch must be executed before data " - "is retreived."); - return (m_len == SQL_NULL_DATA) ? NULL : GetDataForBind(); - } - - void BindColumn(StatementClass *stmt) { - RETCODE err = OPENSEARCHAPI_BindCol(stmt, m_ordinal, GetType(), - GetDataForBind(), GetSize(), &m_len); - if (!SQL_SUCCEEDED(err)) { - std::string error_msg = - "Failed to bind column with ordinal " - + std::to_string(m_ordinal) - + ". SQL Error code: " + std::to_string(err); - throw std::runtime_error(error_msg.c_str()); - } - } - void AssignData(TupleField *tuple) { - SQLPOINTER data = GetData(); - if ((data == NULL) || (m_len == SQL_NULL_DATA)) { - set_tuplefield_null(tuple); - return; - } - switch (GetType()) { - case SQL_C_LONG: - set_tuplefield_int4(tuple, *static_cast< Int4 * >(data)); - break; - case SQL_C_SHORT: - set_tuplefield_int2(tuple, *static_cast< Int2 * >(data)); - break; - case SQL_C_CHAR: - set_tuplefield_string(tuple, static_cast< const char * >(data)); - break; - default: - throw std::runtime_error( - std::string( - "Cannot convert unknown data type to tuplefield: " - + std::to_string(GetType())) - .c_str()); - } - } - BindTemplate(const BindTemplate &) = default; - BindTemplate &operator=(const BindTemplate &) = default; - virtual std::string AsString() = 0; - virtual void UpdateData(SQLPOINTER new_data, size_t size) = 0; - - private: - SQLLEN m_len; - SQLUSMALLINT m_ordinal; - - protected: - virtual SQLSMALLINT GetType() = 0; - virtual SQLLEN GetSize() = 0; - virtual SQLPOINTER GetDataForBind() = 0; -}; - -// 4 byte integer column -class BindTemplateInt4 : public BindTemplate { - public: - BindTemplateInt4(const bool nullable, const SQLUSMALLINT ordinal) - : BindTemplate(nullable, ordinal), m_data(0) { - } - BindTemplateInt4(const bool nullable, const SQLUSMALLINT ordinal, - const Int4 data) - : BindTemplate(nullable, ordinal, data), m_data(data) { - } - ~BindTemplateInt4() { - } - std::string AsString() { - return std::to_string(*static_cast< Int4 * >(GetData())); - } - void UpdateData(SQLPOINTER new_data, size_t size) { - (void)size; - m_data = *(Int4 *)new_data; - } - - private: - Int4 m_data; - - protected: - SQLPOINTER GetDataForBind() { - return &m_data; - } - SQLSMALLINT GetType() { - return SQL_C_LONG; - } - SQLLEN GetSize() { - return static_cast< SQLLEN >(sizeof(Int4)); - } -}; - -// 2 byte integer column -class BindTemplateInt2 : public BindTemplate { - public: - BindTemplateInt2(const bool nullable, const SQLUSMALLINT ordinal) - : BindTemplate(nullable, ordinal), m_data(0) { - } - BindTemplateInt2(const bool nullable, const SQLUSMALLINT ordinal, - const Int2 data) - : BindTemplate(nullable, ordinal, data), m_data(data) { - } - ~BindTemplateInt2() { - } - std::string AsString() { - return std::to_string(*static_cast< Int2 * >(GetData())); - 
} - void UpdateData(SQLPOINTER new_data, size_t size) { - (void)size; - m_data = *(Int2 *)new_data; - } - - private: - Int2 m_data; - - protected: - SQLPOINTER GetDataForBind() { - return &m_data; - } - SQLSMALLINT GetType() { - return SQL_C_SHORT; - } - SQLLEN GetSize() { - return static_cast< SQLLEN >(sizeof(Int2)); - } -}; - -// Varchar data -class BindTemplateSQLCHAR : public BindTemplate { - public: - BindTemplateSQLCHAR(const bool nullable, const SQLUSMALLINT ordinal) - : BindTemplate(nullable, ordinal), m_data(MAX_INFO_STRING, '\0') { - } - BindTemplateSQLCHAR(const bool nullable, const SQLUSMALLINT ordinal, - const std::vector< SQLCHAR > &data) - : BindTemplate(nullable, ordinal, data), m_data(MAX_INFO_STRING, '\0') { - if (data.size() >= m_data.size()) { - throw std::runtime_error( - "Default data size exceeds max info string size."); - } else { - m_data.insert(m_data.begin(), data.begin(), data.end()); - } - } - ~BindTemplateSQLCHAR() { - } - std::string AsString() { - char *bind_tbl_data_char = static_cast< char * >(GetData()); - return (bind_tbl_data_char == NULL) ? "" : bind_tbl_data_char; - } - void UpdateData(SQLPOINTER new_data, size_t size) { - m_data.clear(); - SQLCHAR *data = reinterpret_cast< SQLCHAR * >(new_data); - for (size_t i = 0; i < size; i++) { - m_data.push_back(*data++); - } - m_data.push_back(0); - } - - private: - std::vector< SQLCHAR > m_data; - - protected: - SQLPOINTER GetDataForBind() { - return m_data.data(); - } - SQLSMALLINT GetType() { - return SQL_C_CHAR; - } - SQLLEN GetSize() { - return static_cast< SQLLEN >(m_data.size()); - } -}; - -// Typedefs and macros to ease creation of BindTemplates -typedef std::unique_ptr< BindTemplate > bind_ptr; -typedef std::vector< bind_ptr > bind_vector; -#define _SQLCHAR_(...) \ - (std::make_unique< BindTemplateSQLCHAR >(BindTemplateSQLCHAR(__VA_ARGS__))) -#define _SQLINT2_(...) \ - (std::make_unique< BindTemplateInt2 >(BindTemplateInt2(__VA_ARGS__))) -#define _SQLINT4_(...) 
\ - (std::make_unique< BindTemplateInt4 >(BindTemplateInt4(__VA_ARGS__))) - -// Common function definitions -enum class TableResultSet { Catalog, Schema, TableTypes, TableLookUp, All }; -void ConvertToString(std::string &out, bool &valid, const SQLCHAR *sql_char, - const SQLSMALLINT sz); -QResultClass *SetupQResult(const bind_vector &cols, StatementClass *stmt, - StatementClass *col_stmt, const int col_cnt); -void CleanUp(StatementClass *stmt, StatementClass *sub_stmt, const RETCODE ret); -void ExecuteQuery(ConnectionClass *conn, HSTMT *stmt, const std::string &query); -void GetCatalogData(const std::string &query, StatementClass *stmt, - StatementClass *sub_stmt, const TableResultSet res_type, - std::string &table_type, - void (*populate_binds)(bind_vector &), - void (*setup_qres_info)(QResultClass *, EnvironmentClass *), - std::vector< std::string > *list_of_columns = NULL); - -// Common function declarations -void ConvertToString(std::string &out, bool &valid, const SQLCHAR *sql_char, - const SQLSMALLINT sz) { - valid = (sql_char != NULL); - if (!valid) { - out = "%"; - } else if (sz == SQL_NTS) { - out.assign(reinterpret_cast< const char * >(sql_char)); - } else if (sz <= 0) { - out = ""; - } else { - out.assign(reinterpret_cast< const char * >(sql_char), - static_cast< size_t >(sz)); - } -} - -QResultClass *SetupQResult(const bind_vector &cols, StatementClass *stmt, - StatementClass *col_stmt, const int col_cnt) { - (void)(cols); - (void)(col_stmt); - - // Initialize memory for data retrieval - QResultClass *res = NULL; - if ((res = QR_Constructor()) == NULL) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Couldn't allocate memory for Tables or Columns result.", - "FetchResults"); - throw std::runtime_error( - "Couldn't allocate memory for Tables or Columns result."); - } - SC_set_Result(stmt, res); - - // The binding structure for a statement is not set up until a statement is - // actually executed, so we'll have to do this ourselves - extend_column_bindings(SC_get_ARDF(stmt), - static_cast< SQLSMALLINT >(col_cnt)); - QR_set_num_fields(res, col_cnt); - - return res; -} - -void CleanUp(StatementClass *stmt, StatementClass *sub_stmt, - const RETCODE ret = SQL_ERROR) { - stmt->status = STMT_FINISHED; - stmt->catalog_result = TRUE; - - if (!SQL_SUCCEEDED(ret) && 0 >= SC_get_errornumber(stmt)) - SC_error_copy(stmt, sub_stmt, TRUE); - - // set up the current tuple pointer for - stmt->currTuple = -1; - SC_set_rowset_start(stmt, -1, FALSE); - SC_set_current_col(stmt, -1); - - if (sub_stmt) - OPENSEARCHAPI_FreeStmt(sub_stmt, SQL_DROP); -} - -void ExecuteQuery(ConnectionClass *conn, HSTMT *stmt, - const std::string &query) { - // Prepare statement - if (!SQL_SUCCEEDED(OPENSEARCHAPI_AllocStmt(conn, stmt, 0))) { - throw std::runtime_error("Failed to allocate memory for statement."); - } - - // Execute query - if (!SQL_SUCCEEDED(OPENSEARCHAPI_ExecDirect( - *stmt, reinterpret_cast< const SQLCHAR * >(query.c_str()), SQL_NTS, - 1))) { - std::string error_msg = "Failed to execute query '" + query + "'."; - throw std::runtime_error(error_msg.c_str()); - } -} - -// Table specific function definitions -void split(const std::string &input, const std::string &delim, - std::vector< std::string > &output); -void GenerateTableQuery(std::string &tables_query, const UWORD flag, - const std::string &table_name_value, - const TableResultSet result_type, - const bool table_valid); -void AssignTableBindTemplates(bind_vector &tabs); -void SetupTableQResInfo(QResultClass *res, EnvironmentClass *env); -void 
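
GetCatalogData, prototyped above, is the shared engine behind the deleted SQLTables and SQLColumns paths; its definition later in this hunk reduces to the following sequence (a condensed restatement, with env standing in for the EnvironmentClass* derived from the statement's connection):

    ExecuteQuery(SC_get_conn(stmt), reinterpret_cast<HSTMT*>(&sub_stmt), query);
    bind_vector binds;
    (*populate_binds)(binds);                        // AssignTableBindTemplates or AssignColumnBindTemplates
    for (const auto& b : binds)
        b->BindColumn(sub_stmt);
    QResultClass* res = SetupQResult(binds, stmt, sub_stmt,
                                     static_cast<int>(binds.size()));
    (*setup_qres_info)(res, env);                    // names and types the result fields
    SetTableTuples(res, res_type, binds, table_type, stmt, sub_stmt, list_of_columns);
    CleanUp(stmt, sub_stmt, SQL_SUCCESS);
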
SetTableTuples(QResultClass *res, const TableResultSet res_type, - const bind_vector &bind_tbl, std::string &table_type, - StatementClass *stmt, StatementClass *tbl_stmt, - std::vector< std::string > *list_of_columns = NULL); - -// Table specific function declarations -void split(const std::string &input, const std::string &delim, - std::vector< std::string > &output) { - size_t start = 0; - size_t end = input.find(delim); - while (end != std::string::npos) { - output.push_back(input.substr(start, end - start)); - start = end + delim.length(); - end = input.find(delim, start); - } - output.push_back(input.substr(start, end)); -} - -// TODO #324 (SQL Plugin)- Fix patterns and escape characters for this -void GenerateTableQuery(std::string &tables_query, const UWORD flag, - const std::string &table_name_value, - const TableResultSet result_type, - const bool table_valid) { - bool search_pattern = (~flag & PODBC_NOT_SEARCH_PATTERN); - tables_query = "SHOW TABLES LIKE "; - if (table_valid && (table_name_value != "") - && (result_type == TableResultSet::All)) - tables_query += - search_pattern ? table_name_value : "^" + table_name_value + "$"; - else - tables_query += "%"; -} - -// In case of unique_ptr's, using push_back (over emplace_back) is preferred in -// C++14 and higher -void AssignTableBindTemplates(bind_vector &tabs) { - tabs.reserve(TABLE_TEMPLATE_COUNT); - tabs.push_back(_SQLCHAR_(false, 1, EMPTY_VARCHAR)); // TABLE_CAT 1 - tabs.push_back(_SQLCHAR_(false, 2, EMPTY_VARCHAR)); // TABLE_SCHEM 2 - tabs.push_back(_SQLCHAR_(false, 3, EMPTY_VARCHAR)); // TABLE_NAME 3 - tabs.push_back(_SQLCHAR_(false, 4, EMPTY_VARCHAR)); // TABLE_TYPE 4 - tabs.push_back(_SQLCHAR_(true, 5)); // REMARKS 5 -} - -void SetupTableQResInfo(QResultClass *res, EnvironmentClass *env) { - if (EN_is_odbc3(env)) { - QR_set_field_info_v(res, TABLES_CATALOG_NAME, TABLE_CAT, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, TABLES_SCHEMA_NAME, TABLE_SCHEM, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - } else { - QR_set_field_info_v(res, TABLES_CATALOG_NAME, TABLE_QUALIFIER, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, TABLES_SCHEMA_NAME, TABLE_OWNER, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - } - QR_set_field_info_v(res, TABLES_TABLE_NAME, TABLE_NAME, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABLES_TABLE_TYPE, TABLE_TYPE, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, TABLES_REMARKS, REMARKS, OPENSEARCH_TYPE_VARCHAR, - INFO_VARCHAR_SIZE); -} - -void SetTableTuples(QResultClass *res, const TableResultSet res_type, - const bind_vector &bind_tbl, std::string &table_type, - StatementClass *stmt, StatementClass *tbl_stmt, - std::vector< std::string > *list_of_columns) { - auto CheckResult = [&](const auto &res) { - if (res != SQL_NO_DATA_FOUND) { - SC_full_error_copy(stmt, tbl_stmt, FALSE); - throw std::runtime_error( - std::string("Failed to fetch data after query. Error code :" - + std::to_string(res)) - .c_str()); - } - }; - auto AssignData = [&](auto *res, const auto &binds) { - TupleField *tuple = QR_AddNew(res); - // Since we do not support catalogs, we will return an empty string for - // catalog names. This is required for Excel for Mac, which uses this - // information for its Data Preview window. 
- std::string catalog(""); - bind_tbl[TABLES_CATALOG_NAME]->UpdateData((void *)catalog.c_str(), 0); - - // TODO #630 - Revisit logic of adding tuples for SQLTables & SQLColumns - for (size_t i = 0; i < binds.size(); i++) { - // Add tuples for SQLColumns - if (binds.size() > COLUMNS_SQL_DATA_TYPE) { - // Add data type for data loading issue in Power BI Desktop - auto data_type = data_name_data_type_map - .find(bind_tbl[COLUMNS_TYPE_NAME]->AsString())->second; - if (i == COLUMNS_DATA_TYPE) { - set_tuplefield_int2(&tuple[COLUMNS_DATA_TYPE], - static_cast< short >(data_type)); - } else if (i == COLUMNS_SQL_DATA_TYPE) { - set_tuplefield_int2(&tuple[COLUMNS_SQL_DATA_TYPE], - static_cast< short >(data_type)); - } else { - binds[i]->AssignData(&tuple[i]); - } - } - // Add tuples for SQLTables - else { - binds[i]->AssignData(&tuple[i]); - } - } - }; - - // General case - if (res_type == TableResultSet::All) { - RETCODE result = SQL_NO_DATA_FOUND; - int ordinal_position = 0; - while (SQL_SUCCEEDED(result = OPENSEARCHAPI_Fetch(tbl_stmt))) { - if (bind_tbl[TABLES_TABLE_TYPE]->AsString() == "BASE TABLE") { - std::string table("TABLE"); - bind_tbl[TABLES_TABLE_TYPE]->UpdateData((void *)table.c_str(), - table.length()); - } - if (list_of_columns != NULL && !list_of_columns->empty()) { - if (std::find(list_of_columns->begin(), list_of_columns->end(), - bind_tbl[COLUMNS_COLUMN_NAME]->AsString()) - != list_of_columns->end()) { - ordinal_position++; - bind_tbl[COLUMNS_ORDINAL_POSITION]->UpdateData( - &ordinal_position, 0); - AssignData(res, bind_tbl); - } - } else { - AssignData(res, bind_tbl); - } - } - CheckResult(result); - } else if (res_type == TableResultSet::TableLookUp) { - // Get accepted table types - std::vector< std::string > table_types; - table_type.erase( - std::remove(table_type.begin(), table_type.end(), '\''), - table_type.end()); - split(table_type, ",", table_types); - - // Loop through all data - RETCODE result = SQL_NO_DATA_FOUND; - while (SQL_SUCCEEDED(result = OPENSEARCHAPI_Fetch(tbl_stmt))) { - // Replace BASE TABLE with TABLE for Excel & Power BI SQLTables call - if (bind_tbl[TABLES_TABLE_TYPE]->AsString() == "BASE TABLE") { - std::string table("TABLE"); - bind_tbl[TABLES_TABLE_TYPE]->UpdateData((void *)table.c_str(), - table.length()); - } - if (std::find(table_types.begin(), table_types.end(), - bind_tbl[TABLES_TABLE_TYPE]->AsString()) - != table_types.end()) { - AssignData(res, bind_tbl); - } - } - - CheckResult(result); - - } - // Special cases - only need single grab for this one - else { - RETCODE result; - if (!SQL_SUCCEEDED(result = OPENSEARCHAPI_Fetch(tbl_stmt))) { - SC_full_error_copy(stmt, tbl_stmt, FALSE); - throw std::runtime_error( - std::string("Failed to fetch data after query. Error code :" - + std::to_string(result)) - .c_str()); - } - - // Get index of result type of interest - size_t idx = NUM_OF_TABLES_FIELDS; - switch (res_type) { - case TableResultSet::TableTypes: - idx = TABLES_TABLE_TYPE; - break; - default: - // This should not be possible, handle it anyway - throw std::runtime_error( - "Result type is not an expected type."); - } - - // Get new tuple and assign index of interest (NULL others) - // TODO #324 (SQL Plugin)- Should these be unique? 
- TupleField *tuple = QR_AddNew(res); - for (size_t i = 0; i < bind_tbl.size(); i++) { - if (i == idx) - bind_tbl[i]->AssignData(&tuple[i]); - else - set_tuplefield_string(&tuple[i], NULL_STRING); - } - } -} - -// Column specific function definitions -void SetupColumnQResInfo(QResultClass *res, EnvironmentClass *unused); -void GenerateColumnQuery(std::string &query, const std::string &table_name, - const std::string &column_name, const bool table_valid, - const bool column_valid, const UWORD flag); -void AssignColumnBindTemplates(bind_vector &cols); - -// Column Specific function declarations -void SetupColumnQResInfo(QResultClass *res, EnvironmentClass *unused) { - (void)(unused); - - QR_set_field_info_v(res, COLUMNS_CATALOG_NAME, TABLE_CAT, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLUMNS_SCHEMA_NAME, TABLE_SCHEM, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLUMNS_TABLE_NAME, TABLE_NAME, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLUMNS_COLUMN_NAME, COLUMN_NAME, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLUMNS_DATA_TYPE, DATA_TYPE, OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, COLUMNS_TYPE_NAME, TYPE_NAME, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, COLUMNS_PRECISION, COLUMN_SIZE, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, COLUMNS_LENGTH, BUFFER_LENGTH, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, COLUMNS_SCALE, DECIMAL_DIGITS, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, COLUMNS_RADIX, NUM_PREC_RADIX, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, COLUMNS_NULLABLE, NULLABLE, OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, COLUMNS_REMARKS, REMARKS, OPENSEARCH_TYPE_VARCHAR, - INFO_VARCHAR_SIZE); - QR_set_field_info_v(res, COLUMNS_COLUMN_DEF, COLUMN_DEF, - OPENSEARCH_TYPE_VARCHAR, - INFO_VARCHAR_SIZE); - QR_set_field_info_v(res, COLUMNS_SQL_DATA_TYPE, SQL_DATA_TYPE, - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, COLUMNS_SQL_DATETIME_SUB, SQL_DATETIME_SUB, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, COLUMNS_CHAR_OCTET_LENGTH, CHAR_OCTET_LENGTH, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, COLUMNS_ORDINAL_POSITION, ORDINAL_POSITION, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, COLUMNS_IS_NULLABLE, IS_NULLABLE, - OPENSEARCH_TYPE_VARCHAR, - INFO_VARCHAR_SIZE); -} - -// TODO #325 (SQL Plugin)- Fix patterns and escape characters for this -void GenerateColumnQuery(std::string &query, const std::string &table_name, - const std::string &column_name, const bool table_valid, - const bool column_valid, const UWORD flag) { - bool search_pattern = (~flag & PODBC_NOT_SEARCH_PATTERN); - query = "DESCRIBE TABLES LIKE "; - query += table_valid - ? (search_pattern ? 
table_name : "^" + table_name + "$") - : "%"; - if (column_valid) - query += " COLUMNS LIKE " + column_name; -} - -// In case of unique_ptr's, using push_back (over emplace_back) is preferred in -// C++14 and higher -void AssignColumnBindTemplates(bind_vector &cols) { - cols.reserve(COLUMN_TEMPLATE_COUNT); - cols.push_back(_SQLCHAR_(true, 1)); // TABLE_CAT 1 - cols.push_back(_SQLCHAR_(true, 2)); // TABLE_SCHEM 2 - cols.push_back(_SQLCHAR_(false, 3, EMPTY_VARCHAR)); // TABLE_NAME 3 - cols.push_back(_SQLCHAR_(false, 4, EMPTY_VARCHAR)); // COLUMN_NAME 4 - cols.push_back( - _SQLINT2_(false, 5, DEFAULT_TYPE_INT)); // DATA_TYPE 5 - cols.push_back( - _SQLCHAR_(false, 6, DEFAULT_TYPE_STR)); // TYPE_NAME 6 - cols.push_back(_SQLINT4_(true, 7)); // COLUMN_SIZE 7 - cols.push_back(_SQLINT4_(true, 8)); // BUFFER_LENGTH 8 - cols.push_back(_SQLINT2_(true, 9)); // DECIMAL_DIGITS 9 - cols.push_back(_SQLINT2_(true, 10)); // NUM_PREC_RADIX 10 - cols.push_back( - _SQLINT2_(false, 11, SQL_NULLABLE_UNKNOWN)); // NULLABLE 11 - cols.push_back(_SQLCHAR_(true, 12)); // REMARKS 12 - cols.push_back(_SQLCHAR_(true, 13)); // COLUMN_DEF 13 - cols.push_back( - _SQLINT2_(false, 14, DEFAULT_TYPE_INT)); // SQL_DATA_TYPE 14 - cols.push_back(_SQLINT2_(true, 15)); // SQL_DATETIME_SUB 15 - cols.push_back(_SQLINT4_(true, 16)); // CHAR_OCTET_LENGTH 16 - cols.push_back(_SQLINT4_(false, 17, -1)); // ORDINAL_POSITION 17 - cols.push_back(_SQLCHAR_(true, 18)); // IS_NULLABLE 18 -} - -void GetCatalogData(const std::string &query, StatementClass *stmt, - StatementClass *sub_stmt, const TableResultSet res_type, - std::string &table_type, - void (*populate_binds)(bind_vector &), - void (*setup_qres_info)(QResultClass *, EnvironmentClass *), - std::vector< std::string > *list_of_columns) { - // Execute query - ExecuteQuery(SC_get_conn(stmt), reinterpret_cast< HSTMT * >(&sub_stmt), - query); - - // Bind Columns - bind_vector binds; - (*populate_binds)(binds); - std::for_each(binds.begin(), binds.end(), - [&](const auto &b) { b->BindColumn(sub_stmt); }); - QResultClass *res = - SetupQResult(binds, stmt, sub_stmt, static_cast< int >(binds.size())); - - // Setup QResultClass - (*setup_qres_info)( - res, static_cast< EnvironmentClass * >(CC_get_env(SC_get_conn(stmt)))); - SetTableTuples(res, res_type, binds, table_type, stmt, sub_stmt, - list_of_columns); - CleanUp(stmt, sub_stmt, SQL_SUCCESS); -} - -RETCODE SQL_API -OPENSEARCHAPI_Tables(HSTMT hstmt, const SQLCHAR *catalog_name_sql, - const SQLSMALLINT catalog_name_sz, const SQLCHAR *schema_name_sql, - const SQLSMALLINT schema_name_sz, const SQLCHAR *table_name_sql, - const SQLSMALLINT table_name_sz, const SQLCHAR *table_type_sql, - const SQLSMALLINT table_type_sz, const UWORD flag) { - CSTR func = "OPENSEARCHAPI_Tables"; - StatementClass *stmt = (StatementClass *)hstmt; - StatementClass *tbl_stmt = NULL; - RETCODE result = SQL_ERROR; - if ((result = SC_initialize_and_recycle(stmt)) != SQL_SUCCESS) - return result; - - try { - // Convert const SQLCHAR*'s to c++ strings - std::string catalog_name, schema_name, table_name, table_type; - bool catalog_valid, schema_valid, table_valid, table_type_valid; - ConvertToString(catalog_name, catalog_valid, catalog_name_sql, - catalog_name_sz); - ConvertToString(schema_name, schema_valid, schema_name_sql, - schema_name_sz); - ConvertToString(table_name, table_valid, table_name_sql, table_name_sz); - ConvertToString(table_type, table_type_valid, table_type_sql, - table_type_sz); - - // Special semantics for the CatalogName, SchemaName, and TableType - // 
arguments - TableResultSet result_type = TableResultSet::All; - - if (catalog_name == SQL_ALL_CATALOGS) { - if (schema_valid && table_valid && (table_name == "") - && (schema_name == "")) { - std::string error_msg("Catalogs not supported."); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - error_msg.c_str(), func); - CleanUp(stmt, tbl_stmt); - return SQL_ERROR; - } - // result_type = TableResultSet::Catalog; - } - if (schema_name == SQL_ALL_SCHEMAS) { - if (catalog_valid && table_valid && (table_name == "") - && (catalog_name == "")) { - std::string error_msg("Schemas not supported."); - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - error_msg.c_str(), func); - CleanUp(stmt, tbl_stmt); - return SQL_ERROR; - } - // result_type = TableResultSet::Schema; - } - if (table_type_valid && (table_type == SQL_ALL_TABLE_TYPES)) { - if (catalog_valid && table_valid && schema_valid - && (table_name == "") && (catalog_name == "") - && (schema_name == "")) - result_type = TableResultSet::TableTypes; - } - if (table_type_valid && (table_type != SQL_ALL_TABLE_TYPES)) { - result_type = TableResultSet::TableLookUp; - } - - // Create query to find out list - std::string query; - GenerateTableQuery(query, flag, table_name, result_type, table_valid); - - // TODO #324 (SQL Plugin)- evaluate catalog & schema support - GetCatalogData(query, stmt, tbl_stmt, result_type, table_type, - AssignTableBindTemplates, SetupTableQResInfo); - return SQL_SUCCESS; - } catch (std::bad_alloc &e) { - std::string error_msg = std::string("Bad allocation exception: '") - + e.what() + std::string("'."); - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); - } catch (std::exception &e) { - std::string error_msg = - std::string("Generic exception: '") + e.what() + std::string("'."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } catch (...) 
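
Condensing the branches above, the deleted OPENSEARCHAPI_Tables maps the ODBC special arguments onto a result-set mode as follows:

    // catalog == SQL_ALL_CATALOGS    with empty schema/table names  -> "Catalogs not supported." error
    // schema  == SQL_ALL_SCHEMAS     with empty catalog/table names -> "Schemas not supported." error
    // type    == SQL_ALL_TABLE_TYPES with all three names empty     -> TableResultSet::TableTypes
    // any other explicit type list                                  -> TableResultSet::TableLookUp
    // everything else                                               -> TableResultSet::All
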
{ - std::string error_msg = std::string("Unknown exception raised."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } - CleanUp(stmt, tbl_stmt); - return SQL_ERROR; -} - -RETCODE SQL_API -OPENSEARCHAPI_Columns(HSTMT hstmt, const SQLCHAR *catalog_name_sql, - const SQLSMALLINT catalog_name_sz, const SQLCHAR *schema_name_sql, - const SQLSMALLINT schema_name_sz, const SQLCHAR *table_name_sql, - const SQLSMALLINT table_name_sz, const SQLCHAR *column_name_sql, - const SQLSMALLINT column_name_sz, const UWORD flag, - const OID reloid, const Int2 attnum) { - (void)(reloid); - (void)(attnum); - - CSTR func = "OPENSEARCHAPI_Columns"; - - // Declare outside of try so we can clean them up properly if an exception - // occurs - StatementClass *stmt = (StatementClass *)hstmt; - StatementClass *col_stmt = NULL; - RETCODE result = SQL_ERROR; - if ((result = SC_initialize_and_recycle(stmt)) != SQL_SUCCESS) - return result; - - try { - // Convert const SQLCHAR *'s to strings - std::string catalog_name, schema_name, table_name, column_name; - bool catalog_valid, schema_valid, table_valid, column_valid; - ConvertToString(catalog_name, catalog_valid, catalog_name_sql, - catalog_name_sz); - ConvertToString(schema_name, schema_valid, schema_name_sql, - schema_name_sz); - ConvertToString(table_name, table_valid, table_name_sql, table_name_sz); - ConvertToString(column_name, column_valid, column_name_sql, - column_name_sz); - - // Generate query - std::string query; - GenerateColumnQuery(query, table_name, column_name, table_valid, - column_valid, flag); - - // Get list of columns with SELECT * query since columns doesn't match - // with DESCRIBE & SELECT * query - std::vector< std::string > list_of_columns; - if (table_valid) { - ConnectionClass *conn = SC_get_conn(stmt); - list_of_columns = - OpenSearchGetColumnsWithSelectQuery(conn->opensearchconn, table_name); - } - - // TODO #324 (SQL Plugin)- evaluate catalog & schema support - - // Execute query - std::string table_type = ""; - GetCatalogData(query, stmt, col_stmt, TableResultSet::All, table_type, - AssignColumnBindTemplates, SetupColumnQResInfo, - &list_of_columns); - return SQL_SUCCESS; - } catch (std::bad_alloc &e) { - std::string error_msg = std::string("Bad allocation exception: '") - + e.what() + std::string("'."); - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); - } catch (std::exception &e) { - std::string error_msg = - std::string("Generic exception: '") + e.what() + std::string("'."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } catch (...) 
{ - std::string error_msg("Unknown exception raised."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } - CleanUp(stmt, col_stmt); - return SQL_ERROR; -} -void CleanUp_GetTypeInfo(StatementClass *stmt, const RETCODE ret = SQL_ERROR) { - stmt->status = STMT_FINISHED; - stmt->currTuple = -1; - if (SQL_SUCCEEDED(ret)) - SC_set_rowset_start(stmt, -1, FALSE); - else - SC_set_Result(stmt, NULL); - SC_set_current_col(stmt, -1); -} - -void SetupTypeQResInfo(QResultClass *res) { - QR_set_field_info_v(res, GETTYPE_TYPE_NAME, TYPE_NAME, - OPENSEARCH_TYPE_VARCHAR, - MAX_INFO_STRING); - QR_set_field_info_v(res, GETTYPE_DATA_TYPE, DATA_TYPE, OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_COLUMN_SIZE, PRECISION, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, GETTYPE_LITERAL_PREFIX, LITERAL_PREFIX, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, GETTYPE_LITERAL_SUFFIX, LITERAL_SUFFIX, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, GETTYPE_CREATE_PARAMS, CREATE_PARAMS, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, GETTYPE_NULLABLE, NULLABLE, OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_CASE_SENSITIVE, CASE_SENSITIVE, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_SEARCHABLE, SEARCHABLE, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_UNSIGNED_ATTRIBUTE, UNSIGNED_ATTRIBUTE, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_FIXED_PREC_SCALE, FIXED_PREC_SCALE, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_AUTO_UNIQUE_VALUE, AUTO_INCREMENT, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_LOCAL_TYPE_NAME, LOCAL_TYPE_NAME, - OPENSEARCH_TYPE_VARCHAR, MAX_INFO_STRING); - QR_set_field_info_v(res, GETTYPE_MINIMUM_SCALE, MINIMUM_SCALE, - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, GETTYPE_MAXIMUM_SCALE, MAXIMUM_SCALE, - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, GETTYPE_SQL_DATA_TYPE, SQL_DATA_TYPE, - OPENSEARCH_TYPE_INT2, - 2); - QR_set_field_info_v(res, GETTYPE_SQL_DATETIME_SUB, SQL_DATETIME_SUB, - OPENSEARCH_TYPE_INT2, 2); - QR_set_field_info_v(res, GETTYPE_NUM_PREC_RADIX, NUM_PREC_RADIX, - OPENSEARCH_TYPE_INT4, 4); - QR_set_field_info_v(res, GETTYPE_INTERVAL_PRECISION, INTERVAL_PRECISION, - OPENSEARCH_TYPE_INT2, 2); -} - -RETCODE SetTypeResult(ConnectionClass *conn, StatementClass *stmt, - QResultClass *res, int esType, int sqlType) { - TupleField *tuple; - - if (tuple = QR_AddNew(res), NULL == tuple) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, "Couldn't QR_AddNew.", - "SetTypeResult"); - CleanUp_GetTypeInfo(stmt, SQL_ERROR); - return SQL_ERROR; - } - - set_tuplefield_string(&tuple[GETTYPE_TYPE_NAME], - opensearchtype_attr_to_name(conn, esType, -1, FALSE)); - set_tuplefield_int2(&tuple[GETTYPE_NULLABLE], - opensearchtype_nullable(conn, esType)); - - set_tuplefield_int2(&tuple[GETTYPE_DATA_TYPE], - static_cast< short >(sqlType)); - set_tuplefield_int2(&tuple[GETTYPE_CASE_SENSITIVE], - opensearchtype_case_sensitive(conn, esType)); - set_tuplefield_int2(&tuple[GETTYPE_SEARCHABLE], - opensearchtype_searchable(conn, esType)); - set_tuplefield_int2(&tuple[GETTYPE_FIXED_PREC_SCALE], - opensearchtype_money(conn, esType)); - - // Localized data-source dependent data type name (always NULL) - set_tuplefield_null(&tuple[GETTYPE_LOCAL_TYPE_NAME]); - - // These values can be NULL - set_nullfield_int4( - &tuple[GETTYPE_COLUMN_SIZE], - opensearchtype_attr_column_size( - conn, esType, 
OPENSEARCH_ATP_UNSET, - OPENSEARCH_ADT_UNSET, OPENSEARCH_UNKNOWNS_UNSET)); - set_nullfield_string(&tuple[GETTYPE_LITERAL_PREFIX], - opensearchtype_literal_prefix(conn, esType)); - set_nullfield_string(&tuple[GETTYPE_LITERAL_SUFFIX], - opensearchtype_literal_suffix(conn, esType)); - set_nullfield_string(&tuple[GETTYPE_CREATE_PARAMS], - opensearchtype_create_params(conn, esType)); - set_nullfield_int2(&tuple[GETTYPE_UNSIGNED_ATTRIBUTE], - opensearchtype_unsigned(conn, esType)); - set_nullfield_int2(&tuple[GETTYPE_AUTO_UNIQUE_VALUE], - opensearchtype_auto_increment(conn, esType)); - set_nullfield_int2(&tuple[GETTYPE_MINIMUM_SCALE], - opensearchtype_min_decimal_digits(conn, esType)); - set_nullfield_int2(&tuple[GETTYPE_MAXIMUM_SCALE], - opensearchtype_max_decimal_digits(conn, esType)); - set_tuplefield_int2(&tuple[GETTYPE_SQL_DATA_TYPE], - static_cast< short >(sqlType)); - set_nullfield_int2(&tuple[GETTYPE_SQL_DATETIME_SUB], - opensearchtype_attr_to_datetime_sub( - conn, esType, OPENSEARCH_ATP_UNSET)); - set_nullfield_int4(&tuple[GETTYPE_NUM_PREC_RADIX], - opensearchtype_radix(conn, esType)); - set_nullfield_int4(&tuple[GETTYPE_INTERVAL_PRECISION], 0); - - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_GetTypeInfo(HSTMT hstmt, SQLSMALLINT fSqlType) { - CSTR func = "OPENSEARCHAPI_GetTypeInfo"; - StatementClass *stmt = (StatementClass *)hstmt; - ConnectionClass *conn; - conn = SC_get_conn(stmt); - QResultClass *res = NULL; - - int result_cols; - RETCODE result = SQL_ERROR; - - if (result = SC_initialize_and_recycle(stmt), SQL_SUCCESS != result) - return result; - - try { - if (res = QR_Constructor(), !res) { - SC_set_error(stmt, STMT_INTERNAL_ERROR, "Error creating result.", - func); - return SQL_ERROR; - } - SC_set_Result(stmt, res); - - result_cols = NUM_OF_GETTYPE_FIELDS; - extend_column_bindings(SC_get_ARDF(stmt), - static_cast< SQLSMALLINT >(result_cols)); - - stmt->catalog_result = TRUE; - QR_set_num_fields(res, result_cols); - SetupTypeQResInfo(res); - - if (fSqlType == SQL_ALL_TYPES) { - for (std::pair< int, std::vector< int > > sqlType : - sql_opensearch_type_map) { - for (auto const &openSearchType : sqlType.second) { - result = - SetTypeResult(conn, stmt, res, openSearchType, sqlType.first); - } - } - } else { - if (sql_opensearch_type_map.count(fSqlType) > 0) { - for (auto openSearchType : - sql_opensearch_type_map.at(fSqlType)) { - result = SetTypeResult(conn, stmt, res, openSearchType, fSqlType); - } - } - } - result = SQL_SUCCESS; - - } catch (std::bad_alloc &e) { - std::string error_msg = std::string("Bad allocation exception: '") - + e.what() + std::string("'."); - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, error_msg.c_str(), func); - } catch (std::exception &e) { - std::string error_msg = - std::string("Generic exception: '") + e.what() + std::string("'."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } catch (...) 
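
The SQL_ALL_TYPES branch above emits one result row per OpenSearch type by walking sql_opensearch_type_map; the single-type branch does the same for one key. The expansion, condensed with the same names:

    for (const auto& entry : sql_opensearch_type_map)
        for (int openSearchType : entry.second)
            SetTypeResult(conn, stmt, res, openSearchType, entry.first);
    // e.g. SQL_REAL contributes rows for OPENSEARCH_TYPE_HALF_FLOAT and OPENSEARCH_TYPE_FLOAT4
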
{ - std::string error_msg("Unknown exception raised."); - SC_set_error(stmt, STMT_INTERNAL_ERROR, error_msg.c_str(), func); - } - - CleanUp_GetTypeInfo(stmt, result); - return result; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_info.h b/sql-odbc/src/sqlodbc/opensearch_info.h deleted file mode 100644 index bbe1906518..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_info.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef __OPENSEARCH_INFO_H__ -#define __OPENSEARCH_INFO_H__ -#include "opensearch_helper.h" -#include "opensearch_odbc.h" -#include "unicode_support.h" - -#ifndef WIN32 -#include -#endif - -#include "bind.h" -#include "catfunc.h" -#include "dlg_specific.h" -#include "environ.h" -#include "misc.h" -#include "multibyte.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_types.h" -#include "qresult.h" -#include "statement.h" -#include "tuple.h" - -// C Interface -#ifdef __cplusplus -extern "C" { -#endif -RETCODE SQL_API OPENSEARCHAPI_Tables(HSTMT hstmt, const SQLCHAR* catalog_name_sql, - const SQLSMALLINT catalog_name_sz, - const SQLCHAR* schema_name_sql, - const SQLSMALLINT schema_name_sz, - const SQLCHAR* table_name_sql, - const SQLSMALLINT table_name_sz, - const SQLCHAR* table_type_sql, - const SQLSMALLINT table_type_sz, const UWORD flag); -RETCODE SQL_API -OPENSEARCHAPI_Columns(HSTMT hstmt, const SQLCHAR* catalog_name_sql, - const SQLSMALLINT catalog_name_sz, const SQLCHAR* schema_name_sql, - const SQLSMALLINT schema_name_sz, const SQLCHAR* table_name_sql, - const SQLSMALLINT table_name_sz, const SQLCHAR* column_name_sql, - const SQLSMALLINT column_name_sz, const UWORD flag, - const OID reloid, const Int2 attnum); - -RETCODE SQL_API OPENSEARCHAPI_GetTypeInfo(HSTMT hstmt, SQLSMALLINT fSqlType); -#ifdef __cplusplus -} -#endif - -#endif /* __OPENSEARCH_INFO_H__ */ diff --git a/sql-odbc/src/sqlodbc/opensearch_odbc.c b/sql-odbc/src/sqlodbc/opensearch_odbc.c deleted file mode 100644 index ce753eb3d6..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_odbc.c +++ /dev/null @@ -1,159 +0,0 @@ -#ifdef WIN32 -#ifdef _DEBUG -#include -#endif /* _DEBUG */ -#endif /* WIN32 */ -#include - -#include "dlg_specific.h" -#include "environ.h" -#include "opensearch_odbc.h" -#include "misc.h" - -#ifdef WIN32 -#include "loadlib.h" -#else -#include -#endif - -void unused_vargs(int cnt, ...) { -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-value" -#endif // __APPLE__ - (void)(cnt); -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -} - -static int exeesm = 0; -BOOL isMsAccess(void) { - return 1 == exeesm; -} -BOOL isMsQuery(void) { - return 2 == exeesm; -} -BOOL isSqlServr(void) { - return 3 == exeesm; -} - -RETCODE SQL_API SQLDummyOrdinal(void); - -extern void *conns_cs, *common_cs; - -int initialize_global_cs(void) { - static int init = 1; - - if (!init) - return 0; - init = 0; -#ifdef WIN32 -#ifdef _DEBUG -#ifdef _MEMORY_DEBUG_ - _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF); -#endif /* _MEMORY_DEBUG_ */ -#endif /* _DEBUG */ -#endif /* WIN32 */ -#ifdef POSIX_THREADMUTEX_SUPPORT - getMutexAttr(); -#endif /* POSIX_THREADMUTEX_SUPPORT */ - InitializeLogging(); - INIT_CONNS_CS; - INIT_COMMON_CS; - - return 0; -} - -static void finalize_global_cs(void) { - DELETE_COMMON_CS; - DELETE_CONNS_CS; - FinalizeLogging(); -#ifdef _DEBUG -#ifdef _MEMORY_DEBUG_ - // _CrtDumpMemoryLeaks(); -#endif /* _MEMORY_DEBUG_ */ -#endif /* _DEBUG */ -} - -#ifdef WIN32 -HINSTANCE s_hModule; /* Saved module handle. 
*/ -/* This is where the Driver Manager attaches to this Driver */ -BOOL WINAPI DllMain(HANDLE hInst, ULONG ul_reason_for_call, LPVOID lpReserved) { - const char *exename = GetExeProgramName(); - - switch (ul_reason_for_call) { - case DLL_PROCESS_ATTACH: - s_hModule = hInst; /* Save for dialog boxes */ - - if (stricmp(exename, "msaccess") == 0) - exeesm = 1; - else if (strnicmp(exename, "msqry", 5) == 0) - exeesm = 2; - else if (strnicmp(exename, "sqlservr", 8) == 0) - exeesm = 3; - initialize_global_cs(); - MYLOG(OPENSEARCH_DEBUG, "exe name=%s\n", exename); - break; - - case DLL_THREAD_ATTACH: - break; - - case DLL_PROCESS_DETACH: - MYLOG(OPENSEARCH_DEBUG, "DETACHING %s\n", DRIVER_FILE_NAME); - CleanupDelayLoadedDLLs(); - /* my(q)log is unavailable from here */ - finalize_global_cs(); - return TRUE; - - case DLL_THREAD_DETACH: - break; - - default: - break; - } - - return TRUE; - - UNREFERENCED_PARAMETER(lpReserved); -} - -#else /* not WIN32 */ - -#if defined(__GNUC__) || defined(__SUNPRO_C) - -/* Shared library initializer and destructor, using gcc's attributes */ - -static void __attribute__((constructor)) elasticodbc_init(void) { - initialize_global_cs(); -} - -static void __attribute__((destructor)) elasticodbc_fini(void) { - finalize_global_cs(); -} - -#else /* not __GNUC__ */ - -/* Shared library initialization on non-gcc systems. */ -BOOL _init(void) { - initialize_global_cs(); - return TRUE; -} - -BOOL _fini(void) { - finalize_global_cs(); - return TRUE; -} -#endif /* not __GNUC__ */ -#endif /* not WIN32 */ - -/* - * This function is used to cause the Driver Manager to - * call functions by number rather than name, which is faster. - * The ordinal value of this function must be 199 to have the - * Driver Manager do this. Also, the ordinal values of the - * functions must match the value of fFunction in SQLGetFunctions() - */ -RETCODE SQL_API SQLDummyOrdinal(void) { - return SQL_SUCCESS; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_odbc.h b/sql-odbc/src/sqlodbc/opensearch_odbc.h deleted file mode 100644 index 23ba2c3f68..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_odbc.h +++ /dev/null @@ -1,663 +0,0 @@ -#ifndef __OPENSEARCHODBC_H__ -#define __OPENSEARCHODBC_H__ -#include - -/* #define __MS_REPORTS_ANSI_CHAR__ */ -void unused_vargs(int cnt, ...); -#define UNUSED(...) 
unused_vargs(0, __VA_ARGS__) - -#ifdef WIN32 -#define WIN32_LEAN_AND_MEAN -#include -#elif __APPLE__ - -#elif __linux__ -#include "linux/kconfig.h" -#endif - -#include /* for FILE* pointers: see GLOBAL_VALUES */ - -#include "version.h" - -#ifdef WIN32 -#ifdef _DEBUG -#ifndef _MEMORY_DEBUG_ -#include -#if (_MSC_VER < 1400) /* in case of VC7 or under */ -#include -#endif /* _MSC_VER */ -#define _CRTDBG_MAP_ALLOC -#include -#endif /* _MEMORY_DEBUG_ */ -#else /* _DEBUG */ -#include -#endif /* _DEBUG */ -#else /* WIN32 */ -#include -#endif /* WIN32 */ - -#if defined(__GNUC__) || defined(__IBMC__) -#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 404 -#define OPENSEARCH_PRINTF_ATTRIBUTE gnu_printf -#else -#define OPENSEARCH_PRINTF_ATTRIBUTE printf -#endif -#define opensearch_attribute_printf(f, a) \ - __attribute__((format(OPENSEARCH_PRINTF_ATTRIBUTE, f, a))) -#else -#define __attribute__(x) -#define opensearch_attribute_printf(f, a) -#endif /* __GNUC__ || __IBMC__ */ - -#ifdef _MEMORY_DEBUG_ -void *esdebug_alloc(size_t); -void *esdebug_calloc(size_t, size_t); -void *esdebug_realloc(void *, size_t); -char *esdebug_strdup(const char *); -void *esdebug_memcpy(void *, const void *, size_t); -void *esdebug_memset(void *, int c, size_t); -char *esdebug_strcpy(char *, const char *); -char *esdebug_strncpy(char *, const char *, size_t); -char *esdebug_strncpy_null(char *, const char *, size_t); -void esdebug_free(void *); -void debug_memory_check(void); - -#ifdef WIN32 -#undef strdup -#endif /* WIN32 */ -#define malloc esdebug_alloc -#define realloc esdebug_realloc -#define calloc esdebug_calloc -#define strdup esdebug_strdup -#define free esdebug_free -#define strcpy esdebug_strcpy -#define strncpy esdebug_strncpy -/* #define strncpy_null esdebug_strncpy_null */ -#define memcpy esdebug_memcpy -#define memset esdebug_memset -#endif /* _MEMORY_DEBUG_ */ - -#ifdef WIN32 -#pragma warning(push) -#pragma warning(disable : 4201) // nonstandard extension used: nameless - // struct/union warning -#include -#pragma warning(pop) -#endif /* WIN32 */ -/* Must come before sql.h */ -#define ODBCVER 0x0351 - -/* - * Default NAMEDATALEN value in the server. The server can be compiled with - * a different value, but this will do. - */ -#define NAMEDATALEN_V73 64 - -#ifndef NAMESTORAGELEN -#define NAMESTORAGELEN 64 -#endif /* NAMESTORAGELEN */ - -#if defined(WIN32) || defined(WITH_UNIXODBC) || defined(WITH_IODBC) -#ifdef WIN32 -#pragma warning(push) -#pragma warning(disable : 4201) // nonstandard extension used: nameless - // struct/union warning -#endif // WIN32 -#include -#include -#include -#if WIN32 -#pragma warning(pop) -#endif // WIN32 -#if defined(WIN32) && (_MSC_VER < 1300) /* in case of VC6 or under */ -#define SQLLEN SQLINTEGER -#define SQLULEN SQLUINTEGER -#define SQLSETPOSIROW SQLUSMALLINT -/* VC6 bypasses 64bit mode. 
*/ -#define DWLP_USER DWL_USER -#define ULONG_PTR ULONG -#define LONG_PTR LONG -#define SetWindowLongPtr(hdlg, DWLP_USER, lParam) \ - SetWindowLong(hdlg, DWLP_USER, lParam) -#define GetWindowLongPtr(hdlg, DWLP_USER) GetWindowLong(hdlg, DWLP_USER); -#endif -#else -#include "iodbc.h" -#include "isql.h" -#include "isqlext.h" -#endif /* WIN32 */ - -#if defined(WIN32) -#include -#elif defined(WITH_UNIXODBC) -#include -#elif defined(WITH_IODBC) -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#define Int4 int -#define UInt4 unsigned int -#define Int2 short -#define UInt2 unsigned short -typedef SQLBIGINT Int8; -typedef UInt4 OID; - -#ifndef SQL_TRUE -#define SQL_TRUE TRUE -#endif /* SQL_TRUE */ -#ifndef SQL_FALSE -#define SQL_FALSE FALSE -#endif /* SQL_FALSE */ - -#define FORMAT_SMALLI "%d" /* SQLSMALLINT */ -#define FORMAT_USMALLI "%u" /* SQLUSMALLINT */ -#ifdef WIN32 -#ifndef SSIZE_T_DEFINED -#define ssize_t SSIZE_T -#define SSIZE_T_DEFINED -#endif /* SSIZE_T */ -#define FORMAT_SIZE_T "%Iu" /* size_t */ -#define FORMAT_SSIZE_T "%Id" /* ssize_t */ -#define FORMAT_INTEGER "%ld" /* SQLINTEGER */ -#define FORMAT_UINTEGER "%lu" /* SQLUINTEGER */ -#define FORMATI64 "%I64d" /* SQLBIGINT */ -#define FORMATI64U "%I64u" /* SQLUBIGINT */ -#ifdef _WIN64 -#define FORMAT_LEN "%I64d" /* SQLLEN */ -#define FORMAT_ULEN "%I64u" /* SQLULEN */ -#define FORMAT_POSIROW "%I64u" -#else /* _WIN64 */ -#define FORMAT_LEN "%ld" /* SQLLEN */ -#define FORMAT_ULEN "%lu" /* SQLULEN */ -#define FORMAT_POSIROW "%hu" -#endif /* _WIN64 */ -#else /* WIN32 */ -#define FORMAT_SIZE_T "%zu" /* size_t */ -#define FORMAT_SSIZE_T "%zd" /* ssize_t */ -#ifndef HAVE_SSIZE_T -typedef long ssize_t; -#endif /* HAVE_SSIZE_T */ - -#ifndef SIZEOF_VOID_P -#ifdef __APPLE__ -#define SIZEOF_VOID_P 8 -#else -#error "SIZEOF_VOID_P must be defined" -#endif // __APPLE__ -#endif // SIZEOF_VOID_P - -#ifndef SIZEOF_LONG -#ifdef __APPLE__ -#define SIZEOF_LONG 8 -#else -#error "SIZEOF_LONG must be defined" -#endif // __APPLE__ -#endif // SIZEOF_LONG - -#if (SIZEOF_VOID_P == SIZEOF_LONG) /* ILP32 or LP64 */ -typedef long LONG_PTR; -typedef unsigned long ULONG_PTR; -#elif defined(HAVE_LONG_LONG) /* LLP64 */ -typedef long long LONG_PTR; -typedef unsigned long long ULONG_PTR; -#else /* SIZEOF_VOID_P */ -#error appropriate long pointer type not found -#endif /* SIZEOF_VOID_P */ -#if (SIZEOF_LONG == 8) /* LP64 */ -#define FORMAT_INTEGER "%d" /* SQLINTEGER */ -#define FORMAT_UINTEGER "%u" /* SQLUINTEGER */ -#define FORMATI64 "%ld" /* SQLBIGINT */ -#define FORMATI64U "%lu" /* SQLUBIGINT */ -#if defined(WITH_UNIXODBC) && defined(BUILD_LEGACY_64_BIT_MODE) -#define FORMAT_LEN "%d" /* SQLLEN */ -#define FORMAT_ULEN "%u" /* SQLULEN */ -#else /* WITH_UNIXODBC */ -#define FORMAT_LEN "%ld" /* SQLLEN */ -#define FORMAT_ULEN "%lu" /* SQLULEN */ -#endif /* WITH_UNIXODBC */ -#else /* SIZEOF_LONG */ -#define FORMAT_INTEGER "%ld" /* SQLINTEGER */ -#define FORMAT_UINTEGER "%lu" /* SQLUINTEGER */ -#if defined(HAVE_LONG_LONG) -#define FORMATI64 "%lld" /* SQLBIGINT */ -#define FORMATI64U "%llu" /* SQLUBIGINT */ -#if (SIZEOF_VOID_P == 8) /* LLP64 */ -#define FORMAT_LEN "%lld" /* SQLLEN */ -#define FORMAT_ULEN "%llu" /* SQLULEN */ -#else /* SIZEOF_VOID_P ILP32 */ -#define FORMAT_LEN "%ld" /* SQLLEN */ -#define FORMAT_ULEN "%lu" /* SQLULEN */ -#endif /* SIZEOF_VOID_P */ -#else /* HAVE_LONG_LONG */ -#define FORMAT_LEN "%ld" /* SQLLEN */ -#define FORMAT_ULEN "%lu" /* SQLULEN */ -#endif /* HAVE_LONG_LONG */ -#endif /* SIZEOF_LONG */ - -#if (SIZEOF_VOID_P == 8) && 
!defined(WITH_IODBC) -#define FORMAT_POSIROW FORMAT_ULEN -#else -#define FORMAT_POSIROW "%u" -#endif - -#endif /* WIN32 */ - -#define CAST_PTR(type, ptr) (type)((LONG_PTR)(ptr)) -#define CAST_UPTR(type, ptr) (type)((ULONG_PTR)(ptr)) -#ifndef SQL_IS_LEN -#define SQL_IS_LEN (-1000) -#endif /* SQL_IS_LEN */ -#ifdef HAVE_SIGNED_CHAR -typedef signed char po_ind_t; -#else -typedef char po_ind_t; -#endif /* HAVE_SIGNED_CHAR */ - -#ifndef WIN32 -#if !defined(WITH_UNIXODBC) && !defined(WITH_IODBC) -typedef float SFLOAT; -typedef double SDOUBLE; -#endif /* WITH_UNIXODBC */ - -#ifndef CALLBACK -#define CALLBACK -#endif /* CALLBACK */ -#endif /* WIN32 */ - -#ifndef WIN32 -#define stricmp strcasecmp -#define strnicmp strncasecmp -#ifndef TRUE -#define TRUE (BOOL)1 -#endif /* TRUE */ -#ifndef FALSE -#define FALSE (BOOL)0 -#endif /* FALSE */ -#else - -#if (_MSC_VER < 1900) /* vc12 or under */ -#define POSIX_SNPRINTF_REQUIRED -#define snprintf posix_snprintf -extern int posix_snprintf(char *buf, size_t size, const char *format, ...); -#endif /* _MSC_VER */ -#ifndef strdup -#define strdup _strdup -#endif /* strdup */ -#define strnicmp _strnicmp -#define stricmp _stricmp -#endif /* WIN32 */ - -#define IS_NOT_SPACE(c) ((c) && !isspace((UCHAR)c)) - -#ifndef SQL_ATTR_APP_ROW_DESC -#define SQL_ATTR_APP_ROW_DESC 10010 -#endif -#ifndef SQL_ATTR_APP_PARAM_DESC -#define SQL_ATTR_APP_PARAM_DESC 10011 -#endif -#ifndef SQL_ATTR_IMP_ROW_DESC -#define SQL_ATTR_IMP_ROW_DESC 10012 -#endif -#ifndef SQL_ATTR_IMP_PARAM_DESC -#define SQL_ATTR_IMP_PARAM_DESC 10013 -#endif - -/* Driver stuff */ - -#define DRIVERNAME "OpenSearch ODBC" - -#define DBMS_NAME_UNICODE "OpenSearch Unicode" -#define DBMS_NAME_ANSI "OpenSearch ANSI" - -#define DRIVER_ODBC_VER "03.51" - -#ifdef UNICODE_SUPPORT -#define WCLEN sizeof(SQLWCHAR) -SQLULEN ucs2strlen(const SQLWCHAR *); -#else -#undef SQL_WCHAR -#undef SQL_WVARCHAR -#undef SQL_WLONGVARCHAR -#undef SQL_C_WCHAR -#define SQL_WCHAR SQL_WCHAR_IS_INHIBITED -#define SQL_WVARCHAR SQL_WVARCHAR_IS_INHIBITED -#define SQL_WLONGVARCHAR SQL_WLONGVARCHAR_IS_INHIBITED -#define SQL_C_WCHAR SQL_C_WCHAR_IS_INHIBITED -#endif - -#ifndef DBMS_NAME -#ifdef _WIN64 -#ifdef UNICODE_SUPPORT -#define DBMS_NAME DBMS_NAME_UNICODE "(x64)" -#else -#define DBMS_NAME DBMS_NAME_ANSI "(x64)" -#endif /* UNICODE_SUPPORT */ -#else /* _WIN64 */ -#ifdef UNICODE_SUPPORT -#define DBMS_NAME DBMS_NAME_UNICODE -#else -#define DBMS_NAME DBMS_NAME_ANSI -#endif /* UNICODE_SUPPORT */ -#endif /* _WIN64 */ -#endif /* DBMS_NAME */ - -#ifndef DBMS_NAME -#define DBMS_NAME "OpenSearch Legacy" -#endif /* DBMS_NAME */ -#ifdef WIN32 -#ifdef UNICODE_SUPPORT -#define DRIVER_FILE_NAME "sqlodbc.dll" -#else -#define DRIVER_FILE_NAME "sqlodbc.dll" -#endif /* UNICODE_SUPPORT */ -#else -#ifdef UNICODE_SUPPORT -#define DRIVER_FILE_NAME "libsqlodbc.dylib" -#else -#define DRIVER_FILE_NAME "libsqlodbc.dylib" -#endif -#endif /* WIN32 */ -BOOL isMsAccess(void); -BOOL isMsQuery(void); -BOOL isSqlServr(void); - -/* ESCAPEs */ -#define ESCAPE_IN_LITERAL '\\' -#define BYTEA_ESCAPE_CHAR '\\' -#define SEARCH_PATTERN_ESCAPE '\\' -#define LITERAL_QUOTE '\'' -#define IDENTIFIER_QUOTE '\"' -#define ODBC_ESCAPE_START '{' -#define ODBC_ESCAPE_END '}' -#define DOLLAR_QUOTE '$' -#define LITERAL_EXT 'E' -#define OPENSEARCH_CARRIAGE_RETURN '\r' -#define OPENSEARCH_LINEFEED '\n' - -/* Limits */ -#define MAXESPATH 1024 - -/* see an easy way round this - DJP 24-1-2001 */ -#define MAX_CONNECT_STRING 4096 -#define FETCH_MAX \ - 100 /* default number of rows to cache \ \ - * for 
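
The FORMAT_* ladder above pins printf-style conversion specifiers per data model (Win64 LLP64, LP64 Unix, 32-bit) so SQLLEN/SQLULEN values can be logged portably. A hypothetical call site, relying only on MYLOG and FORMAT_LEN from these headers (the message is illustrative):

    SQLLEN nrows = 42;
    MYLOG(OPENSEARCH_DEBUG, "fetched " FORMAT_LEN " rows\n", nrows);  // literal concatenation picks %ld, %lld, or %I64d
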
declare/fetch */ -#define TUPLE_MALLOC_INC 100 -#define MAX_CONNECTIONS \ - 128 /* conns per environment \ \ - * (arbitrary) */ - -#ifdef NAMEDATALEN -#define MAX_SCHEMA_LEN NAMEDATALEN -#define MAX_TABLE_LEN NAMEDATALEN -#define MAX_COLUMN_LEN NAMEDATALEN -#define NAME_FIELD_SIZE NAMEDATALEN /* size of name fields */ -#if (NAMEDATALEN > NAMESTORAGELEN) -#undef NAMESTORAGELEN -#define NAMESTORAGELEN NAMEDATALEN -#endif -#endif /* NAMEDATALEN */ -#define MAX_CURSOR_LEN 32 - -#define SCHEMA_NAME_STORAGE_LEN NAMESTORAGELEN -#define TABLE_NAME_STORAGE_LEN NAMESTORAGELEN -#define COLUMN_NAME_STORAGE_LEN NAMESTORAGELEN -#define INDEX_KEYS_STORAGE_COUNT 32 - -/* Registry length limits */ -#define LARGE_REGISTRY_LEN 4096 /* used for special cases */ -#define MEDIUM_REGISTRY_LEN \ - 256 /* normal size for \ \ - * user,database,etc. */ -#define SMALL_REGISTRY_LEN 10 /* for 1/0 settings */ - -/* These prefixes denote system tables */ -#define ELASTIC_SYS_PREFIX "es_" - -/* Info limits */ -#define MAX_INFO_STRING 128 - -/* POSIX defines a PATH_MAX. (Windows uses _MAX_PATH.) */ -#ifndef PATH_MAX -#ifdef _MAX_PATH -#define PATH_MAX _MAX_PATH -#else -#define PATH_MAX 1024 -#endif /* _MAX_PATH */ -#endif /* PATH_MAX */ - -typedef struct ConnectionClass_ ConnectionClass; -typedef struct StatementClass_ StatementClass; -typedef struct QResultClass_ QResultClass; -typedef struct BindInfoClass_ BindInfoClass; -typedef struct ParameterInfoClass_ ParameterInfoClass; -typedef struct ParameterImplClass_ ParameterImplClass; -typedef struct ColumnInfoClass_ ColumnInfoClass; -typedef struct EnvironmentClass_ EnvironmentClass; -typedef struct TupleField_ TupleField; -typedef struct KeySet_ KeySet; -typedef struct Rollback_ Rollback; -typedef struct ARDFields_ ARDFields; -typedef struct APDFields_ APDFields; -typedef struct IRDFields_ IRDFields; -typedef struct IPDFields_ IPDFields; - -typedef struct col_info COL_INFO; -typedef struct lo_arg LO_ARG; - -/* opensearchNAME type define */ -typedef struct { - char *name; -} opensearchNAME; -#define GET_NAME(the_name) ((the_name).name) -#define SAFE_NAME(the_name) ((the_name).name ? (the_name).name : NULL_STRING) -#define PRINT_NAME(the_name) ((the_name).name ? (the_name).name : PRINT_NULL) -#define NAME_IS_NULL(the_name) (NULL == (the_name).name) -#define NAME_IS_VALID(the_name) (NULL != (the_name).name) -#define INIT_NAME(the_name) ((the_name).name = NULL) -#define NULL_THE_NAME(the_name) \ - do { \ - if ((the_name).name) \ - free((the_name).name); \ - (the_name).name = NULL; \ - } while (0) -#define STR_TO_NAME(the_name, str) \ - do { \ - if ((the_name).name) \ - free((the_name).name); \ - (the_name).name = (str ? 
strdup((str)) : NULL); \ - } while (0) -#define STRX_TO_NAME(the_name, str) \ - do { \ - if ((the_name).name) \ - free((the_name).name); \ - (the_name).name = strdup((str)); \ - } while (0) -#define STRN_TO_NAME(the_name, str, n) \ - do { \ - if ((the_name).name) \ - free((the_name).name); \ - if (str) { \ - (the_name).name = malloc((n) + 1); \ - if ((the_name).name) { \ - memcpy((the_name).name, str, (n)); \ - (the_name).name[(n)] = '\0'; \ - } \ - } else \ - (the_name).name = NULL; \ - } while (0) -#define NAME_TO_NAME(to, from) \ - do { \ - if ((to).name) \ - free((to).name); \ - if ((from).name) \ - (to).name = strdup(from.name); \ - else \ - (to).name = NULL; \ - } while (0) -#define MOVE_NAME(to, from) \ - do { \ - if ((to).name) \ - free((to).name); \ - (to).name = (from).name; \ - (from).name = NULL; \ - } while (0) -#define SET_NAME_DIRECTLY(the_name, str) ((the_name).name = (str)) - -#define NAMECMP(name1, name2) (strcmp(SAFE_NAME(name1), SAFE_NAME(name2))) -#define NAMEICMP(name1, name2) (stricmp(SAFE_NAME(name1), SAFE_NAME(name2))) -/* opensearchNAME define end */ - -typedef struct GlobalValues_ { - opensearchNAME drivername; - char output_dir[LARGE_REGISTRY_LEN]; - int loglevel; -} GLOBAL_VALUES; - -void copy_globals(GLOBAL_VALUES *to, const GLOBAL_VALUES *from); -void init_globals(GLOBAL_VALUES *glbv); -void finalize_globals(GLOBAL_VALUES *glbv); - -typedef struct StatementOptions_ { - SQLLEN maxRows; - SQLLEN maxLength; - SQLLEN keyset_size; - SQLUINTEGER cursor_type; - SQLUINTEGER scroll_concurrency; - SQLUINTEGER retrieve_data; - SQLUINTEGER use_bookmarks; - void *bookmark_ptr; - SQLUINTEGER metadata_id; - SQLULEN stmt_timeout; -} StatementOptions; - -/* Used to pass extra query info to send_query */ -typedef struct QueryInfo_ { - SQLLEN row_size; - SQLLEN fetch_size; - QResultClass *result_in; - const char *cursor; -} QueryInfo; - -/* Used to save the error information */ -typedef struct { - UInt4 status; - Int2 errorsize; - Int2 recsize; - Int2 errorpos; - char sqlstate[6]; - SQLLEN diag_row_count; - char __error_message[40]; -} OpenSearch_ErrorInfo; -OpenSearch_ErrorInfo *ER_Constructor(SDWORD errornumber, const char *errormsg); -OpenSearch_ErrorInfo *ER_Dup(const OpenSearch_ErrorInfo *from); -void ER_Destructor(OpenSearch_ErrorInfo *); -RETCODE SQL_API ER_ReturnError(OpenSearch_ErrorInfo *, SQLSMALLINT, UCHAR *, - SQLINTEGER *, UCHAR *, SQLSMALLINT, - SQLSMALLINT *, UWORD); - -void logs_on_off(int cnopen, int, int); - -#define OPENSEARCH_TYPE_LO_UNDEFINED \ - (-999) /* hack until permanent \ \ - * type available */ -#define OPENSEARCH_TYPE_LO_NAME "lo" -#define CTID_ATTNUM (-1) /* the attnum of ctid */ -#define OID_ATTNUM (-2) /* the attnum of oid */ -#define XMIN_ATTNUM (-3) /* the attnum of xmin */ - -/* sizes */ -#define TEXT_FIELD_SIZE \ - 8190 /* size of default text fields \ \ - * (not including null term) */ -#define MAX_VARCHAR_SIZE \ - 512 /* default maximum size of \ \ - * varchar fields (not including null term) */ -#define INFO_VARCHAR_SIZE \ - 254 /* varchar field size \ \ - * used in info.c */ - -#define OPENSEARCH_NUMERIC_MAX_PRECISION 1000 -#define OPENSEARCH_NUMERIC_MAX_SCALE 1000 - -/* Sufficient digits to recover original float values */ -#define OPENSEARCH_REAL_DIGITS 9 -#define OPENSEARCH_DOUBLE_DIGITS 17 - -#define INFO_INQUIRY_LEN \ - 8192 /* this seems sufficiently big for \ \ - * queries used in info.c inoue \ \ - * 2001/05/17 */ -#define LENADDR_SHIFT(x, sft) ((x) ? 
(SQLLEN *)((char *)(x) + (sft)) : NULL) - -/* Structure to hold all the connection attributes for a specific - connection (used for both registry and file, DSN and DRIVER) -*/ -typedef struct { - // Connection - char dsn[MEDIUM_REGISTRY_LEN]; - char desc[MEDIUM_REGISTRY_LEN]; - char drivername[MEDIUM_REGISTRY_LEN]; - char server[MEDIUM_REGISTRY_LEN]; - char port[SMALL_REGISTRY_LEN]; - char response_timeout[SMALL_REGISTRY_LEN]; - char fetch_size[SMALL_REGISTRY_LEN]; - - // Authentication - char authtype[MEDIUM_REGISTRY_LEN]; - char username[MEDIUM_REGISTRY_LEN]; - opensearchNAME password; - char region[MEDIUM_REGISTRY_LEN]; - - // Encryption - char use_ssl; - char verify_server; - - GLOBAL_VALUES drivers; /* moved from driver's option */ -} ConnInfo; - -#define SUPPORT_DESCRIBE_PARAM(conninfo_) (1) - -int initialize_global_cs(void); -enum { /* CC_conninfo_init option */ - CLEANUP_FOR_REUSE = 1L /* reuse the info */ - , - INIT_GLOBALS = (1L << 1) /* init globals members */ -}; -void CC_conninfo_init(ConnInfo *conninfo, UInt4 option); -void CC_conninfo_release(ConnInfo *conninfo); -void CC_copy_conninfo(ConnInfo *ci, const ConnInfo *sci); -const char *GetExeProgramName(); - -/* Define a type for defining a constant string expression */ -#ifndef CSTR -#define CSTR static const char *const -#endif /* CSTR */ - -CSTR NULL_STRING = ""; -CSTR PRINT_NULL = "(null)"; -#define OID_NAME "oid" -#define XMIN_NAME "xmin" -#define TABLEOID_NAME "tableoid" - -enum { - DISALLOW_UPDATABLE_CURSORS = 0, /* No cursors are updatable */ - ALLOW_STATIC_CURSORS = 1L, /* Static cursors are updatable */ - ALLOW_KEYSET_DRIVEN_CURSORS = - (1L << 1), /* Keyset-driven cursors are updatable */ - ALLOW_DYNAMIC_CURSORS = (1L << 2), /* Dynamic cursors are updatable */ - ALLOW_BULK_OPERATIONS = (1L << 3), /* Bulk operations available */ - SENSE_SELF_OPERATIONS = (1L << 4), /* Sense self update/delete/add */ -}; - -#ifdef __cplusplus -} -#endif - -#include "mylog.h" - -#endif /* __OPENSEARCHODBC_H__ */ diff --git a/sql-odbc/src/sqlodbc/opensearch_odbc.rc b/sql-odbc/src/sqlodbc/opensearch_odbc.rc deleted file mode 100644 index 30cf00e849..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_odbc.rc +++ /dev/null @@ -1,257 +0,0 @@ -// Microsoft Visual C++ generated resource script. -// -#include "resource.h" - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource.
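
// [Editor's sketch -- not part of the original sources] The opensearchNAME
// macros in opensearch_odbc.h above give plain C a single ownership
// convention for heap-allocated strings: every setter frees the previous
// value before assigning, so reassignment never leaks. A minimal,
// hypothetical usage example, assuming only the macros and CSTR constants
// quoted above:
//
//     opensearchNAME cursor;
//     INIT_NAME(cursor);                /* name == NULL */
//     STR_TO_NAME(cursor, "my_cursor"); /* frees old value, then strdup()s */
//     if (NAME_IS_VALID(cursor))
//         printf("%s\n", GET_NAME(cursor));
//     NULL_THE_NAME(cursor);            /* free and reset to NULL */
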
-// -#include "afxres.h" -#include "version.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Japanese (Japan) resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_JPN) -LANGUAGE LANG_JAPANESE, SUBLANG_DEFAULT -#pragma code_page(932) - -#ifdef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// TEXTINCLUDE -// - -1 TEXTINCLUDE -BEGIN - "resource.h\0" -END - -2 TEXTINCLUDE -BEGIN - "#include ""afxres.h""\r\n" - "#include ""version.h""\r\n" - "\0" -END - -3 TEXTINCLUDE -BEGIN - "\r\n" - "\0" -END - -#endif // APSTUDIO_INVOKED - -#endif // Japanese (Japan) resources -///////////////////////////////////////////////////////////////////////////// - - -///////////////////////////////////////////////////////////////////////////// -// English (United States) resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US -#pragma code_page(1252) - -///////////////////////////////////////////////////////////////////////////// -// -// Dialog -// - -DLG_CONFIG DIALOGEX 65, 43, 275, 250 -STYLE DS_SETFONT | DS_MODALFRAME | DS_3DLOOK | DS_CENTER | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU -CAPTION "OpenSearch ODBC Driver DSN Setup" -FONT 8, "MS Sans Serif", 0, 0, 0x0 -BEGIN - LTEXT "Data Source Name",IDC_DSNAMETEXT,23,7,77,12,NOT WS_GROUP - EDITTEXT IDC_DSNAME,108,6,120,12,ES_AUTOHSCROLL | WS_DISABLED - GROUPBOX "Connection Settings",IDC_CONN_SETTINGS,7,28,261,56 - LTEXT "Host",IDC_STATIC,20,46,20,8,NOT WS_GROUP - EDITTEXT IDC_SERVER,60,44,192,12,ES_AUTOHSCROLL - LTEXT "&Port",IDC_STATIC,20,66,19,8 - EDITTEXT IDC_PORT,60,64,192,13,ES_AUTOHSCROLL - GROUPBOX "Authentication Settings",IDC_AUTH_SETTINGS,7,93,260,92,BS_FLAT - LTEXT "Auth",IDC_AUTH_STATIC,21,110,19,8 - COMBOBOX IDC_AUTHTYPE,61,108,192,30,CBS_DROPDOWNLIST | CBS_SORT | WS_VSCROLL | WS_TABSTOP - LTEXT "User",IDC_USERNAME_STATIC,20,129,19,8 - EDITTEXT IDC_USER,61,127,191,12,ES_AUTOHSCROLL | WS_DISABLED - LTEXT "Password",IDC_PASSWORD_STATIC,20,149,41,12 - EDITTEXT IDC_PASSWORD,61,147,191,12,ES_PASSWORD | ES_AUTOHSCROLL | WS_DISABLED - LTEXT "Region",IDC_REGION_STATIC,20,170,28,8 - EDITTEXT IDC_REGION,61,168,191,12,ES_AUTOHSCROLL | WS_DISABLED - PUSHBUTTON "Advanced Options",ID_ADVANCED_OPTIONS,21,195,111,15,WS_GROUP - PUSHBUTTON "Logging Options",ID_LOG_OPTIONS,144,195,108,15,WS_GROUP - LTEXT "V.N.N.N",IDC_DRIVER_VERSION,10,228,108,8 - DEFPUSHBUTTON "OK",IDOK,119,224,44,15,WS_GROUP - DEFPUSHBUTTON "Test",IDOK2,167,224,44,15,WS_GROUP - PUSHBUTTON "Cancel",IDCANCEL,215,224,44,15 -END - -DLG_ADVANCED_OPTIONS DIALOGEX 0, 0, 157, 113 -STYLE DS_SETFONT | DS_MODALFRAME | DS_CENTER | WS_POPUP | WS_CAPTION | WS_SYSMENU -CAPTION "Advanced Options" -FONT 8, "MS Sans Serif", 0, 0, 0x0 -BEGIN - GROUPBOX "",IDC_STATIC,12,4,133,85 - CONTROL "SSL",IDC_USESSL,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,19,12,10,11 - LTEXT "Enable SSL",IDC_SSL_STATIC,37,14,39,8 - CONTROL "Host Verification",IDC_HOST_VER,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,19,32,10,9 - LTEXT "Hostname Verification",IDC_HOST_VER_STATIC,37,32,71,8 - LTEXT "Response Timeout (s)",IDC_CONNTIMEOUT_STATIC,19,51,70,8 - EDITTEXT IDC_CONNTIMEOUT,96,50,43,12,ES_AUTOHSCROLL - DEFPUSHBUTTON "OK",IDOK,49,94,44,14,WS_GROUP - PUSHBUTTON "Cancel",IDCANCEL,98,93,47,15 - LTEXT "Fetch Size",IDC_FETCH_SIZE_STATIC,19,71,35,8 - EDITTEXT 
IDC_FETCH_SIZE,96,70,43,12,ES_AUTOHSCROLL -END - -DLG_LOG_OPTIONS DIALOGEX 0, 0, 251, 79 -STYLE DS_SETFONT | DS_MODALFRAME | DS_CENTER | WS_POPUP | WS_CAPTION | WS_SYSMENU -CAPTION "Logging Options" -FONT 8, "MS Sans Serif", 0, 0, 0x0 -BEGIN - GROUPBOX "",IDC_STATIC,10,4,230,51 - LTEXT "Log Level",IDC_STATIC,22,15,31,8 - COMBOBOX IDC_LOG_LEVEL,71,13,149,30,CBS_DROPDOWNLIST | WS_VSCROLL | WS_TABSTOP - LTEXT "Log Path",IDC_STATIC,22,38,29,8 - EDITTEXT IDC_LOG_PATH,71,36,149,12,ES_AUTOHSCROLL - DEFPUSHBUTTON "OK",IDOK,144,60,44,14,WS_GROUP - PUSHBUTTON "Cancel",IDCANCEL,193,59,47,15 -END - - -///////////////////////////////////////////////////////////////////////////// -// -// DESIGNINFO -// - -#ifdef APSTUDIO_INVOKED -GUIDELINES DESIGNINFO -BEGIN - DLG_CONFIG, DIALOG - BEGIN - RIGHTMARGIN, 270 - TOPMARGIN, 1 - BOTTOMMARGIN, 200 - END - - DLG_ADVANCED_OPTIONS, DIALOG - BEGIN - LEFTMARGIN, 5 - RIGHTMARGIN, 152 - TOPMARGIN, 5 - BOTTOMMARGIN, 108 - END - - DLG_LOG_OPTIONS, DIALOG - BEGIN - LEFTMARGIN, 5 - RIGHTMARGIN, 246 - TOPMARGIN, 5 - BOTTOMMARGIN, 74 - END -END -#endif // APSTUDIO_INVOKED - - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION OPENSEARCH_DRVFILE_VERSION - PRODUCTVERSION OPENSEARCH_DRVFILE_VERSION - FILEFLAGSMASK 0x3L -#ifdef _DEBUG - FILEFLAGS 0x9L -#else - FILEFLAGS 0x8L -#endif - FILEOS 0x4L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904e4" - BEGIN - VALUE "Comments", "OpenSearch ANSI ODBC driver" - VALUE "CompanyName", "Amazon" - VALUE "FileDescription", "OpenSearch ODBC Driver (English)" - VALUE "FileVersion", OPENSEARCH_ODBC_VERSION - VALUE "InternalName", "sqlodbc" - VALUE "LegalCopyright", "Copyright" - VALUE "LegalTrademarks", "ODBC(TM) is a trademark of Microsoft Corporation. Microsoft(R) is a registered trademark of Microsoft Corporation. Windows(TM) is a trademark of Microsoft Corporation." - VALUE "OriginalFilename", "sqlodbc.dll" - VALUE "ProductName", "OpenSearch" - VALUE "ProductVersion", OPENSEARCH_ODBC_VERSION - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x409, 1252 - END -END - - -///////////////////////////////////////////////////////////////////////////// -// -// AFX_DIALOG_LAYOUT -// - -DLG_CONFIG AFX_DIALOG_LAYOUT -BEGIN - 0 -END - -DLG_ADVANCED_OPTIONS AFX_DIALOG_LAYOUT -BEGIN - 0 -END - -DLG_LOG_OPTIONS AFX_DIALOG_LAYOUT -BEGIN - 0 -END - - -///////////////////////////////////////////////////////////////////////////// -// -// String Table -// - -STRINGTABLE -BEGIN - IDS_AUTHTYPE_NONE "NONE" - IDS_AUTHTYPE_BASIC "BASIC" - IDS_AUTHTYPE_IAM "AWS_SIGV4" - IDS_LOGTYPE_OFF "LOG_OFF" - IDS_LOGTYPE_FATAL "LOG_FATAL" - IDS_LOGTYPE_ERROR "LOG_ERROR" - IDS_LOGTYPE_WARNING "LOG_WARNING" - IDS_LOGTYPE_INFO "LOG_INFO" - IDS_LOGTYPE_DEBUG "LOG_DEBUG" - IDS_LOGTYPE_TRACE "LOG_TRACE" - IDS_LOGTYPE_ALL "LOG_ALL" -END - -#endif // English (United States) resources -///////////////////////////////////////////////////////////////////////////// - - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource.
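
// [Editor's sketch -- not part of the original sources] The IDS_AUTHTYPE_*
// entries in the string table above are the labels the DSN setup dialog
// offers in its IDC_AUTHTYPE combo box (declared with CBS_SORT, so they end
// up sorted). A hypothetical WM_INITDIALOG fragment using only documented
// Win32 calls; hModule and hDlg are assumed to be in scope:
//
//     char buf[128];
//     UINT ids[] = {IDS_AUTHTYPE_NONE, IDS_AUTHTYPE_BASIC, IDS_AUTHTYPE_IAM};
//     for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); ++i) {
//         if (LoadStringA(hModule, ids[i], buf, sizeof(buf)) > 0)
//             SendDlgItemMessageA(hDlg, IDC_AUTHTYPE, CB_ADDSTRING, 0,
//                                 (LPARAM)buf);
//     }
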
-// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/sql-odbc/src/sqlodbc/opensearch_parse_result.cpp b/sql-odbc/src/sqlodbc/opensearch_parse_result.cpp deleted file mode 100644 index aa77d718b9..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_parse_result.cpp +++ /dev/null @@ -1,461 +0,0 @@ -#include "opensearch_parse_result.h" - -#include <unordered_map> - -#include "opensearch_types.h" -#include "opensearch_helper.h" -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -#include "rabbit.hpp" -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ -#include "statement.h" - -typedef std::vector< std::pair< std::string, OID > > schema_type; -typedef rabbit::array json_arr; -typedef json_arr::iterator::result_type json_arr_it; - -bool _CC_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -bool _CC_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -bool _CC_No_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -void GetSchemaInfo(schema_type &schema, json_doc &opensearch_result_doc); -bool AssignColumnHeaders(const schema_type &doc_schema, QResultClass *q_res, - const OpenSearchResult &opensearch_result); -bool AssignTableData(json_doc &opensearch_result_doc, QResultClass *q_res, - size_t doc_schema_size, ColumnInfoClass &fields); -bool AssignRowData(const json_arr_it &row, size_t row_schema_size, - QResultClass *q_res, ColumnInfoClass &fields, - const size_t &row_size); -void UpdateResultFields(QResultClass *q_res, const ConnectionClass *conn, - const SQLULEN starting_cached_rows, const char *cursor, - std::string &command_type); -bool QR_prepare_for_tupledata(QResultClass *q_res); -void SetError(const char *err); -void ClearError(); - -// clang-format off -// Not all of these are being used at the moment, but these are the keywords in the json -static const std::string JSON_KW_SCHEMA = "schema"; -static const std::string JSON_KW_NAME = "name"; -static const std::string JSON_KW_TYPE = "type"; -static const std::string JSON_KW_TOTAL = "total"; -static const std::string JSON_KW_SIZE = "size"; -static const std::string JSON_KW_STATUS = "status"; -static const std::string JSON_KW_DATAROWS = "datarows"; -static const std::string JSON_KW_ERROR = "error"; -static const std::string JSON_KW_CURSOR = "cursor"; - -// clang-format on -const std::unordered_map< std::string, OID > type_to_oid_map = { - {OPENSEARCH_TYPE_NAME_BOOLEAN, OPENSEARCH_TYPE_BOOL}, - {OPENSEARCH_TYPE_NAME_BYTE, OPENSEARCH_TYPE_INT2}, - {OPENSEARCH_TYPE_NAME_SHORT, OPENSEARCH_TYPE_INT2}, - {OPENSEARCH_TYPE_NAME_INTEGER, OPENSEARCH_TYPE_INT4}, - {OPENSEARCH_TYPE_NAME_LONG, OPENSEARCH_TYPE_INT8}, - {OPENSEARCH_TYPE_NAME_HALF_FLOAT, OPENSEARCH_TYPE_FLOAT4}, - {OPENSEARCH_TYPE_NAME_FLOAT, OPENSEARCH_TYPE_FLOAT4}, - {OPENSEARCH_TYPE_NAME_DOUBLE, OPENSEARCH_TYPE_FLOAT8}, - {OPENSEARCH_TYPE_NAME_SCALED_FLOAT, OPENSEARCH_TYPE_FLOAT8}, - {OPENSEARCH_TYPE_NAME_KEYWORD, OPENSEARCH_TYPE_VARCHAR}, - {OPENSEARCH_TYPE_NAME_TEXT, OPENSEARCH_TYPE_VARCHAR}, - {OPENSEARCH_TYPE_NAME_DATE, OPENSEARCH_TYPE_TIMESTAMP}, - {OPENSEARCH_TYPE_NAME_OBJECT, OPENSEARCH_TYPE_VARCHAR}, - {OPENSEARCH_TYPE_NAME_VARCHAR, OPENSEARCH_TYPE_VARCHAR}, - {OPENSEARCH_TYPE_NAME_DATE, OPENSEARCH_TYPE_DATE}};
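
// [Editor's note -- illustrative payload, not from the original sources]
// A worked example of the mapping above. For a response document such as
//
//     {"schema":   [{"name": "age",  "type": "long"},
//                   {"name": "name", "type": "text"}],
//      "datarows": [[28, "alice"]],
//      "total": 1, "size": 1, "status": 200}
//
// GetSchemaInfo() below walks the "schema" array and looks each "type" up in
// type_to_oid_map ("long" -> OPENSEARCH_TYPE_INT8, "text" ->
// OPENSEARCH_TYPE_VARCHAR); type names missing from the map fall back to
// SQL_WVARCHAR. AssignTableData() then consumes "datarows" row by row.
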
-#define OPENSEARCH_VARCHAR_SIZE (-2) -const std::unordered_map< OID, int16_t > oid_to_size_map = { - {OPENSEARCH_TYPE_BOOL, (int16_t)1}, - {OPENSEARCH_TYPE_INT2, (int16_t)2}, - {OPENSEARCH_TYPE_INT4, (int16_t)4}, - {OPENSEARCH_TYPE_INT8, (int16_t)8}, - {OPENSEARCH_TYPE_FLOAT4, (int16_t)4}, - {OPENSEARCH_TYPE_FLOAT8, (int16_t)8}, - {OPENSEARCH_TYPE_VARCHAR, (int16_t)OPENSEARCH_VARCHAR_SIZE}, - {OPENSEARCH_TYPE_DATE, (int16_t)OPENSEARCH_VARCHAR_SIZE}, - {OPENSEARCH_TYPE_TIMESTAMP, (int16_t)1}}; - -// Using global variable here so that the error message can be propagated -// without going out of scope -std::string error_msg; - -void SetError(const char *err) { - error_msg = err; -} -void ClearError() { - error_msg = ""; -} -std::string GetResultParserError() { - return error_msg; -} - -BOOL CC_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - ClearError(); - return _CC_from_OpenSearchResult(q_res, conn, cursor, opensearch_result) ? TRUE : FALSE; -} - -BOOL CC_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - ClearError(); - return _CC_Metadata_from_OpenSearchResult(q_res, conn, cursor, - opensearch_result) ? TRUE : FALSE; -} - -BOOL CC_No_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - ClearError(); - return _CC_No_Metadata_from_OpenSearchResult(q_res, conn, cursor, - opensearch_result) - ? TRUE - : FALSE; -} - -BOOL CC_Append_Table_Data(json_doc &opensearch_result_doc, QResultClass *q_res, - size_t doc_schema_size, ColumnInfoClass &fields) { - ClearError(); - return AssignTableData(opensearch_result_doc, q_res, doc_schema_size, fields) - ? TRUE - : FALSE; -} - -bool _CC_No_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - // Note - NULL conn and/or cursor is valid - if (q_res == NULL) - return false; - - try { - schema_type doc_schema; - GetSchemaInfo(doc_schema, opensearch_result.opensearch_result_doc); - - SQLULEN starting_cached_rows = q_res->num_cached_rows; - - // Assign table data and column headers - if (!AssignTableData(opensearch_result.opensearch_result_doc, q_res, doc_schema.size(), - *(q_res->fields))) - return false; - - // Update fields of QResult to reflect data written - UpdateResultFields(q_res, conn, starting_cached_rows, cursor, - opensearch_result.command_type); - - // Return true (success) - return true; - } catch (const rabbit::type_mismatch &e) { - SetError(e.what()); - } catch (const rabbit::parse_error &e) { - SetError(e.what()); - } catch (const std::exception &e) { - SetError(e.what()); - } catch (...)
{ - SetError("Unknown exception thrown in _CC_No_Metadata_from_OpenSearchResult."); - } - - // Exception occurred, return false (error) - return false; -} - -bool _CC_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - // Note - NULL conn and/or cursor is valid - if (q_res == NULL) - return false; - - QR_set_conn(q_res, conn); - try { - schema_type doc_schema; - GetSchemaInfo(doc_schema, opensearch_result.opensearch_result_doc); - - // Assign table data and column headers - if (!AssignColumnHeaders(doc_schema, q_res, opensearch_result)) - return false; - - // Set command type and cursor name - QR_set_command(q_res, opensearch_result.command_type.c_str()); - QR_set_cursor(q_res, cursor); - if (cursor == NULL) - QR_set_reached_eof(q_res); - - // Return true (success) - return true; - } catch (const rabbit::type_mismatch &e) { - SetError(e.what()); - } catch (const rabbit::parse_error &e) { - SetError(e.what()); - } catch (const std::exception &e) { - SetError(e.what()); - } catch (...) { - SetError("Unknown exception thrown in _CC_Metadata_from_OpenSearchResult."); - } - - // Exception occurred, return false (error) - return false; -} - -bool _CC_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result) { - // Note - NULL conn and/or cursor is valid - if (q_res == NULL) - return false; - - QR_set_conn(q_res, conn); - try { - schema_type doc_schema; - GetSchemaInfo(doc_schema, opensearch_result.opensearch_result_doc); - SQLULEN starting_cached_rows = q_res->num_cached_rows; - - // Assign table data and column headers - if ((!AssignColumnHeaders(doc_schema, q_res, opensearch_result)) - || (!AssignTableData(opensearch_result.opensearch_result_doc, q_res, doc_schema.size(), - *(q_res->fields)))) - return false; - - // Update fields of QResult to reflect data written - UpdateResultFields(q_res, conn, starting_cached_rows, cursor, - opensearch_result.command_type); - - // Return true (success) - return true; - } catch (const rabbit::type_mismatch &e) { - SetError(e.what()); - } catch (const rabbit::parse_error &e) { - SetError(e.what()); - } catch (const std::exception &e) { - SetError(e.what()); - } catch (...) { - SetError("Unknown exception thrown in CC_from_OpenSearchResult."); - } - - // Exception occurred, return false (error) - return false; -} - -void GetSchemaInfo(schema_type &schema, json_doc &opensearch_result_doc) { - json_arr schema_arr = opensearch_result_doc[JSON_KW_SCHEMA]; - for (auto it : schema_arr) { - auto mapped_oid = type_to_oid_map.find(it[JSON_KW_TYPE].as_string()); - OID type_oid = (mapped_oid == type_to_oid_map.end()) - ? SQL_WVARCHAR - : mapped_oid->second; - schema.push_back( - std::make_pair(it[JSON_KW_NAME].as_string(), type_oid)); - } -} - -bool AssignColumnHeaders(const schema_type &doc_schema, QResultClass *q_res, - const OpenSearchResult &opensearch_result) { - // Verify server_info size matches the schema size - if (opensearch_result.column_info.size() != doc_schema.size()) - return false; - - // Allocte memory for column fields - QR_set_num_fields(q_res, (uint16_t)opensearch_result.column_info.size()); - if (QR_get_fields(q_res)->coli_array == NULL) - return false; - - // Assign column info - for (size_t i = 0; i < doc_schema.size(); i++) { - auto type_size_ptr = oid_to_size_map.find(doc_schema[i].second); - int16_t type_size = (type_size_ptr == oid_to_size_map.end()) - ? 
OPENSEARCH_ADT_UNSET - : type_size_ptr->second; - CI_set_field_info(QR_get_fields(q_res), (int)i, - doc_schema[i].first.c_str(), doc_schema[i].second, - type_size, - opensearch_result.column_info[i].length_of_str, - opensearch_result.column_info[i].relation_id, - opensearch_result.column_info[i].attribute_number); - QR_set_rstatus(q_res, PORES_FIELDS_OK); - } - q_res->num_fields = CI_get_num_fields(QR_get_fields(q_res)); - - return true; -} - -// Responsible for looping through rows, allocating tuples and passing rows for -// assignment -bool AssignTableData(json_doc &opensearch_result_doc, QResultClass *q_res, - size_t doc_schema_size, ColumnInfoClass &fields) { - // Assign row info - json_arr opensearch_result_data = opensearch_result_doc[JSON_KW_DATAROWS]; - if (opensearch_result_data.size() == 0) - return true; - - // Figure out number of columns are in a row and make schema is not bigger - // than it - size_t row_size = std::distance(opensearch_result_data.begin()->value_begin(), - opensearch_result_data.begin()->value_end()); - if (row_size < doc_schema_size) { - return false; - } - for (auto it : opensearch_result_data) { - // Setup memory to receive tuple - if (!QR_prepare_for_tupledata(q_res)) - return false; - - // Assign row data - if (!AssignRowData(it, doc_schema_size, q_res, fields, row_size)) - return false; - } - - return true; -} - -// Responsible for assigning row data to tuples -bool AssignRowData(const json_arr_it &row, size_t row_schema_size, - QResultClass *q_res, ColumnInfoClass &fields, - const size_t &row_size) { - TupleField *tuple = - q_res->backend_tuples + (q_res->num_cached_rows * row_size); - - // Setup keyset if present - KeySet *ks = NULL; - if (QR_haskeyset(q_res)) { - ks = q_res->keyset + q_res->num_cached_keys; - ks->status = 0; - } - - // Loop through and assign data - size_t i = 0; - for (auto row_column = row.value_begin(); i < row_schema_size; - ++row_column, ++i) { - if (row_column->is_null()) { - tuple[i].len = SQL_NULL_DATA; - tuple[i].value = NULL; - } else { - // Copy string over to tuple - const std::string data = row_column->str(); - tuple[i].len = static_cast< int >(data.length()); - QR_MALLOC_return_with_error( - tuple[i].value, char, data.length() + 1, q_res, - "Out of memory in allocating item buffer.", false); - strcpy((char *)tuple[i].value, data.c_str()); - - // If data length exceeds current display size, set display size - if (fields.coli_array[i].display_size < tuple[i].len) - fields.coli_array[i].display_size = tuple[i].len; - } - } - - // If there are more rows than schema suggests, we have Keyset data - if (row_size > row_schema_size) { - if (ks == NULL) { - QR_set_rstatus(q_res, PORES_INTERNAL_ERROR); - QR_set_message(q_res, - "Keyset was NULL, but Keyset data was expected."); - return false; - } - - auto row_column = row.value_begin() + row_schema_size; - if (sscanf(row_column->str().c_str(), "(%u,%hu)", &ks->blocknum, - &ks->offset) - != 2) { - QR_set_rstatus(q_res, PORES_INTERNAL_ERROR); - QR_set_message(q_res, "Failed to assign Keyset."); - return false; - } - row_column++; - ks->oid = std::stoul(row_column->str(), nullptr, 10); - } - - // Increment relevant data - q_res->cursTuple++; - if (q_res->num_fields > 0) - QR_inc_num_cache(q_res); - else if (QR_haskeyset(q_res)) - q_res->num_cached_keys++; - - if ((SQLULEN)q_res->cursTuple >= q_res->num_total_read) - q_res->num_total_read = q_res->cursTuple + 1; - return true; -} - -void UpdateResultFields(QResultClass *q_res, const ConnectionClass *conn, - const SQLULEN 
starting_cached_rows, const char *cursor, - std::string &command_type) { - // Adjust total read - if (!QR_once_reached_eof(q_res) - && q_res->cursTuple >= (Int4)q_res->num_total_read) - q_res->num_total_read = q_res->cursTuple + 1; - - // Adjust eof and tuple cursor - if (q_res->num_cached_rows - starting_cached_rows < q_res->cmd_fetch_size) { - QR_set_reached_eof(q_res); - if (q_res->cursTuple < (Int4)q_res->num_total_read) - q_res->cursTuple = q_res->num_total_read; - } - - // Handle NULL connection - if (conn != NULL) { - q_res->fetch_number = static_cast< SQLLEN >(0); - QR_set_rowstart_in_cache(q_res, 0); - q_res->key_base = 0; - } - - // Set command type and cursor name - QR_set_command(q_res, command_type.c_str()); - QR_set_cursor(q_res, cursor); - if (cursor == NULL) - QR_set_reached_eof(q_res); - - // Set flags, adjust pointers, and return true (success) - q_res->dataFilled = true; - q_res->tupleField = - q_res->backend_tuples + (q_res->fetch_number * q_res->num_fields); - QR_set_rstatus(q_res, PORES_TUPLES_OK); -} - -bool QR_prepare_for_tupledata(QResultClass *q_res) { - if (QR_get_cursor(q_res)) { - return true; - } - - // If total tuples > allocated tuples, need to reallocate - if (q_res->num_fields > 0 - && QR_get_num_total_tuples(q_res) >= q_res->count_backend_allocated) { - SQLLEN tuple_size = (q_res->count_backend_allocated < 1) - ? TUPLE_MALLOC_INC - : q_res->count_backend_allocated * 2; - - // Will return false if allocation fails - QR_REALLOC_return_with_error( - q_res->backend_tuples, TupleField, - tuple_size * q_res->num_fields * sizeof(TupleField), q_res, - "Out of memory while reading tuples.", false); - q_res->count_backend_allocated = tuple_size; - } - - // If total keyset > allocated keyset, need to reallocate - if (QR_haskeyset(q_res) - && q_res->num_cached_keys >= q_res->count_keyset_allocated) { - SQLLEN keyset_size = (q_res->count_keyset_allocated < 1) - ? 
TUPLE_MALLOC_INC - : q_res->count_keyset_allocated * 2; - - // Will return false if macro fails - QR_REALLOC_return_with_error( - q_res->keyset, KeySet, sizeof(KeySet) * keyset_size, q_res, - "Out of memory while allocating keyset", false); - memset(&q_res->keyset[q_res->count_keyset_allocated], 0, - (keyset_size - q_res->count_keyset_allocated) * sizeof(KeySet)); - q_res->count_keyset_allocated = keyset_size; - } - - return true; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_parse_result.h b/sql-odbc/src/sqlodbc/opensearch_parse_result.h deleted file mode 100644 index b74f180901..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_parse_result.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _OPENSEARCH_PARSE_RESULT_H_ -#define _OPENSEARCH_PARSE_RESULT_H_ -#include "qresult.h" - -#ifdef __cplusplus -std::string GetResultParserError(); -extern "C" { -#endif -#ifdef __cplusplus -} -#endif - -#ifdef __cplusplus -#include "opensearch_helper.h" -typedef rabbit::document json_doc; -// const char* is used instead of string for the cursor, because a NULL cursor -// is sometimes used. Cannot pass q_res as reference because it breaks qresult.h -// macros that expect to use the -> operator. -BOOL CC_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -BOOL CC_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -BOOL CC_No_Metadata_from_OpenSearchResult(QResultClass *q_res, ConnectionClass *conn, - const char *cursor, - OpenSearchResult &opensearch_result); -BOOL CC_Append_Table_Data(json_doc &opensearch_result_doc, QResultClass *q_res, - size_t doc_schema_size, ColumnInfoClass &fields); -#endif -#endif diff --git a/sql-odbc/src/sqlodbc/opensearch_result_queue.cpp b/sql-odbc/src/sqlodbc/opensearch_result_queue.cpp deleted file mode 100644 index dc6d3ddd7a..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_result_queue.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include "opensearch_result_queue.h" - -#include "opensearch_types.h" - -OpenSearchResultQueue::OpenSearchResultQueue(unsigned int capacity) - : m_push_semaphore(capacity, capacity), - m_pop_semaphore(0, capacity) { -} - -OpenSearchResultQueue::~OpenSearchResultQueue() { - while (!m_queue.empty()) { - delete m_queue.front(); - m_queue.pop(); - } -} - -void OpenSearchResultQueue::clear() { - std::scoped_lock lock(m_queue_mutex); - while (!m_queue.empty()) { - delete m_queue.front(); - m_queue.pop(); - m_push_semaphore.release(); - m_pop_semaphore.lock(); - } -} - -bool OpenSearchResultQueue::pop(unsigned int timeout_ms, OpenSearchResult*& result) { - if (m_pop_semaphore.try_lock_for(timeout_ms)) { - std::scoped_lock lock(m_queue_mutex); - result = m_queue.front(); - m_queue.pop(); - m_push_semaphore.release(); - return true; - } - - return false; -} - -bool OpenSearchResultQueue::push(unsigned int timeout_ms, OpenSearchResult* result) { - if (m_push_semaphore.try_lock_for(timeout_ms)) { - std::scoped_lock lock(m_queue_mutex); - m_queue.push(result); - m_pop_semaphore.release(); - return true; - } - - return false; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_result_queue.h b/sql-odbc/src/sqlodbc/opensearch_result_queue.h deleted file mode 100644 index fef33da401..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_result_queue.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef OPENSEARCH_RESULT_QUEUE -#define OPENSEARCH_RESULT_QUEUE - -#include <queue> -#include <mutex> - -#include "opensearch_semaphore.h" - -#define QUEUE_TIMEOUT 20  // milliseconds
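
// [Editor's sketch -- not part of the original sources] OpenSearchResultQueue
// (below) is a bounded producer/consumer queue: m_push_semaphore counts free
// slots and m_pop_semaphore counts queued items, so push() blocks while the
// queue is full and pop() blocks while it is empty, each for at most
// timeout_ms milliseconds. Hypothetical usage, assuming OpenSearchResult is
// default-constructible:
//
//     OpenSearchResultQueue queue(2);  // at most two result pages in flight
//
//     // producer thread
//     OpenSearchResult *page = new OpenSearchResult();
//     if (!queue.push(QUEUE_TIMEOUT, page))
//         delete page;                 // queue stayed full; we keep ownership
//
//     // consumer thread
//     OpenSearchResult *result = NULL;
//     if (queue.pop(QUEUE_TIMEOUT, result)) {
//         /* ...consume result... */
//         delete result;               // ownership passed to the consumer
//     }
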
-struct OpenSearchResult; - -class OpenSearchResultQueue { - public: - OpenSearchResultQueue(unsigned int capacity); - ~OpenSearchResultQueue(); - - void clear(); - bool pop(unsigned int timeout_ms, OpenSearchResult*& result); - bool push(unsigned int timeout_ms, OpenSearchResult* result); - - private: - std::queue< OpenSearchResult*> m_queue; - std::mutex m_queue_mutex; - opensearch_semaphore m_push_semaphore; - opensearch_semaphore m_pop_semaphore; -}; - -#endif diff --git a/sql-odbc/src/sqlodbc/opensearch_semaphore.cpp b/sql-odbc/src/sqlodbc/opensearch_semaphore.cpp deleted file mode 100644 index c35ac77461..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_semaphore.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include "opensearch_semaphore.h" - -#include <string> - -#ifdef WIN32 -namespace { -HANDLE createSemaphore(unsigned int initial, unsigned int capacity) { - HANDLE semaphore = NULL; - std::string semName; - while (NULL == semaphore) { - semName = "es_sem_" + std::to_string(rand() * 1000); - semaphore = CreateSemaphore(NULL, initial, capacity, semName.c_str()); - } - - return semaphore; -} -} // namespace -#else -#include <time.h> -#endif - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -opensearch_semaphore::opensearch_semaphore(unsigned int initial, unsigned int capacity) -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - : -#ifdef WIN32 - m_semaphore(createSemaphore(initial, capacity)) -#elif defined(__APPLE__) - m_semaphore(dispatch_semaphore_create(initial)) -#endif -{ -#if !defined(WIN32) && !defined(__APPLE__) - sem_init(&m_semaphore, 0, capacity); -#endif -} - -opensearch_semaphore::~opensearch_semaphore() { -#ifdef WIN32 - CloseHandle(m_semaphore); -#elif defined(__APPLE__) -#else - sem_destroy(&m_semaphore); -#endif -} - -void opensearch_semaphore::lock() { -#ifdef WIN32 - WaitForSingleObject(m_semaphore, INFINITE); -#elif defined(__APPLE__) - dispatch_semaphore_wait(m_semaphore, DISPATCH_TIME_FOREVER); -#else - sem_wait(&m_semaphore); -#endif -} - -void opensearch_semaphore::release() { -#ifdef WIN32 - ReleaseSemaphore(m_semaphore, 1, NULL); -#elif defined(__APPLE__) - dispatch_semaphore_signal(m_semaphore); -#else - sem_post(&m_semaphore); -#endif -} - -bool opensearch_semaphore::try_lock_for(unsigned int timeout_ms) { -#ifdef WIN32 - return WaitForSingleObject(m_semaphore, timeout_ms) == WAIT_OBJECT_0; -#elif defined(__APPLE__) - return 0 - == dispatch_semaphore_wait( - m_semaphore, dispatch_time(DISPATCH_TIME_NOW, - static_cast< int64_t >( - timeout_ms * NSEC_PER_MSEC))); -#else - struct timespec ts; - if (-1 == clock_gettime(CLOCK_REALTIME, &ts)) { - return false; - } - - /* add the relative timeout, carrying any overflow into tv_sec; - * sem_timedwait() rejects a timespec with tv_nsec >= 1e9 (EINVAL) */ - ts.tv_sec += timeout_ms / 1000; - ts.tv_nsec += (timeout_ms % 1000) * 1000000; - ts.tv_sec += ts.tv_nsec / 1000000000; - ts.tv_nsec %= 1000000000; - return 0 == sem_timedwait(&m_semaphore, &ts); -#endif -} diff --git a/sql-odbc/src/sqlodbc/opensearch_semaphore.h b/sql-odbc/src/sqlodbc/opensearch_semaphore.h deleted file mode 100644 index a5ffdbfc0e..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_semaphore.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef OPENSEARCH_SEMAPHORE -#define OPENSEARCH_SEMAPHORE - -#ifdef WIN32 - #include <windows.h> -#elif defined(__APPLE__) - #include <dispatch/dispatch.h> -#else - #include <semaphore.h> -#endif - -class opensearch_semaphore { - public: - opensearch_semaphore(unsigned int initial, unsigned int capacity); - ~opensearch_semaphore(); - - void lock(); - void release(); - bool try_lock_for(unsigned int timeout_ms); - - private: -#ifdef WIN32 - HANDLE m_semaphore; -#elif defined(__APPLE__) - dispatch_semaphore_t m_semaphore; -#else -
sem_t m_semaphore; -#endif -}; - -#endif diff --git a/sql-odbc/src/sqlodbc/opensearch_statement.cpp b/sql-odbc/src/sqlodbc/opensearch_statement.cpp deleted file mode 100644 index 94a91d0f77..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_statement.cpp +++ /dev/null @@ -1,335 +0,0 @@ -#include "opensearch_statement.h" - -#include "environ.h" // Critical section for statment -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_helper.h" -#include "statement.h" - -extern "C" void *common_cs; - -RETCODE ExecuteStatement(StatementClass *stmt, BOOL commit) { - CSTR func = "ExecuteStatement"; - int func_cs_count = 0; - ConnectionClass *conn = SC_get_conn(stmt); - CONN_Status oldstatus = conn->status; - - auto CleanUp = [&]() -> RETCODE { - SC_SetExecuting(stmt, FALSE); - CLEANUP_FUNC_CONN_CS(func_cs_count, conn); - if (conn->status != CONN_DOWN) - conn->status = oldstatus; - if (SC_get_errornumber(stmt) == STMT_OK) - return SQL_SUCCESS; - else if (SC_get_errornumber(stmt) < STMT_OK) - return SQL_SUCCESS_WITH_INFO; - else { - if (!SC_get_errormsg(stmt) || !SC_get_errormsg(stmt)[0]) { - if (STMT_NO_MEMORY_ERROR != SC_get_errornumber(stmt)) - SC_set_errormsg(stmt, "Error while executing the query"); - SC_log_error(func, NULL, stmt); - } - return SQL_ERROR; - } - }; - - ENTER_INNER_CONN_CS(conn, func_cs_count); - - if (conn->status == CONN_EXECUTING) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, "Connection is already in use.", - func); - return CleanUp(); - } - - if (!SC_SetExecuting(stmt, TRUE)) { - SC_set_error(stmt, STMT_OPERATION_CANCELLED, "Cancel Request Accepted", - func); - return CleanUp(); - } - - conn->status = CONN_EXECUTING; - - QResultClass *res = SendQueryGetResult(stmt, commit); - if (!res) { - std::string es_conn_err = GetErrorMsg(SC_get_conn(stmt)->opensearchconn); - ConnErrorType es_err_type = GetErrorType(SC_get_conn(stmt)->opensearchconn); - std::string es_parse_err = GetResultParserError(); - if (!es_conn_err.empty()) { - if (es_err_type == ConnErrorType::CONN_ERROR_QUERY_SYNTAX) { - SC_set_error(stmt, STMT_QUERY_SYNTAX_ERROR, es_conn_err.c_str(), - func); - } else { - SC_set_error(stmt, STMT_NO_RESPONSE, es_conn_err.c_str(), func); - } - } else if (!es_parse_err.empty()) { - SC_set_error(stmt, STMT_EXEC_ERROR, es_parse_err.c_str(), func); - } else if (SC_get_errornumber(stmt) <= 0) { - SC_set_error(stmt, STMT_NO_RESPONSE, - "Failed to retrieve error message from result. 
" - "Connection may be down.", - func); - } - return CleanUp(); - } - - if (CONN_DOWN != conn->status) - conn->status = oldstatus; - stmt->status = STMT_FINISHED; - LEAVE_INNER_CONN_CS(func_cs_count, conn); - - // Check the status of the result - if (SC_get_errornumber(stmt) < 0) { - if (QR_command_successful(res)) - SC_set_errornumber(stmt, STMT_OK); - else if (QR_command_nonfatal(res)) - SC_set_errornumber(stmt, STMT_INFO_ONLY); - else - SC_set_errorinfo(stmt, res, 0); - } - - // Set cursor before the first tuple in the list - stmt->currTuple = -1; - SC_set_current_col(stmt, static_cast< int >(stmt->currTuple)); - SC_set_rowset_start(stmt, stmt->currTuple, FALSE); - - // Only perform if query was not aborted - if (!QR_get_aborted(res)) { - // Check if result columns were obtained from query - for (QResultClass *tres = res; tres; tres = tres->next) { - Int2 numcols = QR_NumResultCols(tres); - if (numcols <= 0) - continue; - ARDFields *opts = SC_get_ARDF(stmt); - extend_column_bindings(opts, numcols); - if (opts->bindings) - break; - - // Failed to allocate - QR_Destructor(res); - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Could not get enough free memory to store " - "the binding information", - func); - return CleanUp(); - } - } - - QResultClass *last = SC_get_Result(stmt); - if (last) { - // Statement already contains a result - // Append to end if this hasn't happened - while (last->next != NULL) { - if (last == res) - break; - last = last->next; - } - if (last != res) - last->next = res; - } else { - // Statement does not contain a result - // Assign directly - SC_set_Result(stmt, res); - } - - // This will commit results for SQLExecDirect and will not commit - // results for SQLPrepare since only metadata is required for SQLPrepare - if (commit) { - GetNextResultSet(stmt); - } - - stmt->diag_row_count = res->recent_processed_row_count; - - return CleanUp(); -} - -SQLRETURN GetNextResultSet(StatementClass *stmt) { - ConnectionClass *conn = SC_get_conn(stmt); - QResultClass *q_res = SC_get_Result(stmt); - if ((q_res == NULL) && (conn == NULL)) { - return SQL_ERROR; - } - - SQLSMALLINT total_columns = -1; - if (!SQL_SUCCEEDED(SQLNumResultCols(stmt, &total_columns)) - || (total_columns == -1)) { - return SQL_ERROR; - } - - OpenSearchResult *es_res = OpenSearchGetResult(conn->opensearchconn); - if (es_res != NULL) { - // Save server cursor id to fetch more pages later - if (es_res->opensearch_result_doc.has("cursor")) { - QR_set_server_cursor_id( - q_res, es_res->opensearch_result_doc["cursor"].as_string().c_str()); - } else { - QR_set_server_cursor_id(q_res, NULL); - } - - // Responsible for looping through rows, allocating tuples and - // appending these rows in q_result - CC_Append_Table_Data(es_res->opensearch_result_doc, q_res, total_columns, - *(q_res->fields)); - } - - return SQL_SUCCESS; -} - -RETCODE RePrepareStatement(StatementClass *stmt) { - CSTR func = "RePrepareStatement"; - RETCODE result = SC_initialize_and_recycle(stmt); - if (result != SQL_SUCCESS) - return result; - if (!stmt->statement) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "Expected statement to be allocated.", func); - return SQL_ERROR; - } - - // If an SQLPrepare was performed prior to this, but was left in the - // described state because an error prior to SQLExecute then set the - // statement to finished so it can be recycled. 
- if (stmt->status == STMT_DESCRIBED) - stmt->status = STMT_FINISHED; - - return SQL_SUCCESS; -} - -RETCODE PrepareStatement(StatementClass *stmt, const SQLCHAR *stmt_str, - SQLINTEGER stmt_sz) { - CSTR func = "PrepareStatement"; - RETCODE result = SC_initialize_and_recycle(stmt); - if (result != SQL_SUCCESS) - return result; - - stmt->statement = make_string(stmt_str, stmt_sz, NULL, 0); - if (!stmt->statement) { - SC_set_error(stmt, STMT_NO_MEMORY_ERROR, - "No memory available to store statement", func); - return SQL_ERROR; - } - - // If an SQLPrepare was performed prior to this, but was left in the - // described state because an error prior to SQLExecute then set the - // statement to finished so it can be recycled. - if (stmt->status == STMT_DESCRIBED) - stmt->status = STMT_FINISHED; - stmt->statement_type = (short)statement_type(stmt->statement); - - return SQL_SUCCESS; -} - -QResultClass *SendQueryGetResult(StatementClass *stmt, BOOL commit) { - if (stmt == NULL) - return NULL; - - // Allocate QResultClass - QResultClass *res = QR_Constructor(); - if (res == NULL) - return NULL; - - // Send command - ConnectionClass *conn = SC_get_conn(stmt); - if (OpenSearchExecDirect(conn->opensearchconn, stmt->statement, - conn->connInfo.fetch_size) - != 0) { - QR_Destructor(res); - return NULL; - } - res->rstatus = PORES_COMMAND_OK; - - // Get OpenSearchResult - OpenSearchResult *es_res = OpenSearchGetResult(conn->opensearchconn); - if (es_res == NULL) { - QR_Destructor(res); - return NULL; - } - - BOOL success = - commit - ? CC_from_OpenSearchResult(res, conn, res->cursor_name, *es_res) - : CC_Metadata_from_OpenSearchResult(res, conn, res->cursor_name, - *es_res); - - // Convert result to QResultClass - if (!success) { - QR_Destructor(res); - res = NULL; - } - - if (commit) { - // Deallocate OpenSearchResult - OpenSearchClearResult(es_res); - res->opensearch_result = NULL; - } else { - // Set OpenSearchResult into connection class so it can be used later - res->opensearch_result = es_res; - } - return res; -} - -RETCODE AssignResult(StatementClass *stmt) { - if (stmt == NULL) - return SQL_ERROR; - - QResultClass *res = SC_get_Result(stmt); - if (!res || !res->opensearch_result) { - return SQL_ERROR; - } - - // Commit result to QResultClass - OpenSearchResult *es_res = static_cast< OpenSearchResult * >(res->opensearch_result); - ConnectionClass *conn = SC_get_conn(stmt); - if (!CC_No_Metadata_from_OpenSearchResult(res, conn, res->cursor_name, - *es_res)) { - QR_Destructor(res); - return SQL_ERROR; - } - GetNextResultSet(stmt); - - // Deallocate and return result - OpenSearchClearResult(es_res); - res->opensearch_result = NULL; - return SQL_SUCCESS; -} - -void ClearOpenSearchResult(void *opensearch_result) { - if (opensearch_result != NULL) { - OpenSearchResult *es_res = static_cast< OpenSearchResult * >(opensearch_result); - OpenSearchClearResult(es_res); - } -} - -SQLRETURN OPENSEARCHAPI_Cancel(HSTMT hstmt) { - // Verify pointer validity and convert to StatementClass - if (hstmt == NULL) - return SQL_INVALID_HANDLE; - StatementClass *stmt = (StatementClass *)hstmt; - - // Get execution delegate (if applicable) and initialize return code - StatementClass *opensearchtmt = - (stmt->execute_delegate == NULL) ? 
stmt : stmt->execute_delegate; - SQLRETURN ret = SQL_SUCCESS; - - // Entry common critical section - ENTER_COMMON_CS; - - // Waiting for more data from SQLParamData/SQLPutData - cancel statement - if (opensearchtmt->data_at_exec >= 0) { - // Enter statement critical section - ENTER_STMT_CS(stmt); - - // Clear info and cancel need data - SC_clear_error(stmt); - opensearchtmt->data_at_exec = -1; - opensearchtmt->put_data = FALSE; - cancelNeedDataState(opensearchtmt); - - // Leave statement critical section - LEAVE_STMT_CS(stmt); - } - - // Leave common critical section - LEAVE_COMMON_CS; - - return ret; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_statement.h b/sql-odbc/src/sqlodbc/opensearch_statement.h deleted file mode 100644 index 620e0802e1..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_statement.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _OPENSEARCH_STATEMENT_H_ -#define _OPENSEARCH_STATEMENT_H_ - -#include "opensearch_parse_result.h" -#include "qresult.h" -#include "statement.h" - -#ifdef __cplusplus -extern "C" { -#endif -RETCODE RePrepareStatement(StatementClass *stmt); -RETCODE PrepareStatement(StatementClass* stmt, const SQLCHAR *stmt_str, SQLINTEGER stmt_sz); -RETCODE ExecuteStatement(StatementClass *stmt, BOOL commit); -QResultClass *SendQueryGetResult(StatementClass *stmt, BOOL commit); -RETCODE AssignResult(StatementClass *stmt); -SQLRETURN OPENSEARCHAPI_Cancel(HSTMT hstmt); -SQLRETURN GetNextResultSet(StatementClass *stmt); -void ClearOpenSearchResult(void *opensearch_result); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sql-odbc/src/sqlodbc/opensearch_types.c b/sql-odbc/src/sqlodbc/opensearch_types.c deleted file mode 100644 index 8f5107b715..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_types.c +++ /dev/null @@ -1,1515 +0,0 @@ -#include "opensearch_types.h" - -#include "dlg_specific.h" -#include "environ.h" -#include "opensearch_connection.h" -#include "qresult.h" -#include "statement.h" -#ifndef WIN32 -#include -#endif - -#define EXPERIMENTAL_CURRENTLY - -SQLSMALLINT ansi_to_wtype(const ConnectionClass *self, SQLSMALLINT ansitype) { -#ifndef UNICODE_SUPPORT - return ansitype; -#else - if (!ALLOW_WCHAR(self)) - return ansitype; - switch (ansitype) { - case SQL_CHAR: - return SQL_WCHAR; - case SQL_VARCHAR: - return SQL_WVARCHAR; - case SQL_LONGVARCHAR: - return SQL_WLONGVARCHAR; - } - return ansitype; -#endif /* UNICODE_SUPPORT */ -} - -/* These are NOW the SQL Types reported in SQLGetTypeInfo. */ -SQLSMALLINT sqlTypes[] = { - SQL_BIGINT, - /* SQL_BINARY, -- Commented out because VarBinary is more correct. 
*/ - SQL_BIT, SQL_CHAR, SQL_TYPE_DATE, SQL_DATE, SQL_DECIMAL, SQL_DOUBLE, - SQL_FLOAT, SQL_INTEGER, SQL_LONGVARBINARY, SQL_LONGVARCHAR, SQL_NUMERIC, - SQL_REAL, SQL_SMALLINT, SQL_TYPE_TIME, SQL_TYPE_TIMESTAMP, SQL_TIME, - SQL_TIMESTAMP, SQL_TINYINT, SQL_VARBINARY, SQL_VARCHAR, -#ifdef UNICODE_SUPPORT - SQL_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR, -#endif /* UNICODE_SUPPORT */ - SQL_GUID, -/* AFAIK SQL_INTERVAL types cause troubles in some spplications */ -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - SQL_INTERVAL_MONTH, SQL_INTERVAL_YEAR, SQL_INTERVAL_YEAR_TO_MONTH, - SQL_INTERVAL_DAY, SQL_INTERVAL_HOUR, SQL_INTERVAL_MINUTE, - SQL_INTERVAL_SECOND, SQL_INTERVAL_DAY_TO_HOUR, SQL_INTERVAL_DAY_TO_MINUTE, - SQL_INTERVAL_DAY_TO_SECOND, SQL_INTERVAL_HOUR_TO_MINUTE, - SQL_INTERVAL_HOUR_TO_SECOND, SQL_INTERVAL_MINUTE_TO_SECOND, -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - 0}; - -#ifdef ODBCINT64 -#define ALLOWED_C_BIGINT SQL_C_SBIGINT -/* #define ALLOWED_C_BIGINT SQL_C_CHAR */ /* Delphi should be either ? */ -#else -#define ALLOWED_C_BIGINT SQL_C_CHAR -#endif - -OID opensearch_true_type(const ConnectionClass *conn, OID type, OID basetype) { - if (0 == basetype) - return type; - else if (0 == type) - return basetype; - else if (type == (OID)conn->lobj_type) - return type; - return basetype; -} - -#define MONTH_BIT (1 << 17) -#define YEAR_BIT (1 << 18) -#define DAY_BIT (1 << 19) -#define HOUR_BIT (1 << 26) -#define MINUTE_BIT (1 << 27) -#define SECOND_BIT (1 << 28) - -static Int4 getCharColumnSizeX(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - int p = -1, maxsize; - MYLOG(OPENSEARCH_TRACE, - "entering type=%d, atttypmod=%d, adtsize_or=%d, unknown = %d\n", type, - atttypmod, adtsize_or_longestlen, handle_unknown_size_as); - - maxsize = MAX_VARCHAR_SIZE; -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn) && isSqlServr() && maxsize > 4000) - maxsize = 4000; -#endif /* UNICODE_SUPPORT */ - - if (maxsize == TEXT_FIELD_SIZE + 1) /* magic length for testing */ - maxsize = 0; - - /* - * Static ColumnSize (i.e., the Maximum ColumnSize of the datatype) This - * has nothing to do with a result set. - */ - MYLOG(OPENSEARCH_DEBUG, "!!! atttypmod < 0 ?\n"); - if (atttypmod < 0 && adtsize_or_longestlen < 0) - return maxsize; - - MYLOG(OPENSEARCH_DEBUG, "!!! adtsize_or_logngest=%d\n", adtsize_or_longestlen); - p = adtsize_or_longestlen; /* longest */ - /* - * Catalog Result Sets -- use assigned column width (i.e., from - * set_tuplefield_string) - */ - MYLOG(OPENSEARCH_DEBUG, "!!! 
catalog_result=%d\n", handle_unknown_size_as); - if (UNKNOWNS_AS_LONGEST == handle_unknown_size_as) { - MYLOG(OPENSEARCH_DEBUG, "LONGEST: p = %d\n", p); - if (p > 0 && (atttypmod < 0 || atttypmod > p)) - return p; - } - if (TYPE_MAY_BE_ARRAY(type)) { - if (p > 0) - return p; - return maxsize; - } - - /* Size is unknown -- handle according to parameter */ - if (atttypmod > 0) /* maybe the length is known */ - { - return atttypmod; - } - - /* The type is really unknown */ - switch (handle_unknown_size_as) { - case UNKNOWNS_AS_DONTKNOW: - return -1; - case UNKNOWNS_AS_LONGEST: - case UNKNOWNS_AS_MAX: - break; - default: - return -1; - } - if (maxsize <= 0) - return maxsize; - switch (type) { - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_TEXT: - return maxsize; - } - - if (p > maxsize) - maxsize = p; - return maxsize; -} - -/* - * Specify when handle_unknown_size_as parameter is unused - */ -#define UNUSED_HANDLE_UNKNOWN_SIZE_AS (-2) - -static SQLSMALLINT getNumericDecimalDigitsX(const ConnectionClass *conn, - OID type, int atttypmod, - int adtsize_or_longest, - int handle_unknown_size_as) { - UNUSED(conn, handle_unknown_size_as); - SQLSMALLINT default_decimal_digits = 6; - - MYLOG(OPENSEARCH_TRACE, "entering type=%d, atttypmod=%d\n", type, atttypmod); - - if (atttypmod < 0 && adtsize_or_longest < 0) - return default_decimal_digits; - - if (atttypmod > -1) - return (SQLSMALLINT)(atttypmod & 0xffff); - if (adtsize_or_longest <= 0) - return default_decimal_digits; - adtsize_or_longest >>= 16; /* extract the scale part */ - return (SQLSMALLINT)adtsize_or_longest; -} - -static Int4 /* OpenSearch restritiction */ -getNumericColumnSizeX(const ConnectionClass *conn, OID type, int atttypmod, - int adtsize_or_longest, int handle_unknown_size_as) { - UNUSED(conn); - Int4 default_column_size = 28; - MYLOG(OPENSEARCH_TRACE, "entering type=%d, typmod=%d\n", type, atttypmod); - - if (atttypmod > -1) - return (atttypmod >> 16) & 0xffff; - switch (handle_unknown_size_as) { - case UNKNOWNS_AS_DONTKNOW: - return SQL_NO_TOTAL; - } - if (adtsize_or_longest <= 0) - return default_column_size; - adtsize_or_longest %= (1 << 16); /* extract the precision part */ - switch (handle_unknown_size_as) { - case UNKNOWNS_AS_MAX: - return adtsize_or_longest > default_column_size - ? adtsize_or_longest - : default_column_size; - default: - if (adtsize_or_longest < 10) - adtsize_or_longest = 10; - } - return adtsize_or_longest; -} - -static SQLSMALLINT getTimestampDecimalDigitsX(const ConnectionClass *conn, - OID type, int atttypmod) { - UNUSED(conn); - MYLOG(OPENSEARCH_DEBUG, "type=%d, atttypmod=%d\n", type, atttypmod); - return (SQLSMALLINT)(atttypmod > -1 ? atttypmod : 6); -} - -#ifdef ES_INTERVAL_AS_SQL_INTERVAL -static SQLSMALLINT getIntervalDecimalDigits(OID type, int atttypmod) { - Int4 prec; - - MYLOG(OPENSEARCH_TRACE, "entering type=%d, atttypmod=%d\n", type, atttypmod); - - if ((atttypmod & SECOND_BIT) == 0) - return 0; - return (SQLSMALLINT)((prec = atttypmod & 0xffff) == 0xffff ? 
6 : prec); -} -#endif // ES_INTERVAL_AS_SQL_INTERVAL - -SQLSMALLINT -opensearchtype_attr_to_concise_type(const ConnectionClass *conn, OID type, - int typmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - SQLSMALLINT sqltype; -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - BOOL bLongVarchar, bFixed = FALSE; - - switch (type) { - case OPENSEARCH_TYPE_CHAR: - return ansi_to_wtype(conn, SQL_CHAR); - case OPENSEARCH_TYPE_NAME: - case OPENSEARCH_TYPE_REFCURSOR: - return ansi_to_wtype(conn, SQL_VARCHAR); - - case OPENSEARCH_TYPE_BPCHAR: - bFixed = TRUE; - case OPENSEARCH_TYPE_VARCHAR: - if (getCharColumnSizeX(conn, type, typmod, adtsize_or_longestlen, - handle_unknown_size_as) - > MAX_VARCHAR_SIZE) - bLongVarchar = TRUE; - else - bLongVarchar = FALSE; - return ansi_to_wtype(conn, bLongVarchar - ? SQL_LONGVARCHAR - : (bFixed ? SQL_CHAR : SQL_VARCHAR)); - case OPENSEARCH_TYPE_TEXT: - bLongVarchar = DEFAULT_TEXTASLONGVARCHAR; - if (bLongVarchar) { - int column_size = getCharColumnSizeX(conn, type, typmod, - adtsize_or_longestlen, - handle_unknown_size_as); - if (column_size > 0 && column_size <= MAX_VARCHAR_SIZE) - bLongVarchar = FALSE; - } - return ansi_to_wtype(conn, - bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR); - - case OPENSEARCH_TYPE_BYTEA: - return SQL_VARBINARY; - case OPENSEARCH_TYPE_LO_UNDEFINED: - return SQL_LONGVARBINARY; - - case OPENSEARCH_TYPE_INT2: - return SQL_SMALLINT; - - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - case OPENSEARCH_TYPE_INT4: - return SQL_INTEGER; - - /* Change this to SQL_BIGINT for ODBC v3 bjm 2001-01-23 */ - case OPENSEARCH_TYPE_INT8: - if (conn->ms_jet) - return SQL_NUMERIC; /* maybe a little better than SQL_VARCHAR */ - return SQL_BIGINT; - - case OPENSEARCH_TYPE_NUMERIC: - return SQL_NUMERIC; - - case OPENSEARCH_TYPE_FLOAT4: - return SQL_REAL; - case OPENSEARCH_TYPE_FLOAT8: - return SQL_FLOAT; - case OPENSEARCH_TYPE_DATE: - if (EN_is_odbc3(env)) - return SQL_TYPE_DATE; - return SQL_DATE; - case OPENSEARCH_TYPE_TIME: - if (EN_is_odbc3(env)) - return SQL_TYPE_TIME; - return SQL_TIME; - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - case OPENSEARCH_TYPE_TIMESTAMP: - if (EN_is_odbc3(env)) - return SQL_TYPE_TIMESTAMP; - return SQL_TIMESTAMP; - case OPENSEARCH_TYPE_MONEY: - return SQL_FLOAT; - case OPENSEARCH_TYPE_BOOL: - return SQL_BIT; - case OPENSEARCH_TYPE_XML: - return ansi_to_wtype(conn, SQL_LONGVARCHAR); - case OPENSEARCH_TYPE_INET: - case OPENSEARCH_TYPE_CIDR: - case OPENSEARCH_TYPE_MACADDR: - return ansi_to_wtype(conn, SQL_VARCHAR); - case OPENSEARCH_TYPE_UUID: - return SQL_GUID; - - case OPENSEARCH_TYPE_INTERVAL: -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - if (sqltype = get_interval_type(atttypmod, NULL), 0 != sqltype) - return sqltype; -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - return ansi_to_wtype(conn, SQL_VARCHAR); - - default: - - /* - * first, check to see if 'type' is in list. If not, look up - * with query. Add oid, name to list. If it's already in - * list, just return. 
- */ - /* hack until permanent type is available */ - if (type == (OID)conn->lobj_type) - return SQL_LONGVARBINARY; - - bLongVarchar = DEFAULT_UNKNOWNSASLONGVARCHAR; - if (bLongVarchar) { - int column_size = getCharColumnSizeX(conn, type, typmod, - adtsize_or_longestlen, - handle_unknown_size_as); - if (column_size > 0 && column_size <= MAX_VARCHAR_SIZE) - bLongVarchar = FALSE; - } -#ifdef EXPERIMENTAL_CURRENTLY - return ansi_to_wtype(conn, - bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR); -#else - return bLongVarchar ? SQL_LONGVARCHAR : SQL_VARCHAR; -#endif /* EXPERIMENTAL_CURRENTLY */ - } -} - -SQLSMALLINT -opensearchtype_attr_to_sqldesctype(const ConnectionClass *conn, OID type, int typmod, - int adtsize_or_longestlen, - int handle_unknown_size_as) { - SQLSMALLINT rettype; - -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - if (OPENSEARCH_TYPE_INTERVAL == type) - return SQL_INTERVAL; -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - switch (rettype = opensearchtype_attr_to_concise_type( - conn, type, typmod, adtsize_or_longestlen, - handle_unknown_size_as)) { - case SQL_TYPE_DATE: - case SQL_TYPE_TIME: - case SQL_TYPE_TIMESTAMP: - return SQL_DATETIME; - } - return rettype; -} - -SQLSMALLINT -opensearchtype_attr_to_datetime_sub(const ConnectionClass *conn, OID type, - int typmod) { - UNUSED(conn, type, typmod); - return -1; -} - -SQLSMALLINT -opensearchtype_attr_to_ctype(const ConnectionClass *conn, OID type, int typmod) { - UNUSED(typmod); - EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - SQLSMALLINT ctype; -#endif /* ES_INTERVAL_A_SQL_INTERVAL */ - - switch (type) { - case OPENSEARCH_TYPE_INT8: - if (!conn->ms_jet) - return ALLOWED_C_BIGINT; - return SQL_C_CHAR; - case OPENSEARCH_TYPE_NUMERIC: - return SQL_C_CHAR; - case OPENSEARCH_TYPE_INT2: - return SQL_C_SSHORT; - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - return SQL_C_ULONG; - case OPENSEARCH_TYPE_INT4: - return SQL_C_SLONG; - case OPENSEARCH_TYPE_FLOAT4: - return SQL_C_FLOAT; - case OPENSEARCH_TYPE_FLOAT8: - return SQL_C_DOUBLE; - case OPENSEARCH_TYPE_DATE: - if (EN_is_odbc3(env)) - return SQL_C_TYPE_DATE; - return SQL_C_DATE; - case OPENSEARCH_TYPE_TIME: - if (EN_is_odbc3(env)) - return SQL_C_TYPE_TIME; - return SQL_C_TIME; - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - case OPENSEARCH_TYPE_TIMESTAMP: - if (EN_is_odbc3(env)) - return SQL_C_TYPE_TIMESTAMP; - return SQL_C_TIMESTAMP; - case OPENSEARCH_TYPE_MONEY: - return SQL_C_FLOAT; - case OPENSEARCH_TYPE_BOOL: - return SQL_C_BIT; - - case OPENSEARCH_TYPE_BYTEA: - return SQL_C_BINARY; - case OPENSEARCH_TYPE_LO_UNDEFINED: - return SQL_C_BINARY; - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_TEXT: - return ansi_to_wtype(conn, SQL_C_CHAR); - case OPENSEARCH_TYPE_UUID: - if (!conn->ms_jet) - return SQL_C_GUID; - return ansi_to_wtype(conn, SQL_C_CHAR); - - case OPENSEARCH_TYPE_INTERVAL: -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - if (ctype = get_interval_type(atttypmod, NULL), 0 != ctype) - return ctype; -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - return ansi_to_wtype(conn, SQL_CHAR); - - default: - /* hack until permanent type is available */ - if (type == (OID)conn->lobj_type) - return SQL_C_BINARY; - - /* Experimental, Does this work ? 
*/ -#ifdef EXPERIMENTAL_CURRENTLY - return ansi_to_wtype(conn, SQL_C_CHAR); -#else - return SQL_C_CHAR; -#endif /* EXPERIMENTAL_CURRENTLY */ - } -} - -const char *opensearchtype_attr_to_name(const ConnectionClass *conn, OID type, - int typmod, BOOL auto_increment) { - UNUSED(conn, typmod, conn, auto_increment); - switch (type) { - case OPENSEARCH_TYPE_BOOL: - return OPENSEARCH_TYPE_NAME_BOOLEAN; - case OPENSEARCH_TYPE_INT1: - return OPENSEARCH_TYPE_NAME_BYTE; - case OPENSEARCH_TYPE_INT2: - return OPENSEARCH_TYPE_NAME_SHORT; - case OPENSEARCH_TYPE_INT4: - return OPENSEARCH_TYPE_NAME_INTEGER; - case OPENSEARCH_TYPE_INT8: - return OPENSEARCH_TYPE_NAME_LONG; - case OPENSEARCH_TYPE_HALF_FLOAT: - return OPENSEARCH_TYPE_NAME_HALF_FLOAT; - case OPENSEARCH_TYPE_FLOAT4: - return OPENSEARCH_TYPE_NAME_FLOAT; - case OPENSEARCH_TYPE_FLOAT8: - return OPENSEARCH_TYPE_NAME_DOUBLE; - case OPENSEARCH_TYPE_SCALED_FLOAT: - return OPENSEARCH_TYPE_NAME_SCALED_FLOAT; - case OPENSEARCH_TYPE_KEYWORD: - return OPENSEARCH_TYPE_NAME_KEYWORD; - case OPENSEARCH_TYPE_TEXT: - return OPENSEARCH_TYPE_NAME_TEXT; - case OPENSEARCH_TYPE_NESTED: - return OPENSEARCH_TYPE_NAME_NESTED; - case OPENSEARCH_TYPE_DATETIME: - return OPENSEARCH_TYPE_NAME_DATE; - case OPENSEARCH_TYPE_TIMESTAMP: - return OPENSEARCH_TYPE_NAME_TIMESTAMP; - case OPENSEARCH_TYPE_OBJECT: - return OPENSEARCH_TYPE_NAME_OBJECT; - case OPENSEARCH_TYPE_VARCHAR: - return OPENSEARCH_TYPE_NAME_VARCHAR; - default: - return OPENSEARCH_TYPE_NAME_UNSUPPORTED; - } -} - -Int4 /* OpenSearch restriction */ -opensearchtype_attr_column_size(const ConnectionClass *conn, OID type, int atttypmod, - int adtsize_or_longest, int handle_unknown_size_as) { - UNUSED(handle_unknown_size_as, adtsize_or_longest, atttypmod, conn); - switch (type) { - case OPENSEARCH_TYPE_BOOL: - return 1; - case OPENSEARCH_TYPE_INT1: - return 3; - case OPENSEARCH_TYPE_INT2: - return 5; - case OPENSEARCH_TYPE_INT4: - return 10; - case OPENSEARCH_TYPE_INT8: - return 19; - case OPENSEARCH_TYPE_HALF_FLOAT: - return 7; - case OPENSEARCH_TYPE_FLOAT4: - return 7; - case OPENSEARCH_TYPE_FLOAT8: - return 15; - case OPENSEARCH_TYPE_SCALED_FLOAT: - return 15; - case OPENSEARCH_TYPE_KEYWORD: - return 256; - case OPENSEARCH_TYPE_TEXT: - return INT_MAX; - case OPENSEARCH_TYPE_NESTED: - return 0; - case OPENSEARCH_TYPE_DATETIME: - return 24; - case OPENSEARCH_TYPE_TIMESTAMP: - return 24; - case OPENSEARCH_TYPE_OBJECT: - return 0; - default: - return adtsize_or_longest; - } -} - -SQLSMALLINT -opensearchtype_attr_precision(const ConnectionClass *conn, OID type, int atttypmod, - int adtsize_or_longest, int handle_unknown_size_as) { - switch (type) { - case OPENSEARCH_TYPE_NUMERIC: - return (SQLSMALLINT)getNumericColumnSizeX(conn, type, atttypmod, - adtsize_or_longest, - handle_unknown_size_as); - case OPENSEARCH_TYPE_TIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - return getTimestampDecimalDigitsX(conn, type, atttypmod); -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - case OPENSEARCH_TYPE_INTERVAL: - return getIntervalDecimalDigits(type, atttypmod); -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - } - return -1; -} - -Int4 opensearchtype_attr_display_size(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - int dsize; - - switch (type) { - case OPENSEARCH_TYPE_INT2: - return 6; - - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - return 10; - - case OPENSEARCH_TYPE_INT4: - return 11; - - case OPENSEARCH_TYPE_INT8: - return 20; /* 
signed: 19 digits + sign */ - - case OPENSEARCH_TYPE_NUMERIC: - dsize = getNumericColumnSizeX(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - return dsize <= 0 ? dsize : dsize + 2; - - case OPENSEARCH_TYPE_MONEY: - return 15; /* ($9,999,999.99) */ - - case OPENSEARCH_TYPE_FLOAT4: /* a sign, OPENSEARCH_REAL_DIGITS digits, a decimal point, - the letter E, a sign, and 2 digits */ - return (1 + OPENSEARCH_REAL_DIGITS + 1 + 1 + 3); - - case OPENSEARCH_TYPE_FLOAT8: /* a sign, OPENSEARCH_DOUBLE_DIGITS digits, a decimal - point, the letter E, a sign, and 3 digits */ - return (1 + OPENSEARCH_DOUBLE_DIGITS + 1 + 1 + 1 + 3); - - case OPENSEARCH_TYPE_MACADDR: - return 17; - case OPENSEARCH_TYPE_INET: - case OPENSEARCH_TYPE_CIDR: - return sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128"); - case OPENSEARCH_TYPE_UUID: - return 36; - case OPENSEARCH_TYPE_INTERVAL: - return 30; - - /* Character types use regular precision */ - default: - return opensearchtype_attr_column_size(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - } -} - -Int4 opensearchtype_attr_buffer_length(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - int dsize; - - switch (type) { - case OPENSEARCH_TYPE_INT2: - return 2; /* sizeof(SQLSMALLINT) */ - - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - case OPENSEARCH_TYPE_INT4: - return 4; /* sizeof(SQLINTEGER) */ - - case OPENSEARCH_TYPE_INT8: - if (SQL_C_CHAR == opensearchtype_attr_to_ctype(conn, type, atttypmod)) - return 20; /* signed: 19 digits + sign */ - return 8; /* sizeof(SQLSBININT) */ - - case OPENSEARCH_TYPE_NUMERIC: - dsize = getNumericColumnSizeX(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - return dsize <= 0 ? 
dsize : dsize + 2; - - case OPENSEARCH_TYPE_FLOAT4: - case OPENSEARCH_TYPE_MONEY: - return 4; /* sizeof(SQLREAL) */ - - case OPENSEARCH_TYPE_FLOAT8: - return 8; /* sizeof(SQLFLOAT) */ - - case OPENSEARCH_TYPE_DATE: - case OPENSEARCH_TYPE_TIME: - return 6; /* sizeof(DATE(TIME)_STRUCT) */ - - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - return 16; /* sizeof(TIMESTAMP_STRUCT) */ - - case OPENSEARCH_TYPE_MACADDR: - return 17; - case OPENSEARCH_TYPE_INET: - case OPENSEARCH_TYPE_CIDR: - return sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128"); - case OPENSEARCH_TYPE_UUID: - return 16; /* sizeof(SQLGUID) */ - - /* Character types use the default precision */ - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_BPCHAR: { - int coef = 1; - Int4 prec = opensearchtype_attr_column_size(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as), - maxvarc; - if (SQL_NO_TOTAL == prec) - return prec; -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) - return prec * WCLEN; -#endif /* UNICODE_SUPPORT */ - coef = conn->mb_maxbyte_per_char; - if (coef < 2) - /* CR -> CR/LF */ - coef = 2; - if (coef == 1) - return prec; - maxvarc = MAX_VARCHAR_SIZE; - if (prec <= maxvarc && prec * coef > maxvarc) - return maxvarc; - return coef * prec; - } -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - case OPENSEARCH_TYPE_INTERVAL: - return sizeof(SQL_INTERVAL_STRUCT); -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - - default: - return opensearchtype_attr_column_size(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - } -} - -/* - */ -Int4 opensearchtype_attr_desclength(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - int dsize; - - switch (type) { - case OPENSEARCH_TYPE_INT2: - return 2; - - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - case OPENSEARCH_TYPE_INT4: - return 4; - - case OPENSEARCH_TYPE_INT8: - return 20; /* signed: 19 digits + sign */ - - case OPENSEARCH_TYPE_NUMERIC: - dsize = getNumericColumnSizeX(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - return dsize <= 0 ? 
dsize : dsize + 2; - - case OPENSEARCH_TYPE_FLOAT4: - case OPENSEARCH_TYPE_MONEY: - return 4; - - case OPENSEARCH_TYPE_FLOAT8: - return 8; - - case OPENSEARCH_TYPE_DATE: - case OPENSEARCH_TYPE_TIME: - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - case OPENSEARCH_TYPE_TIMESTAMP: - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_BPCHAR: - return opensearchtype_attr_column_size(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - default: - return opensearchtype_attr_column_size(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - } -} - -Int2 opensearchtype_attr_decimal_digits(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as) { - switch (type) { - case OPENSEARCH_TYPE_INT2: - case OPENSEARCH_TYPE_OID: - case OPENSEARCH_TYPE_XID: - case OPENSEARCH_TYPE_INT4: - case OPENSEARCH_TYPE_INT8: - case OPENSEARCH_TYPE_FLOAT4: - case OPENSEARCH_TYPE_FLOAT8: - case OPENSEARCH_TYPE_MONEY: - case OPENSEARCH_TYPE_BOOL: - - /* - * Number of digits to the right of the decimal point in - * "yyyy-mm-dd hh:mm:ss[.f...]" - */ - case OPENSEARCH_TYPE_ABSTIME: - case OPENSEARCH_TYPE_TIMESTAMP: - return 0; - case OPENSEARCH_TYPE_TIME: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - /* return 0; */ - return getTimestampDecimalDigitsX(conn, type, atttypmod); - - case OPENSEARCH_TYPE_NUMERIC: - return getNumericDecimalDigitsX(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - -#ifdef ES_INTERVAL_AS_SQL_INTERVAL - case OPENSEARCH_TYPE_INTERVAL: - return getIntervalDecimalDigits(type, atttypmod); -#endif /* ES_INTERVAL_AS_SQL_INTERVAL */ - - default: - return -1; - } -} - -Int2 opensearchtype_attr_scale(const ConnectionClass *conn, OID type, int atttypmod, - int adtsize_or_longestlen, - int handle_unknown_size_as) { - switch (type) { - case OPENSEARCH_TYPE_NUMERIC: - return getNumericDecimalDigitsX(conn, type, atttypmod, - adtsize_or_longestlen, - handle_unknown_size_as); - } - return -1; -} - -Int4 opensearchtype_attr_transfer_octet_length(const ConnectionClass *conn, OID type, - int atttypmod, - int handle_unknown_size_as) { - int coef = 1; - Int4 maxvarc, column_size; - - switch (type) { - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_UNKNOWN: - column_size = opensearchtype_attr_column_size( - conn, type, atttypmod, OPENSEARCH_ADT_UNSET, - handle_unknown_size_as); - if (SQL_NO_TOTAL == column_size) - return column_size; -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) - return column_size * WCLEN; -#endif /* UNICODE_SUPPORT */ - coef = conn->mb_maxbyte_per_char; - if (coef < 2) - /* CR -> CR/LF */ - coef = 2; - if (coef == 1) - return column_size; - maxvarc = MAX_VARCHAR_SIZE; - if (column_size <= maxvarc && column_size * coef > maxvarc) - return maxvarc; - return coef * column_size; - case OPENSEARCH_TYPE_BYTEA: - return opensearchtype_attr_column_size(conn, type, atttypmod, - OPENSEARCH_ADT_UNSET, - handle_unknown_size_as); - default: - if (type == (OID)conn->lobj_type) - return opensearchtype_attr_column_size(conn, type, atttypmod, - OPENSEARCH_ADT_UNSET, - handle_unknown_size_as); - } - return -1; -} - -/* - * Casting parameters e.g. ?::timestamp is much more flexible - * than specifying parameter datatype oids determined by - * sqltype_to_bind_opensearchtype() via parse message. 
- */ -const char *sqltype_to_opensearchcast(const ConnectionClass *conn, - SQLSMALLINT fSqlType) { - const char *openSearchCast = NULL_STRING; - - switch (fSqlType) { - case SQL_BINARY: - case SQL_VARBINARY: - openSearchCast = "::bytea"; - break; - case SQL_TYPE_DATE: - case SQL_DATE: - openSearchCast = "::date"; - break; - case SQL_DECIMAL: - case SQL_NUMERIC: - openSearchCast = "::numeric"; - break; - case SQL_BIGINT: - openSearchCast = "::int8"; - break; - case SQL_INTEGER: - openSearchCast = "::int4"; - break; - case SQL_REAL: - openSearchCast = "::float4"; - break; - case SQL_SMALLINT: - case SQL_TINYINT: - openSearchCast = "::int2"; - break; - case SQL_TIME: - case SQL_TYPE_TIME: - openSearchCast = "::time"; - break; - case SQL_TIMESTAMP: - case SQL_TYPE_TIMESTAMP: - openSearchCast = "::timestamp"; - break; - case SQL_GUID: - if (OPENSEARCH_VERSION_GE(conn, 8.3)) - openSearchCast = "::uuid"; - break; - case SQL_INTERVAL_MONTH: - case SQL_INTERVAL_YEAR: - case SQL_INTERVAL_YEAR_TO_MONTH: - case SQL_INTERVAL_DAY: - case SQL_INTERVAL_HOUR: - case SQL_INTERVAL_MINUTE: - case SQL_INTERVAL_SECOND: - case SQL_INTERVAL_DAY_TO_HOUR: - case SQL_INTERVAL_DAY_TO_MINUTE: - case SQL_INTERVAL_DAY_TO_SECOND: - case SQL_INTERVAL_HOUR_TO_MINUTE: - case SQL_INTERVAL_HOUR_TO_SECOND: - case SQL_INTERVAL_MINUTE_TO_SECOND: - openSearchCast = "::interval"; - break; - } - - return openSearchCast; -} - -OID sqltype_to_opensearchtype(const ConnectionClass *conn, SQLSMALLINT fSqlType) { - OID openSearchType = 0; - switch (fSqlType) { - case SQL_BINARY: - openSearchType = OPENSEARCH_TYPE_BYTEA; - break; - - case SQL_CHAR: - openSearchType = OPENSEARCH_TYPE_BPCHAR; - break; - -#ifdef UNICODE_SUPPORT - case SQL_WCHAR: - openSearchType = OPENSEARCH_TYPE_BPCHAR; - break; -#endif /* UNICODE_SUPPORT */ - - case SQL_BIT: - openSearchType = OPENSEARCH_TYPE_BOOL; - break; - - case SQL_TYPE_DATE: - case SQL_DATE: - openSearchType = OPENSEARCH_TYPE_DATE; - break; - - case SQL_DOUBLE: - case SQL_FLOAT: - openSearchType = OPENSEARCH_TYPE_FLOAT8; - break; - - case SQL_DECIMAL: - case SQL_NUMERIC: - openSearchType = OPENSEARCH_TYPE_NUMERIC; - break; - - case SQL_BIGINT: - openSearchType = OPENSEARCH_TYPE_INT8; - break; - - case SQL_INTEGER: - openSearchType = OPENSEARCH_TYPE_INT4; - break; - - case SQL_LONGVARBINARY: - openSearchType = conn->lobj_type; - break; - - case SQL_LONGVARCHAR: - openSearchType = OPENSEARCH_TYPE_VARCHAR; - break; - -#ifdef UNICODE_SUPPORT - case SQL_WLONGVARCHAR: - openSearchType = OPENSEARCH_TYPE_VARCHAR; - break; -#endif /* UNICODE_SUPPORT */ - - case SQL_REAL: - openSearchType = OPENSEARCH_TYPE_FLOAT4; - break; - - case SQL_SMALLINT: - case SQL_TINYINT: - openSearchType = OPENSEARCH_TYPE_INT2; - break; - - case SQL_TIME: - case SQL_TYPE_TIME: - openSearchType = OPENSEARCH_TYPE_TIME; - break; - - case SQL_TIMESTAMP: - case SQL_TYPE_TIMESTAMP: - openSearchType = OPENSEARCH_TYPE_TIMESTAMP; - break; - - case SQL_VARBINARY: - openSearchType = OPENSEARCH_TYPE_BYTEA; - break; - - case SQL_VARCHAR: - openSearchType = OPENSEARCH_TYPE_VARCHAR; - break; - -#ifdef UNICODE_SUPPORT - case SQL_WVARCHAR: - openSearchType = OPENSEARCH_TYPE_VARCHAR; - break; -#endif /* UNICODE_SUPPORT */ - - case SQL_GUID: - if (OPENSEARCH_VERSION_GE(conn, 8.3)) - openSearchType = OPENSEARCH_TYPE_UUID; - break; - - case SQL_INTERVAL_MONTH: - case SQL_INTERVAL_YEAR: - case SQL_INTERVAL_YEAR_TO_MONTH: - case SQL_INTERVAL_DAY: - case SQL_INTERVAL_HOUR: - case SQL_INTERVAL_MINUTE: - case SQL_INTERVAL_SECOND: - case 
SQL_INTERVAL_DAY_TO_HOUR: - case SQL_INTERVAL_DAY_TO_MINUTE: - case SQL_INTERVAL_DAY_TO_SECOND: - case SQL_INTERVAL_HOUR_TO_MINUTE: - case SQL_INTERVAL_HOUR_TO_SECOND: - case SQL_INTERVAL_MINUTE_TO_SECOND: - openSearchType = OPENSEARCH_TYPE_INTERVAL; - break; - } - - return openSearchType; -} - -static int getAtttypmodEtc(const StatementClass *stmt, int col, - int *adtsize_or_longestlen) { - int atttypmod = -1; - - if (NULL != adtsize_or_longestlen) - *adtsize_or_longestlen = OPENSEARCH_ADT_UNSET; - if (col >= 0) { - const QResultClass *res; - - if (res = SC_get_Curres(stmt), NULL != res) { - atttypmod = QR_get_atttypmod(res, col); - if (NULL != adtsize_or_longestlen) { - if (stmt->catalog_result) - *adtsize_or_longestlen = QR_get_fieldsize(res, col); - else { - *adtsize_or_longestlen = QR_get_display_size(res, col); - if (OPENSEARCH_TYPE_NUMERIC == QR_get_field_type(res, col) - && atttypmod < 0 && *adtsize_or_longestlen > 0) { - SQLULEN i; - size_t sval, maxscale = 0; - const char *tval, *sptr; - - for (i = 0; i < res->num_cached_rows; i++) { - tval = QR_get_value_backend_text(res, i, col); - if (NULL != tval) { - sptr = strchr(tval, '.'); - if (NULL != sptr) { - sval = strlen(tval) - (sptr + 1 - tval); - if (sval > maxscale) - maxscale = sval; - } - } - } - *adtsize_or_longestlen += (int)(maxscale << 16); - } - } - } - } - } - return atttypmod; -} - -/* - * There are two ways of calling this function: - * - * 1. When going through the supported ES types (SQLGetTypeInfo) - * - * 2. When taking any type id (SQLColumns, SQLGetData) - * - * The first type will always work because all the types defined are returned - *here. The second type will return a default based on global parameter when it - *does not know. This allows for supporting types that are unknown. All - *other es routines in here return a suitable default. - */ -SQLSMALLINT -opensearchtype_to_concise_type(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_to_concise_type(SC_get_conn(stmt), type, - atttypmod, adtsize_or_longestlen, - handle_unknown_size_as); -} - -SQLSMALLINT -opensearchtype_to_sqldesctype(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int adtsize_or_longestlen; - int atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - - return opensearchtype_attr_to_sqldesctype(SC_get_conn(stmt), type, - atttypmod, adtsize_or_longestlen, - handle_unknown_size_as); -} - -const char *opensearchtype_to_name(const StatementClass *stmt, OID type, int col, - BOOL auto_increment) { - int atttypmod = getAtttypmodEtc(stmt, col, NULL); - - return opensearchtype_attr_to_name(SC_get_conn(stmt), type, atttypmod, - auto_increment); -} - -/* - * This corresponds to "precision" in ODBC 2.x. - * - * For OPENSEARCH_TYPE_VARCHAR, OPENSEARCH_TYPE_BPCHAR, OPENSEARCH_TYPE_NUMERIC, SQLColumns will - * override this length with the atttypmod length from es_attribute . - * - * If col >= 0, then will attempt to get the info from the result set. - * This is used for functions SQLDescribeCol and SQLColAttributes. 
- */ -Int4 /* OpenSearch restriction */ -opensearchtype_column_size(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_column_size( - SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, - stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); -} - -/* - * precision in ODBC 3.x. - */ -SQLSMALLINT -opensearchtype_precision(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_precision( - SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, - stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); -} - -Int4 opensearchtype_display_size(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_display_size( - SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, - stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); -} - -/* - * The length in bytes of data transferred on an SQLGetData, SQLFetch, - * or SQLFetchScroll operation if SQL_C_DEFAULT is specified. - */ -Int4 opensearchtype_buffer_length(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_buffer_length( - SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, - stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); -} - -/* - */ -Int4 opensearchtype_desclength(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_desclength( - SC_get_conn(stmt), type, atttypmod, adtsize_or_longestlen, - stmt->catalog_result ? UNKNOWNS_AS_LONGEST : handle_unknown_size_as); -} - -#ifdef NOT_USED -/* - * Transfer octet length. - */ -Int4 opensearchtype_transfer_octet_length(const StatementClass *stmt, OID type, - int column_size) { - ConnectionClass *conn = SC_get_conn(stmt); - - int coef = 1; - Int4 maxvarc; - switch (type) { - case OPENSEARCH_TYPE_VARCHAR: - case OPENSEARCH_TYPE_BPCHAR: - case OPENSEARCH_TYPE_TEXT: - if (SQL_NO_TOTAL == column_size) - return column_size; -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) - return column_size * WCLEN; -#endif /* UNICODE_SUPPORT */ - coef = conn->mb_maxbyte_per_char; - if (coef < 2 && (conn->connInfo).lf_conversion) - /* CR -> CR/LF */ - coef = 2; - if (coef == 1) - return column_size; - maxvarc = conn->connInfo.drivers.max_varchar_size; - if (column_size <= maxvarc && column_size * coef > maxvarc) - return maxvarc; - return coef * column_size; - case OPENSEARCH_TYPE_BYTEA: - return column_size; - default: - if (type == conn->lobj_type) - return column_size; - } - return -1; -} -#endif /* NOT_USED */ - -/* - * corresponds to "min_scale" in ODBC 2.x. - */ -Int2 opensearchtype_min_decimal_digits(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return -1; -} - -/* - * corresponds to "max_scale" in ODBC 2.x. 
- */ -Int2 opensearchtype_max_decimal_digits(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return -1; -} - -/* - * corresponds to "scale" in ODBC 2.x. - */ -Int2 opensearchtype_decimal_digits(const StatementClass *stmt, OID type, int col) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_decimal_digits(SC_get_conn(stmt), type, - atttypmod, adtsize_or_longestlen, - UNUSED_HANDLE_UNKNOWN_SIZE_AS); -} - -/* - * "scale" in ODBC 3.x. - */ -Int2 opensearchtype_scale(const StatementClass *stmt, OID type, int col) { - int atttypmod, adtsize_or_longestlen; - - atttypmod = getAtttypmodEtc(stmt, col, &adtsize_or_longestlen); - return opensearchtype_attr_scale(SC_get_conn(stmt), type, atttypmod, - adtsize_or_longestlen, - UNUSED_HANDLE_UNKNOWN_SIZE_AS); -} - -Int2 opensearchtype_radix(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return 10; -} - -Int2 opensearchtype_nullable(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return SQL_NULLABLE_UNKNOWN; /* everything should be nullable unknown */ -} - -Int2 opensearchtype_auto_increment(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return SQL_FALSE; -} - -Int2 opensearchtype_case_sensitive(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - switch (type) { - case OPENSEARCH_TYPE_KEYWORD: - case OPENSEARCH_TYPE_TEXT: - return SQL_TRUE; - - default: - return SQL_FALSE; - } -} - -Int2 opensearchtype_money(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return SQL_FALSE; -} - -Int2 opensearchtype_searchable(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return SQL_SEARCHABLE; -} - -Int2 opensearchtype_unsigned(const ConnectionClass *conn, OID type) { - UNUSED(conn); - switch (type) { - case OPENSEARCH_TYPE_BOOL: - case OPENSEARCH_TYPE_KEYWORD: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_NESTED: - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP: - case OPENSEARCH_TYPE_OBJECT: - return SQL_TRUE; - - case OPENSEARCH_TYPE_INT1: - case OPENSEARCH_TYPE_INT2: - case OPENSEARCH_TYPE_INT4: - case OPENSEARCH_TYPE_INT8: - case OPENSEARCH_TYPE_HALF_FLOAT: - case OPENSEARCH_TYPE_FLOAT4: - case OPENSEARCH_TYPE_FLOAT8: - case OPENSEARCH_TYPE_SCALED_FLOAT: - return SQL_FALSE; - - default: - return -1; - } -} - -const char *opensearchtype_literal_prefix(const ConnectionClass *conn, OID type) { - UNUSED(conn); - switch (type) { - case OPENSEARCH_TYPE_KEYWORD: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_NESTED: - case OPENSEARCH_TYPE_OBJECT: - return "\'"; - default: - return ""; - } -} - -const char *opensearchtype_literal_suffix(const ConnectionClass *conn, OID type) { - UNUSED(conn); - switch (type) { - case OPENSEARCH_TYPE_KEYWORD: - case OPENSEARCH_TYPE_TEXT: - case OPENSEARCH_TYPE_NESTED: - case OPENSEARCH_TYPE_OBJECT: - return "\'"; - default: - return ""; - } -} - -const char *opensearchtype_create_params(const ConnectionClass *conn, OID type) { - UNUSED(conn, type); - return NULL; -} - -SQLSMALLINT -sqltype_to_default_ctype(const ConnectionClass *conn, SQLSMALLINT sqltype) { - /* - * from the table on page 623 of ODBC 2.0 Programmer's Reference - * (Appendix D) - */ - switch (sqltype) { - case SQL_CHAR: - case SQL_VARCHAR: - case SQL_LONGVARCHAR: - case SQL_DECIMAL: - case SQL_NUMERIC: - return SQL_C_CHAR; - case SQL_BIGINT: - return ALLOWED_C_BIGINT; - -#ifdef UNICODE_SUPPORT - case SQL_WCHAR: - case SQL_WVARCHAR: - case 
SQL_WLONGVARCHAR: - return ansi_to_wtype(conn, SQL_C_CHAR); -#endif /* UNICODE_SUPPORT */ - - case SQL_BIT: - return SQL_C_BIT; - - case SQL_TINYINT: - return SQL_C_STINYINT; - - case SQL_SMALLINT: - return SQL_C_SSHORT; - - case SQL_INTEGER: - return SQL_C_SLONG; - - case SQL_REAL: - return SQL_C_FLOAT; - - case SQL_FLOAT: - case SQL_DOUBLE: - return SQL_C_DOUBLE; - - case SQL_BINARY: - case SQL_VARBINARY: - case SQL_LONGVARBINARY: - return SQL_C_BINARY; - - case SQL_DATE: - return SQL_C_DATE; - - case SQL_TIME: - return SQL_C_TIME; - - case SQL_TIMESTAMP: - return SQL_C_TIMESTAMP; - - case SQL_TYPE_DATE: - return SQL_C_TYPE_DATE; - - case SQL_TYPE_TIME: - return SQL_C_TYPE_TIME; - - case SQL_TYPE_TIMESTAMP: - return SQL_C_TYPE_TIMESTAMP; - - case SQL_GUID: - if (conn->ms_jet) - return SQL_C_CHAR; - else - return SQL_C_GUID; - - default: - /* should never happen */ - return SQL_C_CHAR; - } -} - -Int4 ctype_length(SQLSMALLINT ctype) { - switch (ctype) { - case SQL_C_SSHORT: - case SQL_C_SHORT: - return sizeof(SWORD); - - case SQL_C_USHORT: - return sizeof(UWORD); - - case SQL_C_SLONG: - case SQL_C_LONG: - return sizeof(SDWORD); - - case SQL_C_ULONG: - return sizeof(UDWORD); - - case SQL_C_FLOAT: - return sizeof(SFLOAT); - - case SQL_C_DOUBLE: - return sizeof(SDOUBLE); - - case SQL_C_BIT: - return sizeof(UCHAR); - - case SQL_C_STINYINT: - case SQL_C_TINYINT: - return sizeof(SCHAR); - - case SQL_C_UTINYINT: - return sizeof(UCHAR); - - case SQL_C_DATE: - case SQL_C_TYPE_DATE: - return sizeof(DATE_STRUCT); - - case SQL_C_TIME: - case SQL_C_TYPE_TIME: - return sizeof(TIME_STRUCT); - - case SQL_C_TIMESTAMP: - case SQL_C_TYPE_TIMESTAMP: - return sizeof(TIMESTAMP_STRUCT); - - case SQL_C_GUID: - return sizeof(SQLGUID); - case SQL_C_INTERVAL_YEAR: - case SQL_C_INTERVAL_MONTH: - case SQL_C_INTERVAL_YEAR_TO_MONTH: - case SQL_C_INTERVAL_DAY: - case SQL_C_INTERVAL_HOUR: - case SQL_C_INTERVAL_DAY_TO_HOUR: - case SQL_C_INTERVAL_MINUTE: - case SQL_C_INTERVAL_DAY_TO_MINUTE: - case SQL_C_INTERVAL_HOUR_TO_MINUTE: - case SQL_C_INTERVAL_SECOND: - case SQL_C_INTERVAL_DAY_TO_SECOND: - case SQL_C_INTERVAL_HOUR_TO_SECOND: - case SQL_C_INTERVAL_MINUTE_TO_SECOND: - return sizeof(SQL_INTERVAL_STRUCT); - case SQL_C_NUMERIC: - return sizeof(SQL_NUMERIC_STRUCT); - case SQL_C_SBIGINT: - case SQL_C_UBIGINT: - return sizeof(SQLBIGINT); - - case SQL_C_BINARY: - case SQL_C_CHAR: -#ifdef UNICODE_SUPPORT - case SQL_C_WCHAR: -#endif /* UNICODE_SUPPORT */ - return 0; - - default: /* should never happen */ - return 0; - } -} diff --git a/sql-odbc/src/sqlodbc/opensearch_types.h b/sql-odbc/src/sqlodbc/opensearch_types.h deleted file mode 100644 index 8c7dee2d74..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_types.h +++ /dev/null @@ -1,340 +0,0 @@ -#ifndef OPENSEARCH_TYPES -#define OPENSEARCH_TYPES - -#include "dlg_specific.h" -#include "opensearch_odbc.h" -#ifdef __cplusplus -extern "C" { -#endif - -/* the type numbers are defined by the OID's of the types' rows */ -/* in table opensearch_type */ - -#ifdef NOT_USED -#define ES_TYPE_LO ? ? ? ? 
/* waiting for permanent type */ -#endif - -#define OPENSEARCH_TYPE_NAME_BOOLEAN "boolean" -#define OPENSEARCH_TYPE_NAME_BYTE "byte" -#define OPENSEARCH_TYPE_NAME_SHORT "short" -#define OPENSEARCH_TYPE_NAME_INTEGER "integer" -#define OPENSEARCH_TYPE_NAME_LONG "long" -#define OPENSEARCH_TYPE_NAME_HALF_FLOAT "half_float" -#define OPENSEARCH_TYPE_NAME_FLOAT "float" -#define OPENSEARCH_TYPE_NAME_DOUBLE "double" -#define OPENSEARCH_TYPE_NAME_SCALED_FLOAT "scaled_float" -#define OPENSEARCH_TYPE_NAME_KEYWORD "keyword" -#define OPENSEARCH_TYPE_NAME_TEXT "text" -#define OPENSEARCH_TYPE_NAME_NESTED "nested" -#define OPENSEARCH_TYPE_NAME_DATE "date" -#define OPENSEARCH_TYPE_NAME_TIMESTAMP "timestamp" -#define OPENSEARCH_TYPE_NAME_OBJECT "object" -#define OPENSEARCH_TYPE_NAME_VARCHAR "varchar" -#define OPENSEARCH_TYPE_NAME_UNSUPPORTED "unsupported" - -#define MS_ACCESS_SERIAL "int identity" -#define OPENSEARCH_TYPE_BOOL 16 -#define OPENSEARCH_TYPE_BYTEA 17 -#define OPENSEARCH_TYPE_CHAR 18 -#define OPENSEARCH_TYPE_NAME 19 -#define OPENSEARCH_TYPE_INT8 20 -#define OPENSEARCH_TYPE_INT2 21 -#define OPENSEARCH_TYPE_INT2VECTOR 22 -#define OPENSEARCH_TYPE_INT4 23 -#define OPENSEARCH_TYPE_REGPROC 24 -#define OPENSEARCH_TYPE_TEXT 25 -#define OPENSEARCH_TYPE_OID 26 -#define OPENSEARCH_TYPE_TID 27 -#define OPENSEARCH_TYPE_XID 28 -#define OPENSEARCH_TYPE_CID 29 -#define OPENSEARCH_TYPE_OIDVECTOR 30 -#define OPENSEARCH_TYPE_INT1 31 -#define OPENSEARCH_TYPE_HALF_FLOAT 32 -#define OPENSEARCH_TYPE_SCALED_FLOAT 33 -#define OPENSEARCH_TYPE_KEYWORD 34 -#define OPENSEARCH_TYPE_NESTED 35 -#define OPENSEARCH_TYPE_OBJECT 36 -#define OPENSEARCH_TYPE_XML 142 -#define OPENSEARCH_TYPE_XMLARRAY 143 -#define OPENSEARCH_TYPE_CIDR 650 -#define OPENSEARCH_TYPE_FLOAT4 700 -#define OPENSEARCH_TYPE_FLOAT8 701 -#define OPENSEARCH_TYPE_ABSTIME 702 -#define OPENSEARCH_TYPE_UNKNOWN 705 -#define OPENSEARCH_TYPE_MONEY 790 -#define OPENSEARCH_TYPE_MACADDR 829 -#define OPENSEARCH_TYPE_INET 869 -#define OPENSEARCH_TYPE_TEXTARRAY 1009 -#define OPENSEARCH_TYPE_BPCHARARRAY 1014 -#define OPENSEARCH_TYPE_VARCHARARRAY 1015 -#define OPENSEARCH_TYPE_BPCHAR 1042 -#define OPENSEARCH_TYPE_VARCHAR 1043 -#define OPENSEARCH_TYPE_DATE 1082 -#define OPENSEARCH_TYPE_TIME 1083 -#define OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE 1114 /* since 7.2 */ -#define OPENSEARCH_TYPE_DATETIME 1184 /* timestamptz */ -#define OPENSEARCH_TYPE_INTERVAL 1186 -#define OPENSEARCH_TYPE_TIME_WITH_TMZONE 1266 /* since 7.1 */ -#define OPENSEARCH_TYPE_TIMESTAMP 1296 /* deprecated since 7.0 */ -#define OPENSEARCH_TYPE_BIT 1560 -#define OPENSEARCH_TYPE_NUMERIC 1700 -#define OPENSEARCH_TYPE_REFCURSOR 1790 -#define OPENSEARCH_TYPE_RECORD 2249 -#define OPENSEARCH_TYPE_ANY 2276 -#define OPENSEARCH_TYPE_VOID 2278 -#define OPENSEARCH_TYPE_UUID 2950 -#define INTERNAL_ASIS_TYPE (-9999) - -#define TYPE_MAY_BE_ARRAY(type) \ - ((type) == OPENSEARCH_TYPE_XMLARRAY || ((type) >= 1000 && (type) <= 1041)) -/* extern Int4 opensearch_types_defined[]; */ -extern SQLSMALLINT sqlTypes[]; - -/* Defines for opensearchtype_precision */ -#define OPENSEARCH_ATP_UNSET (-3) /* atttypmod */ -#define OPENSEARCH_ADT_UNSET (-3) /* adtsize_or_longestlen */ -#define OPENSEARCH_UNKNOWNS_UNSET 0 /* UNKNOWNS_AS_MAX */ -#define OPENSEARCH_WIDTH_OF_BOOLS_AS_CHAR 5 - -/* - * SQL_INTERVAL support is disabled because I found - * some applications which are unhappy with it. 
- * -#define OPENSEARCH_INTERVAL_AS_SQL_INTERVAL - */ - -OID opensearch_true_type(const ConnectionClass *, OID, OID); -OID sqltype_to_opensearchtype(const ConnectionClass *conn, SQLSMALLINT fSqlType); -const char *sqltype_to_opensearchcast(const ConnectionClass *conn, - SQLSMALLINT fSqlType); - -SQLSMALLINT opensearchtype_to_concise_type(const StatementClass *stmt, OID type, - int col, int handle_unknown_size_as); -SQLSMALLINT opensearchtype_to_sqldesctype(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); -const char *opensearchtype_to_name(const StatementClass *stmt, OID type, int col, - BOOL auto_increment); - -SQLSMALLINT opensearchtype_attr_to_concise_type(const ConnectionClass *conn, OID type, - int typmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -SQLSMALLINT opensearchtype_attr_to_sqldesctype(const ConnectionClass *conn, OID type, - int typmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -SQLSMALLINT opensearchtype_attr_to_datetime_sub(const ConnectionClass *conn, OID type, - int typmod); -SQLSMALLINT opensearchtype_attr_to_ctype(const ConnectionClass *conn, OID type, - int typmod); -const char *opensearchtype_attr_to_name(const ConnectionClass *conn, OID type, - int typmod, BOOL auto_increment); -Int4 opensearchtype_attr_column_size(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longest, - int handle_unknown_size_as); -Int4 opensearchtype_attr_buffer_length(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -Int4 opensearchtype_attr_display_size(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -Int2 opensearchtype_attr_decimal_digits(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -Int4 opensearchtype_attr_transfer_octet_length(const ConnectionClass *conn, OID type, - int atttypmod, - int handle_unknown_size_as); -SQLSMALLINT opensearchtype_attr_precision(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longest, - int handle_unknown_size_as); -Int4 opensearchtype_attr_desclength(const ConnectionClass *conn, OID type, - int atttypmod, int adtsize_or_longestlen, - int handle_unknown_size_as); -Int2 opensearchtype_attr_scale(const ConnectionClass *conn, OID type, int atttypmod, - int adtsize_or_longestlen, int handle_unknown_size_as); - -/* These functions can use static numbers or result sets (col parameter) */ -Int4 opensearchtype_column_size( - const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); /* corresponds to "precision" in ODBC 2.x */ -SQLSMALLINT opensearchtype_precision( - const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); /* "precision" in ODBC 3.x */ -/* the following size/length are of Int4 due to ES restriction */ -Int4 opensearchtype_display_size(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); -Int4 opensearchtype_buffer_length(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); -Int4 opensearchtype_desclength(const StatementClass *stmt, OID type, int col, - int handle_unknown_size_as); -// Int4 opensearchtype_transfer_octet_length(const ConnectionClass *conn, OID type, -// int column_size); - -SQLSMALLINT opensearchtype_decimal_digits( - const StatementClass *stmt, OID type, - int col); /* corresponds to "scale" in ODBC 2.x */ -SQLSMALLINT 
opensearchtype_min_decimal_digits( - const ConnectionClass *conn, - OID type); /* corresponds to "min_scale" in ODBC 2.x */ -SQLSMALLINT opensearchtype_max_decimal_digits( - const ConnectionClass *conn, - OID type); /* corresponds to "max_scale" in ODBC 2.x */ -SQLSMALLINT opensearchtype_scale(const StatementClass *stmt, OID type, - int col); /* ODBC 3.x " */ -Int2 opensearchtype_radix(const ConnectionClass *conn, OID type); -Int2 opensearchtype_nullable(const ConnectionClass *conn, OID type); -Int2 opensearchtype_auto_increment(const ConnectionClass *conn, OID type); -Int2 opensearchtype_case_sensitive(const ConnectionClass *conn, OID type); -Int2 opensearchtype_money(const ConnectionClass *conn, OID type); -Int2 opensearchtype_searchable(const ConnectionClass *conn, OID type); -Int2 opensearchtype_unsigned(const ConnectionClass *conn, OID type); -const char *opensearchtype_literal_prefix(const ConnectionClass *conn, OID type); -const char *opensearchtype_literal_suffix(const ConnectionClass *conn, OID type); -const char *opensearchtype_create_params(const ConnectionClass *conn, OID type); - -SQLSMALLINT sqltype_to_default_ctype(const ConnectionClass *stmt, - SQLSMALLINT sqltype); -Int4 ctype_length(SQLSMALLINT ctype); - -SQLSMALLINT ansi_to_wtype(const ConnectionClass *self, SQLSMALLINT ansitype); - -#ifdef __cplusplus -} -#endif - -typedef enum { - CONNECTION_OK, - CONNECTION_BAD, - /* Non-blocking mode only below here */ - - /* - * The existence of these should never be relied upon - they should only - * be used for user feedback or similar purposes. - */ - CONNECTION_STARTED, /* Waiting for connection to be made. */ - CONNECTION_MADE, /* Connection OK; waiting to send. */ - CONNECTION_AWAITING_RESPONSE, /* Waiting for a response from the postmaster. - */ - CONNECTION_AUTH_OK, /* Received authentication; waiting for backend startup. - */ - CONNECTION_SETENV, /* Negotiating environment. */ - CONNECTION_SSL_STARTUP, /* Negotiating SSL. */ - CONNECTION_NEEDED, /* Internal state: connect() needed */ - CONNECTION_CHECK_WRITABLE, /* Check if we could make a writable connection. - */ - CONNECTION_CONSUME, /* Wait for any pending message and consume them. */ - CONNECTION_GSS_STARTUP /* Negotiating GSSAPI. 
*/ -} ConnStatusType; - -typedef enum { - CONN_ERROR_SUCCESS, // 0 - CONN_ERROR_QUERY_SYNTAX, // 42000 - CONN_ERROR_COMM_LINK_FAILURE, // 08S01 - CONN_ERROR_INVALID_NULL_PTR, // HY009 - CONN_ERROR_INVALID_AUTH, // 28000 - CONN_ERROR_UNABLE_TO_ESTABLISH // 08001 -} ConnErrorType; - -// Only expose this to C++ code, this will be passed through the C interface as -// a void* -#ifdef __cplusplus -#include - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-parameter" -#endif // __APPLE__ -#include "rabbit.hpp" -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - -#include -#include - -typedef struct authentication_options { - std::string auth_type; - std::string username; - std::string password; - std::string region; -} authentication_options; - -typedef struct encryption_options { - bool use_ssl; - bool verify_server; - std::string certificate_type; - std::string certificate; - std::string key; - std::string key_pw; -} encryption_options; - -typedef struct connection_options { - std::string server; - std::string port; - std::string timeout; - std::string fetch_size; -} connection_options; - -typedef struct runtime_options { - connection_options conn; - authentication_options auth; - encryption_options crypt; -} runtime_options; - -typedef struct ErrorDetails { - std::string reason; - std::string details; - std::string source_type; - ConnErrorType type; - ErrorDetails() { - reason = ""; - details = ""; - source_type = ""; - type = ConnErrorType::CONN_ERROR_SUCCESS; - } -} ErrorDetails; - -#define INVALID_OID 0 -#define KEYWORD_TYPE_OID 1043 -#define KEYWORD_TYPE_SIZE 255 -#define KEYWORD_DISPLAY_SIZE 255 -#define KEYWORD_LENGTH_OF_STR 255 - -// Copied from ColumnInfoClass's 'srvr_info' struct. Comments are the relevant -// name in 'srvr_info' -typedef struct ColumnInfo { - std::string field_name; // name - uint32_t type_oid; // adtid - int16_t type_size; // adtsize - int32_t display_size; // longest row - int32_t length_of_str; // the length of bpchar/varchar - uint32_t relation_id; // relid - int16_t attribute_number; // attid - ColumnInfo() { - field_name = ""; - type_oid = INVALID_OID; - type_size = 0; // ? - display_size = 0; // ? - length_of_str = 0; // ? - relation_id = INVALID_OID; - attribute_number = INVALID_OID; - } -} ColumnInfo; - -typedef struct OpenSearchResult { - uint32_t ref_count; // reference count. A ColumnInfo can be shared by - // several qresults. - uint16_t num_fields; - std::vector< ColumnInfo > column_info; - std::string cursor; - std::string result_json; - std::string command_type; // SELECT / FETCH / etc - rabbit::document opensearch_result_doc; - OpenSearchResult() { - ref_count = 0; - num_fields = 0; - result_json = ""; - command_type = ""; - } -} OpenSearchResult; - -#endif -#endif diff --git a/sql-odbc/src/sqlodbc/opensearch_utility.cpp b/sql-odbc/src/sqlodbc/opensearch_utility.cpp deleted file mode 100644 index faac8f389e..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_utility.cpp +++ /dev/null @@ -1,110 +0,0 @@ -#include "opensearch_utility.h" - -#include -#include -#include -#include -#include -#include - -#include - -// Used in the event that we run out of memory. 
This way we have a way of -// setting the buffer to point at an empty char array (because the buffer -// itself isn't const, we can't set this to const without having to later cast -// it away) -static char oom_buffer[1] = ""; -static char *oom_buffer_ptr = oom_buffer; - -static void MarkOpenSearchExpBufferBroken(OpenSearchExpBuffer str) { - if (str->data != oom_buffer) - free(str->data); - str->data = oom_buffer_ptr; - str->len = 0; - str->maxlen = 0; -} - -static bool EnlargeOpenSearchExpBuffer(OpenSearchExpBuffer str, size_t needed) { - if (OpenSearchExpBufferBroken(str)) - return false; - - if (needed >= ((size_t)INT_MAX - str->len)) { - MarkOpenSearchExpBufferBroken(str); - return false; - } - - needed += str->len + 1; - if (needed <= str->maxlen) - return true; - - size_t newlen = (str->maxlen > 0) ? (2 * str->maxlen) : 64; - while (needed > newlen) - newlen = 2 * newlen; - - if (newlen > (size_t)INT_MAX) - newlen = (size_t)INT_MAX; - - char *newdata = (char *)realloc(str->data, newlen); - if (newdata != NULL) { - str->data = newdata; - str->maxlen = newlen; - return true; - } - - MarkOpenSearchExpBufferBroken(str); - return false; -} - -static bool AppendOpenSearchExpBufferVA(OpenSearchExpBuffer str, const char *fmt, - va_list args) { - size_t needed = 32; - if (str->maxlen > (str->len + 16)) { - size_t avail = str->maxlen - str->len; - - int nprinted = vsnprintf(str->data + str->len, avail, fmt, args); - if ((nprinted < 0) || (nprinted > (INT_MAX - 1))) { - MarkOpenSearchExpBufferBroken(str); - return true; - } else if ((size_t)nprinted < avail) { - str->len += nprinted; - return true; - } - needed = nprinted + 1; - } - return !EnlargeOpenSearchExpBuffer(str, needed); -} - -void InitOpenSearchExpBuffer(OpenSearchExpBuffer str) { - str->data = (char *)malloc(INITIAL_EXPBUFFER_SIZE); - if (str->data == NULL) { - str->data = oom_buffer_ptr; - str->maxlen = 0; - } else { - str->maxlen = INITIAL_EXPBUFFER_SIZE; - str->data[0] = '\0'; - } - str->len = 0; -} - -void AppendOpenSearchExpBuffer(OpenSearchExpBuffer str, const char *fmt, ...) 
{ - if (OpenSearchExpBufferBroken(str)) - return; - - va_list args; - bool done = false; - int save_errno = errno; - do { - errno = save_errno; - va_start(args, fmt); - done = AppendOpenSearchExpBufferVA(str, fmt, args); - va_end(args); - } while (!done); -} - -void TermOpenSearchExpBuffer(OpenSearchExpBuffer str) { - if (str->data != oom_buffer) - free(str->data); - str->data = oom_buffer_ptr; - str->maxlen = 0; - str->len = 0; -} diff --git a/sql-odbc/src/sqlodbc/opensearch_utility.h b/sql-odbc/src/sqlodbc/opensearch_utility.h deleted file mode 100644 index bd5d3463c6..0000000000 --- a/sql-odbc/src/sqlodbc/opensearch_utility.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef OPENSEARCH_UTILITY_H -#define OPENSEARCH_UTILITY_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct OpenSearchExpBufferData { - char *data; - size_t len; - size_t maxlen; -} OpenSearchExpBufferData; - -typedef OpenSearchExpBufferData *OpenSearchExpBuffer; - -#define OpenSearchExpBufferBroken(str) ((str) == NULL || (str)->maxlen == 0) -#define OpenSearchExpBufferDataBroken(buf) ((buf).maxlen == 0) -#define INITIAL_EXPBUFFER_SIZE 256 - -void InitOpenSearchExpBuffer(OpenSearchExpBuffer str); -void AppendOpenSearchExpBuffer(OpenSearchExpBuffer str, const char *fmt, ...); -void TermOpenSearchExpBuffer(OpenSearchExpBuffer str); - -#ifdef __cplusplus -} -#endif - -#endif /* OPENSEARCH_UTILITY_H */ diff --git a/sql-odbc/src/sqlodbc/options.c b/sql-odbc/src/sqlodbc/options.c deleted file mode 100644 index fd100c0ea7..0000000000 --- a/sql-odbc/src/sqlodbc/options.c +++ /dev/null @@ -1,705 +0,0 @@ -#include - -#include "environ.h" -#include "opensearch_odbc.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "qresult.h" -#include "statement.h" -#include "unicode_support.h" - -static RETCODE set_statement_option(ConnectionClass *conn, StatementClass *stmt, - SQLUSMALLINT fOption, SQLULEN vParam) { - CSTR func = "set_statement_option"; - char changed = FALSE; - SQLULEN setval; - - switch (fOption) { - case SQL_ASYNC_ENABLE: /* ignored */ - break; - - case SQL_BIND_TYPE: - /* now support multi-column and multi-row binding */ - if (conn) - conn->ardOptions.bind_size = (SQLUINTEGER)vParam; - if (stmt) - SC_get_ARDF(stmt)->bind_size = (SQLUINTEGER)vParam; - break; - - case SQL_CONCURRENCY: - /* - * positioned update isn't supported so cursor concurrency is - * read-only - */ - MYLOG(OPENSEARCH_DEBUG, "SQL_CONCURRENCY = " FORMAT_LEN " ", vParam); - setval = SQL_CONCUR_READ_ONLY; - if (conn) - conn->stmtOptions.scroll_concurrency = (SQLUINTEGER)setval; - else if (stmt) { - if (SC_get_Result(stmt)) { - SC_set_error( - stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "The attr can't be changed because the cursor is open.", - func); - return SQL_ERROR; - } - stmt->options.scroll_concurrency = - stmt->options_orig.scroll_concurrency = (SQLUINTEGER)setval; - } - if (setval != vParam) - changed = TRUE; - MYPRINTF(0, "-> " FORMAT_LEN "\n", setval); - break; - - case SQL_CURSOR_TYPE: - /* - * if declare/fetch, then type can only be forward. otherwise, - * it can only be forward or static. 
- */ - MYLOG(OPENSEARCH_DEBUG, "SQL_CURSOR_TYPE = " FORMAT_LEN " ", vParam); - setval = SQL_CURSOR_FORWARD_ONLY; - if (SQL_CURSOR_STATIC == vParam) - setval = vParam; - else if (SQL_CURSOR_KEYSET_DRIVEN == vParam) { - setval = SQL_CURSOR_STATIC; /* at least scrollable */ - } else if (SQL_CURSOR_DYNAMIC == vParam) { - setval = SQL_CURSOR_STATIC; /* at least scrollable */ - } - if (conn) - conn->stmtOptions.cursor_type = (SQLUINTEGER)setval; - else if (stmt) { - if (SC_get_Result(stmt)) { - SC_set_error( - stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "The attr can't be changed because the cursor is open.", - func); - return SQL_ERROR; - } - stmt->options_orig.cursor_type = stmt->options.cursor_type = - (SQLUINTEGER)setval; - } - if (setval != vParam) - changed = TRUE; - MYPRINTF(0, "-> " FORMAT_LEN "\n", setval); - break; - - case SQL_KEYSET_SIZE: /* ignored, but saved and returned */ - MYLOG(OPENSEARCH_DEBUG, "SQL_KEYSET_SIZE, vParam = " FORMAT_LEN "\n", - vParam); - - if (conn) - conn->stmtOptions.keyset_size = vParam; - if (stmt) { - stmt->options_orig.keyset_size = vParam; - if (!SC_get_Result(stmt)) - stmt->options.keyset_size = vParam; - if (stmt->options.keyset_size != (SQLLEN)vParam) - changed = TRUE; - } - - break; - - case SQL_MAX_LENGTH: /* ignored, but saved */ - MYLOG(OPENSEARCH_DEBUG, "SQL_MAX_LENGTH, vParam = " FORMAT_LEN "\n", - vParam); - if (conn) - conn->stmtOptions.maxLength = vParam; - if (stmt) { - stmt->options_orig.maxLength = vParam; - if (!SC_get_Result(stmt)) - stmt->options.maxLength = vParam; - if (stmt->options.maxLength != (SQLLEN)vParam) - changed = TRUE; - } - break; - - case SQL_MAX_ROWS: /* ignored, but saved */ - MYLOG(OPENSEARCH_DEBUG, "SQL_MAX_ROWS, vParam = " FORMAT_LEN "\n", vParam); - if (conn) - conn->stmtOptions.maxRows = vParam; - if (stmt) { - stmt->options_orig.maxRows = vParam; - if (!SC_get_Result(stmt)) - stmt->options.maxRows = vParam; - if (stmt->options.maxRows != (SQLLEN)vParam) - changed = TRUE; - } - break; - - case SQL_NOSCAN: /* ignored */ - MYLOG(OPENSEARCH_DEBUG, "SQL_NOSCAN, vParam = " FORMAT_LEN "\n", vParam); - break; - - case SQL_QUERY_TIMEOUT: /* ignored */ - MYLOG(OPENSEARCH_DEBUG, "SQL_QUERY_TIMEOUT, vParam = " FORMAT_LEN "\n", - vParam); - if (conn) - conn->stmtOptions.stmt_timeout = (SQLULEN)vParam; - if (stmt) - stmt->options.stmt_timeout = (SQLULEN)vParam; - break; - - case SQL_RETRIEVE_DATA: - MYLOG(OPENSEARCH_DEBUG, "SQL_RETRIEVE_DATA, vParam = " FORMAT_LEN "\n", - vParam); - if (conn) - conn->stmtOptions.retrieve_data = (SQLUINTEGER)vParam; - if (stmt) - stmt->options.retrieve_data = (SQLUINTEGER)vParam; - break; - - case SQL_ROWSET_SIZE: - MYLOG(OPENSEARCH_DEBUG, "SQL_ROWSET_SIZE, vParam = " FORMAT_LEN "\n", - vParam); - - if (vParam < 1) { - vParam = 1; - changed = TRUE; - } - - if (conn) - conn->ardOptions.size_of_rowset_odbc2 = vParam; - if (stmt) - SC_get_ARDF(stmt)->size_of_rowset_odbc2 = vParam; - break; - - case SQL_SIMULATE_CURSOR: /* NOT SUPPORTED */ - if (stmt) { - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "Simulated positioned update/delete not " - "supported. Use the cursor library.", - func); - } - if (conn) { - CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, - "Simulated positioned update/delete not " - "supported. Use the cursor library.", - func); - } - return SQL_ERROR; - - case SQL_USE_BOOKMARKS: - if (stmt) { - MYLOG( - OPENSEARCH_DEBUG, "USE_BOOKMARKS %s\n", - (vParam == SQL_UB_OFF) - ? "off" - : ((vParam == SQL_UB_VARIABLE) ? 
"variable" : "fixed")); - setval = vParam; - stmt->options.use_bookmarks = (SQLUINTEGER)setval; - } - if (conn) - conn->stmtOptions.use_bookmarks = (SQLUINTEGER)vParam; - break; - - case 1204: /* SQL_COPT_SS_PRESERVE_CURSORS ? */ - if (stmt) { - SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER, - "The option may be for MS SQL Server(Set)", func); - } else if (conn) { - CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, - "The option may be for MS SQL Server(Set)", func); - } - return SQL_ERROR; - case 1227: /* SQL_SOPT_SS_HIDDEN_COLUMNS ? */ - case 1228: /* SQL_SOPT_SS_NOBROWSETABLE ? */ - if (stmt) { -#ifndef NOT_USED - if (0 != vParam) - changed = TRUE; - break; -#else - SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER, - "The option may be for MS SQL Server(Set)", func); -#endif /* NOT_USED */ - } else if (conn) { - CC_set_error(conn, CONN_OPTION_NOT_FOR_THE_DRIVER, - "The option may be for MS SQL Server(Set)", func); - } - return SQL_ERROR; - default: { - char option[64]; - - if (stmt) { - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "Unknown statement option (Set)", func); - SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_ULEN, - fOption, vParam); - SC_log_error(func, option, stmt); - } - if (conn) { - CC_set_error(conn, CONN_NOT_IMPLEMENTED_ERROR, - "Unknown statement option (Set)", func); - SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_ULEN, - fOption, vParam); - CC_log_error(func, option, conn); - } - - return SQL_ERROR; - } - } - - if (changed) { - if (stmt) { - SC_set_error(stmt, STMT_OPTION_VALUE_CHANGED, - "Requested value changed.", func); - } - if (conn) { - CC_set_error(conn, CONN_OPTION_VALUE_CHANGED, - "Requested value changed.", func); - } - return SQL_SUCCESS_WITH_INFO; - } else - return SQL_SUCCESS; -} - -/* Implements only SQL_AUTOCOMMIT */ -RETCODE SQL_API OPENSEARCHAPI_SetConnectOption(HDBC hdbc, SQLUSMALLINT fOption, - SQLULEN vParam) { - CSTR func = "OPENSEARCHAPI_SetConnectOption"; - ConnectionClass *conn = (ConnectionClass *)hdbc; - char changed = FALSE; - RETCODE retval; - BOOL autocomm_on; - - MYLOG(OPENSEARCH_TRACE, "entering fOption = %d vParam = " FORMAT_LEN "\n", fOption, - vParam); - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - switch (fOption) { - /* - * Statement Options (apply to all stmts on the connection and - * become defaults for new stmts) - */ - case SQL_ASYNC_ENABLE: - case SQL_BIND_TYPE: - case SQL_CONCURRENCY: - case SQL_CURSOR_TYPE: - case SQL_KEYSET_SIZE: - case SQL_MAX_LENGTH: - case SQL_MAX_ROWS: - case SQL_NOSCAN: - case SQL_QUERY_TIMEOUT: - case SQL_RETRIEVE_DATA: - case SQL_ROWSET_SIZE: - case SQL_SIMULATE_CURSOR: - case SQL_USE_BOOKMARKS: - /* - * Become the default for all future statements on this - * connection - */ - retval = set_statement_option(conn, NULL, fOption, vParam); - - if (retval == SQL_SUCCESS_WITH_INFO) - changed = TRUE; - else if (retval == SQL_ERROR) - return SQL_ERROR; - - break; - - /* - * Connection Options - */ - - case SQL_ACCESS_MODE: /* ignored */ - break; - - case SQL_AUTOCOMMIT: - switch (vParam) { - case SQL_AUTOCOMMIT_ON: - autocomm_on = TRUE; - break; - default: - CC_set_error(conn, CONN_INVALID_ARGUMENT_NO, - "Illegal parameter value for SQL_AUTOCOMMIT. 
" - "Turning SQL_AUTOCOMMIT off requires " - "transactions, which are not supported.", - func); - return SQL_ERROR; - } - if (autocomm_on && SQL_AUTOCOMMIT_OFF != conn->autocommit_public) - break; - else if (!autocomm_on - && SQL_AUTOCOMMIT_OFF == conn->autocommit_public) - break; - conn->autocommit_public = - (autocomm_on ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF); - MYLOG(OPENSEARCH_DEBUG, - "AUTOCOMMIT: transact_status=%d, vparam=" FORMAT_LEN "\n", - conn->transact_status, vParam); - - CC_set_autocommit(conn, autocomm_on); - break; - - case SQL_CURRENT_QUALIFIER: /* ignored */ - break; - - case SQL_LOGIN_TIMEOUT: - conn->login_timeout = (SQLUINTEGER)vParam; - break; - - case SQL_PACKET_SIZE: /* ignored */ - break; - - case SQL_QUIET_MODE: /* ignored */ - break; - - case SQL_TXN_ISOLATION: - if (conn->isolation == vParam) - break; - /* - * If the connection is not established, just record the setting to - * reflect it upon connection. - */ - if (CC_not_connected(conn)) { - conn->isolation = (UInt4)vParam; - break; - } - - conn->isolation = (UInt4)vParam; - break; - - /* These options should be handled by driver manager */ - case SQL_ODBC_CURSORS: - case SQL_OPT_TRACE: - case SQL_OPT_TRACEFILE: - case SQL_TRANSLATE_DLL: - case SQL_TRANSLATE_OPTION: - CC_log_error( - func, - "This connect option (Set) is only used by the Driver Manager", - conn); - break; - - default: { - char option[64]; - - CC_set_error(conn, CONN_UNSUPPORTED_OPTION, - "Unknown connect option (Set)", func); - SPRINTF_FIXED(option, "fOption=%d, vParam=" FORMAT_LEN, fOption, - vParam); -#ifdef WIN32 - if (fOption == 30002 && vParam) { - int cmp; -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) { - char *asPara = - ucs2_to_utf8((SQLWCHAR *)vParam, SQL_NTS, NULL, FALSE); - cmp = strcmp(asPara, "Microsoft Jet"); - free(asPara); - } else -#endif /* UNICODE_SUPPORT */ - cmp = strncmp((char *)vParam, "Microsoft Jet", 13); - if (0 == cmp) { - MYLOG(OPENSEARCH_DEBUG, "Microsoft Jet !!!!\n"); - CC_set_errornumber(conn, 0); - conn->ms_jet = 1; - return SQL_SUCCESS; - } - } -#endif /* WIN32 */ - CC_log_error(func, option, conn); - return SQL_ERROR; - } - } - - if (changed) { - CC_set_error(conn, CONN_OPTION_VALUE_CHANGED, - "Requested value changed.", func); - return SQL_SUCCESS_WITH_INFO; - } else - return SQL_SUCCESS; -} - -/* This function just can tell you whether you are in Autcommit mode or not */ -RETCODE SQL_API OPENSEARCHAPI_GetConnectOption(HDBC hdbc, SQLUSMALLINT fOption, - PTR pvParam, SQLINTEGER *StringLength, - SQLINTEGER BufferLength) { - CSTR func = "OPENSEARCHAPI_GetConnectOption"; - ConnectionClass *conn = (ConnectionClass *)hdbc; - const char *p = NULL; - SQLLEN len = sizeof(SQLINTEGER); - SQLRETURN result = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - switch (fOption) { - case SQL_ACCESS_MODE: /* NOT SUPPORTED */ - *((SQLUINTEGER *)pvParam) = SQL_MODE_READ_WRITE; - break; - - case SQL_AUTOCOMMIT: - *((SQLUINTEGER *)pvParam) = conn->autocommit_public; - break; - - case SQL_CURRENT_QUALIFIER: /* don't use qualifiers */ - len = 0; - p = CurrCatString(conn); - break; - - case SQL_LOGIN_TIMEOUT: - *((SQLUINTEGER *)pvParam) = conn->login_timeout; - break; - - case SQL_PACKET_SIZE: /* NOT SUPPORTED */ - *((SQLUINTEGER *)pvParam) = 4096; - break; - - case SQL_QUERY_TIMEOUT: - *((SQLULEN *)pvParam) = conn->stmtOptions.stmt_timeout; - break; - - case SQL_QUIET_MODE: /* NOT SUPPORTED */ - *((SQLULEN *)pvParam) = 0; 
- break; - - case SQL_TXN_ISOLATION: - if (conn->isolation == 0) { - if (CC_not_connected(conn)) - return SQL_NO_DATA; - conn->isolation = CC_get_isolation(conn); - } - *((SQLUINTEGER *)pvParam) = conn->isolation; - break; - -#ifdef SQL_ATTR_CONNECTION_DEAD - case SQL_ATTR_CONNECTION_DEAD: -#else - case 1209: -#endif /* SQL_ATTR_CONNECTION_DEAD */ - MYLOG(OPENSEARCH_DEBUG, "CONNECTION_DEAD status=%d", conn->status); - *((SQLUINTEGER *)pvParam) = CC_not_connected(conn); - MYPRINTF(0, " val=" FORMAT_UINTEGER "\n", - *((SQLUINTEGER *)pvParam)); - break; - - case SQL_ATTR_ANSI_APP: - *((SQLUINTEGER *)pvParam) = CC_is_in_ansi_app(conn); - MYLOG(OPENSEARCH_DEBUG, "ANSI_APP val=" FORMAT_UINTEGER "\n", - *((SQLUINTEGER *)pvParam)); - break; - - /* These options should be handled by driver manager */ - case SQL_ODBC_CURSORS: - case SQL_OPT_TRACE: - case SQL_OPT_TRACEFILE: - case SQL_TRANSLATE_DLL: - case SQL_TRANSLATE_OPTION: - CC_log_error( - func, - "This connect option (Get) is only used by the Driver Manager", - conn); - break; - - default: { - char option[64]; - - CC_set_error(conn, CONN_UNSUPPORTED_OPTION, - "Unknown connect option (Get)", func); - SPRINTF_FIXED(option, "fOption=%d", fOption); - CC_log_error(func, option, conn); - return SQL_ERROR; - break; - } - } - - if (NULL != p && 0 == len) { - /* char/binary data */ - len = strlen(p); - - if (pvParam) { -#ifdef UNICODE_SUPPORT - if (CC_is_in_unicode_driver(conn)) { - len = utf8_to_ucs2(p, len, (SQLWCHAR *)pvParam, - BufferLength / WCLEN); - len *= WCLEN; - } else -#endif /* UNICODE_SUPPORT */ - strncpy_null((char *)pvParam, p, (size_t)BufferLength); - - if (len >= BufferLength) { - result = SQL_SUCCESS_WITH_INFO; - CC_set_error(conn, CONN_TRUNCATED, - "The buffer was too small for the pvParam.", func); - } - } - } - if (StringLength) - *StringLength = (SQLINTEGER)len; - return result; -} - -RETCODE SQL_API OPENSEARCHAPI_SetStmtOption(HSTMT hstmt, SQLUSMALLINT fOption, - SQLULEN vParam) { - CSTR func = "OPENSEARCHAPI_SetStmtOption"; - StatementClass *stmt = (StatementClass *)hstmt; - RETCODE retval; - - MYLOG(OPENSEARCH_DEBUG, " entering...\n"); - - /* - * Though we could fake Access out by just returning SQL_SUCCESS all - * the time, but it tries to set a huge value for SQL_MAX_LENGTH and - * expects the driver to reduce it to the real value. 
- */ - if (!stmt) { - SC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - retval = set_statement_option(NULL, stmt, fOption, vParam); - return retval; -} - -RETCODE SQL_API OPENSEARCHAPI_GetStmtOption(HSTMT hstmt, SQLUSMALLINT fOption, - PTR pvParam, SQLINTEGER *StringLength, - SQLINTEGER BufferLength) { - UNUSED(BufferLength); - CSTR func = "OPENSEARCHAPI_GetStmtOption"; - StatementClass *stmt = (StatementClass *)hstmt; - QResultClass *res; - SQLLEN ridx; - SQLINTEGER len = sizeof(SQLINTEGER); - Int4 bookmark; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - /* - * thought we could fake Access out by just returning SQL_SUCCESS all - * the time, but it tries to set a huge value for SQL_MAX_LENGTH and - * expects the driver to reduce it to the real value - */ - if (!stmt) { - SC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - switch (fOption) { - case SQL_GET_BOOKMARK: - case SQL_ROW_NUMBER: - - res = SC_get_Curres(stmt); - if (!res) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "The cursor has no result.", func); - return SQL_ERROR; - } - - ridx = GIdx2CacheIdx(stmt->currTuple, stmt, res); - if (!SC_is_fetchcursor(stmt)) { - /* make sure we're positioned on a valid row */ - if ((ridx < 0) - || (((SQLULEN)ridx) >= QR_get_num_cached_tuples(res))) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Not positioned on a valid row.", func); - return SQL_ERROR; - } - } else { - if (stmt->currTuple < 0 || !res->tupleField) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Not positioned on a valid row.", func); - return SQL_ERROR; - } - } - - if (fOption == SQL_GET_BOOKMARK - && stmt->options.use_bookmarks == SQL_UB_OFF) { - SC_set_error( - stmt, STMT_OPERATION_INVALID, - "Operation invalid because use bookmarks not enabled.", - func); - return SQL_ERROR; - } - - bookmark = (int)SC_make_int4_bookmark(stmt->currTuple); - memcpy(pvParam, &bookmark, sizeof(UInt4)); - - break; - - case SQL_ASYNC_ENABLE: /* NOT SUPPORTED */ - *((SQLINTEGER *)pvParam) = SQL_ASYNC_ENABLE_OFF; - break; - - case SQL_BIND_TYPE: - *((SQLINTEGER *)pvParam) = SC_get_ARDF(stmt)->bind_size; - break; - - case SQL_CONCURRENCY: /* NOT REALLY SUPPORTED */ - MYLOG(OPENSEARCH_DEBUG, "SQL_CONCURRENCY " FORMAT_INTEGER "\n", - stmt->options.scroll_concurrency); - *((SQLINTEGER *)pvParam) = stmt->options.scroll_concurrency; - break; - - case SQL_CURSOR_TYPE: /* PARTIAL SUPPORT */ - MYLOG(OPENSEARCH_DEBUG, "SQL_CURSOR_TYPE " FORMAT_INTEGER "\n", - stmt->options.cursor_type); - *((SQLINTEGER *)pvParam) = stmt->options.cursor_type; - break; - - case SQL_KEYSET_SIZE: /* NOT SUPPORTED, but saved */ - MYLOG(OPENSEARCH_DEBUG, "SQL_KEYSET_SIZE\n"); - *((SQLLEN *)pvParam) = stmt->options.keyset_size; - break; - - case SQL_MAX_LENGTH: /* NOT SUPPORTED, but saved */ - *((SQLLEN *)pvParam) = stmt->options.maxLength; - break; - - case SQL_MAX_ROWS: /* NOT SUPPORTED, but saved */ - *((SQLLEN *)pvParam) = stmt->options.maxRows; - MYLOG(OPENSEARCH_DEBUG, "MAX_ROWS, returning " FORMAT_LEN "\n", - stmt->options.maxRows); - break; - - case SQL_NOSCAN: /* NOT SUPPORTED */ - *((SQLINTEGER *)pvParam) = SQL_NOSCAN_ON; - break; - - case SQL_QUERY_TIMEOUT: /* NOT SUPPORTED */ - *((SQLULEN *)pvParam) = stmt->options.stmt_timeout; - break; - - case SQL_RETRIEVE_DATA: - *((SQLINTEGER *)pvParam) = stmt->options.retrieve_data; - break; - - case SQL_ROWSET_SIZE: - *((SQLLEN *)pvParam) = SC_get_ARDF(stmt)->size_of_rowset_odbc2; - break; - - case SQL_SIMULATE_CURSOR: /* NOT SUPPORTED */ - *((SQLINTEGER 
*)pvParam) = SQL_SC_NON_UNIQUE; - break; - - case SQL_USE_BOOKMARKS: - *((SQLINTEGER *)pvParam) = stmt->options.use_bookmarks; - break; - case 1227: /* SQL_SOPT_SS_HIDDEN_COLUMNS ? */ - case 1228: /* SQL_SOPT_SS_NOBROWSETABLE ? */ - *((SQLINTEGER *)pvParam) = 0; - break; - - default: { - char option[64]; - - SC_set_error(stmt, STMT_NOT_IMPLEMENTED_ERROR, - "Unknown statement option (Get)", func); - SPRINTF_FIXED(option, "fOption=%d", fOption); - SC_log_error(func, option, stmt); - return SQL_ERROR; - } - } - if (StringLength) - *StringLength = len; - - return SQL_SUCCESS; -} diff --git a/sql-odbc/src/sqlodbc/parse.c b/sql-odbc/src/sqlodbc/parse.c deleted file mode 100644 index 670a61a6cd..0000000000 --- a/sql-odbc/src/sqlodbc/parse.c +++ /dev/null @@ -1,66 +0,0 @@ -#include -#include -#include - -#include "catfunc.h" -#include "opensearch_odbc.h" -#include "opensearch_types.h" -#include "misc.h" -#include "multibyte.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_info.h" -#include "qresult.h" -#include "statement.h" - -Int4 FI_precision(const FIELD_INFO *fi) { - OID ftype; - - if (!fi) - return -1; - ftype = FI_type(fi); - switch (ftype) { - case OPENSEARCH_TYPE_NUMERIC: - return fi->column_size; - case OPENSEARCH_TYPE_DATETIME: - case OPENSEARCH_TYPE_TIMESTAMP_NO_TMZONE: - return fi->decimal_digits; - } - return 0; -} - -static void setNumFields(IRDFields *irdflds, size_t numFields) { - FIELD_INFO **fi = irdflds->fi; - size_t nfields = irdflds->nfields; - - if (numFields < nfields) { - int i; - - for (i = (int)numFields; i < (int)nfields; i++) { - if (fi[i]) - fi[i]->flag = 0; - } - } - irdflds->nfields = (UInt4)numFields; -} - -void SC_initialize_cols_info(StatementClass *stmt, BOOL DCdestroy, - BOOL parseReset) { - IRDFields *irdflds = SC_get_IRDF(stmt); - - /* Free the parsed table information */ - if (stmt->ti) { - TI_Destructor(stmt->ti, stmt->ntab); - free(stmt->ti); - stmt->ti = NULL; - } - stmt->ntab = 0; - if (DCdestroy) /* Free the parsed field information */ - DC_Destructor((DescriptorClass *)SC_get_IRD(stmt)); - else - setNumFields(irdflds, 0); - if (parseReset) { - stmt->parse_status = STMT_PARSE_NONE; - SC_reset_updatable(stmt); - } -} diff --git a/sql-odbc/src/sqlodbc/qresult.c b/sql-odbc/src/sqlodbc/qresult.c deleted file mode 100644 index 27c299098a..0000000000 --- a/sql-odbc/src/sqlodbc/qresult.c +++ /dev/null @@ -1,469 +0,0 @@ -#include "qresult.h" - -#include -#include -#include - -#include "misc.h" -#include "opensearch_statement.h" -#include "statement.h" - -/* - * Used for building a Manual Result only - * All info functions call this function to create the manual result set. 
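 * ("Info functions" here means the catalog entry points, e.g. SQLTables()
 * or SQLColumns(), which synthesize their result sets by hand and size
 * them with QR_set_num_fields() below.)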
- */ -void QR_set_num_fields(QResultClass *self, int new_num_fields) { - if (!self) - return; - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - CI_set_num_fields(QR_get_fields(self), (SQLSMALLINT)new_num_fields); - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void QR_set_position(QResultClass *self, SQLLEN pos) { - self->tupleField = - self->backend_tuples - + ((QR_get_rowstart_in_cache(self) + pos) * self->num_fields); -} - -void QR_set_reqsize(QResultClass *self, Int4 reqsize) { - self->rowset_size_include_ommitted = reqsize; -} - -void QR_set_cursor(QResultClass *self, const char *name) { - ConnectionClass *conn = QR_get_conn(self); - - if (self->cursor_name) { - if (name && 0 == strcmp(name, self->cursor_name)) - return; - free(self->cursor_name); - if (conn) { - CONNLOCK_ACQUIRE(conn); - conn->ncursors--; - CONNLOCK_RELEASE(conn); - } - self->cursTuple = -1; - QR_set_no_cursor(self); - } else if (NULL == name) - return; - if (name) { - self->cursor_name = strdup(name); - if (conn) { - CONNLOCK_ACQUIRE(conn); - conn->ncursors++; - CONNLOCK_RELEASE(conn); - } - } else { - QResultClass *res; - - self->cursor_name = NULL; - for (res = self->next; NULL != res; res = res->next) { - if (NULL != res->cursor_name) - free(res->cursor_name); - res->cursor_name = NULL; - } - } -} - -void QR_set_rowstart_in_cache(QResultClass *self, SQLLEN start) { - if (QR_synchronize_keys(self)) - self->key_base = start; - self->base = start; -} - -void QR_inc_rowstart_in_cache(QResultClass *self, SQLLEN base_inc) { - if (!QR_has_valid_base(self)) - MYLOG(OPENSEARCH_DEBUG, " called while the cache is not ready\n"); - self->base += base_inc; - if (QR_synchronize_keys(self)) - self->key_base = self->base; -} - -void QR_set_fields(QResultClass *self, ColumnInfoClass *fields) { - ColumnInfoClass *curfields = QR_get_fields(self); - - if (curfields == fields) - return; - - /* - * Unlink the old columninfo from this result set, freeing it if this - * was the last reference. 
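 * ColumnInfoClass is reference counted so chained results can share one
 * set of field descriptors; CI_Destructor() runs only when the last
 * reference is dropped, as the refcount check below shows.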
- */ - if (NULL != curfields) { - if (curfields->refcount > 1) - curfields->refcount--; - else - CI_Destructor(curfields); - } - self->fields = fields; - if (NULL != fields) - fields->refcount++; -} - -/* - * CLASS QResult - */ -QResultClass *QR_Constructor(void) { - QResultClass *rv; - - MYLOG(OPENSEARCH_TRACE, "entering\n"); - rv = (QResultClass *)malloc(sizeof(QResultClass)); - - if (rv != NULL) { - ColumnInfoClass *fields; - - rv->rstatus = PORES_EMPTY_QUERY; - rv->pstatus = 0; - - /* construct the column info */ - rv->fields = NULL; - if (fields = CI_Constructor(), NULL == fields) { - free(rv); - return NULL; - } - QR_set_fields(rv, fields); - rv->backend_tuples = NULL; - rv->sqlstate[0] = '\0'; - rv->message = NULL; - rv->messageref = NULL; - rv->command = NULL; - rv->notice = NULL; - rv->conn = NULL; - rv->next = NULL; - rv->count_backend_allocated = 0; - rv->count_keyset_allocated = 0; - rv->num_total_read = 0; - rv->num_cached_rows = 0; - rv->num_cached_keys = 0; - rv->fetch_number = 0; - rv->flags = - 0; /* must be cleared before calling QR_set_rowstart_in_cache() */ - QR_set_rowstart_in_cache(rv, -1); - rv->key_base = -1; - rv->recent_processed_row_count = -1; - rv->cursTuple = -1; - rv->move_offset = 0; - rv->num_fields = 0; - rv->num_key_fields = OPENSEARCH_NUM_NORMAL_KEYS; /* CTID + OID */ - rv->tupleField = NULL; - rv->cursor_name = NULL; - rv->aborted = FALSE; - - rv->cache_size = 0; - rv->cmd_fetch_size = 0; - rv->rowset_size_include_ommitted = 1; - rv->move_direction = 0; - rv->keyset = NULL; - rv->reload_count = 0; - rv->rb_alloc = 0; - rv->rb_count = 0; - rv->dataFilled = FALSE; - rv->rollback = NULL; - rv->ad_alloc = 0; - rv->ad_count = 0; - rv->added_keyset = NULL; - rv->added_tuples = NULL; - rv->up_alloc = 0; - rv->up_count = 0; - rv->updated = NULL; - rv->updated_keyset = NULL; - rv->updated_tuples = NULL; - rv->dl_alloc = 0; - rv->dl_count = 0; - rv->deleted = NULL; - rv->deleted_keyset = NULL; - rv->opensearch_result = NULL; - rv->server_cursor_id = NULL; - } - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); - return rv; -} - -void QR_close_result(QResultClass *self, BOOL destroy) { - UNUSED(self); - QResultClass *next; - BOOL top = TRUE; - - if (!self) - return; - MYLOG(OPENSEARCH_TRACE, "entering\n"); - - while (self) { - QR_free_memory(self); /* safe to call anyway */ - - /* - * Should have been freed in the close() but just in case... - * QR_set_cursor clears the cursor name of all the chained results too, - * so we only need to do this for the first result in the chain. 
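 * Note that `destroy` is forced to TRUE after the first iteration, so the
 * rest of the chain is always freed regardless of what the caller asked
 * for the head result.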
- */ - if (top) - QR_set_cursor(self, NULL); - - /* Free up column info */ - if (destroy) - QR_set_fields(self, NULL); - - /* Free command info (this is from strdup()) */ - if (self->command) { - free(self->command); - self->command = NULL; - } - - /* Free message info (this is from strdup()) */ - if (self->message) { - free(self->message); - self->message = NULL; - } - - /* Free notice info (this is from strdup()) */ - if (self->notice) { - free(self->notice); - self->notice = NULL; - } - - /* Free server_cursor_id (this is from strdup()) */ - if (self->server_cursor_id) { - free(self->server_cursor_id); - self->server_cursor_id = NULL; - } - - /* Destruct the result object in the chain */ - next = self->next; - self->next = NULL; - if (destroy) - free(self); - - /* Repeat for the next result in the chain */ - self = next; - destroy = TRUE; /* always destroy chained results */ - top = FALSE; - } - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void QR_reset_for_re_execute(QResultClass *self) { - MYLOG(OPENSEARCH_TRACE, "entering for %p\n", self); - if (!self) - return; - QR_close_result(self, FALSE); - /* reset flags etc */ - self->flags = 0; - QR_set_rowstart_in_cache(self, -1); - self->recent_processed_row_count = -1; - /* clear error info etc */ - self->rstatus = PORES_EMPTY_QUERY; - self->aborted = FALSE; - self->sqlstate[0] = '\0'; - self->messageref = NULL; - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void QR_Destructor(QResultClass *self) { - MYLOG(OPENSEARCH_TRACE, "entering\n"); - if (!self) - return; - QR_close_result(self, TRUE); - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} - -void QR_set_command(QResultClass *self, const char *msg) { - if (self->command) - free(self->command); - - self->command = msg ? strdup(msg) : NULL; -} - -void QR_set_message(QResultClass *self, const char *msg) { - if (self->message) - free(self->message); - self->messageref = NULL; - - self->message = msg ? strdup(msg) : NULL; -} - -void QR_set_server_cursor_id(QResultClass *self, const char *server_cursor_id) { - if (self->server_cursor_id) { - free(self->server_cursor_id); - } - - self->server_cursor_id = server_cursor_id ? strdup(server_cursor_id) : NULL; -} - -void QR_add_message(QResultClass *self, const char *msg) { - char *message = self->message; - size_t alsize, pos, addlen; - - if (!msg || !msg[0]) - return; - addlen = strlen(msg); - if (message) { - pos = strlen(message) + 1; - alsize = pos + addlen + 1; - } else { - pos = 0; - alsize = addlen + 1; - } - char *message_tmp = realloc(message, alsize); - if (message_tmp) { - message = message_tmp; - if (pos > 0) - message[pos - 1] = ';'; - strncpy_null(message + pos, msg, addlen + 1); - self->message = message; - } -} - -void QR_set_notice(QResultClass *self, const char *msg) { - if (self->notice) - free(self->notice); - - self->notice = msg ? 
strdup(msg) : NULL; -} - -void QR_add_notice(QResultClass *self, const char *msg) { - char *message = self->notice; - size_t alsize, pos, addlen; - - if (!msg || !msg[0]) - return; - addlen = strlen(msg); - if (message) { - pos = strlen(message) + 1; - alsize = pos + addlen + 1; - } else { - pos = 0; - alsize = addlen + 1; - } - char *message_tmp = realloc(message, alsize); - if (message_tmp) { - message = message_tmp; - if (pos > 0) - message[pos - 1] = ';'; - strncpy_null(message + pos, msg, addlen + 1); - self->notice = message; - } -} - -TupleField *QR_AddNew(QResultClass *self) { - size_t alloc; - UInt4 num_fields; - - if (!self) - return NULL; - MYLOG(OPENSEARCH_ALL, FORMAT_ULEN "th row(%d fields) alloc=" FORMAT_LEN "\n", - self->num_cached_rows, QR_NumResultCols(self), - self->count_backend_allocated); - if (num_fields = QR_NumResultCols(self), !num_fields) - return NULL; - if (self->num_fields <= 0) { - self->num_fields = (unsigned short)num_fields; - QR_set_reached_eof(self); - } - alloc = self->count_backend_allocated; - if (!self->backend_tuples) { - self->num_cached_rows = 0; - alloc = TUPLE_MALLOC_INC; - QR_MALLOC_return_with_error(self->backend_tuples, TupleField, - alloc * sizeof(TupleField) * num_fields, - self, "Out of memory in QR_AddNew.", NULL); - } else if (self->num_cached_rows >= self->count_backend_allocated) { - alloc = self->count_backend_allocated * 2; - QR_REALLOC_return_with_error(self->backend_tuples, TupleField, - alloc * sizeof(TupleField) * num_fields, - self, "Out of memory in QR_AddNew.", NULL); - } - self->count_backend_allocated = alloc; - - if (self->backend_tuples) { - memset(self->backend_tuples + num_fields * self->num_cached_rows, 0, - num_fields * sizeof(TupleField)); - self->num_cached_rows++; - self->ad_count++; - } - return self->backend_tuples + num_fields * (self->num_cached_rows - 1); -} - -void QR_free_memory(QResultClass *self) { - SQLLEN num_backend_rows = self->num_cached_rows; - int num_fields = self->num_fields; - - MYLOG(OPENSEARCH_TRACE, "entering fcount=" FORMAT_LEN "\n", num_backend_rows); - - if (self->backend_tuples) { - ClearCachedRows(self->backend_tuples, num_fields, num_backend_rows); - free(self->backend_tuples); - self->count_backend_allocated = 0; - self->backend_tuples = NULL; - self->dataFilled = FALSE; - self->tupleField = NULL; - } - if (self->keyset) { - free(self->keyset); - self->keyset = NULL; - self->count_keyset_allocated = 0; - self->reload_count = 0; - } - if (self->rollback) { - free(self->rollback); - self->rb_alloc = 0; - self->rb_count = 0; - self->rollback = NULL; - } - if (self->deleted) { - free(self->deleted); - self->deleted = NULL; - } - if (self->deleted_keyset) { - free(self->deleted_keyset); - self->deleted_keyset = NULL; - } - self->dl_alloc = 0; - self->dl_count = 0; - /* clear added info */ - if (self->added_keyset) { - free(self->added_keyset); - self->added_keyset = NULL; - } - if (self->added_tuples) { - ClearCachedRows(self->added_tuples, num_fields, self->ad_count); - free(self->added_tuples); - self->added_tuples = NULL; - } - self->ad_alloc = 0; - self->ad_count = 0; - /* clear updated info */ - if (self->updated) { - free(self->updated); - self->updated = NULL; - } - if (self->updated_keyset) { - free(self->updated_keyset); - self->updated_keyset = NULL; - } - if (self->updated_tuples) { - ClearCachedRows(self->updated_tuples, num_fields, self->up_count); - free(self->updated_tuples); - self->updated_tuples = NULL; - } - if (self->opensearch_result) { - 
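/* Release the backend OpenSearch result handle first; the generic
 * row/key counters are reset afterwards. */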
ClearOpenSearchResult(self->opensearch_result); - self->opensearch_result = NULL; - } - - self->up_alloc = 0; - self->up_count = 0; - - self->num_total_read = 0; - self->num_cached_rows = 0; - self->num_cached_keys = 0; - self->cursTuple = -1; - self->pstatus = 0; - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); -} diff --git a/sql-odbc/src/sqlodbc/qresult.h b/sql-odbc/src/sqlodbc/qresult.h deleted file mode 100644 index 4e44c24638..0000000000 --- a/sql-odbc/src/sqlodbc/qresult.h +++ /dev/null @@ -1,292 +0,0 @@ -#ifndef __QRESULT_H__ -#define __QRESULT_H__ - -#include "columninfo.h" -#include "opensearch_connection.h" -#include "opensearch_odbc.h" -#include "tuple.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum QueryResultCode_ { - PORES_EMPTY_QUERY = 0, - PORES_COMMAND_OK, /* a query command that doesn't return - * anything was executed properly by the backend */ - PORES_TUPLES_OK, /* a query command that returns tuples - * was executed properly by the backend, OpenSearchResult - * contains the resulttuples */ - PORES_COPY_OUT, - PORES_COPY_IN, - PORES_BAD_RESPONSE, /* an unexpected response was recv'd from - * the backend */ - PORES_NONFATAL_ERROR, - PORES_FATAL_ERROR, - PORES_NO_MEMORY_ERROR, - PORES_FIELDS_OK = 100, /* field information from a query was - * successful */ - /* PORES_END_TUPLES, */ - PORES_INTERNAL_ERROR -} QueryResultCode; - -enum { - FQR_REACHED_EOF = (1L << 1) /* reached eof */ - , - FQR_HAS_VALID_BASE = (1L << 2), - FQR_NEEDS_SURVIVAL_CHECK = (1L << 3) /* check if the cursor is open */ -}; - -struct QResultClass_ { - ColumnInfoClass *fields; /* the Column information */ - ConnectionClass *conn; /* the connection this result is using - * (backend) */ - QResultClass *next; /* the following result class */ - - /* Stuff for declare/fetch tuples */ - SQLULEN num_total_read; /* the highest absolute position ever read in + 1 */ - SQLULEN count_backend_allocated; /* m(re)alloced count */ - SQLULEN num_cached_rows; /* count of tuples kept in backend_tuples member */ - SQLLEN fetch_number; /* 0-based index to the tuple to read next */ - SQLLEN cursTuple; /* absolute current position in the servr's cursor used to - retrieve tuples from the DB */ - SQLULEN move_offset; - SQLLEN base; /* relative position of rowset start in the current data - cache(backend_tuples) */ - - UInt2 num_fields; /* number of fields in the result */ - UInt2 num_key_fields; /* number of key fields in the result */ - UInt4 rowset_size_include_ommitted; /* ES restriction */ - SQLLEN recent_processed_row_count; - SQLULEN cache_size; - SQLULEN cmd_fetch_size; - - QueryResultCode rstatus; /* result status */ - - char sqlstate[8]; - char *message; - const char *messageref; - char *cursor_name; /* The name of the cursor for select statements */ - char *command; - char *notice; - - TupleField *backend_tuples; /* data from the backend (the tuple cache) */ - TupleField *tupleField; /* current backend tuple being retrieved */ - - char pstatus; /* processing status */ - char aborted; /* was aborted ? */ - char flags; /* this result contains keyset etc ? 
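 (see the FQR_HASKEYSET / FQR_WITHHOLD / FQR_SYNCHRONIZEKEYS bits below)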
*/ - po_ind_t move_direction; /* must move before fetching this - result set */ - SQLULEN count_keyset_allocated; /* m(re)alloced count */ - SQLULEN num_cached_keys; /* count of keys kept in backend_keys member */ - KeySet *keyset; - SQLLEN key_base; /* relative position of rowset start in the current keyset - cache */ - UInt2 reload_count; - UInt2 rb_alloc; /* count of allocated rollback info */ - UInt2 rb_count; /* count of rollback info */ - char dataFilled; /* Cache is filled with data ? */ - Rollback *rollback; - UInt4 ad_alloc; /* count of allocated added info */ - UInt4 ad_count; /* count of newly added rows */ - KeySet *added_keyset; /* added keyset info */ - TupleField *added_tuples; /* added data by myself */ - UInt2 dl_alloc; /* count of allocated deleted info */ - UInt2 dl_count; /* count of deleted info */ - SQLLEN *deleted; /* deleted index info */ - KeySet *deleted_keyset; /* deleted keyset info */ - UInt2 up_alloc; /* count of allocated updated info */ - UInt2 up_count; /* count of updated info */ - SQLLEN *updated; /* updated index info */ - KeySet *updated_keyset; /* uddated keyset info */ - TupleField *updated_tuples; /* uddated data by myself */ - void *opensearch_result; - char *server_cursor_id; -}; - -enum { - FQR_HASKEYSET = 1L, - FQR_WITHHOLD = (1L << 1), - FQR_HOLDPERMANENT = (1L << 2) /* the cursor is alive across transactions */ - , - FQR_SYNCHRONIZEKEYS = - (1L - << 3) /* synchronize the keyset range with that of cthe tuples cache */ -}; - -#define QR_haskeyset(self) (0 != (self->flags & FQR_HASKEYSET)) -#define QR_is_withhold(self) (0 != (self->flags & FQR_WITHHOLD)) -#define QR_is_permanent(self) (0 != (self->flags & FQR_HOLDPERMANENT)) -#define QR_synchronize_keys(self) (0 != (self->flags & FQR_SYNCHRONIZEKEYS)) -#define QR_get_fields(self) (self->fields) - -/* These functions are for retrieving data from the qresult */ -#define QR_get_value_backend(self, fieldno) (self->tupleField[fieldno].value) -#define QR_get_value_backend_row(self, tupleno, fieldno) \ - ((self->backend_tuples + (tupleno * self->num_fields))[fieldno].value) -#define QR_get_value_backend_text(self, tupleno, fieldno) \ - QR_get_value_backend_row(self, tupleno, fieldno) -#define QR_get_value_backend_int(self, tupleno, fieldno, isNull) \ - atoi(QR_get_value_backend_row(self, tupleno, fieldno)) - -/* These functions are used by both manual and backend results */ -#define QR_NumResultCols(self) (CI_get_num_fields(self->fields)) -#define QR_NumPublicResultCols(self) \ - (QR_haskeyset(self) \ - ? (CI_get_num_fields(self->fields) - self->num_key_fields) \ - : CI_get_num_fields(self->fields)) -#define QR_get_fieldname(self, fieldno_) \ - (CI_get_fieldname(self->fields, fieldno_)) -#define QR_get_fieldsize(self, fieldno_) \ - (CI_get_fieldsize(self->fields, fieldno_)) -#define QR_get_display_size(self, fieldno_) \ - (CI_get_display_size(self->fields, fieldno_)) -#define QR_get_atttypmod(self, fieldno_) \ - (CI_get_atttypmod(self->fields, fieldno_)) -#define QR_get_field_type(self, fieldno_) (CI_get_oid(self->fields, fieldno_)) -#define QR_get_relid(self, fieldno_) (CI_get_relid(self->fields, fieldno_)) -#define QR_get_attid(self, fieldno_) (CI_get_attid(self->fields, fieldno_)) - -/* These functions are used only for manual result sets */ -#define QR_get_num_total_tuples(self) \ - (QR_once_reached_eof(self) ? 
(self->num_total_read + self->ad_count) \ - : self->num_total_read) -#define QR_get_num_total_read(self) (self->num_total_read) -#define QR_get_num_cached_tuples(self) (self->num_cached_rows) -#define QR_set_field_info(self, field_num, name, adtid, adtsize, relid, attid) \ - (CI_set_field_info(self->fields, field_num, name, adtid, adtsize, -1, \ - relid, attid)) -#define QR_set_field_info_v(self, field_num, name, adtid, adtsize) \ - (CI_set_field_info(self->fields, field_num, name, adtid, adtsize, -1, 0, 0)) - -/* status macros */ -#define QR_command_successful(self) \ - (self \ - && !(self->rstatus == PORES_BAD_RESPONSE \ - || self->rstatus == PORES_NONFATAL_ERROR \ - || self->rstatus == PORES_FATAL_ERROR \ - || self->rstatus == PORES_NO_MEMORY_ERROR)) -#define QR_command_maybe_successful(self) \ - (self \ - && !(self->rstatus == PORES_BAD_RESPONSE \ - || self->rstatus == PORES_FATAL_ERROR \ - || self->rstatus == PORES_NO_MEMORY_ERROR)) -#define QR_command_nonfatal(self) (self->rstatus == PORES_NONFATAL_ERROR) -#define QR_set_conn(self, conn_) (self->conn = conn_) -#define QR_set_rstatus(self, condition) (self->rstatus = condition) -#define QR_set_sqlstatus(self, status) strcpy(self->sqlstatus, status) -#define QR_set_messageref(self, m) ((self)->messageref = m) -#define QR_set_aborted(self, aborted_) (self->aborted = aborted_) -#define QR_set_haskeyset(self) (self->flags |= FQR_HASKEYSET) -#define QR_set_synchronize_keys(self) (self->flags |= FQR_SYNCHRONIZEKEYS) -#define QR_set_no_cursor(self) \ - ((self)->flags &= ~(FQR_WITHHOLD | FQR_HOLDPERMANENT), \ - (self)->pstatus &= ~FQR_NEEDS_SURVIVAL_CHECK) -#define QR_set_withhold(self) (self->flags |= FQR_WITHHOLD) -#define QR_set_permanent(self) (self->flags |= FQR_HOLDPERMANENT) -#define QR_set_reached_eof(self) (self->pstatus |= FQR_REACHED_EOF) -#define QR_set_has_valid_base(self) (self->pstatus |= FQR_HAS_VALID_BASE) -#define QR_set_no_valid_base(self) (self->pstatus &= ~FQR_HAS_VALID_BASE) -#define QR_set_survival_check(self) (self->pstatus |= FQR_NEEDS_SURVIVAL_CHECK) -#define QR_set_no_survival_check(self) \ - (self->pstatus &= ~FQR_NEEDS_SURVIVAL_CHECK) -#define QR_inc_num_cache(self) \ - do { \ - self->num_cached_rows++; \ - if (QR_haskeyset(self)) \ - self->num_cached_keys++; \ - } while (0) -#define QR_set_next_in_cache(self, number) \ - do { \ - MYLOG(OPENSEARCH_ALL, "set the number to " FORMAT_LEN " to read next\n", \ - number); \ - self->fetch_number = number; \ - } while (0) -#define QR_inc_next_in_cache(self) \ - do { \ - MYLOG(OPENSEARCH_ALL, "increased the number " FORMAT_LEN, self->fetch_number); \ - self->fetch_number++; \ - MYLOG(OPENSEARCH_ALL, "to " FORMAT_LEN " to next read\n", self->fetch_number); \ - } while (0) - -#define QR_get_message(self) \ - ((self)->message ? 
(self)->message : (self)->messageref) -#define QR_get_command(self) (self->command) -#define QR_get_notice(self) (self->notice) -#define QR_get_rstatus(self) (self->rstatus) -#define QR_get_aborted(self) (self->aborted) -#define QR_get_conn(self) (self->conn) -#define QR_get_cursor(self) (self->cursor_name) -#define QR_get_rowstart_in_cache(self) (self->base) -#define QR_once_reached_eof(self) ((self->pstatus & FQR_REACHED_EOF) != 0) -#define QR_has_valid_base(self) (0 != (self->pstatus & FQR_HAS_VALID_BASE)) -#define QR_needs_survival_check(self) \ - (0 != (self->pstatus & FQR_NEEDS_SURVIVAL_CHECK)) - -#define QR_aborted(self) (!self || self->aborted) -#define QR_get_reqsize(self) (self->rowset_size_include_ommitted) - -#define QR_stop_movement(self) (self->move_direction = 0) -#define QR_is_moving(self) (0 != self->move_direction) -#define QR_is_not_moving(self) (0 == self->move_direction) -#define QR_set_move_forward(self) (self->move_direction = 1) -#define QR_is_moving_forward(self) (1 == self->move_direction) -#define QR_set_move_backward(self) (self->move_direction = -1) -#define QR_is_moving_backward(self) (-1 == self->move_direction) -#define QR_set_move_from_the_last(self) (self->move_direction = 2) -#define QR_is_moving_from_the_last(self) (2 == self->move_direction) -#define QR_is_moving_not_backward(self) (0 < self->move_direction) - -/* Core Functions */ -QResultClass *QR_Constructor(void); -void QR_Destructor(QResultClass *self); -TupleField *QR_AddNew(QResultClass *self); -void QR_close_result(QResultClass *self, BOOL destroy); -void QR_reset_for_re_execute(QResultClass *self); -void QR_free_memory(QResultClass *self); -void QR_set_command(QResultClass *self, const char *msg); -void QR_set_message(QResultClass *self, const char *msg); -void QR_add_message(QResultClass *self, const char *msg); -void QR_set_notice(QResultClass *self, const char *msg); -void QR_add_notice(QResultClass *self, const char *msg); - -void QR_set_num_fields(QResultClass *self, - int new_num_fields); /* catalog functions' result only */ -void QR_set_fields(QResultClass *self, ColumnInfoClass *); - -void QR_set_rowstart_in_cache(QResultClass *, SQLLEN); -void QR_inc_rowstart_in_cache(QResultClass *self, SQLLEN base_inc); -void QR_set_reqsize(QResultClass *self, Int4 reqsize); -void QR_set_position(QResultClass *self, SQLLEN pos); -void QR_set_cursor(QResultClass *self, const char *name); -SQLLEN getNthValid(const QResultClass *self, SQLLEN sta, UWORD orientation, - SQLULEN nth, SQLLEN *nearest); -void QR_set_server_cursor_id(QResultClass *self, const char *server_cursor_id); -#define QR_MALLOC_return_with_error(t, tp, s, a, m, r) \ - do { \ - if (t = (tp *)malloc(s), NULL == t) { \ - QR_set_rstatus(a, PORES_NO_MEMORY_ERROR); \ - qlog("QR_MALLOC_error\n"); \ - QR_free_memory(a); \ - QR_set_messageref(a, m); \ - return r; \ - } \ - } while (0) -#define QR_REALLOC_return_with_error(t, tp, s, a, m, r) \ - do { \ - tp *tmp; \ - if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ - QR_set_rstatus(a, PORES_NO_MEMORY_ERROR); \ - qlog("QR_REALLOC_error\n"); \ - QR_free_memory(a); \ - QR_set_messageref(a, m); \ - return r; \ - } \ - t = tmp; \ - } while (0) - -#ifdef __cplusplus -} -#endif -#endif /* __QRESULT_H__ */ diff --git a/sql-odbc/src/sqlodbc/resource.h b/sql-odbc/src/sqlodbc/resource.h deleted file mode 100644 index ff5fc59c8d..0000000000 --- a/sql-odbc/src/sqlodbc/resource.h +++ /dev/null @@ -1,67 +0,0 @@ -//{{NO_DEPENDENCIES}} -// Microsoft Visual C++ generated include file. 
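// (These IDs back the Windows DSN-configuration dialogs -- DLG_CONFIG,
// DLG_ADVANCED_OPTIONS, DLG_LOG_OPTIONS -- and are normally regenerated
// by the resource editor rather than edited by hand.)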
-// Used by opensearch_odbc.rc -// -#define IDS_BADDSN 1 -#define IDS_MSGTITLE 2 -#define IDOK2 3 -#define IDC_TEST 4 -#define IDC_PASSWORD_STATIC 4 -#define IDC_SSL_STATIC 4 -#define IDC_HOST_VER_STATIC 5 -#define IDC_DSNAME 400 -#define IDC_DSNAMETEXT 401 -#define IDC_DESC 404 -#define IDC_FETCH_SIZE_STATIC 404 -#define IDC_SERVER 407 -#define IDC_NOTICE_USER 414 -#define IDS_AUTHTYPE_NONE 417 -#define IDS_AUTHTYPE_BASIC 418 -#define IDS_AUTHTYPE_IAM 419 -#define IDS_LOGTYPE_OFF 420 -#define IDS_LOGTYPE_FATAL 421 -#define IDS_LOGTYPE_ERROR 422 -#define IDS_LOGTYPE_WARNING 423 -#define IDS_LOGTYPE_INFO 424 -#define IDS_LOGTYPE_DEBUG 425 -#define IDS_LOGTYPE_TRACE 426 -#define IDS_LOGTYPE_ALL 427 -#define DLG_CONFIG 1001 -#define IDC_PORT 1002 -#define IDC_USER 1006 -#define IDC_PASSWORD 1009 -#define IDC_MANAGEDSN 1077 -#define IDC_EDIT1 1112 -#define IDC_CONNTIMEOUT_STATIC 1112 -#define IDC_CHECK1 1113 -#define IDC_CHECK2 1114 -#define IDC_USESSL 1114 -#define IDC_COMBO1 1115 -#define IDC_AUTHTYPE 1115 -#define IDC_HOST_VER 1115 -#define IDC_USERNAME_STATIC 1116 -#define IDC_REGION 1121 -#define IDC_REGION_STATIC 1122 -#define IDC_AUTH_STATIC 1123 -#define ID_ADVANCED_OPTIONS 1124 -#define ID_LOG_OPTIONS 1125 -#define IDC_DRIVER_VERSION 1126 -#define IDC_AUTH_SETTINGS 1127 -#define IDC_CONN_SETTINGS 1128 -#define DLG_ADVANCED_OPTIONS 1129 -#define IDC_CONNTIMEOUT 1130 -#define DLG_LOG_OPTIONS 1131 -#define IDC_FETCH_SIZE 1131 -#define IDC_LOG_LEVEL 1132 -#define IDC_LOG_PATH 1133 - -// Next default values for new objects -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NEXT_RESOURCE_VALUE 113 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1135 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif diff --git a/sql-odbc/src/sqlodbc/results.c b/sql-odbc/src/sqlodbc/results.c deleted file mode 100644 index 18f1cd1a72..0000000000 --- a/sql-odbc/src/sqlodbc/results.c +++ /dev/null @@ -1,1651 +0,0 @@ -#include -#include -#include - -#include "bind.h" -#include "convert.h" -#include "dlg_specific.h" -#include "environ.h" -#include "opensearch_odbc.h" -#include "opensearch_types.h" -#include "misc.h" -#include "opensearch_apifunc.h" -#include "opensearch_connection.h" -#include "opensearch_statement.h" -#include "qresult.h" -#include "statement.h" - -/* Helper macro */ -#define getEffectiveOid(conn, fi) \ - opensearch_true_type((conn), (fi)->columntype, FI_type(fi)) -#define NULL_IF_NULL(a) ((a) ? ((const char *)(a)) : "(null)") - -RETCODE SQL_API OPENSEARCHAPI_RowCount(HSTMT hstmt, SQLLEN *pcrow) { - CSTR func = "OPENSEARCHAPI_RowCount"; - StatementClass *stmt = (StatementClass *)hstmt; - QResultClass *res; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - res = SC_get_Curres(stmt); - if (res) { - if (stmt->status != STMT_FINISHED) { - SC_set_error( - stmt, STMT_SEQUENCE_ERROR, - "Can't get row count while statement is still executing.", - func); - return SQL_ERROR; - } - } - - // Row count is not supported by this driver, so we will always report -1, - // as defined by the ODBC API for SQLRowCount. - *pcrow = -1; - - return SQL_SUCCESS; -} - -/* - * This returns the number of columns associated with the database - * attached to "hstmt". 
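 * A statement carrying a procedure return value reports zero columns (see
 * the stmt->proc_return check below), and hidden key columns are excluded
 * via QR_NumPublicResultCols().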
- */ -RETCODE SQL_API OPENSEARCHAPI_NumResultCols(HSTMT hstmt, SQLSMALLINT *pccol) { - CSTR func = "OPENSEARCHAPI_NumResultCols"; - StatementClass *stmt = (StatementClass *)hstmt; - QResultClass *result; - RETCODE ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - SC_clear_error(stmt); -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - if (stmt->proc_return > 0) { - *pccol = 0; - goto cleanup; - } - - result = SC_get_Curres(stmt); - *pccol = QR_NumPublicResultCols(result); - -cleanup: -#undef return - return ret; -} - -#define USE_FI(fi, unknown) (fi && UNKNOWNS_AS_LONGEST != unknown) - -/* - * Return information about the database column the user wants - * information about. - */ -RETCODE SQL_API OPENSEARCHAPI_DescribeCol(HSTMT hstmt, SQLUSMALLINT icol, - SQLCHAR *szColName, SQLSMALLINT cbColNameMax, - SQLSMALLINT *pcbColName, - SQLSMALLINT *pfSqlType, SQLULEN *pcbColDef, - SQLSMALLINT *pibScale, - SQLSMALLINT *pfNullable) { - CSTR func = "OPENSEARCHAPI_DescribeCol"; - - /* gets all the information about a specific column */ - StatementClass *stmt = (StatementClass *)hstmt; - ConnectionClass *conn; - IRDFields *irdflds; - QResultClass *res = NULL; - char *col_name = NULL; - OID fieldtype = 0; - SQLLEN column_size = 0; - int unknown_sizes; - SQLINTEGER decimal_digits = 0; - FIELD_INFO *fi; - char buf[255]; - int len = 0; - RETCODE result = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering.%d..\n", icol); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - conn = SC_get_conn(stmt); - unknown_sizes = DEFAULT_UNKNOWNSIZES; - - SC_clear_error(stmt); - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - irdflds = SC_get_IRDF(stmt); - if (0 == icol) /* bookmark column */ - { - SQLSMALLINT fType = stmt->options.use_bookmarks == SQL_UB_VARIABLE - ? SQL_BINARY - : SQL_INTEGER; - - MYLOG(OPENSEARCH_ALL, "answering bookmark info\n"); - if (szColName && cbColNameMax > 0) - *szColName = '\0'; - if (pcbColName) - *pcbColName = 0; - if (pfSqlType) - *pfSqlType = fType; - if (pcbColDef) - *pcbColDef = 10; - if (pibScale) - *pibScale = 0; - if (pfNullable) - *pfNullable = SQL_NO_NULLS; - result = SQL_SUCCESS; - goto cleanup; - } - - /* - * Dont check for bookmark column. This is the responsibility of the - * driver manager. 
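 * A minimal caller-side sketch (hypothetical buffer names, not from the
 * original source):
 *
 *     SQLCHAR name[64];
 *     SQLSMALLINT nlen, sqltype, scale, nullable;
 *     SQLULEN colsize;
 *     SQLDescribeCol(hstmt, 1, name, sizeof(name), &nlen,
 *                    &sqltype, &colsize, &scale, &nullable);
 *
 * Column numbers are 1-based at the API boundary; icol is decremented
 * just below.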
- */ - - icol--; /* use zero based column numbers */ - - fi = NULL; - if (icol < irdflds->nfields && irdflds->fi) - fi = irdflds->fi[icol]; - - if (!FI_is_applicable(fi)) { - fi = NULL; - - res = SC_get_Curres(stmt); - if (icol >= QR_NumPublicResultCols(res)) { - SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, - "Invalid column number in DescribeCol.", func); - SPRINTF_FIXED(buf, "Col#=%d, #Cols=%d,%d keys=%d", icol, - QR_NumResultCols(res), QR_NumPublicResultCols(res), - res->num_key_fields); - SC_log_error(func, buf, stmt); - result = SQL_ERROR; - goto cleanup; - } - if (icol < irdflds->nfields && irdflds->fi) - fi = irdflds->fi[icol]; - } - res = SC_get_Curres(stmt); -#ifdef SUPPRESS_LONGEST_ON_CURSORS - if (UNKNOWNS_AS_LONGEST == unknown_sizes) { - if (QR_once_reached_eof(res)) - unknown_sizes = UNKNOWNS_AS_LONGEST; - else - unknown_sizes = UNKNOWNS_AS_MAX; - } -#endif /* SUPPRESS_LONGEST_ON_CURSORS */ - /* handle constants */ - if (res && -2 == QR_get_fieldsize(res, icol)) - unknown_sizes = UNKNOWNS_AS_LONGEST; - - if (FI_is_applicable(fi)) { - fieldtype = getEffectiveOid(conn, fi); - if (NAME_IS_VALID(fi->column_alias)) - col_name = GET_NAME(fi->column_alias); - else - col_name = GET_NAME(fi->column_name); - if (USE_FI(fi, unknown_sizes)) { - column_size = fi->column_size; - decimal_digits = fi->decimal_digits; - } else { - column_size = opensearchtype_column_size(stmt, fieldtype, icol, - unknown_sizes); - decimal_digits = - opensearchtype_decimal_digits(stmt, fieldtype, icol); - } - - MYLOG(OPENSEARCH_DEBUG, - "PARSE: fieldtype=%u, col_name='%s', column_size=" FORMAT_LEN - "\n", - fieldtype, NULL_IF_NULL(col_name), column_size); - } else { - col_name = QR_get_fieldname(res, icol); - fieldtype = QR_get_field_type(res, icol); - - column_size = - opensearchtype_column_size(stmt, fieldtype, icol, unknown_sizes); - decimal_digits = opensearchtype_decimal_digits(stmt, fieldtype, icol); - } - - MYLOG(OPENSEARCH_DEBUG, "col %d fieldname = '%s'\n", icol, NULL_IF_NULL(col_name)); - MYLOG(OPENSEARCH_DEBUG, "col %d fieldtype = %d\n", icol, fieldtype); - MYLOG(OPENSEARCH_DEBUG, "col %d column_size = " FORMAT_LEN "\n", icol, column_size); - - result = SQL_SUCCESS; - - /* - * COLUMN NAME - */ - len = col_name ? (int)strlen(col_name) : 0; - - if (pcbColName) - *pcbColName = (SQLSMALLINT)len; - - if (szColName && cbColNameMax > 0) { - if (NULL != col_name) - strncpy_null((char *)szColName, col_name, cbColNameMax); - else - szColName[0] = '\0'; - - if (len >= cbColNameMax) { - result = SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the colName.", func); - } - } - - /* - * CONCISE(SQL) TYPE - */ - if (pfSqlType) { - *pfSqlType = opensearchtype_to_concise_type(stmt, fieldtype, icol, - unknown_sizes); - - MYLOG(OPENSEARCH_DEBUG, "col %d *pfSqlType = %d\n", icol, *pfSqlType); - } - - /* - * COLUMN SIZE(PRECISION in 2.x) - */ - if (pcbColDef) { - if (column_size < 0) - column_size = 0; /* "I dont know" */ - - *pcbColDef = column_size; - - MYLOG(OPENSEARCH_DEBUG, "Col: col %d *pcbColDef = " FORMAT_ULEN "\n", icol, - *pcbColDef); - } - - /* - * DECIMAL DIGITS(SCALE in 2.x) - */ - if (pibScale) { - if (decimal_digits < 0) - decimal_digits = 0; - - *pibScale = (SQLSMALLINT)decimal_digits; - MYLOG(OPENSEARCH_DEBUG, "col %d *pibScale = %d\n", icol, *pibScale); - } - - /* - * NULLABILITY - */ - if (pfNullable) { - if (SC_has_outer_join(stmt)) - *pfNullable = TRUE; - else - *pfNullable = fi ? 
fi->nullable : opensearchtype_nullable(conn, fieldtype); - - MYLOG(OPENSEARCH_DEBUG, "col %d *pfNullable = %d\n", icol, *pfNullable); - } - -cleanup: -#undef return - return result; -} - -/* Returns result column descriptor information for a result set. */ -RETCODE SQL_API OPENSEARCHAPI_ColAttributes(HSTMT hstmt, SQLUSMALLINT icol, - SQLUSMALLINT fDescType, PTR rgbDesc, - SQLSMALLINT cbDescMax, SQLSMALLINT *pcbDesc, - SQLLEN *pfDesc) { - CSTR func = "OPENSEARCHAPI_ColAttributes"; - StatementClass *stmt = (StatementClass *)hstmt; - IRDFields *irdflds; - OID field_type = 0; - Int2 col_idx; - ConnectionClass *conn; - int column_size, unknown_sizes; - int cols = 0; - RETCODE result; - const char *p = NULL; - SQLLEN value = 0; - const FIELD_INFO *fi = NULL; - const TABLE_INFO *ti = NULL; - QResultClass *res; - BOOL stmt_updatable; - - MYLOG(OPENSEARCH_TRACE, "entering..col=%d %d len=%d.\n", icol, fDescType, - cbDescMax); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - stmt_updatable = SC_is_updatable(stmt) - /* The following doesn't seem appropriate for client side cursors - && stmt->options.scroll_concurrency != SQL_CONCUR_READ_ONLY - */ - ; - - if (pcbDesc) - *pcbDesc = 0; - irdflds = SC_get_IRDF(stmt); - conn = SC_get_conn(stmt); - - /* - * Dont check for bookmark column. This is the responsibility of the - * driver manager. For certain types of arguments, the column number - * is ignored anyway, so it may be 0. - */ - - res = SC_get_Curres(stmt); - if (0 == icol && SQL_DESC_COUNT != fDescType) /* bookmark column */ - { - MYLOG(OPENSEARCH_ALL, "answering bookmark info\n"); - switch (fDescType) { - case SQL_DESC_OCTET_LENGTH: - if (pfDesc) - *pfDesc = 4; - break; - case SQL_DESC_TYPE: - if (pfDesc) - *pfDesc = stmt->options.use_bookmarks == SQL_UB_VARIABLE - ? SQL_BINARY - : SQL_INTEGER; - break; - } - return SQL_SUCCESS; - } - - col_idx = icol - 1; - - unknown_sizes = DEFAULT_UNKNOWNSIZES; - - /* not appropriate for SQLColAttributes() */ - if (stmt->catalog_result) - unknown_sizes = UNKNOWNS_AS_LONGEST; - else if (unknown_sizes == UNKNOWNS_AS_DONTKNOW) - unknown_sizes = UNKNOWNS_AS_MAX; - - if (!stmt->catalog_result && SC_is_parse_forced(stmt) - && SC_can_parse_statement(stmt)) { - cols = irdflds->nfields; - - /* - * Column Count is a special case. The Column number is ignored - * in this case. - */ - if (fDescType == SQL_DESC_COUNT) { - if (pfDesc) - *pfDesc = cols; - - return SQL_SUCCESS; - } - - if (SC_parsed_status(stmt) != STMT_PARSE_FATAL && irdflds->fi) { - if (col_idx >= cols) { - SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, - "Invalid column number in ColAttributes.", func); - return SQL_ERROR; - } - } - } - - if ((unsigned int)col_idx < irdflds->nfields && irdflds->fi) - fi = irdflds->fi[col_idx]; - if (FI_is_applicable(fi)) - field_type = getEffectiveOid(conn, fi); - else { - fi = NULL; - switch (fDescType) { - case SQL_COLUMN_OWNER_NAME: - case SQL_COLUMN_TABLE_NAME: - case SQL_COLUMN_TYPE: - case SQL_COLUMN_TYPE_NAME: - case SQL_COLUMN_AUTO_INCREMENT: - case SQL_DESC_NULLABLE: - case SQL_DESC_BASE_TABLE_NAME: - case SQL_DESC_BASE_COLUMN_NAME: - case SQL_COLUMN_UPDATABLE: - case 1212: /* SQL_CA_SS_COLUMN_KEY ? */ - break; - } - - res = SC_get_Curres(stmt); - cols = QR_NumPublicResultCols(res); - - /* - * Column Count is a special case. The Column number is ignored - * in this case. 
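 * For example (sketch, not from the original source):
 *
 *     SQLLEN count;
 *     SQLColAttributes(hstmt, 0, SQL_DESC_COUNT, NULL, 0, NULL, &count);
 *
 * is legal even though column 0 is otherwise reserved for the bookmark.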
- */ - if (fDescType == SQL_DESC_COUNT) { - if (pfDesc) - *pfDesc = cols; - - return SQL_SUCCESS; - } - - if (col_idx >= cols) { - SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, - "Invalid column number in ColAttributes.", func); - return SQL_ERROR; - } - - field_type = QR_get_field_type(res, col_idx); - if ((unsigned int)col_idx < irdflds->nfields && irdflds->fi) - fi = irdflds->fi[col_idx]; - } - if (FI_is_applicable(fi)) { - ti = fi->ti; - field_type = getEffectiveOid(conn, fi); - } - - MYLOG(OPENSEARCH_DEBUG, "col %d field_type=%d fi,ti=%p,%p\n", col_idx, field_type, - fi, ti); - -#ifdef SUPPRESS_LONGEST_ON_CURSORS - if (UNKNOWNS_AS_LONGEST == unknown_sizes) { - if (QR_once_reached_eof(res)) - unknown_sizes = UNKNOWNS_AS_LONGEST; - else - unknown_sizes = UNKNOWNS_AS_MAX; - } -#endif /* SUPPRESS_LONGEST_ON_CURSORS */ - /* handle constants */ - if (res && -2 == QR_get_fieldsize(res, col_idx)) - unknown_sizes = UNKNOWNS_AS_LONGEST; - - column_size = - (USE_FI(fi, unknown_sizes) && fi->column_size > 0) - ? fi->column_size - : opensearchtype_column_size(stmt, field_type, col_idx, - unknown_sizes); - switch (fDescType) { - case SQL_COLUMN_AUTO_INCREMENT: /* == SQL_DESC_AUTO_UNIQUE_VALUE */ - if (fi && fi->auto_increment) - value = TRUE; - else - value = opensearchtype_auto_increment(conn, field_type); - if (value == -1) /* non-numeric becomes FALSE (ODBC Doc) */ - value = FALSE; - MYLOG(OPENSEARCH_DEBUG, "AUTO_INCREMENT=" FORMAT_LEN "\n", value); - - break; - - case SQL_COLUMN_CASE_SENSITIVE: /* == SQL_DESC_CASE_SENSITIVE */ - value = opensearchtype_case_sensitive(conn, field_type); - break; - - /* - * This special case is handled above. - * - * case SQL_COLUMN_COUNT: - */ - case SQL_COLUMN_DISPLAY_SIZE: /* == SQL_DESC_DISPLAY_SIZE */ - value = (USE_FI(fi, unknown_sizes) && 0 != fi->display_size) - ? fi->display_size - : opensearchtype_display_size(stmt, field_type, col_idx, - unknown_sizes); - - MYLOG(OPENSEARCH_DEBUG, "col %d, display_size= " FORMAT_LEN "\n", col_idx, - value); - - break; - - case SQL_COLUMN_LABEL: /* == SQL_DESC_LABEL */ - if (fi && (NAME_IS_VALID(fi->column_alias))) { - p = GET_NAME(fi->column_alias); - - MYLOG(OPENSEARCH_DEBUG, "COLUMN_LABEL = '%s'\n", p); - break; - } - /* otherwise same as column name -- FALL THROUGH!!! */ - - case SQL_DESC_NAME: - MYLOG(OPENSEARCH_ALL, "fi=%p (alias, name)=", fi); - if (fi) - MYPRINTF(OPENSEARCH_DEBUG, "(%s,%s)\n", PRINT_NAME(fi->column_alias), - PRINT_NAME(fi->column_name)); - else - MYPRINTF(OPENSEARCH_DEBUG, "NULL\n"); - p = fi ? (NAME_IS_NULL(fi->column_alias) - ? SAFE_NAME(fi->column_name) - : GET_NAME(fi->column_alias)) - : QR_get_fieldname(res, col_idx); - - MYLOG(OPENSEARCH_DEBUG, "COLUMN_NAME = '%s'\n", p); - break; - - case SQL_COLUMN_LENGTH: - value = (USE_FI(fi, unknown_sizes) && fi->length > 0) - ? fi->length - : opensearchtype_buffer_length(stmt, field_type, - col_idx, unknown_sizes); - if (0 > value) - /* if (-1 == value) I'm not sure which is right */ - value = 0; - - MYLOG(OPENSEARCH_DEBUG, "col %d, column_length = " FORMAT_LEN "\n", col_idx, - value); - break; - - case SQL_COLUMN_MONEY: /* == SQL_DESC_FIXED_PREC_SCALE */ - value = opensearchtype_money(conn, field_type); - MYLOG(OPENSEARCH_ALL, "COLUMN_MONEY=" FORMAT_LEN "\n", value); - break; - - case SQL_DESC_NULLABLE: - if (SC_has_outer_join(stmt)) - value = TRUE; - else - value = fi ? 
fi->nullable : opensearchtype_nullable(conn, field_type); - MYLOG(OPENSEARCH_ALL, "COLUMN_NULLABLE=" FORMAT_LEN "\n", value); - break; - - case SQL_COLUMN_OWNER_NAME: /* == SQL_DESC_SCHEMA_NAME */ - p = ti ? SAFE_NAME(ti->schema_name) : NULL_STRING; - MYLOG(OPENSEARCH_DEBUG, "SCHEMA_NAME = '%s'\n", p); - break; - - case SQL_COLUMN_PRECISION: /* in 2.x */ - value = column_size; - if (value < 0) - value = 0; - - MYLOG(OPENSEARCH_DEBUG, "col %d, column_size = " FORMAT_LEN "\n", col_idx, - value); - break; - - case SQL_COLUMN_QUALIFIER_NAME: /* == SQL_DESC_CATALOG_NAME */ - p = ti ? CurrCatString(conn) - : NULL_STRING; /* empty string means *not supported* */ - break; - - case SQL_COLUMN_SCALE: /* in 2.x */ - value = opensearchtype_decimal_digits(stmt, field_type, col_idx); - MYLOG(OPENSEARCH_ALL, "COLUMN_SCALE=" FORMAT_LEN "\n", value); - if (value < 0) - value = 0; - break; - - case SQL_COLUMN_SEARCHABLE: /* == SQL_DESC_SEARCHABLE */ - value = opensearchtype_searchable(conn, field_type); - break; - - case SQL_COLUMN_TABLE_NAME: /* == SQL_DESC_TABLE_NAME */ - p = ti ? SAFE_NAME(ti->table_name) : NULL_STRING; - - MYLOG(OPENSEARCH_DEBUG, "TABLE_NAME = '%s'\n", p); - break; - - case SQL_COLUMN_TYPE: /* == SQL_DESC_CONCISE_TYPE */ - value = opensearchtype_to_concise_type(stmt, field_type, col_idx, - unknown_sizes); - MYLOG(OPENSEARCH_DEBUG, "COLUMN_TYPE=" FORMAT_LEN "\n", value); - break; - - case SQL_COLUMN_TYPE_NAME: /* == SQL_DESC_TYPE_NAME */ - p = opensearchtype_to_name(stmt, field_type, col_idx, - fi && fi->auto_increment); - break; - - case SQL_COLUMN_UNSIGNED: /* == SQL_DESC_UNSINGED */ - value = opensearchtype_unsigned(conn, field_type); - if (value == -1) /* non-numeric becomes TRUE (ODBC Doc) */ - value = SQL_TRUE; - - break; - - case SQL_COLUMN_UPDATABLE: /* == SQL_DESC_UPDATABLE */ - - /* - * Neither Access or Borland care about this. - * - * if (field_type == OPENSEARCH_TYPE_OID) pfDesc = SQL_ATTR_READONLY; - * else - */ - if (!stmt_updatable) - value = SQL_ATTR_READONLY; - else - value = - fi ? (fi->updatable ? SQL_ATTR_WRITE : SQL_ATTR_READONLY) - : (QR_get_attid(res, col_idx) > 0 ? SQL_ATTR_WRITE - : SQL_ATTR_READONLY); - if (SQL_ATTR_READONLY != value) { - const char *name = fi ? SAFE_NAME(fi->column_name) - : QR_get_fieldname(res, col_idx); - if (stricmp(name, OID_NAME) == 0 || stricmp(name, "ctid") == 0 - || stricmp(name, XMIN_NAME) == 0) - value = SQL_ATTR_READONLY; - else if (conn->ms_jet && fi && fi->auto_increment) - value = SQL_ATTR_READONLY; - } - - MYLOG(OPENSEARCH_DEBUG, "%s: UPDATEABLE = " FORMAT_LEN "\n", func, value); - break; - case SQL_DESC_BASE_COLUMN_NAME: - - p = fi ? SAFE_NAME(fi->column_name) - : QR_get_fieldname(res, col_idx); - - MYLOG(OPENSEARCH_DEBUG, "BASE_COLUMN_NAME = '%s'\n", p); - break; - case SQL_DESC_BASE_TABLE_NAME: /* the same as TABLE_NAME ok ? */ - p = ti ? SAFE_NAME(ti->table_name) : NULL_STRING; - - MYLOG(OPENSEARCH_DEBUG, "BASE_TABLE_NAME = '%s'\n", p); - break; - case SQL_DESC_LENGTH: /* different from SQL_COLUMN_LENGTH */ - value = (fi && column_size > 0) - ? column_size - : opensearchtype_desclength(stmt, field_type, col_idx, - unknown_sizes); - if (-1 == value) - value = 0; - - MYLOG(OPENSEARCH_DEBUG, "col %d, desc_length = " FORMAT_LEN "\n", col_idx, - value); - break; - case SQL_DESC_OCTET_LENGTH: - value = (USE_FI(fi, unknown_sizes) && fi->length > 0) - ? 
fi->length - : opensearchtype_attr_transfer_octet_length( - conn, field_type, column_size, unknown_sizes); - if (-1 == value) - value = 0; - MYLOG(OPENSEARCH_DEBUG, "col %d, octet_length = " FORMAT_LEN "\n", col_idx, - value); - break; - case SQL_DESC_PRECISION: /* different from SQL_COLUMN_PRECISION */ - if (value = FI_precision(fi), value <= 0) - value = opensearchtype_precision(stmt, field_type, col_idx, - unknown_sizes); - if (value < 0) - value = 0; - - MYLOG(OPENSEARCH_DEBUG, "col %d, desc_precision = " FORMAT_LEN "\n", - col_idx, value); - break; - case SQL_DESC_SCALE: /* different from SQL_COLUMN_SCALE */ - value = opensearchtype_scale(stmt, field_type, col_idx); - if (value < 0) - value = 0; - break; - case SQL_DESC_LOCAL_TYPE_NAME: - p = opensearchtype_to_name(stmt, field_type, col_idx, - fi && fi->auto_increment); - break; - case SQL_DESC_TYPE: - value = opensearchtype_to_sqldesctype(stmt, field_type, col_idx, - unknown_sizes); - break; - case SQL_DESC_NUM_PREC_RADIX: - value = opensearchtype_radix(conn, field_type); - break; - case SQL_DESC_LITERAL_PREFIX: - p = opensearchtype_literal_prefix(conn, field_type); - break; - case SQL_DESC_LITERAL_SUFFIX: - p = opensearchtype_literal_suffix(conn, field_type); - break; - case SQL_DESC_UNNAMED: - value = (fi && NAME_IS_NULL(fi->column_name) - && NAME_IS_NULL(fi->column_alias)) - ? SQL_UNNAMED - : SQL_NAMED; - break; - case 1211: /* SQL_CA_SS_COLUMN_HIDDEN ? */ - value = 0; - break; - case 1212: /* SQL_CA_SS_COLUMN_KEY ? */ - SC_set_error(stmt, STMT_OPTION_NOT_FOR_THE_DRIVER, - "this request may be for MS SQL Server", func); - return SQL_ERROR; - default: - SC_set_error(stmt, STMT_INVALID_OPTION_IDENTIFIER, - "ColAttribute for this type not implemented yet", - func); - return SQL_ERROR; - } - - result = SQL_SUCCESS; - - if (p) { /* char/binary data */ - size_t len = strlen(p); - - if (rgbDesc) { - strncpy_null((char *)rgbDesc, p, (size_t)cbDescMax); - - if (len >= (size_t)cbDescMax) { - result = SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the rgbDesc.", func); - } - } - - if (pcbDesc) - *pcbDesc = (SQLSMALLINT)len; - } else { - /* numeric data */ - if (pfDesc) - *pfDesc = value; - } - - return result; -} - -/* Returns result data for a single column in the current row. */ -RETCODE SQL_API OPENSEARCHAPI_GetData(HSTMT hstmt, SQLUSMALLINT icol, - SQLSMALLINT fCType, PTR rgbValue, - SQLLEN cbValueMax, SQLLEN *pcbValue) { - CSTR func = "OPENSEARCHAPI_GetData"; - QResultClass *res; - StatementClass *stmt = (StatementClass *)hstmt; - UInt2 num_cols; - SQLLEN num_rows; - OID field_type; - int atttypmod; - void *value = NULL; - RETCODE result = SQL_SUCCESS; - char get_bookmark = FALSE; - SQLSMALLINT target_type; - int precision = -1; -#ifdef WITH_UNIXODBC - SQLCHAR dum_rgb[2] = "\0\0"; -#endif /* WITH_UNIXODBC */ - - MYLOG(OPENSEARCH_TRACE, "entering stmt=%p icol=%d\n", stmt, icol); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - res = SC_get_Curres(stmt); - - if (STMT_EXECUTING == stmt->status) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Can't get data while statement is still executing.", - func); - return SQL_ERROR; - } - - if (stmt->status != STMT_FINISHED) { - SC_set_error(stmt, STMT_STATUS_ERROR, - "GetData can only be called after the successful " - "execution on a SQL statement", - func); - return SQL_ERROR; - } - -#ifdef WITH_UNIXODBC - if (NULL == rgbValue) /* unixODBC allows rgbValue is NULL? 
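 * Whether or not unixODBC really permits that, the block below substitutes
 * a 2-byte dummy buffer and clamps cbValueMax to 0 so the copy path cannot
 * dereference a NULL pointer.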
*/ - { - cbValueMax = 0; - rgbValue = dum_rgb; /* to avoid a crash */ - } -#endif /* WITH_UNIXODBC */ - if (SQL_ARD_TYPE == fCType) { - ARDFields *opts; - BindInfoClass *binfo = NULL; - - opts = SC_get_ARDF(stmt); - if (0 == icol) - binfo = opts->bookmark; - else if (icol <= opts->allocated && opts->bindings) - binfo = &opts->bindings[icol - 1]; - if (binfo) { - target_type = binfo->returntype; - MYLOG(OPENSEARCH_DEBUG, "SQL_ARD_TYPE=%d\n", target_type); - precision = binfo->precision; - } else { - SC_set_error(stmt, STMT_STATUS_ERROR, - "GetData can't determine the type via ARD", func); - return SQL_ERROR; - } - } else - target_type = fCType; - if (icol == 0) { - if (stmt->options.use_bookmarks == SQL_UB_OFF) { - SC_set_error( - stmt, STMT_COLNUM_ERROR, - "Attempt to retrieve bookmark with bookmark usage disabled", - func); - return SQL_ERROR; - } - - /* Make sure it is the bookmark data type */ - switch (target_type) { - case SQL_C_BOOKMARK: - case SQL_C_VARBOOKMARK: - break; - default: - MYLOG( - OPENSEARCH_ALL, - "GetData Column 0 is type %d not of type SQL_C_BOOKMARK\n", - target_type); - SC_set_error(stmt, STMT_PROGRAM_TYPE_OUT_OF_RANGE, - "Column 0 is not of type SQL_C_BOOKMARK", func); - return SQL_ERROR; - } - - get_bookmark = TRUE; - } else { - /* use zero-based column numbers */ - icol--; - - /* make sure the column number is valid */ - num_cols = QR_NumPublicResultCols(res); - if (icol >= num_cols) { - SC_set_error(stmt, STMT_INVALID_COLUMN_NUMBER_ERROR, - "Invalid column number.", func); - return SQL_ERROR; - } - } - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - if (!SC_is_fetchcursor(stmt)) { - /* make sure we're positioned on a valid row */ - num_rows = QR_get_num_total_tuples(res); - if ((stmt->currTuple < 0) || (stmt->currTuple >= num_rows)) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Not positioned on a valid row for GetData.", func); - result = SQL_ERROR; - goto cleanup; - } - MYLOG(OPENSEARCH_DEBUG, " num_rows = " FORMAT_LEN "\n", num_rows); - - if (!get_bookmark) { - SQLLEN curt = GIdx2CacheIdx(stmt->currTuple, stmt, res); - value = QR_get_value_backend_row(res, curt, icol); - MYLOG(OPENSEARCH_DEBUG, - "currT=" FORMAT_LEN " base=" FORMAT_LEN " rowset=" FORMAT_LEN - "\n", - stmt->currTuple, QR_get_rowstart_in_cache(res), - SC_get_rowset_start(stmt)); - MYLOG(OPENSEARCH_DEBUG, " value = '%s'\n", NULL_IF_NULL(value)); - } - } else { - /* it's a SOCKET result (backend data) */ - if (stmt->currTuple == -1 || !res || !res->tupleField) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Not positioned on a valid row for GetData.", func); - result = SQL_ERROR; - goto cleanup; - } - - if (!get_bookmark) { - /** value = QR_get_value_backend(res, icol); maybe thiw doesn't work - */ - SQLLEN curt = GIdx2CacheIdx(stmt->currTuple, stmt, res); - value = QR_get_value_backend_row(res, curt, icol); - } - MYLOG(OPENSEARCH_DEBUG, " socket: value = '%s'\n", NULL_IF_NULL(value)); - } - - if (get_bookmark) { - BOOL contents_get = FALSE; - - if (rgbValue) { - if (SQL_C_BOOKMARK == target_type - || (SQLLEN)sizeof(UInt4) <= cbValueMax) { - Int4 bookmark = (int)SC_make_int4_bookmark(stmt->currTuple); - contents_get = TRUE; - memcpy(rgbValue, &bookmark, sizeof(bookmark)); - } - } - if (pcbValue) - *pcbValue = sizeof(Int4); - - if (contents_get) - result = SQL_SUCCESS; - else { - 
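/* The caller's buffer cannot hold the 4-byte bookmark: report
 * truncation; pcbValue was already set to the full length above. */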
SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the GetData.", func); - result = SQL_SUCCESS_WITH_INFO; - } - goto cleanup; - } - - field_type = QR_get_field_type(res, icol); - atttypmod = QR_get_atttypmod(res, icol); - - MYLOG(OPENSEARCH_DEBUG, - "**** icol = %d, target_type = %d, field_type = %d, value = '%s'\n", - icol, target_type, field_type, NULL_IF_NULL(value)); - - SC_set_current_col(stmt, icol); - - result = (RETCODE)copy_and_convert_field(stmt, field_type, atttypmod, value, - target_type, precision, rgbValue, - cbValueMax, pcbValue, pcbValue); - - switch (result) { - case COPY_OK: - result = SQL_SUCCESS; - break; - - case COPY_UNSUPPORTED_TYPE: - SC_set_error(stmt, STMT_RESTRICTED_DATA_TYPE_ERROR, - "Received an unsupported type from OpenSearch.", - func); - result = SQL_ERROR; - break; - - case COPY_UNSUPPORTED_CONVERSION: - SC_set_error(stmt, STMT_RESTRICTED_DATA_TYPE_ERROR, - "Couldn't handle the necessary data type conversion.", - func); - result = SQL_ERROR; - break; - - case COPY_RESULT_TRUNCATED: - SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the GetData.", func); - result = SQL_SUCCESS_WITH_INFO; - break; - - case COPY_INVALID_STRING_CONVERSION: /* invalid string */ - SC_set_error(stmt, STMT_STRING_CONVERSION_ERROR, - "invalid string conversion occured.", func); - result = SQL_ERROR; - break; - - case COPY_GENERAL_ERROR: /* error msg already filled in */ - result = SQL_ERROR; - break; - - case COPY_NO_DATA_FOUND: - /* SC_log_error(func, "no data found", stmt); */ - result = SQL_NO_DATA_FOUND; - break; - - default: - SC_set_error( - stmt, STMT_INTERNAL_ERROR, - "Unrecognized return value from copy_and_convert_field.", func); - result = SQL_ERROR; - break; - } - -cleanup: -#undef return - MYLOG(OPENSEARCH_TRACE, "leaving %d\n", result); - return result; -} - -/* - * Returns data for bound columns in the current row ("hstmt->iCursor"), - * advances the cursor. - */ -RETCODE SQL_API OPENSEARCHAPI_Fetch(HSTMT hstmt) { - CSTR func = "OPENSEARCHAPI_Fetch"; - StatementClass *stmt = (StatementClass *)hstmt; - ARDFields *opts; - QResultClass *res; - BindInfoClass *bookmark; - RETCODE retval = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering stmt = %p, stmt->result= %p\n", stmt, - stmt ? SC_get_Curres(stmt) : NULL); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - SC_clear_error(stmt); - - if (!(res = SC_get_Curres(stmt), res)) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Null statement result in OPENSEARCHAPI_Fetch.", func); - return SQL_ERROR; - } - - /* Not allowed to bind a bookmark column when using SQLFetch. 
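 * Bookmark retrieval is expected to go through SQLExtendedFetch /
 * SQLFetchScroll, or SQLGetData on column 0, instead.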
*/ - opts = SC_get_ARDF(stmt); - if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer) { - SC_set_error( - stmt, STMT_COLNUM_ERROR, - "Not allowed to bind a bookmark column when using OPENSEARCHAPI_Fetch", - func); - return SQL_ERROR; - } - - if (stmt->status == STMT_EXECUTING) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Can't fetch while statement is still executing.", func); - return SQL_ERROR; - } - - if (stmt->status != STMT_FINISHED) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Fetch can only be called after the successful execution " - "of a SQL statement", - func); - return SQL_ERROR; - } - - if (opts->bindings == NULL) { - if (!SC_may_fetch_rows(stmt)) - return SQL_NO_DATA_FOUND; - /* just to avoid a crash if the user insists on calling this */ - /* function even if SQL_ExecDirect has reported an Error */ - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Bindings were not allocated properly.", func); - return SQL_ERROR; - } - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? -#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - if (stmt->rowset_start < 0) - SC_set_rowset_start(stmt, 0, TRUE); - QR_set_reqsize(res, 1); - /* QR_inc_rowstart_in_cache(res, stmt->last_fetch_count_include_ommitted); - */ - SC_inc_rowset_start(stmt, stmt->last_fetch_count_include_ommitted); - - retval = SC_fetch(stmt); -#undef return - return retval; -} - -SQLLEN -getNthValid(const QResultClass *res, SQLLEN sta, UWORD orientation, SQLULEN nth, - SQLLEN *nearest) { - SQLLEN i, num_tuples = QR_get_num_total_tuples(res), nearp; - SQLULEN count; - KeySet *keyset; - - if (!QR_once_reached_eof(res)) - num_tuples = INT_MAX; - /* Note that the parameter nth is 1-based */ - MYLOG(OPENSEARCH_DEBUG, - "get " FORMAT_ULEN "th Valid data from " FORMAT_LEN " to %s [dlt=%d]", - nth, sta, orientation == SQL_FETCH_PRIOR ? 
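OPENSEARCHAPI_Fetch above refuses to run without ARD bindings ("Bindings were not allocated properly.") and advances exactly one row per call. A minimal sketch of the calling pattern it serves, with illustrative names and sizes:

#include <sql.h>
#include <sqlext.h>
#include <stdio.h>

static void fetch_all_rows(SQLHSTMT hstmt) {
    SQLCHAR name[64];
    SQLLEN ind;

    /* Populate the ARD; without at least one binding the driver above
     * reports "Bindings were not allocated properly." */
    SQLBindCol(hstmt, 1, SQL_C_CHAR, name, sizeof(name), &ind);

    /* One row per call; the loop ends on SQL_NO_DATA (or SQL_ERROR). */
    while (SQL_SUCCEEDED(SQLFetch(hstmt)))
        printf("%s\n", ind == SQL_NULL_DATA ? "<null>" : (char *)name);
}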
"backward" : "forward", - res->dl_count); - if (0 == res->dl_count) { - MYPRINTF(OPENSEARCH_DEBUG, "\n"); - if (SQL_FETCH_PRIOR == orientation) { - if (sta + 1 >= (SQLLEN)nth) { - *nearest = sta + 1 - nth; - return nth; - } - *nearest = -1; - return -(SQLLEN)(sta + 1); - } else { - nearp = sta - 1 + nth; - if (nearp < num_tuples) { - *nearest = nearp; - return nth; - } - *nearest = num_tuples; - return -(SQLLEN)(num_tuples - sta); - } - } - count = 0; - if (QR_get_cursor(res)) { - SQLLEN *deleted = res->deleted; - SQLLEN delsta; - - if (SQL_FETCH_PRIOR == orientation) { - *nearest = sta + 1 - nth; - delsta = (-1); - MYPRINTF(OPENSEARCH_DEBUG, "deleted "); - for (i = res->dl_count - 1; i >= 0 && *nearest <= deleted[i]; i--) { - MYPRINTF(OPENSEARCH_DEBUG, "[" FORMAT_LEN "]=" FORMAT_LEN " ", i, - deleted[i]); - if (sta >= deleted[i]) { - (*nearest)--; - if (i > delsta) - delsta = i; - } - } - MYPRINTF(OPENSEARCH_DEBUG, "nearest=" FORMAT_LEN "\n", *nearest); - if (*nearest < 0) { - *nearest = -1; - count = sta - delsta; - } else - return nth; - } else { - MYPRINTF(OPENSEARCH_DEBUG, "\n"); - *nearest = sta - 1 + nth; - delsta = res->dl_count; - if (!QR_once_reached_eof(res)) - num_tuples = INT_MAX; - for (i = 0; i < res->dl_count && *nearest >= deleted[i]; i++) { - if (sta <= deleted[i]) { - (*nearest)++; - if (i < delsta) - delsta = i; - } - } - if (*nearest >= num_tuples) { - *nearest = num_tuples; - count = *nearest - sta + delsta - res->dl_count; - } else - return nth; - } - } else if (SQL_FETCH_PRIOR == orientation) { - for (i = sta, keyset = res->keyset + sta; i >= 0; i--, keyset--) { - if (0 - == (keyset->status - & (CURS_SELF_DELETING | CURS_SELF_DELETED - | CURS_OTHER_DELETED))) { - *nearest = i; - MYPRINTF(OPENSEARCH_DEBUG, " nearest=" FORMAT_LEN "\n", *nearest); - if (++count == nth) - return count; - } - } - *nearest = -1; - } else { - for (i = sta, keyset = res->keyset + sta; i < num_tuples; - i++, keyset++) { - if (0 - == (keyset->status - & (CURS_SELF_DELETING | CURS_SELF_DELETED - | CURS_OTHER_DELETED))) { - *nearest = i; - MYPRINTF(OPENSEARCH_DEBUG, " nearest=" FORMAT_LEN "\n", *nearest); - if (++count == nth) - return count; - } - } - *nearest = num_tuples; - } - MYPRINTF(OPENSEARCH_DEBUG, " nearest not found\n"); - return -(SQLLEN)count; -} - -/* - * return NO_DATA_FOUND macros - * save_rowset_start or num_tuples must be defined - */ -#define EXTFETCH_RETURN_BOF(stmt, res) \ - { \ - MYLOG(OPENSEARCH_ALL, "RETURN_BOF\n"); \ - SC_set_rowset_start(stmt, -1, TRUE); \ - stmt->currTuple = -1; \ - /* move_cursor_position_if_needed(stmt, res); */ \ - return SQL_NO_DATA_FOUND; \ - } -#define EXTFETCH_RETURN_EOF(stmt, res) \ - { \ - MYLOG(OPENSEARCH_ALL, "RETURN_EOF\n"); \ - SC_set_rowset_start(stmt, num_tuples, TRUE); \ - stmt->currTuple = -1; \ - /* move_cursor_position_if_needed(stmt, res); */ \ - return SQL_NO_DATA_FOUND; \ - } - -/* This fetchs a block of data (rowset). 
*/ -RETCODE SQL_API OPENSEARCHAPI_ExtendedFetch(HSTMT hstmt, SQLUSMALLINT fFetchType, - SQLLEN irow, SQLULEN *pcrow, - SQLUSMALLINT *rgfRowStatus, - SQLLEN bookmark_offset, SQLLEN rowsetSize) { - UNUSED(bookmark_offset, irow); - CSTR func = "OPENSEARCHAPI_ExtendedFetch"; - StatementClass *stmt = (StatementClass *)hstmt; - ARDFields *opts; - QResultClass *res; - BindInfoClass *bookmark; - SQLLEN num_tuples, i, fc_io; - SQLLEN save_rowset_size, progress_size; - SQLLEN rowset_start, rowset_end = (-1); - RETCODE result = SQL_SUCCESS; - char truncated, error, should_set_rowset_start = FALSE; - SQLLEN currp; - UWORD pstatus; - BOOL currp_is_valid, reached_eof, useCursor; - SQLLEN reqsize = rowsetSize; - - MYLOG(OPENSEARCH_TRACE, "entering stmt=%p rowsetSize=" FORMAT_LEN "\n", stmt, - rowsetSize); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - /* if (SC_is_fetchcursor(stmt) && !stmt->manual_result) */ - if ((SQL_CURSOR_FORWARD_ONLY != stmt->options.cursor_type) - || (fFetchType != SQL_FETCH_NEXT)) { - SC_set_error(stmt, STMT_FETCH_OUT_OF_RANGE, - "Only SQL_CURSOR_FORWARD_ONLY with SQL_FETCH_NEXT " - "cursors are supported.", - func); - return SQL_ERROR; - } - - SC_clear_error(stmt); - - if (!(res = SC_get_Curres(stmt), res)) { - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Null statement result in OPENSEARCHAPI_ExtendedFetch.", func); - return SQL_ERROR; - } - - opts = SC_get_ARDF(stmt); - /* - * If a bookmark column is bound but bookmark usage is off, then error. - */ - if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer - && stmt->options.use_bookmarks == SQL_UB_OFF) { - SC_set_error( - stmt, STMT_COLNUM_ERROR, - "Attempt to retrieve bookmark with bookmark usage disabled", func); - return SQL_ERROR; - } - - if (stmt->status == STMT_EXECUTING) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Can't fetch while statement is still executing.", func); - return SQL_ERROR; - } - - if (stmt->status != STMT_FINISHED) { - SC_set_error(stmt, STMT_STATUS_ERROR, - "ExtendedFetch can only be called after the successful " - "execution of a SQL statement", - func); - return SQL_ERROR; - } - - if (opts->bindings == NULL) { - if (!SC_may_fetch_rows(stmt)) - return SQL_NO_DATA_FOUND; - /* just to avoid a crash if the user insists on calling this */ - /* function even if SQL_ExecDirect has reported an Error */ - SC_set_error(stmt, STMT_INVALID_CURSOR_STATE_ERROR, - "Bindings were not allocated properly.", func); - return SQL_ERROR; - } - - /* Initialize to no rows fetched */ - if (rgfRowStatus) - for (i = 0; i < rowsetSize; i++) - *(rgfRowStatus + i) = SQL_ROW_NOROW; - - if (pcrow) - *pcrow = 0; - - useCursor = (SC_is_fetchcursor(stmt) && NULL != QR_get_cursor(res)); - num_tuples = QR_get_num_total_tuples(res); - reached_eof = QR_once_reached_eof(res) && QR_get_cursor(res); - if (useCursor && !reached_eof) - num_tuples = INT_MAX; - - MYLOG(OPENSEARCH_ALL, "num_tuples=" FORMAT_LEN "\n", num_tuples); - /* Save and discard the saved rowset size */ - save_rowset_size = stmt->save_rowset_size; - stmt->save_rowset_size = -1; - rowset_start = SC_get_rowset_start(stmt); - - QR_stop_movement(res); - res->move_offset = 0; - switch (fFetchType) { - case SQL_FETCH_NEXT: - progress_size = - (save_rowset_size > 0 ? 
save_rowset_size : rowsetSize); - if (rowset_start < 0) - SC_set_rowset_start(stmt, 0, TRUE); - else if (res->keyset) { - if (stmt->last_fetch_count <= progress_size) { - SC_inc_rowset_start( - stmt, stmt->last_fetch_count_include_ommitted); - progress_size -= stmt->last_fetch_count; - } - if (progress_size > 0) { - if (getNthValid(res, SC_get_rowset_start(stmt), - SQL_FETCH_NEXT, progress_size + 1, - &rowset_start) - <= 0) { - EXTFETCH_RETURN_EOF(stmt, res) - } else - should_set_rowset_start = TRUE; - } - } else - SC_inc_rowset_start(stmt, progress_size); - MYLOG(OPENSEARCH_DEBUG, - "SQL_FETCH_NEXT: num_tuples=" FORMAT_LEN - ", currtuple=" FORMAT_LEN ", rowst=" FORMAT_LEN "\n", - num_tuples, stmt->currTuple, rowset_start); - break; - default: - SC_set_error(stmt, STMT_FETCH_OUT_OF_RANGE, - "Unsupported OPENSEARCHAPI_ExtendedFetch Direction", func); - return SQL_ERROR; - } - - /* - * CHECK FOR PROPER CURSOR STATE - */ - - /* - * Handle Declare Fetch style specially because the end is not really - * the end... - */ - if (!should_set_rowset_start) - rowset_start = SC_get_rowset_start(stmt); - - // Get more results when cursor reaches end - { - ConnectionClass *conn = SC_get_conn(stmt); - if (conn != NULL) { - const SQLLEN end_rowset_size = rowset_start + rowsetSize; - while ((end_rowset_size >= num_tuples) - && (NULL != res->server_cursor_id)) { - GetNextResultSet(stmt); - num_tuples = QR_get_num_total_tuples(res); - } - } - } - - if (useCursor) { - if (reached_eof && rowset_start >= num_tuples) { - EXTFETCH_RETURN_EOF(stmt, res) - } - } else { - /* If *new* rowset is after the result_set, return no data found */ - if (rowset_start >= num_tuples) { - EXTFETCH_RETURN_EOF(stmt, res) - } - } - /* If *new* rowset is prior to result_set, return no data found */ - if (rowset_start < 0) { - if (rowset_start + rowsetSize <= 0) { - EXTFETCH_RETURN_BOF(stmt, res) - } else { /* overlap with beginning of result set, - * so get first rowset */ - SC_set_rowset_start(stmt, 0, TRUE); - } - should_set_rowset_start = FALSE; - } - -#ifdef __APPLE__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wkeyword-macro" -#endif // __APPLE__ -#define return DONT_CALL_RETURN_FROM_HERE ? ? ? 
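The `#define return DONT_CALL_RETURN_FROM_HERE ? ? ?` line just above is a compile-time guard: while it is in effect, any literal `return` expands to an unparseable expression, so every exit from the protected region is forced through the cleanup label, where `#undef return` restores the keyword. An isolated illustration of the idiom, using a hypothetical function (defining a keyword as a macro is formally undefined, which is why the driver suppresses -Wkeyword-macro around it):

#include <stdio.h>

int guarded_work(void) {
    int result = 0;
    FILE *f = fopen("data.txt", "r");   /* hypothetical input */
    if (!f)
        return -1;                      /* plain return is still legal here */

#define return DONT_CALL_RETURN_FROM_HERE ? ? ?
    if (getc(f) == EOF) {
        result = -1;
        goto cleanup;   /* writing `return -1;` here would fail to compile */
    }
    result = 1;
cleanup:
#undef return
    fclose(f);          /* the cleanup path runs no matter what */
    return result;
}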
-#ifdef __APPLE__ -#pragma clang diagnostic pop -#endif // __APPLE__ - /* set the rowset_start if needed */ - if (should_set_rowset_start) - SC_set_rowset_start(stmt, rowset_start, TRUE); - if (rowset_end < 0 && QR_haskeyset(res)) { - getNthValid(res, rowset_start, SQL_FETCH_NEXT, rowsetSize, &rowset_end); - reqsize = rowset_end - rowset_start + 1; - } - QR_set_reqsize(res, (Int4)reqsize); - /* currTuple is always 1 row prior to the rowset start */ - stmt->currTuple = RowIdx2GIdx(-1, stmt); - QR_set_rowstart_in_cache(res, SC_get_rowset_start(stmt)); - - /* Physical Row advancement occurs for each row fetched below */ - - MYLOG(OPENSEARCH_DEBUG, "new currTuple = " FORMAT_LEN "\n", stmt->currTuple); - - truncated = error = FALSE; - - currp = -1; - stmt->bind_row = 0; /* set the binding location */ - result = SC_fetch(stmt); - if (SQL_ERROR == result) - goto cleanup; - if (SQL_NO_DATA_FOUND != result && res->keyset) { - currp = GIdx2KResIdx(SC_get_rowset_start(stmt), stmt, res); - MYLOG(OPENSEARCH_ALL, "currp=" FORMAT_LEN "\n", currp); - if (currp < 0) { - result = SQL_ERROR; - MYLOG(OPENSEARCH_DEBUG, - "rowset_start=" FORMAT_LEN " but currp=" FORMAT_LEN "\n", - SC_get_rowset_start(stmt), currp); - SC_set_error(stmt, STMT_INTERNAL_ERROR, - "rowset_start not in the keyset", func); - goto cleanup; - } - } - for (i = 0, fc_io = 0; SQL_NO_DATA_FOUND != result && SQL_ERROR != result; - currp++) { - fc_io++; - currp_is_valid = FALSE; - if (res->keyset) { - if ((SQLULEN)currp < res->num_cached_keys) { - currp_is_valid = TRUE; - res->keyset[currp].status &= - ~CURS_IN_ROWSET; /* Off the flag first */ - } else { - MYLOG(OPENSEARCH_DEBUG, "Umm current row is out of keyset\n"); - break; - } - } - MYLOG(OPENSEARCH_ALL, "ExtFetch result=%d\n", result); - if (currp_is_valid && SQL_SUCCESS_WITH_INFO == result - && 0 == stmt->last_fetch_count) { - MYLOG(OPENSEARCH_ALL, "just skipping deleted row " FORMAT_LEN "\n", currp); - if (rowsetSize - i + fc_io > reqsize) - QR_set_reqsize(res, (Int4)(rowsetSize - i + fc_io)); - result = SC_fetch(stmt); - if (SQL_ERROR == result) - break; - continue; - } - - /* Determine Function status */ - if (result == SQL_SUCCESS_WITH_INFO) - truncated = TRUE; - else if (result == SQL_ERROR) - error = TRUE; - - /* Determine Row Status */ - if (rgfRowStatus) { - if (result == SQL_ERROR) - *(rgfRowStatus + i) = SQL_ROW_ERROR; - else if (currp_is_valid) { - pstatus = (res->keyset[currp].status & KEYSET_INFO_PUBLIC); - if (pstatus != 0 && pstatus != SQL_ROW_ADDED) { - rgfRowStatus[i] = pstatus; - } else - rgfRowStatus[i] = SQL_ROW_SUCCESS; - /* refresh the status */ - /* if (SQL_ROW_DELETED != pstatus) */ - res->keyset[currp].status &= (~KEYSET_INFO_PUBLIC); - } else - *(rgfRowStatus + i) = SQL_ROW_SUCCESS; - } - if (SQL_ERROR != result && currp_is_valid) - res->keyset[currp].status |= - CURS_IN_ROWSET; /* This is the unique place where the - CURS_IN_ROWSET bit is turned on */ - i++; - if (i >= rowsetSize) - break; - stmt->bind_row = (SQLSETPOSIROW)i; /* set the binding location */ - result = SC_fetch(stmt); - } - if (SQL_ERROR == result) - goto cleanup; - - /* Save the fetch count for SQLSetPos */ - stmt->last_fetch_count = i; - stmt->save_rowset_size = rowsetSize; - /* - currp = KResIdx2GIdx(currp, stmt, res); - stmt->last_fetch_count_include_ommitted = GIdx2RowIdx(currp, stmt); - */ - stmt->last_fetch_count_include_ommitted = fc_io; - - /* Reset next binding row */ - stmt->bind_row = 0; - - /* Move the cursor position to the first row in the result set. 
*/ - stmt->currTuple = RowIdx2GIdx(0, stmt); - - /* For declare/fetch, need to reset cursor to beginning of rowset */ - if (useCursor) - QR_set_position(res, 0); - - /* Set the number of rows retrieved */ - if (pcrow) - *pcrow = i; - MYLOG(OPENSEARCH_ALL, "pcrow=" FORMAT_LEN "\n", i); - - if (i == 0) - /* Only DeclareFetch should wind up here */ - result = SQL_NO_DATA_FOUND; - else if (error) - result = SQL_ERROR; - else if (truncated) - result = SQL_SUCCESS_WITH_INFO; - else if (SC_get_errornumber(stmt) == STMT_POS_BEFORE_RECORDSET) - result = SQL_SUCCESS_WITH_INFO; - else - result = SQL_SUCCESS; - -cleanup: -#undef return - return result; -} - -/* - * This determines whether there are more result sets available for - * the "hstmt". - */ -/* CC: return SQL_NO_DATA_FOUND since we do not support multiple result sets */ -RETCODE SQL_API OPENSEARCHAPI_MoreResults(HSTMT hstmt) { - StatementClass *stmt = (StatementClass *)hstmt; - QResultClass *res; - RETCODE ret = SQL_SUCCESS; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - res = SC_get_Curres(stmt); - if (res) { - res = res->next; - SC_set_Curres(stmt, res); - } - if (res) { - SQLSMALLINT num_p; - - if (stmt->multi_statement < 0) - OPENSEARCHAPI_NumParams(stmt, &num_p); - if (stmt->multi_statement > 0) { - const char *cmdstr; - - SC_initialize_cols_info(stmt, FALSE, TRUE); - stmt->statement_type = STMT_TYPE_UNKNOWN; - if (cmdstr = QR_get_command(res), NULL != cmdstr) - stmt->statement_type = (short)statement_type(cmdstr); - stmt->join_info = 0; - SC_clear_parse_method(stmt); - } - stmt->diag_row_count = res->recent_processed_row_count; - SC_set_rowset_start(stmt, -1, FALSE); - stmt->currTuple = -1; - } else { - OPENSEARCHAPI_FreeStmt(hstmt, SQL_CLOSE); - ret = SQL_NO_DATA_FOUND; - } - MYLOG(OPENSEARCH_DEBUG, "leaving %d\n", ret); - return ret; -} - -SQLLEN ClearCachedRows(TupleField *tuple, int num_fields, SQLLEN num_rows) { - SQLLEN i; - - for (i = 0; i < num_fields * num_rows; i++, tuple++) { - if (tuple->value) { - MYLOG(OPENSEARCH_ALL, - "freeing tuple[" FORMAT_LEN "][" FORMAT_LEN "].value=%p\n", - i / num_fields, i % num_fields, tuple->value); - free(tuple->value); - tuple->value = NULL; - } - tuple->len = -1; - } - return i; -} - -/* Set the cursor name on a statement handle */ -RETCODE SQL_API OPENSEARCHAPI_SetCursorName(HSTMT hstmt, const SQLCHAR *szCursor, - SQLSMALLINT cbCursor) { - CSTR func = "OPENSEARCHAPI_SetCursorName"; - StatementClass *stmt = (StatementClass *)hstmt; - - MYLOG(OPENSEARCH_TRACE, "entering hstmt=%p, szCursor=%p, cbCursorMax=%d\n", hstmt, - szCursor, cbCursor); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - - SET_NAME_DIRECTLY(stmt->cursor_name, - make_string(szCursor, cbCursor, NULL, 0)); - return SQL_SUCCESS; -} - -/* Return the cursor name for a statement handle */ -RETCODE SQL_API OPENSEARCHAPI_GetCursorName(HSTMT hstmt, SQLCHAR *szCursor, - SQLSMALLINT cbCursorMax, - SQLSMALLINT *pcbCursor) { - CSTR func = "OPENSEARCHAPI_GetCursorName"; - StatementClass *stmt = (StatementClass *)hstmt; - size_t len = 0; - RETCODE result; - - MYLOG(OPENSEARCH_DEBUG, - "entering hstmt=%p, szCursor=%p, cbCursorMax=%d, pcbCursor=%p\n", - hstmt, szCursor, cbCursorMax, pcbCursor); - - if (!stmt) { - SC_log_error(func, NULL_STRING, NULL); - return SQL_INVALID_HANDLE; - } - result = SQL_SUCCESS; - len = strlen(SC_cursor_name(stmt)); - - if (szCursor) { - strncpy_null((char *)szCursor, SC_cursor_name(stmt), cbCursorMax); - - if (len >= (size_t)cbCursorMax) { - result = 
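OPENSEARCHAPI_GetCursorName above reports the full name length through pcbCursor even when szCursor is too small or NULL, and returns SQL_SUCCESS_WITH_INFO on truncation. That supports the usual two-call sizing pattern on the application side; a sketch with a hypothetical helper:

#include <sql.h>
#include <sqlext.h>
#include <stdlib.h>

static SQLCHAR *get_cursor_name(SQLHSTMT hstmt) {
    SQLSMALLINT needed = 0;
    SQLCHAR *name;

    /* First call sizes the name; a NULL buffer only fills pcbCursor. */
    SQLGetCursorName(hstmt, NULL, 0, &needed);
    name = malloc((size_t)needed + 1);
    if (name)
        SQLGetCursorName(hstmt, name, (SQLSMALLINT)(needed + 1), &needed);
    return name;   /* caller frees */
}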
SQL_SUCCESS_WITH_INFO; - SC_set_error(stmt, STMT_TRUNCATED, - "The buffer was too small for the GetCursorName.", - func); - } - } - - if (pcbCursor) - *pcbCursor = (SQLSMALLINT)len; - - return result; -} diff --git a/sql-odbc/src/sqlodbc/setup.c b/sql-odbc/src/sqlodbc/setup.c deleted file mode 100644 index 89cc0dc957..0000000000 --- a/sql-odbc/src/sqlodbc/setup.c +++ /dev/null @@ -1,698 +0,0 @@ -#ifdef WIN32 -#include "opensearch_enlist.h" -#include "opensearch_odbc.h" -#include "loadlib.h" -#include "misc.h" // strncpy_null - -//#include "environ.h" -#ifdef WIN32 -#include -#endif -#include -#include - -#include "dlg_specific.h" -#include "opensearch_apifunc.h" -#include "resource.h" -#include "win_setup.h" - -#define INTFUNC __stdcall - -extern HINSTANCE s_hModule; /* Saved module handle. */ - -/* Constants */ -#define MIN(x, y) ((x) < (y) ? (x) : (y)) - -#define MAXKEYLEN (32 + 1) /* Max keyword length */ -#define MAXDESC (255 + 1) /* Max description length */ -#define MAXDSNAME (32 + 1) /* Max data source name length */ - -static void ParseAttributes(LPCSTR lpszAttributes, LPSETUPDLG lpsetupdlg); -static BOOL SetDSNAttributes(HWND hwndParent, LPSETUPDLG lpsetupdlg, - DWORD *errcode); -static BOOL SetDriverAttributes(LPCSTR lpszDriver, DWORD *pErrorCode, - LPSTR pErrorMessage, WORD cbMessage); -static void CenterDialog(HWND hdlg); - -/*-------- - * ConfigDSN - * - * Description: ODBC Setup entry point - * This entry point is called by the ODBC Installer - * (see file header for more details) - * Input : hwnd ----------- Parent window handle - * fRequest ------- Request type (i.e., add, config, or remove) - * lpszDriver ----- Driver name - * lpszAttributes - data source attribute string - * Output : TRUE success, FALSE otherwise - *-------- - */ -BOOL CALLBACK ConfigDSN(HWND hwnd, WORD fRequest, LPCSTR lpszDriver, - LPCSTR lpszAttributes) { - BOOL fSuccess; /* Success/fail flag */ - GLOBALHANDLE hglbAttr; - LPSETUPDLG lpsetupdlg; - - /* Allocate attribute array */ - hglbAttr = GlobalAlloc(GMEM_MOVEABLE | GMEM_ZEROINIT, sizeof(SETUPDLG)); - if (!hglbAttr) - return FALSE; - lpsetupdlg = (LPSETUPDLG)GlobalLock(hglbAttr); - - /* First of all, parse attribute string only for DSN entry */ - CC_conninfo_init(&(lpsetupdlg->ci), INIT_GLOBALS); - if (lpszAttributes) - ParseAttributes(lpszAttributes, lpsetupdlg); - - /* Save original data source name */ - if (lpsetupdlg->ci.dsn[0]) - STRCPY_FIXED(lpsetupdlg->szDSN, lpsetupdlg->ci.dsn); - else - lpsetupdlg->szDSN[0] = '\0'; - - /* Remove data source */ - if (ODBC_REMOVE_DSN == fRequest) { - /* Fail if no data source name was supplied */ - if (!lpsetupdlg->ci.dsn[0]) - fSuccess = FALSE; - - /* Otherwise remove data source from ODBC.INI */ - else - fSuccess = SQLRemoveDSNFromIni(lpsetupdlg->ci.dsn); - } - /* Add or Configure data source */ - else { - /* Save passed variables for global access (e.g., dialog access) */ - lpsetupdlg->hwndParent = hwnd; - lpsetupdlg->lpszDrvr = lpszDriver; - lpsetupdlg->fNewDSN = (ODBC_ADD_DSN == fRequest); - lpsetupdlg->fDefault = !lstrcmpi(lpsetupdlg->ci.dsn, INI_DSN); - - /* Cleanup conninfo and restore data source name */ - CC_conninfo_init(&(lpsetupdlg->ci), CLEANUP_FOR_REUSE | INIT_GLOBALS); - STRCPY_FIXED(lpsetupdlg->ci.dsn, lpsetupdlg->szDSN); - /* Get common attributes of Data Source */ - getDSNinfo(&(lpsetupdlg->ci), lpsetupdlg->lpszDrvr); - /* - * Parse attribute string again - * - * NOTE: Values supplied in the attribute string will always - * override settings in ODBC.INI - */ - if (lpszAttributes) - 
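For context, ConfigDSN is normally reached through the installer API: SQLConfigDataSource passes a list of "key=value" pairs, each terminated by '\0' and the whole list by a second '\0', which is what ParseAttributes later walks. A sketch of such a call; the driver name and the key names here are illustrative, not authoritative:

#include <windows.h>
#include <odbcinst.h>

static BOOL add_sample_dsn(void) {
    /* Doubly-null-terminated list; adjacent literals keep the embedded
     * '\0's, and the final implicit '\0' supplies the second terminator. */
    static const char attrs[] =
        "DSN=MyOpenSearch\0"
        "host=localhost\0"
        "port=9200\0";
    return SQLConfigDataSource(NULL, ODBC_ADD_DSN, "OpenSearch Driver",
                               attrs);
}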
ParseAttributes(lpszAttributes, lpsetupdlg); - - /* - * Display the appropriate dialog (if parent window handle - * supplied) - */ - if (hwnd) { - /* Display dialog(s) */ - fSuccess = - (IDOK - == DialogBoxParam(s_hModule, MAKEINTRESOURCE(DLG_CONFIG), hwnd, - ConfigDlgProc, (LPARAM)lpsetupdlg)); - } else if (lpsetupdlg->ci.dsn[0]) { - MYLOG(OPENSEARCH_DEBUG, "SetDSNAttributes\n"); - fSuccess = SetDSNAttributes(hwnd, lpsetupdlg, NULL); - } else - fSuccess = FALSE; - } - - CC_conninfo_release(&(lpsetupdlg->ci)); - GlobalUnlock(hglbAttr); - GlobalFree(hglbAttr); - - return fSuccess; -} - -/*-------- - * ConfigDriver - * - * Description: ODBC Setup entry point - * This entry point is called by the ODBC Installer - * (see file header for more details) - * Arguments : hwnd ----------- Parent window handle - * fRequest ------- Request type (i.e., add, config, or remove) - * lpszDriver ----- Driver name - * lpszArgs ------- A null-terminated string containing - arguments for a driver specific fRequest - * lpszMsg -------- A null-terminated string containing - an output message from the driver setup - * cbMsgMax ------- Length of lpszMsg - * pcbMsgOut ------ Total number of bytes available to - return in lpszMsg - * Returns : TRUE success, FALSE otherwise - *-------- - */ -BOOL CALLBACK ConfigDriver(HWND hwnd, WORD fRequest, LPCSTR lpszDriver, - LPCSTR lpszArgs, LPSTR lpszMsg, WORD cbMsgMax, - WORD *pcbMsgOut) { - UNUSED(lpszArgs, hwnd); - DWORD errorCode = 0; - BOOL fSuccess = TRUE; /* Success/fail flag */ - - if (cbMsgMax > 0 && NULL != lpszMsg) - *lpszMsg = '\0'; - if (NULL != pcbMsgOut) - *pcbMsgOut = 0; - - /* Add the driver */ - switch (fRequest) { - case ODBC_INSTALL_DRIVER: - fSuccess = - SetDriverAttributes(lpszDriver, &errorCode, lpszMsg, cbMsgMax); - if (cbMsgMax > 0 && NULL != lpszMsg) - *pcbMsgOut = (WORD)strlen(lpszMsg); - break; - case ODBC_REMOVE_DRIVER: - break; - default: - errorCode = ODBC_ERROR_INVALID_REQUEST_TYPE; - fSuccess = FALSE; - } - - if (!fSuccess) - SQLPostInstallerError(errorCode, lpszMsg); - return fSuccess; -} - -/*------- - * CenterDialog - * - * Description: Center the dialog over the frame window - * Input : hdlg -- Dialog window handle - * Output : None - *------- - */ -static void CenterDialog(HWND hdlg) { - HWND hwndFrame; - RECT rcDlg, rcScr, rcFrame; - int cx, cy; - - hwndFrame = GetParent(hdlg); - - GetWindowRect(hdlg, &rcDlg); - cx = rcDlg.right - rcDlg.left; - cy = rcDlg.bottom - rcDlg.top; - - GetClientRect(hwndFrame, &rcFrame); - ClientToScreen(hwndFrame, (LPPOINT)(&rcFrame.left)); - ClientToScreen(hwndFrame, (LPPOINT)(&rcFrame.right)); - rcDlg.top = rcFrame.top + (((rcFrame.bottom - rcFrame.top) - cy) >> 1); - rcDlg.left = rcFrame.left + (((rcFrame.right - rcFrame.left) - cx) >> 1); - rcDlg.bottom = rcDlg.top + cy; - rcDlg.right = rcDlg.left + cx; - - GetWindowRect(GetDesktopWindow(), &rcScr); - if (rcDlg.bottom > rcScr.bottom) { - rcDlg.bottom = rcScr.bottom; - rcDlg.top = rcDlg.bottom - cy; - } - if (rcDlg.right > rcScr.right) { - rcDlg.right = rcScr.right; - rcDlg.left = rcDlg.right - cx; - } - - if (rcDlg.left < 0) - rcDlg.left = 0; - if (rcDlg.top < 0) - rcDlg.top = 0; - - MoveWindow(hdlg, rcDlg.left, rcDlg.top, cx, cy, TRUE); - return; -} - -/*------- - * ConfigDlgProc - * Description: Manage add data source name dialog - * Input : hdlg --- Dialog window handle - * wMsg --- Message - * wParam - Message parameter - * lParam - Message parameter - * Output : TRUE if message processed, FALSE otherwise - *------- - */ -INT_PTR CALLBACK 
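The centering logic in CenterDialog above reduces to midpoint placement followed by clamping against the screen rectangle. The same arithmetic in isolation, with plain ints standing in for the RECT fields:

static void center(int frame_left, int frame_top,
                   int frame_right, int frame_bottom,
                   int scr_right, int scr_bottom,
                   int cx, int cy, int *out_left, int *out_top) {
    int left = frame_left + (((frame_right - frame_left) - cx) >> 1);
    int top  = frame_top  + (((frame_bottom - frame_top) - cy) >> 1);

    if (top + cy > scr_bottom)  top  = scr_bottom - cy;  /* clamp bottom */
    if (left + cx > scr_right)  left = scr_right - cx;   /* clamp right  */
    if (left < 0) left = 0;
    if (top < 0)  top = 0;
    *out_left = left;
    *out_top  = top;
}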
ConfigDlgProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam) { - LPSETUPDLG lpsetupdlg; - ConnInfo *ci; - DWORD cmd; - - switch (wMsg) { - /* Initialize the dialog */ - case WM_INITDIALOG: - lpsetupdlg = (LPSETUPDLG)lParam; - ci = &lpsetupdlg->ci; - - SetWindowLongPtr(hdlg, DWLP_USER, lParam); - CenterDialog(hdlg); /* Center dialog */ - - /* Initialize dialog fields */ - SetDlgStuff(hdlg, ci); - - /* Save drivername */ - if (!(lpsetupdlg->ci.drivername[0])) - STRCPY_FIXED(lpsetupdlg->ci.drivername, lpsetupdlg->lpszDrvr); - - if (lpsetupdlg->fNewDSN || !ci->dsn[0]) - EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), TRUE); - if (lpsetupdlg->fDefault) { - EnableWindow(GetDlgItem(hdlg, IDC_DSNAME), FALSE); - } else - SendDlgItemMessage(hdlg, IDC_DSNAME, EM_LIMITTEXT, - (WPARAM)(MAXDSNAME - 1), 0L); - - SendDlgItemMessage(hdlg, IDC_DESC, EM_LIMITTEXT, - (WPARAM)(MAXDESC - 1), 0L); - - if (!stricmp(ci->authtype, AUTHTYPE_IAM)) { - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 0, - (WPARAM)0); - } else if (!stricmp(ci->authtype, AUTHTYPE_BASIC)) { - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 1, - (WPARAM)0); - } else { // AUTHTYPE_NONE - SendDlgItemMessage(hdlg, IDC_AUTHTYPE, CB_SETCURSEL, 2, - (WPARAM)0); - } - - return TRUE; /* Focus was not set */ - - /* Process buttons */ - case WM_COMMAND: - lpsetupdlg = (LPSETUPDLG)GetWindowLongPtr(hdlg, DWLP_USER); - switch (cmd = GET_WM_COMMAND_ID(wParam, lParam)) { - /* - * Ensure the OK button is enabled only when a data - * source name - */ - /* is entered */ - case IDC_DSNAME: - if (GET_WM_COMMAND_CMD(wParam, lParam) == EN_CHANGE) { - char szItem[MAXDSNAME]; /* Edit control text */ - - /* Enable/disable the OK button */ - EnableWindow(GetDlgItem(hdlg, IDOK), - GetDlgItemText(hdlg, IDC_DSNAME, szItem, - sizeof(szItem))); - return TRUE; - } - break; - - /* Accept results */ - case IDOK: - /* Retrieve dialog values */ - if (!lpsetupdlg->fDefault) - GetDlgItemText(hdlg, IDC_DSNAME, lpsetupdlg->ci.dsn, - sizeof(lpsetupdlg->ci.dsn)); - - /* Get Dialog Values */ - GetDlgStuff(hdlg, &lpsetupdlg->ci); - /* Update ODBC.INI */ - SetDSNAttributes(hdlg, lpsetupdlg, NULL); - - case IDCANCEL: - EndDialog(hdlg, wParam); - return TRUE; - - case IDOK2: // <== TEST button - { - /* Get Dialog Values */ - GetDlgStuff(hdlg, &lpsetupdlg->ci); - test_connection(lpsetupdlg->hwndParent, &lpsetupdlg->ci, - FALSE); - return TRUE; - break; - } - case ID_ADVANCED_OPTIONS: { - GetDlgStuff(hdlg, &lpsetupdlg->ci); - if (DialogBoxParam( - s_hModule, MAKEINTRESOURCE(DLG_ADVANCED_OPTIONS), - hdlg, advancedOptionsProc, (LPARAM)&lpsetupdlg->ci) > 0) - EndDialog(hdlg, 0); - break; - } - case ID_LOG_OPTIONS: { - if (DialogBoxParam( - s_hModule, MAKEINTRESOURCE(DLG_LOG_OPTIONS), hdlg, - logOptionsProc, (LPARAM)&lpsetupdlg->ci) > 0) - EndDialog(hdlg, 0); - break; - } - case IDC_AUTHTYPE: { - SetAuthenticationVisibility(hdlg, GetCurrentAuthMode(hdlg)); - } - } - break; - case WM_CTLCOLORSTATIC: - if (lParam == (LPARAM)GetDlgItem(hdlg, IDC_NOTICE_USER)) { - HBRUSH hBrush = (HBRUSH)GetStockObject(LTGRAY_BRUSH); - SetTextColor((HDC)wParam, RGB(255, 0, 0)); - return (LRESULT)hBrush; - } - break; - } - - /* Message not processed */ - return FALSE; -} - -#ifdef USE_PROC_ADDRESS -#define SQLALLOCHANDLEFUNC sqlallochandle -#define SQLSETENVATTRFUNC sqlsetenvattr -#define SQLDISCONNECTFUNC sqldisconnect -#define SQLFREEHANDLEFUNC sqlfreehandle -#ifdef UNICODE_SUPPORT -#define SQLGETDIAGRECFUNC sqlgetdiagrecw -#define SQLDRIVERCONNECTFUNC sqldriverconnectw -#define 
SQLSETCONNECTATTRFUNC sqlsetconnectattrw -#else -#define SQLGETDIAGRECFUNC sqlgetdiagrec -#define SQLDRIVERCONNECTFUNC sqldriverconnect -#define SQLSETCONNECTATTRFUNC sqlsetconnectAttr -#endif /* UNICODE_SUPPORT */ -#else -#define SQLALLOCHANDLEFUNC SQLAllocHandle -#define SQLSETENVATTRFUNC SQLSetEnvAttr -#define SQLDISCONNECTFUNC SQLDisconnect -#define SQLFREEHANDLEFUNC SQLFreeHandle -#ifdef UNICODE_SUPPORT -#define SQLGETDIAGRECFUNC SQLGetDiagRecW -#define SQLDRIVERCONNECTFUNC SQLDriverConnectW -#define SQLSETCONNECTATTRFUNC SQLSetConnectAttrW -#else -#define SQLGETDIAGRECFUNC SQLGetDiagRec -#define SQLDRIVERCONNECTFUNC SQLDriverConnect -#define SQLSETCONNECTATTRFUNC SQLSetConnectAttr -#endif /* UNICODE_SUPPORT */ -#endif /* USE_PROC_ADDRESS */ - -#define MAX_CONNECT_STRING_LEN 2048 -#ifdef UNICODE_SUPPORT -#define MESSAGEBOXFUNC MessageBoxW -#define _T(str) L##str -#define SNTPRINTF _snwprintf -#else -#define MESSAGEBOXFUNC MessageBoxA -#define _T(str) str -#define SNTPRINTF snprintf -#endif /* UNICODE_SUPPORT */ - -void test_connection(HANDLE hwnd, ConnInfo *ci, BOOL withDTC) { - SQLINTEGER errnum; - char out_conn[MAX_CONNECT_STRING_LEN]; - SQLRETURN ret; - SQLHENV env = SQL_NULL_HANDLE; - SQLHDBC conn = SQL_NULL_HANDLE; - SQLSMALLINT str_len; - char dsn_1st; - BOOL connected = FALSE; -#ifdef UNICODE_SUPPORT - SQLWCHAR wout_conn[MAX_CONNECT_STRING_LEN]; - SQLWCHAR szMsg[SQL_MAX_MESSAGE_LENGTH]; - const SQLWCHAR *ermsg = NULL; - SQLWCHAR *conn_str; -#else - SQLCHAR szMsg[SQL_MAX_MESSAGE_LENGTH]; - const SQLCHAR *ermsg = NULL; - SQLCHAR *conn_str; -#endif /* UNICODE_SUPPORT */ - - dsn_1st = ci->dsn[0]; - ci->dsn[0] = '\0'; - makeConnectString(out_conn, ci, sizeof(out_conn)); -#ifdef UNICODE_SUPPORT - MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED, out_conn, -1, wout_conn, - sizeof(wout_conn) / sizeof(wout_conn[0])); - conn_str = wout_conn; -#else - conn_str = out_conn; -#endif /* UNICODE_SUPPORT */ - ci->dsn[0] = dsn_1st; - if (!SQL_SUCCEEDED( - ret = SQLALLOCHANDLEFUNC(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env))) { - ermsg = _T("SQLAllocHandle for env error"); - goto cleanup; - } - if (!SQL_SUCCEEDED(ret = SQLSETENVATTRFUNC(env, SQL_ATTR_ODBC_VERSION, - (SQLPOINTER)SQL_OV_ODBC3, 0))) { - SNTPRINTF(szMsg, _countof(szMsg), _T("SQLAllocHandle for env error=%d"), - ret); - goto cleanup; - } - if (!SQL_SUCCEEDED(ret = SQLALLOCHANDLEFUNC(SQL_HANDLE_DBC, env, &conn))) { - SQLGETDIAGRECFUNC(SQL_HANDLE_ENV, env, 1, NULL, &errnum, szMsg, - _countof(szMsg), &str_len); - ermsg = szMsg; - goto cleanup; - } - if (!SQL_SUCCEEDED(ret = SQLDRIVERCONNECTFUNC(conn, hwnd, conn_str, SQL_NTS, - NULL, MAX_CONNECT_STRING_LEN, - &str_len, - SQL_DRIVER_NOPROMPT))) { - SQLGETDIAGRECFUNC(SQL_HANDLE_DBC, conn, 1, NULL, &errnum, szMsg, - _countof(szMsg), &str_len); - ermsg = szMsg; - goto cleanup; - } - connected = TRUE; - ermsg = _T("Connection successful"); - - if (withDTC) { -#ifdef _HANDLE_ENLIST_IN_DTC_ - HRESULT res; - void *pObj = NULL; - - pObj = CALL_GetTransactionObject(&res); - if (NULL != pObj) { - SQLRETURN ret = SQLSETCONNECTATTRFUNC(conn, SQL_ATTR_ENLIST_IN_DTC, - (SQLPOINTER)pObj, 0); - if (SQL_SUCCEEDED(ret)) { - SQLSETCONNECTATTRFUNC(conn, SQL_ATTR_ENLIST_IN_DTC, - SQL_DTC_DONE, 0); - SNTPRINTF(szMsg, _countof(szMsg), - _T("%s\nenlistment was successful\n"), ermsg); - ermsg = szMsg; - } else { - int strl; - - SNTPRINTF(szMsg, _countof(szMsg), _T("%s\nMSDTC error:"), - ermsg); - for (strl = 0; strl < SQL_MAX_MESSAGE_LENGTH; strl++) { - if (!szMsg[strl]) - break; - } - SQLGETDIAGRECFUNC( - SQL_HANDLE_DBC, 
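Stripped of the dialog and DTC machinery, test_connection follows the canonical ODBC connect/tear-down sequence. A self-contained sketch of that skeleton; the connection string is illustrative:

#include <sql.h>
#include <sqlext.h>

static SQLRETURN try_connect(const char *connstr) {
    SQLHENV env = SQL_NULL_HANDLE;
    SQLHDBC dbc = SQL_NULL_HANDLE;
    SQLRETURN rc;

    rc = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &env);
    if (SQL_SUCCEEDED(rc))
        rc = SQLSetEnvAttr(env, SQL_ATTR_ODBC_VERSION,
                           (SQLPOINTER)SQL_OV_ODBC3, 0);
    if (SQL_SUCCEEDED(rc))
        rc = SQLAllocHandle(SQL_HANDLE_DBC, env, &dbc);
    if (SQL_SUCCEEDED(rc)) {
        rc = SQLDriverConnect(dbc, NULL, (SQLCHAR *)connstr, SQL_NTS,
                              NULL, 0, NULL, SQL_DRIVER_NOPROMPT);
        if (SQL_SUCCEEDED(rc))
            SQLDisconnect(dbc);
    }
    if (dbc) SQLFreeHandle(SQL_HANDLE_DBC, dbc);  /* always release handles */
    if (env) SQLFreeHandle(SQL_HANDLE_ENV, env);
    return rc;
}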
conn, 1, NULL, &errnum, szMsg + strl, - (SQLSMALLINT)(_countof(szMsg) - strl), &str_len); - ermsg = szMsg; - } - CALL_ReleaseTransactionObject(pObj); - } else if (FAILED(res)) { - SNTPRINTF(szMsg, _countof(szMsg), - _T("%s\nDistributed Transaction enlistment error %x"), - ermsg, res); - ermsg = szMsg; - } -#else /* _HANDLE_ENLIST_IN_DTC_ */ - SNTPRINTF(szMsg, _countof(szMsg), - _T("%s\nDistributed Transaction enlistment not supported by ") - _T("this driver"), - ermsg); - ermsg = szMsg; -#endif - } - -cleanup: - if (NULL != ermsg && NULL != hwnd) { - MESSAGEBOXFUNC(hwnd, ermsg, _T("Connection Test"), - MB_ICONEXCLAMATION | MB_OK); - } - -#undef _T - - if (NULL != conn) { - if (connected) - SQLDISCONNECTFUNC(conn); - SQLFREEHANDLEFUNC(SQL_HANDLE_DBC, conn); - } - if (env) - SQLFREEHANDLEFUNC(SQL_HANDLE_ENV, env); - - return; -} - -/*------- - * ParseAttributes - * - * Description: Parse attribute string moving values into the aAttr array - * Input : lpszAttributes - Pointer to attribute string - * Output : None (global aAttr normally updated) - *------- - */ -static void ParseAttributes(LPCSTR lpszAttributes, LPSETUPDLG lpsetupdlg) { - LPCSTR lpsz; - LPCSTR lpszStart; - char aszKey[MAXKEYLEN]; - size_t cbKey; - char value[MAXESPATH]; - - for (lpsz = lpszAttributes; *lpsz; lpsz++) { - /* - * Extract key name (e.g., DSN), it must be terminated by an - * equals - */ - lpszStart = lpsz; - for (;; lpsz++) { - if (!*lpsz) - return; /* No key was found */ - else if (*lpsz == '=') - break; /* Valid key found */ - } - /* Determine the key's index in the key table (-1 if not found) */ - cbKey = lpsz - lpszStart; - if (cbKey < sizeof(aszKey)) { - memcpy(aszKey, lpszStart, cbKey); - aszKey[cbKey] = '\0'; - } - - /* Locate end of key value */ - lpszStart = ++lpsz; - for (; *lpsz; lpsz++) - ; - - /* lpsetupdlg->aAttr[iElement].fSupplied = TRUE; */ - memcpy(value, lpszStart, MIN(lpsz - lpszStart + 1, MAXESPATH)); - - MYLOG(OPENSEARCH_DEBUG, "aszKey='%s', value='%s'\n", aszKey, value); - - /* Copy the appropriate value to the conninfo */ - copyConnAttributes(&lpsetupdlg->ci, aszKey, value); - } - return; -} - -/*-------- - * SetDSNAttributes - * - * Description: Write data source attributes to ODBC.INI - * Input : hwnd - Parent window handle (plus globals) - * Output : TRUE if successful, FALSE otherwise - *-------- - */ -static BOOL SetDSNAttributes(HWND hwndParent, LPSETUPDLG lpsetupdlg, - DWORD *errcode) { - LPCSTR lpszDSN; /* Pointer to data source name */ - - lpszDSN = lpsetupdlg->ci.dsn; - - if (errcode) - *errcode = 0; - /* Validate arguments */ - if (lpsetupdlg->fNewDSN && !*lpsetupdlg->ci.dsn) - return FALSE; - - /* Write the data source name */ - if (!SQLWriteDSNToIni(lpszDSN, lpsetupdlg->lpszDrvr)) { - RETCODE ret = SQL_ERROR; - DWORD err = (DWORD)SQL_ERROR; - char szMsg[SQL_MAX_MESSAGE_LENGTH]; - - ret = SQLInstallerError(1, &err, szMsg, sizeof(szMsg), NULL); - if (hwndParent) { - char szBuf[MAXESPATH]; - - if (SQL_SUCCESS != ret) { - LoadString(s_hModule, IDS_BADDSN, szBuf, sizeof(szBuf)); - SPRINTF_FIXED(szMsg, szBuf, lpszDSN); - } - LoadString(s_hModule, IDS_MSGTITLE, szBuf, sizeof(szBuf)); - MessageBox(hwndParent, szMsg, szBuf, MB_ICONEXCLAMATION | MB_OK); - } - if (errcode) - *errcode = err; - return FALSE; - } - - /* Update ODBC.INI */ - write_Ci_Drivers(ODBC_INI, lpsetupdlg->ci.dsn, &(lpsetupdlg->ci.drivers)); - writeDSNinfo(&lpsetupdlg->ci); - - /* If the data source name has changed, remove the old name */ - if (lstrcmpi(lpsetupdlg->szDSN, lpsetupdlg->ci.dsn)) - 
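ParseAttributes above splits each pair at the first '=' and treats the pair's own '\0' as the value terminator, then steps over it to the next pair. The same scan as a standalone sketch with hypothetical names:

#include <stdio.h>
#include <string.h>

static void walk_attributes(const char *attrs) {
    const char *p = attrs;
    while (*p) {
        const char *eq = strchr(p, '=');
        size_t len = strlen(p);          /* this pair ends at its own '\0' */
        if (eq)
            printf("key='%.*s' value='%s'\n", (int)(eq - p), p, eq + 1);
        p += len + 1;                    /* step over the terminator */
    }
}

/* walk_attributes("DSN=MyOpenSearch\0host=localhost\0") would print two
 * pairs; the literal's implicit trailing '\0' supplies the double null. */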
SQLRemoveDSNFromIni(lpsetupdlg->szDSN); - return TRUE; -} - -/*-------- - * SetDriverAttributes - * - * Description: Write driver information attributes to ODBCINST.INI - * Input : lpszDriver - The driver name - * Output : TRUE if successful, FALSE otherwise - *-------- - */ -static BOOL SetDriverAttributes(LPCSTR lpszDriver, DWORD *pErrorCode, - LPSTR message, WORD cbMessage) { - BOOL ret = FALSE; - char ver_string[8]; - - /* Validate arguments */ - if (!lpszDriver || !lpszDriver[0]) { - if (pErrorCode) - *pErrorCode = ODBC_ERROR_INVALID_NAME; - strncpy_null(message, "Driver name not specified", cbMessage); - return FALSE; - } - - if (!SQLWritePrivateProfileString(lpszDriver, "APILevel", "1", - ODBCINST_INI)) - goto cleanup; - if (!SQLWritePrivateProfileString(lpszDriver, "ConnectFunctions", "YYN", - ODBCINST_INI)) - goto cleanup; - SPRINTF_FIXED(ver_string, "%02x.%02x", ODBCVER / 256, ODBCVER % 256); - if (!SQLWritePrivateProfileString(lpszDriver, "DriverODBCVer", ver_string, - ODBCINST_INI)) - goto cleanup; - if (!SQLWritePrivateProfileString(lpszDriver, "FileUsage", "0", - ODBCINST_INI)) - goto cleanup; - if (!SQLWritePrivateProfileString(lpszDriver, "SQLLevel", "1", - ODBCINST_INI)) - goto cleanup; - - ret = TRUE; -cleanup: - if (!ret) { - if (pErrorCode) - *pErrorCode = ODBC_ERROR_REQUEST_FAILED; - strncpy_null(message, "Failed to WritePrivateProfileString", cbMessage); - } - return ret; -} - -BOOL INTFUNC ChangeDriverName(HWND hwndParent, LPSETUPDLG lpsetupdlg, - LPCSTR driver_name) { - DWORD err = 0; - ConnInfo *ci = &lpsetupdlg->ci; - - if (!ci->dsn[0]) { - err = IDS_BADDSN; - } else if (!driver_name || strnicmp(driver_name, "opensearch", 10)) { - err = IDS_BADDSN; - } else { - LPCSTR lpszDrvr = lpsetupdlg->lpszDrvr; - - lpsetupdlg->lpszDrvr = driver_name; - if (!SetDSNAttributes(hwndParent, lpsetupdlg, &err)) { - if (!err) - err = IDS_BADDSN; - lpsetupdlg->lpszDrvr = lpszDrvr; - } - } - return (err == 0); -} - -#endif /* WIN32 */ diff --git a/sql-odbc/src/sqlodbc/statement.c b/sql-odbc/src/sqlodbc/statement.c deleted file mode 100644 index ea4ece9d98..0000000000 --- a/sql-odbc/src/sqlodbc/statement.c +++ /dev/null @@ -1,1465 +0,0 @@ -// clang-format off -#include "statement.h" -#include "misc.h" // strncpy_null - -#include "bind.h" -#include "opensearch_connection.h" -#include "multibyte.h" -#include "qresult.h" -#include "convert.h" -#include "environ.h" -#include "loadlib.h" - -#include -#include -#include - -#include "opensearch_apifunc.h" -#include "opensearch_helper.h" -#include "opensearch_statement.h" -// clang-format on - -/* Map sql commands to statement types */ -static const struct { - int type; - char *s; -} Statement_Type[] = - - {{STMT_TYPE_SELECT, "SELECT"}, - {STMT_TYPE_INSERT, "INSERT"}, - {STMT_TYPE_UPDATE, "UPDATE"}, - {STMT_TYPE_DELETE, "DELETE"}, - {STMT_TYPE_PROCCALL, "{"}, - {STMT_TYPE_SET, "SET"}, - {STMT_TYPE_RESET, "RESET"}, - {STMT_TYPE_CREATE, "CREATE"}, - {STMT_TYPE_DECLARE, "DECLARE"}, - {STMT_TYPE_FETCH, "FETCH"}, - {STMT_TYPE_MOVE, "MOVE"}, - {STMT_TYPE_CLOSE, "CLOSE"}, - {STMT_TYPE_PREPARE, "PREPARE"}, - {STMT_TYPE_EXECUTE, "EXECUTE"}, - {STMT_TYPE_DEALLOCATE, "DEALLOCATE"}, - {STMT_TYPE_DROP, "DROP"}, - {STMT_TYPE_START, "BEGIN"}, - {STMT_TYPE_START, "START"}, - {STMT_TYPE_TRANSACTION, "SAVEPOINT"}, - {STMT_TYPE_TRANSACTION, "RELEASE"}, - {STMT_TYPE_TRANSACTION, "COMMIT"}, - {STMT_TYPE_TRANSACTION, "END"}, - {STMT_TYPE_TRANSACTION, "ROLLBACK"}, - {STMT_TYPE_TRANSACTION, "ABORT"}, - {STMT_TYPE_LOCK, "LOCK"}, - {STMT_TYPE_ALTER, "ALTER"}, - 
{STMT_TYPE_GRANT, "GRANT"}, - {STMT_TYPE_REVOKE, "REVOKE"}, - {STMT_TYPE_COPY, "COPY"}, - {STMT_TYPE_ANALYZE, "ANALYZE"}, - {STMT_TYPE_NOTIFY, "NOTIFY"}, - {STMT_TYPE_EXPLAIN, "EXPLAIN"} - - /* - * Special-commands that cannot be run in a transaction block. This isn't - * as granular as it could be. VACUUM can never be run in a transaction - * block, but some variants of REINDEX and CLUSTER can be. CHECKPOINT - * doesn't throw an error if you do, but it cannot be rolled back so - * there's no point in beginning a new transaction for it. - */ - , - {STMT_TYPE_SPECIAL, "VACUUM"}, - {STMT_TYPE_SPECIAL, "REINDEX"}, - {STMT_TYPE_SPECIAL, "CLUSTER"}, - {STMT_TYPE_SPECIAL, "CHECKPOINT"} - - , - {STMT_TYPE_WITH, "WITH"}, - {0, NULL}}; - -static void SC_set_error_if_not_set(StatementClass *self, int errornumber, - const char *errmsg, const char *func); - -RETCODE SQL_API OPENSEARCHAPI_AllocStmt(HDBC hdbc, HSTMT *phstmt, UDWORD flag) { - CSTR func = "OPENSEARCHAPI_AllocStmt"; - ConnectionClass *conn = (ConnectionClass *)hdbc; - StatementClass *stmt; - ARDFields *ardopts; - - MYLOG(OPENSEARCH_TRACE, "entering...\n"); - - if (!conn) { - CC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - - stmt = SC_Constructor(conn); - - MYLOG(OPENSEARCH_DEBUG, "**** : hdbc = %p, stmt = %p\n", hdbc, stmt); - - if (!stmt) { - CC_set_error(conn, CONN_STMT_ALLOC_ERROR, - "No more memory to allocate a further SQL-statement", - func); - *phstmt = SQL_NULL_HSTMT; - return SQL_ERROR; - } - - if (!CC_add_statement(conn, stmt)) { - CC_set_error(conn, CONN_STMT_ALLOC_ERROR, - "Maximum number of statements exceeded.", func); - SC_Destructor(stmt); - *phstmt = SQL_NULL_HSTMT; - return SQL_ERROR; - } - - *phstmt = (HSTMT)stmt; - - stmt->iflag = flag; - /* Copy default statement options based from Connection options */ - if (0 != (PODBC_INHERIT_CONNECT_OPTIONS & flag)) { - stmt->options = stmt->options_orig = conn->stmtOptions; - stmt->ardi.ardf = conn->ardOptions; - } else { - InitializeStatementOptions(&stmt->options_orig); - stmt->options = stmt->options_orig; - InitializeARDFields(&stmt->ardi.ardf); - } - ardopts = SC_get_ARDF(stmt); - ARD_AllocBookmark(ardopts); - - /* Save the handle for later */ - stmt->phstmt = phstmt; - - return SQL_SUCCESS; -} - -RETCODE SQL_API OPENSEARCHAPI_FreeStmt(HSTMT hstmt, SQLUSMALLINT fOption) { - CSTR func = "OPENSEARCHAPI_FreeStmt"; - StatementClass *stmt = (StatementClass *)hstmt; - - MYLOG(OPENSEARCH_TRACE, "entering...hstmt=%p, fOption=%hi\n", hstmt, fOption); - - if (!stmt) { - SC_log_error(func, "", NULL); - return SQL_INVALID_HANDLE; - } - SC_clear_error(stmt); - - if (fOption == SQL_DROP) { - ConnectionClass *conn = stmt->hdbc; - - OpenSearchStopRetrieval(conn->opensearchconn); - - /* Remove the statement from the connection's statement list */ - if (conn) { - QResultClass *res; - - if (STMT_EXECUTING == stmt->status) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Statement is currently executing a transaction.", - func); - return SQL_ERROR; /* stmt may be executing a transaction */ - } - if (conn->unnamed_prepared_stmt == stmt) - conn->unnamed_prepared_stmt = NULL; - - res = SC_get_Result(stmt); - QR_Destructor(res); - SC_init_Result(stmt); - if (!CC_remove_statement(conn, stmt)) { - SC_set_error(stmt, STMT_SEQUENCE_ERROR, - "Statement is currently executing a transaction.", - func); - return SQL_ERROR; /* stmt may be executing a - * transaction */ - } - } - - if (stmt->execute_delegate) { - OPENSEARCHAPI_FreeStmt(stmt->execute_delegate, SQL_DROP); - stmt->execute_delegate 
= NULL; - } - if (stmt->execute_parent) - stmt->execute_parent->execute_delegate = NULL; - /* Destroy the statement and free any results, cursors, etc. */ - SC_Destructor(stmt); - } else if (fOption == SQL_UNBIND) - SC_unbind_cols(stmt); - else if (fOption == SQL_CLOSE) { - OpenSearchStopRetrieval(stmt->hdbc->opensearchconn); - - /* - * this should discard all the results, but leave the statement - * itself in place (it can be executed again) - */ - stmt->transition_status = STMT_TRANSITION_ALLOCATED; - if (stmt->execute_delegate) { - OPENSEARCHAPI_FreeStmt(stmt->execute_delegate, SQL_DROP); - stmt->execute_delegate = NULL; - } - if (!SC_recycle_statement(stmt)) { - return SQL_ERROR; - } - SC_set_Curres(stmt, NULL); - } else if (fOption == SQL_RESET_PARAMS) - ; - else { - SC_set_error(stmt, STMT_OPTION_OUT_OF_RANGE_ERROR, - "Invalid option passed to OPENSEARCHAPI_FreeStmt.", func); - return SQL_ERROR; - } - - return SQL_SUCCESS; -} - -/* - * StatementClass implementation - */ -void InitializeStatementOptions(StatementOptions *opt) { - memset(opt, 0, sizeof(StatementOptions)); - opt->scroll_concurrency = SQL_CONCUR_READ_ONLY; - opt->cursor_type = SQL_CURSOR_FORWARD_ONLY; - opt->retrieve_data = SQL_RD_ON; - opt->use_bookmarks = SQL_UB_OFF; - opt->metadata_id = SQL_FALSE; -} - -static void SC_clear_parse_status(StatementClass *self, ConnectionClass *conn) { - UNUSED(self, conn); - self->parse_status = STMT_PARSE_NONE; -} - -static void SC_init_discard_output_params(StatementClass *self) { - ConnectionClass *conn = SC_get_conn(self); - - if (!conn) - return; - self->discard_output_params = 0; -} - -static void SC_init_parse_method(StatementClass *self) { - ConnectionClass *conn = SC_get_conn(self); - - self->parse_method = 0; - if (!conn) - return; - if (0 == (PODBC_EXTERNAL_STATEMENT & self->iflag)) - return; - if (self->catalog_result) - return; -} - -StatementClass *SC_Constructor(ConnectionClass *conn) { - StatementClass *rv; - - rv = (StatementClass *)malloc(sizeof(StatementClass)); - if (rv) { - rv->hdbc = conn; - rv->phstmt = NULL; - rv->result = NULL; - rv->curres = NULL; - rv->catalog_result = FALSE; - rv->prepare = NON_PREPARE_STATEMENT; - rv->prepared = NOT_PREPARED; - rv->status = STMT_ALLOCATED; - rv->external = FALSE; - rv->iflag = 0; - rv->plan_name = NULL; - rv->transition_status = STMT_TRANSITION_UNALLOCATED; - rv->multi_statement = -1; /* unknown */ - rv->num_params = -1; /* unknown */ - rv->processed_statements = NULL; - - rv->__error_message = NULL; - rv->__error_number = 0; - rv->opensearch_error = NULL; - - rv->statement = NULL; - rv->load_statement = NULL; - rv->statement_type = STMT_TYPE_UNKNOWN; - - rv->currTuple = -1; - rv->rowset_start = 0; - SC_set_rowset_start(rv, -1, FALSE); - rv->current_col = -1; - rv->bind_row = 0; - rv->from_pos = rv->load_from_pos = rv->where_pos = -1; - rv->last_fetch_count = rv->last_fetch_count_include_ommitted = 0; - rv->save_rowset_size = -1; - - rv->data_at_exec = -1; - rv->put_data = FALSE; - rv->ref_CC_error = FALSE; - rv->join_info = 0; - SC_init_parse_method(rv); - - rv->lobj_fd = -1; - INIT_NAME(rv->cursor_name); - - /* Parse Stuff */ - rv->ti = NULL; - rv->ntab = 0; - rv->num_key_fields = -1; /* unknown */ - SC_clear_parse_status(rv, conn); - rv->proc_return = -1; - SC_init_discard_output_params(rv); - rv->cancel_info = 0; - - /* Clear Statement Options -- defaults will be set in AllocStmt */ - memset(&rv->options, 0, sizeof(StatementOptions)); - InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->ardi), rv, - 
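OPENSEARCHAPI_FreeStmt above dispatches the four SQLFreeStmt modes; note that SQL_RESET_PARAMS is accepted but is a no-op in this driver. From the application's side, with a hypothetical handle:

#include <sql.h>
#include <sqlext.h>

static void recycle_statement(SQLHSTMT hstmt) {
    SQLFreeStmt(hstmt, SQL_CLOSE);        /* discard results, keep the handle */
    SQLFreeStmt(hstmt, SQL_UNBIND);       /* drop SQLBindCol bindings         */
    SQLFreeStmt(hstmt, SQL_RESET_PARAMS); /* accepted, but a no-op here       */
    SQLFreeStmt(hstmt, SQL_DROP);         /* destroy the statement itself     */
}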
SQL_ATTR_APP_ROW_DESC); - InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->apdi), rv, - SQL_ATTR_APP_PARAM_DESC); - InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->irdi), rv, - SQL_ATTR_IMP_ROW_DESC); - InitializeEmbeddedDescriptor((DescriptorClass *)&(rv->ipdi), rv, - SQL_ATTR_IMP_PARAM_DESC); - - rv->miscinfo = 0; - rv->execinfo = 0; - rv->rb_or_tc = 0; - SC_reset_updatable(rv); - rv->diag_row_count = 0; - rv->stmt_time = 0; - rv->execute_delegate = NULL; - rv->execute_parent = NULL; - rv->allocated_callbacks = 0; - rv->num_callbacks = 0; - rv->callbacks = NULL; - GetDataInfoInitialize(SC_get_GDTI(rv)); - PutDataInfoInitialize(SC_get_PDTI(rv)); - rv->lock_CC_for_rb = FALSE; - INIT_STMT_CS(rv); - } - return rv; -} - -char SC_Destructor(StatementClass *self) { - CSTR func = "SC_Destructor"; - QResultClass *res = SC_get_Result(self); - - MYLOG(OPENSEARCH_TRACE, "entering self=%p, self->result=%p, self->hdbc=%p\n", self, - res, self->hdbc); - SC_clear_error(self); - if (STMT_EXECUTING == self->status) { - SC_set_error(self, STMT_SEQUENCE_ERROR, - "Statement is currently executing a transaction.", func); - return FALSE; - } - - if (res) { - if (!self->hdbc) - res->conn = NULL; /* prevent any dbase activity */ - - QR_Destructor(res); - } - - SC_initialize_stmts(self, TRUE); - - /* Free the parsed table information */ - SC_initialize_cols_info(self, FALSE, TRUE); - - NULL_THE_NAME(self->cursor_name); - /* Free the parsed field information */ - DC_Destructor((DescriptorClass *)SC_get_ARDi(self)); - DC_Destructor((DescriptorClass *)SC_get_APDi(self)); - DC_Destructor((DescriptorClass *)SC_get_IRDi(self)); - DC_Destructor((DescriptorClass *)SC_get_IPDi(self)); - GDATA_unbind_cols(SC_get_GDTI(self), TRUE); - PDATA_free_params(SC_get_PDTI(self), STMT_FREE_PARAMS_ALL); - - if (self->__error_message) - free(self->__error_message); - if (self->opensearch_error) - ER_Destructor(self->opensearch_error); - cancelNeedDataState(self); - if (self->callbacks) - free(self->callbacks); - - DELETE_STMT_CS(self); - free(self); - - MYLOG(OPENSEARCH_TRACE, "leaving\n"); - - return TRUE; -} - -void SC_init_Result(StatementClass *self) { - self->result = self->curres = NULL; - MYLOG(OPENSEARCH_TRACE, "leaving(%p)\n", self); -} - -void SC_set_Result(StatementClass *self, QResultClass *res) { - if (res != self->result) { - MYLOG(OPENSEARCH_DEBUG, "(%p, %p)\n", self, res); - QR_Destructor(self->result); - self->result = self->curres = res; - } -} - -int statement_type(const char *statement) { - int i; - - /* ignore leading whitespace in query string */ - while (*statement && (isspace((UCHAR)*statement) || *statement == '(')) - statement++; - - for (i = 0; Statement_Type[i].s; i++) - if (!strnicmp(statement, Statement_Type[i].s, - strlen(Statement_Type[i].s))) - return Statement_Type[i].type; - - return STMT_TYPE_OTHER; -} - -void SC_set_planname(StatementClass *stmt, const char *plan_name) { - if (stmt->plan_name) - free(stmt->plan_name); - if (plan_name && plan_name[0]) - stmt->plan_name = strdup(plan_name); - else - stmt->plan_name = NULL; -} - -void SC_set_rowset_start(StatementClass *stmt, SQLLEN start, BOOL valid_base) { - QResultClass *res = SC_get_Curres(stmt); - SQLLEN incr = start - stmt->rowset_start; - - MYLOG(OPENSEARCH_DEBUG, "%p->SC_set_rowstart " FORMAT_LEN "->" FORMAT_LEN "(%s) ", - stmt, stmt->rowset_start, start, valid_base ? "valid" : "unknown"); - if (res != NULL) { - BOOL valid = QR_has_valid_base(res); - MYPRINTF(OPENSEARCH_DEBUG, ":(%p)QR is %s", res, - QR_has_valid_base(res) ? 
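statement_type above skips leading whitespace and '(' and then does a case-insensitive prefix match against the keyword table. Illustrative expectations, assuming the driver's internal statement.h is on the include path:

#include <assert.h>
#include "statement.h"   /* driver-internal header; provides both names */

static void statement_type_examples(void) {
    assert(statement_type("SELECT * FROM t") == STMT_TYPE_SELECT);
    assert(statement_type("  (select 1)") == STMT_TYPE_SELECT);   /* '(' skipped */
    assert(statement_type("{call f()}") == STMT_TYPE_PROCCALL);   /* "{" entry  */
    assert(statement_type("VACUUM") == STMT_TYPE_SPECIAL);
    assert(statement_type("whatever") == STMT_TYPE_OTHER);
}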
"valid" : "unknown"); - - if (valid) { - if (valid_base) - QR_inc_rowstart_in_cache(res, incr); - else - QR_set_no_valid_base(res); - } else if (valid_base) { - QR_set_has_valid_base(res); - if (start < 0) - QR_set_rowstart_in_cache(res, -1); - else - QR_set_rowstart_in_cache(res, start); - } - if (!QR_get_cursor(res)) - res->key_base = start; - MYPRINTF(OPENSEARCH_DEBUG, ":(%p)QR result=" FORMAT_LEN "(%s)", res, - QR_get_rowstart_in_cache(res), - QR_has_valid_base(res) ? "valid" : "unknown"); - } - stmt->rowset_start = start; - MYPRINTF(OPENSEARCH_DEBUG, ":stmt result=" FORMAT_LEN "\n", stmt->rowset_start); -} -void SC_inc_rowset_start(StatementClass *stmt, SQLLEN inc) { - SQLLEN start = stmt->rowset_start + inc; - - SC_set_rowset_start(stmt, start, TRUE); -} -int SC_set_current_col(StatementClass *stmt, int col) { - if (col == stmt->current_col) - return col; - if (col >= 0) - reset_a_getdata_info(SC_get_GDTI(stmt), col + 1); - stmt->current_col = (short)col; - - return stmt->current_col; -} - -void SC_set_prepared(StatementClass *stmt, int prepared) { - if (NOT_PREPARED == prepared) - SC_set_planname(stmt, NULL); - - // po_ind_t -> char - stmt->prepared = (po_ind_t)prepared; -} - -/* - * Initialize stmt_with_params and load_statement member pointer - * deallocating corresponding prepared plan. Also initialize - * statement member pointer if specified. - */ -RETCODE -SC_initialize_stmts(StatementClass *self, BOOL initializeOriginal) { - ProcessedStmt *pstmt; - ProcessedStmt *next_pstmt; - - if (self->lock_CC_for_rb) { - LEAVE_CONN_CS(SC_get_conn(self)); - self->lock_CC_for_rb = FALSE; - } - if (initializeOriginal) { - if (self->statement) { - free(self->statement); - self->statement = NULL; - } - - pstmt = self->processed_statements; - while (pstmt) { - if (pstmt->query) - free(pstmt->query); - next_pstmt = pstmt->next; - free(pstmt); - pstmt = next_pstmt; - } - self->processed_statements = NULL; - - self->prepare = NON_PREPARE_STATEMENT; - SC_set_prepared(self, NOT_PREPARED); - self->statement_type = STMT_TYPE_UNKNOWN; /* unknown */ - self->multi_statement = -1; /* unknown */ - self->num_params = -1; /* unknown */ - self->proc_return = -1; /* unknown */ - self->join_info = 0; - SC_init_parse_method(self); - SC_init_discard_output_params(self); - } - if (self->load_statement) { - free(self->load_statement); - self->load_statement = NULL; - } - - return 0; -} - -BOOL SC_opencheck(StatementClass *self, const char *func) { - QResultClass *res; - - if (!self) - return FALSE; - if (self->status == STMT_EXECUTING) { - SC_set_error(self, STMT_SEQUENCE_ERROR, - "Statement is currently executing a transaction.", func); - return TRUE; - } - /* - * We can dispose the result of Describe-only any time. 
- */ - if (self->prepare && self->status == STMT_DESCRIBED) { - MYLOG(OPENSEARCH_DEBUG, "self->prepare && self->status == STMT_DESCRIBED\n"); - return FALSE; - } - if (res = SC_get_Curres(self), NULL != res) { - if (QR_command_maybe_successful(res) && res->backend_tuples) { - SC_set_error(self, STMT_SEQUENCE_ERROR, "The cursor is open.", - func); - return TRUE; - } - } - - return FALSE; -} - -RETCODE -SC_initialize_and_recycle(StatementClass *self) { - SC_initialize_stmts(self, TRUE); - if (!SC_recycle_statement(self)) - return SQL_ERROR; - - return SQL_SUCCESS; -} - -void SC_reset_result_for_rerun(StatementClass *self) { - QResultClass *res; - ColumnInfoClass *flds; - - if (!self) - return; - if (res = SC_get_Result(self), NULL == res) - return; - flds = QR_get_fields(res); - if (NULL == flds || 0 == CI_get_num_fields(flds)) - SC_set_Result(self, NULL); - else { - QR_reset_for_re_execute(res); - SC_set_Curres(self, NULL); - } -} - -/* - * Called from SQLPrepare if STMT_PREMATURE, or - * from SQLExecute if STMT_FINISHED, or - * from SQLFreeStmt(SQL_CLOSE) - */ -char SC_recycle_statement(StatementClass *self) { - CSTR func = "SC_recycle_statement"; - ConnectionClass *conn; - - MYLOG(OPENSEARCH_TRACE, "entering self=%p\n", self); - - SC_clear_error(self); - /* This would not happen */ - if (self->status == STMT_EXECUTING) { - SC_set_error(self, STMT_SEQUENCE_ERROR, - "Statement is currently executing a transaction.", func); - return FALSE; - } - - if (SC_get_conn(self)->unnamed_prepared_stmt == self) - SC_get_conn(self)->unnamed_prepared_stmt = NULL; - - conn = SC_get_conn(self); - switch (self->status) { - case STMT_ALLOCATED: - /* this statement does not need to be recycled */ - return TRUE; - - case STMT_READY: - break; - - case STMT_DESCRIBED: - break; - - case STMT_FINISHED: - break; - - default: - SC_set_error(self, STMT_INTERNAL_ERROR, - "An internal error occurred while recycling statements", - func); - return FALSE; - } - - switch (self->prepared) { - case NOT_PREPARED: - /* Free the parsed table/field information */ - SC_initialize_cols_info(self, TRUE, TRUE); - - MYLOG(OPENSEARCH_DEBUG, "SC_clear_parse_status\n"); - SC_clear_parse_status(self, conn); - break; - } - - /* Free any cursors */ - if (SC_get_Result(self)) - SC_set_Result(self, NULL); - self->miscinfo = 0; - self->execinfo = 0; - /* self->rbonerr = 0; Never clear the bits here */ - - /* - * Reset only parameters that have anything to do with results - */ - self->status = STMT_READY; - self->catalog_result = FALSE; /* not very important */ - - self->currTuple = -1; - SC_set_rowset_start(self, -1, FALSE); - SC_set_current_col(self, -1); - self->bind_row = 0; - MYLOG(OPENSEARCH_DEBUG, "statement=%p ommitted=0\n", self); - self->last_fetch_count = self->last_fetch_count_include_ommitted = 0; - - self->__error_message = NULL; - self->__error_number = 0; - - self->lobj_fd = -1; - - SC_initialize_stmts(self, FALSE); - cancelNeedDataState(self); - self->cancel_info = 0; - /* - * reset the current attr setting to the original one. 
- */ - self->options.scroll_concurrency = self->options_orig.scroll_concurrency; - self->options.cursor_type = self->options_orig.cursor_type; - self->options.keyset_size = self->options_orig.keyset_size; - self->options.maxLength = self->options_orig.maxLength; - self->options.maxRows = self->options_orig.maxRows; - - return TRUE; -} - -/* This is only called from SQLFreeStmt(SQL_UNBIND) */ -char SC_unbind_cols(StatementClass *self) { - ARDFields *opts = SC_get_ARDF(self); - GetDataInfo *gdata = SC_get_GDTI(self); - BindInfoClass *bookmark; - - ARD_unbind_cols(opts, FALSE); - GDATA_unbind_cols(gdata, FALSE); - if (bookmark = opts->bookmark, bookmark != NULL) { - bookmark->buffer = NULL; - bookmark->used = NULL; - } - - return 1; -} - -void SC_clear_error(StatementClass *self) { - QResultClass *res; - - self->__error_number = 0; - if (self->__error_message) { - free(self->__error_message); - self->__error_message = NULL; - } - if (self->opensearch_error) { - ER_Destructor(self->opensearch_error); - self->opensearch_error = NULL; - } - self->diag_row_count = 0; - if (res = SC_get_Curres(self), res) { - QR_set_message(res, NULL); - QR_set_notice(res, NULL); - res->sqlstate[0] = '\0'; - } - self->stmt_time = 0; - memset(&self->localtime, 0, sizeof(self->localtime)); - self->localtime.tm_sec = -1; - SC_unref_CC_error(self); -} - -/* - * This function creates an error info which is the concatenation - * of the result, statement, connection, and socket messages. - */ - -/* Map statement error numbers to ODBC 3.x / ODBC 2.x SQLSTATE strings */ -static const struct { - int number; - const char ver3str[6]; - const char ver2str[6]; -} Statement_sqlstate[] = - - {{STMT_ERROR_IN_ROW, "01S01", "01S01"}, - {STMT_OPTION_VALUE_CHANGED, "01S02", "01S02"}, - {STMT_ROW_VERSION_CHANGED, "01001", "01001"}, /* data changed */ - {STMT_POS_BEFORE_RECORDSET, "01S06", "01S06"}, - {STMT_TRUNCATED, "01004", "01004"}, /* data truncated */ - {STMT_INFO_ONLY, "00000", - "00000"}, /* just an information that is returned, no error */ - - {STMT_OK, "00000", "00000"}, /* OK */ - {STMT_EXEC_ERROR, "HY000", "S1000"}, /* also a general error */ - {STMT_STATUS_ERROR, "HY010", "S1010"}, - {STMT_SEQUENCE_ERROR, "HY010", "S1010"}, /* Function sequence error */ - {STMT_NO_MEMORY_ERROR, "HY001", "S1001"}, /* memory allocation failure */ - {STMT_COLNUM_ERROR, "07009", "S1002"}, /* invalid column number */ - {STMT_NO_STMTSTRING, "HY001", - "S1001"}, /* having no stmtstring is also a malloc problem */ - {STMT_ERROR_TAKEN_FROM_BACKEND, "HY000", "S1000"}, /* general error */ - {STMT_INTERNAL_ERROR, "HY000", "S1000"}, /* general error */ - {STMT_STILL_EXECUTING, "HY010", "S1010"}, - {STMT_NOT_IMPLEMENTED_ERROR, "HYC00", "S1C00"}, /* == 'driver not - * capable' */ - {STMT_BAD_PARAMETER_NUMBER_ERROR, "07009", "S1093"}, - {STMT_OPTION_OUT_OF_RANGE_ERROR, "HY092", "S1092"}, - {STMT_INVALID_COLUMN_NUMBER_ERROR, "07009", "S1002"}, - {STMT_RESTRICTED_DATA_TYPE_ERROR, "07006", "07006"}, - {STMT_INVALID_CURSOR_STATE_ERROR, "07005", "24000"}, - {STMT_CREATE_TABLE_ERROR, "42S01", "S0001"}, /* table already exists */ - {STMT_QUERY_SYNTAX_ERROR, "42000", "42000"}, /* query syntax error */ - {STMT_NO_CURSOR_NAME, "S1015", "S1015"}, - {STMT_INVALID_CURSOR_NAME, "34000", "34000"}, - {STMT_INVALID_ARGUMENT_NO, "HY024", "S1009"}, /* invalid argument value */ - {STMT_ROW_OUT_OF_RANGE, "HY107", "S1107"}, - {STMT_OPERATION_CANCELLED, "HY008", "S1008"}, - {STMT_INVALID_CURSOR_POSITION, "HY109", "S1109"}, - {STMT_VALUE_OUT_OF_RANGE, "HY019", "22003"}, - {STMT_OPERATION_INVALID, "HY011", 
"S1011"}, - {STMT_PROGRAM_TYPE_OUT_OF_RANGE, "?????", "?????"}, - {STMT_BAD_ERROR, "08S01", "08S01"}, /* communication link failure */ - {STMT_INVALID_OPTION_IDENTIFIER, "HY092", "HY092"}, - {STMT_RETURN_NULL_WITHOUT_INDICATOR, "22002", "22002"}, - {STMT_INVALID_DESCRIPTOR_IDENTIFIER, "HY091", "HY091"}, - {STMT_OPTION_NOT_FOR_THE_DRIVER, "HYC00", "HYC00"}, - {STMT_FETCH_OUT_OF_RANGE, "HY106", "S1106"}, - {STMT_COUNT_FIELD_INCORRECT, "07002", "07002"}, - {STMT_INVALID_NULL_ARG, "HY009", "S1009"}, - {STMT_NO_RESPONSE, "08S01", "08S01"}, - {STMT_COMMUNICATION_ERROR, "08S01", "08S01"}}; - -static OpenSearch_ErrorInfo *SC_create_errorinfo(const StatementClass *self, OpenSearch_ErrorInfo *opensearch_error_fail_safe) { - QResultClass *res = SC_get_Curres(self); - ConnectionClass *conn = SC_get_conn(self); - Int4 errornum; - size_t pos; - BOOL resmsg = FALSE, detailmsg = FALSE, msgend = FALSE; - BOOL looponce, loopend; - char msg[4096], *wmsg; - char *ermsg = NULL, *sqlstate = NULL; - OpenSearch_ErrorInfo *opensearch_error; - - if (self->opensearch_error) - return self->opensearch_error; - errornum = self->__error_number; - if (errornum == 0) - return NULL; - - looponce = (SC_get_Result(self) != res); - msg[0] = '\0'; - for (loopend = FALSE; (NULL != res) && !loopend; res = res->next) { - if (looponce) - loopend = TRUE; - if ('\0' != res->sqlstate[0]) { - if (NULL != sqlstate && strnicmp(res->sqlstate, "00", 2) == 0) - continue; - sqlstate = res->sqlstate; - if ('0' != sqlstate[0] || '1' < sqlstate[1]) - loopend = TRUE; - } - if (NULL != res->message) { - STRCPY_FIXED(msg, res->message); - detailmsg = resmsg = TRUE; - } else if (NULL != res->messageref) { - STRCPY_FIXED(msg, res->messageref); - detailmsg = resmsg = TRUE; - } - if (msg[0]) - ermsg = msg; - else if (QR_get_notice(res)) { - char *notice = QR_get_notice(res); - size_t len = strlen(notice); - if (len < sizeof(msg)) { - memcpy(msg, notice, len); - msg[len] = '\0'; - ermsg = msg; - } else { - ermsg = notice; - msgend = TRUE; - } - } - } - if (!msgend && (wmsg = SC_get_errormsg(self), wmsg) && wmsg[0]) { - pos = strlen(msg); - - snprintf(&msg[pos], sizeof(msg) - pos, "%s%s", detailmsg ? ";\n" : "", - wmsg); - ermsg = msg; - detailmsg = TRUE; - } - if (!self->ref_CC_error) - msgend = TRUE; - - if (conn && !msgend) { - if (!resmsg && (wmsg = CC_get_errormsg(conn), wmsg) - && wmsg[0] != '\0') { - pos = strlen(msg); - snprintf(&msg[pos], sizeof(msg) - pos, ";\n%s", - CC_get_errormsg(conn)); - } - - ermsg = msg; - } - opensearch_error = ER_Constructor(self->__error_number, ermsg); - if (!opensearch_error) { - if (opensearch_error_fail_safe) { - memset(opensearch_error_fail_safe, 0, sizeof(*opensearch_error_fail_safe)); - opensearch_error = opensearch_error_fail_safe; - opensearch_error->status = self->__error_number; - opensearch_error->errorsize = sizeof(opensearch_error->__error_message); - STRCPY_FIXED(opensearch_error->__error_message, ermsg); - opensearch_error->recsize = -1; - } else - return NULL; - } - if (sqlstate) - STRCPY_FIXED(opensearch_error->sqlstate, sqlstate); - else if (conn) { - if (!msgend && conn->sqlstate[0]) - STRCPY_FIXED(opensearch_error->sqlstate, conn->sqlstate); - else { - EnvironmentClass *env = (EnvironmentClass *)CC_get_env(conn); - - errornum -= LOWEST_STMT_ERROR; - if (errornum < 0 - || (unsigned long long)errornum - >= sizeof(Statement_sqlstate) - / sizeof(Statement_sqlstate[0])) { - errornum = 1 - LOWEST_STMT_ERROR; - } - STRCPY_FIXED(opensearch_error->sqlstate, - EN_is_odbc3(env) - ? 
Statement_sqlstate[errornum].ver3str - : Statement_sqlstate[errornum].ver2str); - } - } - - return opensearch_error; -} - -void SC_reset_delegate(RETCODE retcode, StatementClass *stmt) { - UNUSED(retcode); - StatementClass *delegate = stmt->execute_delegate; - - if (!delegate) - return; - OPENSEARCHAPI_FreeStmt(delegate, SQL_DROP); -} - -void SC_set_error(StatementClass *self, int number, const char *message, - const char *func) { - if (self->__error_message) - free(self->__error_message); - self->__error_number = number; - self->__error_message = message ? strdup(message) : NULL; - if (func && number != STMT_OK && number != STMT_INFO_ONLY) - SC_log_error(func, "", self); -} - -void SC_set_errormsg(StatementClass *self, const char *message) { - if (self->__error_message) - free(self->__error_message); - self->__error_message = message ? strdup(message) : NULL; -} - -void SC_error_copy(StatementClass *self, const StatementClass *from, - BOOL check) { - QResultClass *self_res, *from_res; - BOOL repstate; - - MYLOG(OPENSEARCH_TRACE, "entering %p->%p check=%i\n", from, self, check); - if (!from) - return; /* for safety */ - if (self == from) - return; /* for safety */ - if (check) { - if (0 == from->__error_number) /* SQL_SUCCESS */ - return; - if (0 > from->__error_number && /* SQL_SUCCESS_WITH_INFO */ - 0 < self->__error_number) - return; - } - self->__error_number = from->__error_number; - if (!check || from->__error_message) { - if (self->__error_message) - free(self->__error_message); - self->__error_message = - from->__error_message ? strdup(from->__error_message) : NULL; - } - if (self->opensearch_error) { - ER_Destructor(self->opensearch_error); - self->opensearch_error = NULL; - } - self_res = SC_get_Curres(self); - from_res = SC_get_Curres(from); - if (!self_res || !from_res) - return; - QR_add_message(self_res, QR_get_message(from_res)); - QR_add_notice(self_res, QR_get_notice(from_res)); - repstate = FALSE; - if (!check) - repstate = TRUE; - else if (from_res->sqlstate[0]) { - if (!self_res->sqlstate[0] || strncmp(self_res->sqlstate, "00", 2) == 0) - repstate = TRUE; - else if (strncmp(from_res->sqlstate, "01", 2) >= 0) - repstate = TRUE; - } - if (repstate) - STRCPY_FIXED(self_res->sqlstate, from_res->sqlstate); -} - -void SC_full_error_copy(StatementClass *self, const StatementClass *from, - BOOL allres) { - OpenSearch_ErrorInfo *opensearch_error; - - MYLOG(OPENSEARCH_TRACE, "entering %p->%p\n", from, self); - if (!from) - return; /* for safety */ - if (self == from) - return; /* for safety */ - if (self->__error_message) { - free(self->__error_message); - self->__error_message = NULL; - } - if (from->__error_message) - self->__error_message = strdup(from->__error_message); - self->__error_number = from->__error_number; - if (from->opensearch_error) { - if (self->opensearch_error) - ER_Destructor(self->opensearch_error); - self->opensearch_error = ER_Dup(from->opensearch_error); - return; - } else if (!allres) - return; - opensearch_error = SC_create_errorinfo(from, NULL); - if (!opensearch_error || !opensearch_error->__error_message[0]) { - ER_Destructor(opensearch_error); - return; - } - if (self->opensearch_error) - ER_Destructor(self->opensearch_error); - self->opensearch_error = opensearch_error; -} - -/* Returns the next SQL error information. 
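- *
- * A hedged usage sketch (hypothetical ODBC 2.x caller): applications
- * drain the diagnostic records by calling SQLError until it reports
- * SQL_NO_DATA_FOUND:
- *
- *     SQLCHAR state[6], msg[256];
- *     SQLINTEGER native;
- *     SQLSMALLINT len;
- *     while (SQLError(henv, hdbc, hstmt, state, &native,
- *                     msg, sizeof(msg), &len) != SQL_NO_DATA_FOUND)
- *         ;   // each call yields the next record via this function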
*/ -RETCODE SQL_API OPENSEARCHAPI_StmtError(SQLHSTMT hstmt, SQLSMALLINT RecNumber, - SQLCHAR *szSqlState, SQLINTEGER *pfNativeError, - SQLCHAR *szErrorMsg, SQLSMALLINT cbErrorMsgMax, - SQLSMALLINT *pcbErrorMsg, UWORD flag) { - /* CC: return an error of a hdesc */ - OpenSearch_ErrorInfo *opensearch_error, error; - StatementClass *stmt = (StatementClass *)hstmt; - int errnum = SC_get_errornumber(stmt); - - if (opensearch_error = SC_create_errorinfo(stmt, &error), NULL == opensearch_error) - return SQL_NO_DATA_FOUND; - if (opensearch_error != &error) - stmt->opensearch_error = opensearch_error; - if (STMT_NO_MEMORY_ERROR == errnum && !opensearch_error->__error_message[0]) - STRCPY_FIXED(opensearch_error->__error_message, "Memory Allocation Error??"); - return ER_ReturnError(opensearch_error, RecNumber, szSqlState, pfNativeError, - szErrorMsg, cbErrorMsgMax, pcbErrorMsg, flag); -} - -time_t SC_get_time(StatementClass *stmt) { - if (!stmt) - return time(NULL); - if (0 == stmt->stmt_time) - stmt->stmt_time = time(NULL); - return stmt->stmt_time; -} - -struct tm *SC_get_localtime(StatementClass *stmt) { -#ifndef HAVE_LOCALTIME_R - struct tm *tim; -#endif /* HAVE_LOCALTIME_R */ - - if (stmt->localtime.tm_sec < 0) { - SC_get_time(stmt); -#ifdef HAVE_LOCALTIME_R - localtime_r(&stmt->stmt_time, &(stmt->localtime)); -#else - tim = localtime(&stmt->stmt_time); - stmt->localtime = *tim; -#endif /* HAVE_LOCALTIME_R */ - } - - return &(stmt->localtime); -} - -RETCODE -SC_fetch(StatementClass *self) { - CSTR func = "SC_fetch"; - QResultClass *res = SC_get_Curres(self); - ARDFields *opts; - GetDataInfo *gdata; - int retval; - RETCODE result; - - Int2 num_cols, lf; - OID type; - int atttypmod; - char *value; - ColumnInfoClass *coli; - BindInfoClass *bookmark; - BOOL useCursor = FALSE; - KeySet *keyset = NULL; - - /* TupleField *tupleField; */ - - MYLOG(OPENSEARCH_TRACE, "entering statement=%p res=%p ommitted=0\n", self, res); - self->last_fetch_count = self->last_fetch_count_include_ommitted = 0; - if (!res) - return SQL_ERROR; - coli = QR_get_fields(res); /* the column info */ - - MYLOG(OPENSEARCH_DEBUG, "fetch_cursor=%d, %p->total_read=" FORMAT_LEN "\n", - SC_is_fetchcursor(self), res, res->num_total_read); - - if (self->currTuple >= (Int4)QR_get_num_total_tuples(res) - 1 - || (self->options.maxRows > 0 - && self->currTuple == self->options.maxRows - 1)) { - /* - * if at the end of the tuples, return "no data found" and set - * the cursor past the end of the result set - */ - self->currTuple = QR_get_num_total_tuples(res); - return SQL_NO_DATA_FOUND; - } - - MYLOG(OPENSEARCH_DEBUG, "**** : non-cursor_result\n"); - (self->currTuple)++; - - num_cols = QR_NumPublicResultCols(res); - - result = SQL_SUCCESS; - self->last_fetch_count++; - MYLOG(OPENSEARCH_DEBUG, "stmt=%p ommitted++\n", self); - self->last_fetch_count_include_ommitted++; - - opts = SC_get_ARDF(self); - /* - * If the bookmark column was bound then return a bookmark. Since this - * is used with SQLExtendedFetch, and the rowset size may be greater - * than 1, and an application can use row or column wise binding, use - * the code in copy_and_convert_field() to handle that. 
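- * A hedged sketch (hypothetical application code) of the case handled
- * here, with column 0 bound as the bookmark column before fetching:
- *
- *     SQLULEN bmk[10];                  // rowset size 10, illustrative
- *     SQLLEN  bmk_ind[10];
- *     SQLBindCol(hstmt, 0, SQL_C_BOOKMARK, bmk, sizeof(bmk[0]), bmk_ind);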
- */ - if ((bookmark = opts->bookmark, bookmark) && bookmark->buffer) { - SC_set_current_col(self, -1); - SC_Create_bookmark(self, bookmark, (int)self->bind_row, - (int)self->currTuple, keyset); - } - - if (self->options.retrieve_data == SQL_RD_OFF) /* data isn't required */ - return SQL_SUCCESS; - /* The following adjustment would be needed after SQLMoreResults() */ - if (opts->allocated < num_cols) - extend_column_bindings(opts, num_cols); - gdata = SC_get_GDTI(self); - if (gdata->allocated != opts->allocated) - extend_getdata_info(gdata, opts->allocated, TRUE); - for (lf = 0; lf < num_cols; lf++) { - MYLOG(OPENSEARCH_DEBUG, - "fetch: cols=%d, lf=%d, opts = %p, opts->bindings = %p, buffer[] " - "= %p\n", - num_cols, lf, opts, opts->bindings, opts->bindings[lf].buffer); - - /* reset for SQLGetData */ - GETDATA_RESET(gdata->gdata[lf]); - - if (NULL == opts->bindings) - continue; - if (opts->bindings[lf].buffer != NULL) { - /* this column has a binding */ - - /* type = QR_get_field_type(res, lf); */ - type = CI_get_oid(coli, lf); /* speed things up */ - atttypmod = CI_get_atttypmod(coli, lf); /* speed things up */ - - MYLOG(OPENSEARCH_DEBUG, "type = %d, atttypmod = %d\n", type, atttypmod); - - if (useCursor) - value = QR_get_value_backend(res, lf); - else { - SQLLEN curt = GIdx2CacheIdx(self->currTuple, self, res); - MYLOG(OPENSEARCH_DEBUG, - "%p->base=" FORMAT_LEN " curr=" FORMAT_LEN - " st=" FORMAT_LEN " valid=%d\n", - res, QR_get_rowstart_in_cache(res), self->currTuple, - SC_get_rowset_start(self), QR_has_valid_base(res)); - MYLOG(OPENSEARCH_DEBUG, "curt=" FORMAT_LEN "\n", curt); - value = QR_get_value_backend_row(res, curt, lf); - } - - MYLOG(OPENSEARCH_DEBUG, "value = '%s'\n", - (value == NULL) ? "" : value); - - retval = copy_and_convert_field_bindinfo(self, type, atttypmod, - value, lf); - - MYLOG(OPENSEARCH_DEBUG, "copy_and_convert: retval = %d\n", retval); - - switch (retval) { - case COPY_OK: - break; /* OK, do next bound column */ - - case COPY_UNSUPPORTED_TYPE: - SC_set_error( - self, STMT_RESTRICTED_DATA_TYPE_ERROR, - "Received an unsupported type from OpenSearch.", - func); - result = SQL_ERROR; - break; - - case COPY_UNSUPPORTED_CONVERSION: - SC_set_error( - self, STMT_RESTRICTED_DATA_TYPE_ERROR, - "Couldn't handle the necessary data type conversion.", - func); - result = SQL_ERROR; - break; - - case COPY_RESULT_TRUNCATED: - SC_set_error(self, STMT_TRUNCATED, - "Fetched item was truncated.", func); - MYLOG(OPENSEARCH_DEBUG, "The %dth item was truncated\n", lf + 1); - MYLOG(OPENSEARCH_DEBUG, "The buffer size = " FORMAT_LEN, - opts->bindings[lf].buflen); - MYLOG(OPENSEARCH_DEBUG, " and the value is '%s'\n", value); - result = SQL_SUCCESS_WITH_INFO; - break; - - case COPY_INVALID_STRING_CONVERSION: /* invalid string */ - SC_set_error(self, STMT_STRING_CONVERSION_ERROR, - "invalid string conversion occured.", func); - result = SQL_ERROR; - break; - - /* error msg already filled in */ - case COPY_GENERAL_ERROR: - result = SQL_ERROR; - break; - - /* This would not be meaningful in SQLFetch. 
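- * (Hedged note: COPY_NO_DATA_FOUND belongs to the SQLGetData path,
- * where a repeated call on a column whose data was already consumed
- * reports exhaustion; in SC_fetch every bound column receives a fresh
- * value, so the case is deliberately ignored.)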
*/ - case COPY_NO_DATA_FOUND: - break; - - default: - SC_set_error(self, STMT_INTERNAL_ERROR, - "Unrecognized return value from " - "copy_and_convert_field.", - func); - result = SQL_ERROR; - break; - } - } - } - - return result; -} - -#include "dlg_specific.h" - -#define CALLBACK_ALLOC_ONCE 4 - -RETCODE dequeueNeedDataCallback(RETCODE retcode, StatementClass *stmt) { - RETCODE ret; - NeedDataCallfunc func; - void *data; - int i, cnt; - - MYLOG(OPENSEARCH_TRACE, "entering ret=%d count=%d\n", retcode, stmt->num_callbacks); - if (SQL_NEED_DATA == retcode) - return retcode; - if (stmt->num_callbacks <= 0) - return retcode; - func = stmt->callbacks[0].func; - data = stmt->callbacks[0].data; - for (i = 1; i < stmt->num_callbacks; i++) - stmt->callbacks[i - 1] = stmt->callbacks[i]; - cnt = --stmt->num_callbacks; - ret = (*func)(retcode, data); - free(data); - if (SQL_NEED_DATA != ret && cnt > 0) - ret = dequeueNeedDataCallback(ret, stmt); - return ret; -} - -void cancelNeedDataState(StatementClass *stmt) { - int cnt = stmt->num_callbacks, i; - - stmt->num_callbacks = 0; - for (i = 0; i < cnt; i++) { - if (stmt->callbacks[i].data) - free(stmt->callbacks[i].data); - } - SC_reset_delegate(SQL_ERROR, stmt); -} - -void SC_log_error(const char *func, const char *desc, - const StatementClass *self) { - const char *head; -#define NULLCHECK(a) (a ? a : "(NULL)") - if (self) { - QResultClass *res = SC_get_Result(self); - const ARDFields *opts = SC_get_ARDF(self); - const APDFields *apdopts = SC_get_APDF(self); - SQLLEN rowsetSize; - const int level = 9; - - rowsetSize = (STMT_TRANSITION_EXTENDED_FETCH == self->transition_status - ? opts->size_of_rowset_odbc2 - : opts->size_of_rowset); - if (SC_get_errornumber(self) <= 0) - head = "STATEMENT WARNING"; - else { - head = "STATEMENT ERROR"; - QLOG(level, "%s: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", - head, func, desc, self->__error_number, - NULLCHECK(self->__error_message)); - } - MYLOG(OPENSEARCH_DEBUG, "%s: func=%s, desc='%s', errnum=%d, errmsg='%s'\n", - head, func, desc, self->__error_number, - NULLCHECK(self->__error_message)); - if (SC_get_errornumber(self) > 0) { - QLOG(level, - " " - "------------------------------------------------------------" - "\n"); - QLOG(level, " hdbc=%p, stmt=%p, result=%p\n", - self->hdbc, self, res); - QLOG(level, " prepare=%d, external=%d\n", - self->prepare, self->external); - QLOG(level, " bindings=%p, bindings_allocated=%d\n", - opts->bindings, opts->allocated); - QLOG(level, - " parameters=%p, parameters_allocated=%d\n", - apdopts->parameters, apdopts->allocated); - QLOG(level, " statement_type=%d, statement='%s'\n", - self->statement_type, NULLCHECK(self->statement)); - QLOG(level, - " currTuple=" FORMAT_LEN - ", current_col=%d, lobj_fd=%d\n", - self->currTuple, self->current_col, self->lobj_fd); - QLOG(level, - " maxRows=" FORMAT_LEN - ", rowset_size=" FORMAT_LEN ", keyset_size=" FORMAT_LEN - ", cursor_type=" FORMAT_UINTEGER - ", scroll_concurrency=" FORMAT_UINTEGER "\n", - self->options.maxRows, rowsetSize, self->options.keyset_size, - self->options.cursor_type, self->options.scroll_concurrency); - QLOG(level, " cursor_name='%s'\n", - SC_cursor_name(self)); - - QLOG(level, - " ----------------QResult Info " - "-------------------------------\n"); - - if (res) { - QLOG(level, - " fields=%p, backend_tuples=%p, " - "tupleField=%p, conn=%p\n", - QR_get_fields(res), res->backend_tuples, res->tupleField, - res->conn); - QLOG(level, - " fetch_count=" FORMAT_LEN - ", num_total_rows=" FORMAT_ULEN - ", num_fields=%d, 
cursor='%s'\n", - res->fetch_number, QR_get_num_total_tuples(res), - res->num_fields, NULLCHECK(QR_get_cursor(res))); - QLOG(level, - " message='%s', command='%s', " - "notice='%s'\n", - NULLCHECK(QR_get_message(res)), NULLCHECK(res->command), - NULLCHECK(res->notice)); - QLOG(level, " status=%d\n", - QR_get_rstatus(res)); - } - - /* Log the connection error if there is one */ - CC_log_error(func, desc, self->hdbc); - } - } else { - MYLOG(OPENSEARCH_DEBUG, "INVALID STATEMENT HANDLE ERROR: func=%s, desc='%s'\n", - func, desc); - } -} - -extern void *common_cs; - -BOOL SC_SetExecuting(StatementClass *self, BOOL on) { - BOOL exeSet = FALSE; - ENTER_COMMON_CS; /* short time blocking */ - if (on) { - if (0 == (self->cancel_info & CancelRequestSet)) { - self->status = STMT_EXECUTING; - exeSet = TRUE; - } - } else { - self->cancel_info = 0; - self->status = STMT_FINISHED; - exeSet = TRUE; - } - LEAVE_COMMON_CS; - return exeSet; -} - -#ifdef NOT_USED -BOOL SC_SetCancelRequest(StatementClass *self) { - BOOL enteredCS = FALSE; - - ENTER_COMMON_CS; - if (0 != (self->cancel_info & CancelCompleted)) - ; - else if (STMT_EXECUTING == self->status) { - self->cancel_info |= CancelRequestSet; - } else { - /* try to acquire */ - if (TRY_ENTER_STMT_CS(self)) - enteredCS = TRUE; - else - self->cancel_info |= CancelRequestSet; - } - LEAVE_COMMON_CS; - return enteredCS; -} -#endif /* NOT_USED */ - -static void SC_set_error_if_not_set(StatementClass *self, int errornumber, - const char *errmsg, const char *func) { - int errnum = SC_get_errornumber(self); - - if (errnum <= 0) { - const char *emsg = SC_get_errormsg(self); - - if (emsg && 0 == errnum) - SC_set_errornumber(self, errornumber); - else - SC_set_error(self, errornumber, errmsg, func); - } -} - -void SC_set_errorinfo(StatementClass *self, QResultClass *res, int errkind) { - ConnectionClass *conn = SC_get_conn(self); - - if (CC_not_connected(conn)) { - SC_set_error_if_not_set(self, STMT_COMMUNICATION_ERROR, - "The connection has been lost", __FUNCTION__); - return; - } - - switch (QR_get_rstatus(res)) { - case PORES_NO_MEMORY_ERROR: - SC_set_error_if_not_set(self, STMT_NO_MEMORY_ERROR, - "memory allocation error???", __FUNCTION__); - break; - case PORES_BAD_RESPONSE: - SC_set_error_if_not_set(self, STMT_COMMUNICATION_ERROR, - "communication error occured", - __FUNCTION__); - break; - case PORES_INTERNAL_ERROR: - SC_set_error_if_not_set(self, STMT_INTERNAL_ERROR, - "Internal error fetching next row", - __FUNCTION__); - break; - default: - switch (errkind) { - case 1: - SC_set_error_if_not_set( - self, STMT_EXEC_ERROR, - "Error while fetching the next result", __FUNCTION__); - break; - default: - SC_set_error_if_not_set(self, STMT_EXEC_ERROR, - "Error while executing the query", - __FUNCTION__); - break; - } - break; - } -} - -int SC_Create_bookmark(StatementClass *self, BindInfoClass *bookmark, - Int4 bind_row, Int4 currTuple, const KeySet *keyset) { - ARDFields *opts = SC_get_ARDF(self); - SQLUINTEGER bind_size = opts->bind_size; - SQLULEN offset = opts->row_offset_ptr ? 
*opts->row_offset_ptr : 0; - size_t cvtlen = sizeof(Int4); - OPENSEARCH_BM opensearch_bm; - - MYLOG(OPENSEARCH_TRACE, "entering type=%d buflen=" FORMAT_LEN " buf=%p\n", - bookmark->returntype, bookmark->buflen, bookmark->buffer); - memset(&opensearch_bm, 0, sizeof(opensearch_bm)); - if (SQL_C_BOOKMARK == bookmark->returntype) - ; - else if (bookmark->buflen >= (SQLLEN)sizeof(opensearch_bm)) - cvtlen = sizeof(opensearch_bm); - else if (bookmark->buflen >= 12) - cvtlen = 12; - opensearch_bm.index = SC_make_int4_bookmark(currTuple); - if (keyset) - opensearch_bm.keys = *keyset; - memcpy(CALC_BOOKMARK_ADDR(bookmark, offset, bind_size, bind_row), &opensearch_bm, - cvtlen); - if (bookmark->used) { - SQLLEN *used = LENADDR_SHIFT(bookmark->used, offset); - - if (bind_size > 0) - used = (SQLLEN *)((char *)used + (bind_row * bind_size)); - else - used = (SQLLEN *)((char *)used + (bind_row * sizeof(SQLLEN))); - *used = cvtlen; - } - MYLOG(OPENSEARCH_TRACE, "leaving cvtlen=" FORMAT_SIZE_T " ix(bl,of)=%d(%d,%d)\n", - cvtlen, - opensearch_bm.index, opensearch_bm.keys.blocknum, - opensearch_bm.keys.offset); - - return COPY_OK; -} diff --git a/sql-odbc/src/sqlodbc/statement.h b/sql-odbc/src/sqlodbc/statement.h deleted file mode 100644 index 7dc57ff05c..0000000000 --- a/sql-odbc/src/sqlodbc/statement.h +++ /dev/null @@ -1,504 +0,0 @@ -#ifndef __STATEMENT_H__ -#define __STATEMENT_H__ - -#include - -#include "bind.h" -#include "descriptor.h" -#include "opensearch_helper.h" -#include "opensearch_odbc.h" -#include "opensearch_types.h" -#include "tuple.h" - -// C Interface -#ifdef __cplusplus -extern "C" { -#endif - -enum { - CancelRequestSet = 1L, - CancelRequestAccepted = (1L << 1), - CancelCompleted = (1L << 2) -}; - -typedef enum { - STMT_ALLOCATED, /* The statement handle is allocated, but - * not used so far */ - STMT_READY, /* the statement is waiting to be executed */ - STMT_DESCRIBED, /* ODBC states that it is legal to call - * e.g. SQLDescribeCol before a call to - * SQLExecute, but after SQLPrepare. To - * get all the necessary information in - * such a case, we parse the query _before_ - * the actual call to SQLExecute, and the - * result set contains only column information, - * but no actual data. */ - STMT_FINISHED, /* statement execution has finished */ - STMT_EXECUTING /* statement execution is still going on */ -} STMT_Status; -/* - * ERROR status code - * - * The code for warnings must be minus - * and LOWEST_STMT_ERROR must be set to - * the least code number. - * The code for STMT_OK is 0 and error - * codes follow after it. 
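- *
- * A hedged sketch of how statement.c resolves a code to its SQLSTATE
- * (see Statement_sqlstate[] there): the table is indexed relative to
- * LOWEST_STMT_ERROR, so
- *
- *     idx = errornum - LOWEST_STMT_ERROR;   // 0 for STMT_ERROR_IN_ROW
- *     sqlstate = Statement_sqlstate[idx].ver3str;   // or ver2str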
- */ -enum { - LOWEST_STMT_ERROR = (-6) - /* minus values mean warning returns */ - , - STMT_ERROR_IN_ROW = (-6), - STMT_OPTION_VALUE_CHANGED = (-5), - STMT_ROW_VERSION_CHANGED = (-4), - STMT_POS_BEFORE_RECORDSET = (-3), - STMT_TRUNCATED = (-2), - STMT_INFO_ONLY = (-1) - /* not an error message, - * just a notification - * to be returned by - * SQLError - */ - , - STMT_OK = 0, - STMT_EXEC_ERROR, - STMT_STATUS_ERROR, - STMT_SEQUENCE_ERROR, - STMT_NO_MEMORY_ERROR, - STMT_COLNUM_ERROR, - STMT_NO_STMTSTRING, - STMT_ERROR_TAKEN_FROM_BACKEND, - STMT_INTERNAL_ERROR, - STMT_STILL_EXECUTING, - STMT_NOT_IMPLEMENTED_ERROR, - STMT_BAD_PARAMETER_NUMBER_ERROR, - STMT_OPTION_OUT_OF_RANGE_ERROR, - STMT_INVALID_COLUMN_NUMBER_ERROR, - STMT_RESTRICTED_DATA_TYPE_ERROR, - STMT_INVALID_CURSOR_STATE_ERROR, - STMT_CREATE_TABLE_ERROR, - STMT_QUERY_SYNTAX_ERROR, - STMT_NO_CURSOR_NAME, - STMT_INVALID_CURSOR_NAME, - STMT_INVALID_ARGUMENT_NO, - STMT_ROW_OUT_OF_RANGE, - STMT_OPERATION_CANCELLED, - STMT_INVALID_CURSOR_POSITION, - STMT_VALUE_OUT_OF_RANGE, - STMT_OPERATION_INVALID, - STMT_PROGRAM_TYPE_OUT_OF_RANGE, - STMT_BAD_ERROR, - STMT_INVALID_OPTION_IDENTIFIER, - STMT_RETURN_NULL_WITHOUT_INDICATOR, - STMT_INVALID_DESCRIPTOR_IDENTIFIER, - STMT_OPTION_NOT_FOR_THE_DRIVER, - STMT_FETCH_OUT_OF_RANGE, - STMT_COUNT_FIELD_INCORRECT, - STMT_INVALID_NULL_ARG, - STMT_NO_RESPONSE, - STMT_COMMUNICATION_ERROR, - STMT_STRING_CONVERSION_ERROR -}; - -/* statement types */ -enum { - STMT_TYPE_UNKNOWN = -2, - STMT_TYPE_OTHER = -1, - STMT_TYPE_SELECT = 0, - STMT_TYPE_WITH, - STMT_TYPE_PROCCALL, - STMT_TYPE_TRANSACTION, - STMT_TYPE_DECLARE, - STMT_TYPE_FETCH, - STMT_TYPE_CLOSE, - STMT_TYPE_INSERT, - STMT_TYPE_UPDATE, - STMT_TYPE_DELETE, - STMT_TYPE_CREATE, - STMT_TYPE_ALTER, - STMT_TYPE_DROP, - STMT_TYPE_GRANT, - STMT_TYPE_REVOKE, - STMT_TYPE_LOCK, - STMT_TYPE_PREPARE, - STMT_TYPE_EXECUTE, - STMT_TYPE_DEALLOCATE, - STMT_TYPE_ANALYZE, - STMT_TYPE_NOTIFY, - STMT_TYPE_EXPLAIN, - STMT_TYPE_SET, - STMT_TYPE_RESET, - STMT_TYPE_MOVE, - STMT_TYPE_COPY, - STMT_TYPE_START, - STMT_TYPE_SPECIAL -}; - -#define STMT_UPDATE(stmt) ((stmt)->statement_type > STMT_TYPE_PROCCALL) - -/* Parsing status */ -enum { - STMT_PARSE_NONE = 0, - STMT_PARSE_COMPLETE /* the driver parsed the statement */ - , - STMT_PARSE_INCOMPLETE, - STMT_PARSE_FATAL, - STMT_PARSE_MASK = 3L, - STMT_PARSED_OIDS = (1L << 2), - STMT_FOUND_KEY = (1L << 3), - STMT_HAS_ROW_DESCRIPTION = (1L << 4) /* already got the col info */ - , - STMT_REFLECTED_ROW_DESCRIPTION = (1L << 5) -}; - -/* transition status */ -enum { - STMT_TRANSITION_UNALLOCATED = 0, - STMT_TRANSITION_ALLOCATED = 1, - STMT_TRANSITION_FETCH_SCROLL = 6, - STMT_TRANSITION_EXTENDED_FETCH = 7 -}; - -/* Result style */ -enum { STMT_FETCH_NONE = 0, STMT_FETCH_NORMAL, STMT_FETCH_EXTENDED }; - -#define OPENSEARCH_NUM_NORMAL_KEYS 2 - -typedef RETCODE (*NeedDataCallfunc)(RETCODE, void *); -typedef struct { - NeedDataCallfunc func; - void *data; -} NeedDataCallback; - -/* - * ProcessedStmt represents a fragment of the original SQL query, after - * converting ? markers to $n style, processing ODBC escapes, and splitting - * a multi-statement into individual statements. Each individual statement - * is represented by one ProcessedStmt struct. 
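- *
- * For example (a hedged illustration), submitting
- *
- *     SELECT a FROM t WHERE b = ?; DELETE FROM t WHERE b = ?
- *
- * yields two chained ProcessedStmt nodes, one per statement, each with
- * its ? marker rewritten to a $n placeholder and num_params = 1.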
- */ -struct ProcessedStmt { - struct ProcessedStmt *next; - char *query; - int num_params; /* number of parameter markers in this, - * fragment or -1 if not known */ -}; -typedef struct ProcessedStmt ProcessedStmt; - -/******** Statement Handle ***********/ -struct StatementClass_ { - ConnectionClass *hdbc; /* pointer to ConnectionClass this - * statement belongs to */ - QResultClass *result; /* result of the current statement */ - QResultClass *curres; /* the current result in the chain */ - HSTMT *phstmt; - StatementOptions options; - StatementOptions options_orig; - /* attached descriptor handles */ - DescriptorClass *ard; - DescriptorClass *apd; - DescriptorClass *ird; - DescriptorClass *ipd; - /* implicit descriptor handles */ - DescriptorClass ardi; - DescriptorClass irdi; - DescriptorClass apdi; - DescriptorClass ipdi; - - STMT_Status status; - char *__error_message; - int __error_number; - OpenSearch_ErrorInfo *opensearch_error; - - SQLLEN currTuple; /* current absolute row number (GetData, - * SetPos, SQLFetch) */ - GetDataInfo gdata_info; - SQLLEN save_rowset_size; /* saved rowset size in case of - * change/FETCH_NEXT */ - SQLLEN rowset_start; /* start of rowset (an absolute row - * number) */ - SQLSETPOSIROW bind_row; /* current offset for Multiple row/column - * binding */ - Int2 current_col; /* current column for GetData -- used to - * handle multiple calls */ - SQLLEN last_fetch_count; /* number of rows retrieved in - * last fetch/extended fetch */ - int lobj_fd; /* fd of the current large object */ - - char *statement; /* if non--null pointer to the SQL - * statement that has been executed */ - /* - * processed_statements is the SQL after splitting multi-statement into - * parts, and replacing ? markers with $n style markers, or injecting the - * values in UseServerSidePrepare=0 mode. - */ - ProcessedStmt *processed_statements; - - TABLE_INFO **ti; - Int2 ntab; - Int2 num_key_fields; - Int2 statement_type; /* According to the defines above */ - Int2 num_params; - Int2 data_at_exec; /* Number of params needing SQLPutData */ - UDWORD iflag; /* OPENSEARCHAPI_AllocStmt parameter */ - PutDataInfo pdata_info; - po_ind_t parse_status; - po_ind_t proc_return; - po_ind_t put_data; /* Has SQLPutData been called ? */ - po_ind_t catalog_result; /* Is this a result of catalog function ? */ - po_ind_t prepare; /* is this a prepared statement ? */ - po_ind_t prepared; /* is this statement already - * prepared at the server ? */ - po_ind_t external; /* Allocated via SQLAllocHandle() */ - po_ind_t transition_status; /* Transition status */ - po_ind_t multi_statement; /* -1:unknown 0:single 1:multi */ - po_ind_t rb_or_tc; /* rollback on error */ - po_ind_t - discard_output_params; /* discard output parameters on parse stage */ - po_ind_t cancel_info; /* cancel information */ - po_ind_t ref_CC_error; /* refer to CC_error ? */ - po_ind_t lock_CC_for_rb; /* lock CC for statement rollback ? */ - po_ind_t join_info; /* have joins ? */ - po_ind_t parse_method; /* parse_statement is forced or ? 
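- * (forced means bit 1L is set; see SC_is_parse_forced() and
- * SC_set_parse_forced() defined later in this header)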
*/ - opensearchNAME cursor_name; - char *plan_name; - unsigned char miscinfo; - unsigned char execinfo; - po_ind_t updatable; - SQLLEN diag_row_count; - char *load_statement; /* to (re)load updatable individual rows */ - ssize_t from_pos; - ssize_t load_from_pos; - ssize_t where_pos; - SQLLEN last_fetch_count_include_ommitted; - time_t stmt_time; - struct tm localtime; - /* SQL_NEED_DATA Callback list */ - StatementClass *execute_delegate; - StatementClass *execute_parent; - UInt2 allocated_callbacks; - UInt2 num_callbacks; - NeedDataCallback *callbacks; - void *cs; -}; - -#define SC_get_conn(a) ((a)->hdbc) -void SC_init_Result(StatementClass *self); -void SC_set_Result(StatementClass *self, QResultClass *res); -#define SC_get_Result(a) ((a)->result) -#define SC_set_Curres(a, b) ((a)->curres = b) -#define SC_get_Curres(a) ((a)->curres) -#define SC_get_ARD(a) ((a)->ard) -#define SC_get_APD(a) ((a)->apd) -#define SC_get_IRD(a) ((a)->ird) -#define SC_get_IPD(a) ((a)->ipd) -#define SC_get_ARDF(a) (&(SC_get_ARD(a)->ardf)) -#define SC_get_APDF(a) (&(SC_get_APD(a)->apdf)) -#define SC_get_IRDF(a) (&(SC_get_IRD(a)->irdf)) -#define SC_get_IPDF(a) (&(SC_get_IPD(a)->ipdf)) -#define SC_get_ARDi(a) (&((a)->ardi)) -#define SC_get_APDi(a) (&((a)->apdi)) -#define SC_get_IRDi(a) (&((a)->irdi)) -#define SC_get_IPDi(a) (&((a)->ipdi)) -#define SC_get_GDTI(a) (&((a)->gdata_info)) -#define SC_get_PDTI(a) (&((a)->pdata_info)) - -#define SC_get_errornumber(a) ((a)->__error_number) -#define SC_set_errornumber(a, n) ((a)->__error_number = n) -#define SC_get_errormsg(a) ((a)->__error_message) -#define SC_is_prepare_statement(a) (0 != ((a)->prepare & PREPARE_STATEMENT)) -#define SC_get_prepare_method(a) ((a)->prepare & (~PREPARE_STATEMENT)) - -#define SC_parsed_status(a) ((a)->parse_status & STMT_PARSE_MASK) -#define SC_set_parse_status(a, s) ((a)->parse_status |= s) -#define SC_update_not_ready(a) \ - (SC_parsed_status(a) == STMT_PARSE_NONE \ - || 0 == ((a)->parse_status & STMT_PARSED_OIDS)) -#define SC_update_ready(a) \ - (SC_parsed_status(a) == STMT_PARSE_COMPLETE \ - && 0 != ((a)->parse_status & STMT_FOUND_KEY) && (a)->updatable) -#define SC_set_checked_hasoids(a, b) \ - ((a)->parse_status |= (STMT_PARSED_OIDS | (b ? 
STMT_FOUND_KEY : 0))) -#define SC_checked_hasoids(a) (0 != ((a)->parse_status & STMT_PARSED_OIDS)) -#define SC_set_delegate(p, c) \ - ((p)->execute_delegate = c, (c)->execute_parent = p) - -#define SC_is_updatable(s) (0 < ((s)->updatable)) -#define SC_reset_updatable(s) ((s)->updatable = -1) -#define SC_set_updatable(s, b) ((s)->updatable = (b)) -#define SC_clear_parse_method(s) ((s)->parse_method = 0) -#define SC_is_parse_forced(s) (0 != ((s)->parse_method & 1L)) -#define SC_set_parse_forced(s) ((s)->parse_method |= 1L) - -#define SC_cursor_is_valid(s) (NAME_IS_VALID((s)->cursor_name)) -#define SC_cursor_name(s) (SAFE_NAME((s)->cursor_name)) - -void SC_reset_delegate(RETCODE, StatementClass *); - -#define SC_is_lower_case(a, b) \ - ((a)->options.metadata_id || (b)->connInfo.lower_case_identifier) - -#define SC_MALLOC_return_with_error(t, tp, s, a, m, r) \ - do { \ - if (t = (tp *)malloc(s), NULL == t) { \ - SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_MALLOC"); \ - return r; \ - } \ - } while (0) -#define SC_MALLOC_gexit_with_error(t, tp, s, a, m, r) \ - do { \ - if (t = (tp *)malloc(s), NULL == t) { \ - SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_MALLOC"); \ - r; \ - goto cleanup; \ - } \ - } while (0) -#define SC_REALLOC_return_with_error(t, tp, s, a, m, r) \ - do { \ - tp *tmp; \ - if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ - SC_set_error(a, STMT_NO_MEMORY_ERROR, m, "SC_REALLOC"); \ - return r; \ - } \ - t = tmp; \ - } while (0) -#define SC_REALLOC_gexit_with_error(t, tp, s, a, m, r) \ - do { \ - tp *tmp; \ - if (tmp = (tp *)realloc(t, s), NULL == tmp) { \ - SC_set_error(a, STMT_NO_MEMORY_ERROR, m, __FUNCTION__); \ - r; \ - goto cleanup; \ - } \ - t = tmp; \ - } while (0) - -/* options for SC_free_params() */ -#define STMT_FREE_PARAMS_ALL 0 -#define STMT_FREE_PARAMS_DATA_AT_EXEC_ONLY 1 - -/* prepare state */ -enum { - NON_PREPARE_STATEMENT = 0, - PREPARE_STATEMENT = 1, - PREPARE_BY_THE_DRIVER = (1L << 1), - NAMED_PARSE_REQUEST = (3L << 1), - PARSE_TO_EXEC_ONCE = (4L << 1), - PARSE_REQ_FOR_INFO = (5L << 1) -}; - -/* prepared state */ -enum { NOT_PREPARED = 0, PREPARED, EXECUTED }; - -/* misc info */ -#define SC_set_fetchcursor(a) ((a)->miscinfo |= (1L << 1)) -#define SC_no_fetchcursor(a) ((a)->miscinfo &= ~(1L << 1)) -#define SC_is_fetchcursor(a) (((a)->miscinfo & (1L << 1)) != 0) -#define SC_miscinfo_clear(a) ((a)->miscinfo = 0) -#define SC_set_with_hold(a) ((a)->execinfo |= 1L) -#define SC_set_without_hold(a) ((a)->execinfo &= (~1L)) -#define SC_is_with_hold(a) (((a)->execinfo & 1L) != 0) -#define SC_set_readonly(a) ((a)->execinfo |= (1L << 1)) -#define SC_set_no_readonly(a) ((a)->execinfo &= ~(1L << 1)) -#define SC_is_readonly(a) (((a)->execinfo & (1L << 1)) != 0) -#define SC_execinfo_clear(a) (((a)->execinfo = 0) -#define STMT_HAS_OUTER_JOIN 1L -#define STMT_HAS_INNER_JOIN (1L << 1) -#define SC_has_join(a) (0 != (a)->join_info) -#define SC_has_outer_join(a) (0 != (STMT_HAS_OUTER_JOIN & (a)->join_info)) -#define SC_has_inner_join(a) (0 != (STMT_HAS_INNER_JOIN & (a)->join_info)) -#define SC_set_outer_join(a) ((a)->join_info |= STMT_HAS_OUTER_JOIN) -#define SC_set_inner_join(a) ((a)->join_info |= STMT_HAS_INNER_JOIN) - -#define SC_start_tc_stmt(a) ((a)->rb_or_tc = (1L << 1)) -#define SC_is_tc_stmt(a) (((a)->rb_or_tc & (1L << 1)) != 0) -#define SC_start_rb_stmt(a) ((a)->rb_or_tc = (1L << 2)) -#define SC_is_rb_stmt(a) (((a)->rb_or_tc & (1L << 2)) != 0) -#define SC_unref_CC_error(a) (((a)->ref_CC_error) = FALSE) -#define SC_ref_CC_error(a) (((a)->ref_CC_error) = TRUE) -#define 
SC_can_parse_statement(a) (STMT_TYPE_SELECT == (a)->statement_type) -/* - * DECLARE CURSOR + FETCH can only be used with SELECT-type queries. And - * it's not currently supported with array-bound parameters. - */ -#define SC_may_use_cursor(a) \ - (SC_get_APDF(a)->paramset_size <= 1 \ - && (STMT_TYPE_SELECT == (a)->statement_type \ - || STMT_TYPE_WITH == (a)->statement_type)) -#define SC_may_fetch_rows(a) \ - (STMT_TYPE_SELECT == (a)->statement_type \ - || STMT_TYPE_WITH == (a)->statement_type) - -/* For Multi-thread */ -#define INIT_STMT_CS(x) XPlatformInitializeCriticalSection(&((x)->cs)) -#define ENTER_STMT_CS(x) XPlatformEnterCriticalSection(((x)->cs)) -#define TRY_ENTER_STMT_CS(x) XPlatformTryEnterCriticalSection(&((x)->cs)) -#define LEAVE_STMT_CS(x) XPlatformLeaveCriticalSection(((x)->cs)) -#define DELETE_STMT_CS(x) XPlatformDeleteCriticalSection(&((x)->cs)) - -/* Statement prototypes */ -StatementClass *SC_Constructor(ConnectionClass *); -void InitializeStatementOptions(StatementOptions *opt); -char SC_Destructor(StatementClass *self); -BOOL SC_opencheck(StatementClass *self, const char *func); -RETCODE SC_initialize_and_recycle(StatementClass *self); -void SC_initialize_cols_info(StatementClass *self, BOOL DCdestroy, - BOOL parseReset); -void SC_reset_result_for_rerun(StatementClass *self); -int statement_type(const char *statement); -char SC_unbind_cols(StatementClass *self); -char SC_recycle_statement(StatementClass *self); -void SC_clear_error(StatementClass *self); -void SC_set_error(StatementClass *self, int errnum, const char *msg, - const char *func); -void SC_set_errormsg(StatementClass *self, const char *msg); -void SC_error_copy(StatementClass *self, const StatementClass *from, BOOL); -void SC_full_error_copy(StatementClass *self, const StatementClass *from, BOOL); -void SC_set_prepared(StatementClass *self, int); -void SC_set_planname(StatementClass *self, const char *plan_name); -void SC_set_rowset_start(StatementClass *self, SQLLEN, BOOL); -void SC_inc_rowset_start(StatementClass *self, SQLLEN); -RETCODE SC_initialize_stmts(StatementClass *self, BOOL); -RETCODE SC_fetch(StatementClass *self); -void SC_log_error(const char *func, const char *desc, - const StatementClass *self); -time_t SC_get_time(StatementClass *self); -struct tm *SC_get_localtime(StatementClass *self); -int SC_Create_bookmark(StatementClass *stmt, BindInfoClass *bookmark, - Int4 row_pos, Int4 currTuple, const KeySet *keyset); -int SC_set_current_col(StatementClass *self, int col); -BOOL SC_SetExecuting(StatementClass *self, BOOL on); -BOOL SC_SetCancelRequest(StatementClass *self); - -BOOL SC_connection_lost_check(StatementClass *stmt, const char *funcname); -void SC_set_errorinfo(StatementClass *self, QResultClass *res, int errkind); -RETCODE dequeueNeedDataCallback(RETCODE, StatementClass *self); -void cancelNeedDataState(StatementClass *self); - -/* - * Macros to convert global index <-> relative index in resultset/rowset - */ -/* a global index to the relative index in a rowset */ -#define SC_get_rowset_start(stmt) ((stmt)->rowset_start) -#define GIdx2RowIdx(gidx, stmt) (gidx - (stmt)->rowset_start) -/* a global index to the relative index in a resultset(not a rowset) */ -#define GIdx2CacheIdx(gidx, s, r) \ - (gidx - (QR_has_valid_base(r) ? ((s)->rowset_start - (r)->base) : 0)) -#define GIdx2KResIdx(gidx, s, r) \ - (gidx - (QR_has_valid_base(r) ? 
((s)->rowset_start - (r)->key_base) : 0)) -/* a relative index in a rowset to the global index */ -#define RowIdx2GIdx(ridx, stmt) (ridx + (stmt)->rowset_start) -/* a relative index in a resultset to the global index */ -#define CacheIdx2GIdx(ridx, stmt, res) \ - (ridx - (res)->base + (stmt)->rowset_start) -#define KResIdx2GIdx(ridx, stmt, res) \ - (ridx - (res)->key_base + (stmt)->rowset_start) - -#define BOOKMARK_SHIFT 1 -#define SC_make_int4_bookmark(b) ((b < 0) ? (b) : (b + BOOKMARK_SHIFT)) -#define SC_resolve_int4_bookmark(b) ((b < 0) ? (b) : (b - BOOKMARK_SHIFT)) - -#ifdef __cplusplus -} -#endif -#endif /* __STATEMENT_H__ */ diff --git a/sql-odbc/src/sqlodbc/tuple.c b/sql-odbc/src/sqlodbc/tuple.c deleted file mode 100644 index d8a7c06126..0000000000 --- a/sql-odbc/src/sqlodbc/tuple.c +++ /dev/null @@ -1,43 +0,0 @@ -// clang-format off -#include "tuple.h" -#include "misc.h" - -#include -#include -// clang-format on - -void set_tuplefield_null(TupleField *tuple_field) { - tuple_field->len = 0; - // Changing value to strdup("") from NULL to fix error - // "Object cannot be cast from DBNull to other types" in Excel & Power BI - tuple_field->value = strdup(""); /* NULL; */ -} - -void set_tuplefield_string(TupleField *tuple_field, const char *string) { - if (string) { - tuple_field->len = (Int4)strlen(string); /* ES restriction */ - tuple_field->value = strdup(string); - } - if (!tuple_field->value) - set_tuplefield_null(tuple_field); -} - -void set_tuplefield_int2(TupleField *tuple_field, Int2 value) { - char buffer[10]; - - ITOA_FIXED(buffer, value); - - tuple_field->len = (Int4)(strlen(buffer) + 1); - /* +1 ... is this correct (better be on the save side-...) */ - tuple_field->value = strdup(buffer); -} - -void set_tuplefield_int4(TupleField *tuple_field, Int4 value) { - char buffer[15]; - - ITOA_FIXED(buffer, value); - - tuple_field->len = (Int4)(strlen(buffer) + 1); - /* +1 ... is this correct (better be on the save side-...) */ - tuple_field->value = strdup(buffer); -} diff --git a/sql-odbc/src/sqlodbc/tuple.h b/sql-odbc/src/sqlodbc/tuple.h deleted file mode 100644 index cc5a29eec1..0000000000 --- a/sql-odbc/src/sqlodbc/tuple.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef __TUPLE_H__ -#define __TUPLE_H__ - -#include "opensearch_odbc.h" - -// C Interface -#ifdef __cplusplus -extern "C" { -#endif - -/* Used by backend data AND manual result sets */ -struct TupleField_ { - Int4 len; /* ES length of the current Tuple */ - void *value; /* an array representing the value */ -}; - -/* keyset(TID + OID) info */ -struct KeySet_ { - UWORD status; - UInt2 offset; - UInt4 blocknum; - OID oid; -}; -/* Rollback(index + original TID) info */ -struct Rollback_ { - SQLLEN index; - UInt4 blocknum; - UInt2 offset; - OID oid; - UWORD option; -}; -#define KEYSET_INFO_PUBLIC 0x07 -#define CURS_SELF_ADDING (1L << 3) -#define CURS_SELF_DELETING (1L << 4) -#define CURS_SELF_UPDATING (1L << 5) -#define CURS_SELF_ADDED (1L << 6) -#define CURS_SELF_DELETED (1L << 7) -#define CURS_SELF_UPDATED (1L << 8) -#define CURS_NEEDS_REREAD (1L << 9) -#define CURS_IN_ROWSET (1L << 10) -#define CURS_OTHER_DELETED (1L << 11) - -/* These macros are wrappers for the corresponding set_tuplefield functions - but these handle automatic NULL determination and call set_tuplefield_null() - if appropriate for the datatype (used by SQLGetTypeInfo). -*/ -#define set_nullfield_string(FLD, VAL) \ - ((VAL) ? set_tuplefield_string(FLD, (VAL)) : set_tuplefield_null(FLD)) -#define set_nullfield_int2(FLD, VAL) \ - ((VAL) != -1 ? 
set_tuplefield_int2(FLD, (VAL)) : set_tuplefield_null(FLD)) -#define set_nullfield_int4(FLD, VAL) \ - ((VAL) != -1 ? set_tuplefield_int4(FLD, (VAL)) : set_tuplefield_null(FLD)) - -void set_tuplefield_null(TupleField *tuple_field); -void set_tuplefield_string(TupleField *tuple_field, const char *string); -void set_tuplefield_int2(TupleField *tuple_field, Int2 value); -void set_tuplefield_int4(TupleField *tuple_field, Int4 value); -SQLLEN ClearCachedRows(TupleField *tuple, int num_fields, SQLLEN num_rows); - -typedef struct _OPENSEARCH_BM_ { - Int4 index; - KeySet keys; -} OPENSEARCH_BM; - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/sql-odbc/src/sqlodbc/unicode_support.h b/sql-odbc/src/sqlodbc/unicode_support.h deleted file mode 100644 index e2b2a63521..0000000000 --- a/sql-odbc/src/sqlodbc/unicode_support.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __UNICODE_SUPPORT_H__ -#define __UNICODE_SUPPORT_H__ - -#include "opensearch_odbc.h" - -#ifdef UNICODE_SUPPORT -#define WCLEN sizeof(SQLWCHAR) -enum { CONVTYPE_UNKNOWN, WCSTYPE_UTF16_LE, WCSTYPE_UTF32_LE, C16TYPE_UTF16_LE }; -char *ucs2_to_utf8(const SQLWCHAR *ucs2str, SQLLEN ilen, SQLLEN *olen, - BOOL tolower); -SQLULEN utf8_to_ucs2_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, - SQLWCHAR *ucs2str, SQLULEN buflen, BOOL errcheck); -int get_convtype(void); -#define utf8_to_ucs2(utf8str, ilen, ucs2str, buflen) \ - utf8_to_ucs2_lf(utf8str, ilen, FALSE, ucs2str, buflen, FALSE) - -SQLLEN bindcol_hybrid_estimate(const char *ldt, BOOL lf_conv, char **wcsbuf); -SQLLEN bindcol_hybrid_exec(SQLWCHAR *utf16, const char *ldt, size_t n, - BOOL lf_conv, char **wcsbuf); -SQLLEN bindcol_localize_estimate(const char *utf8dt, BOOL lf_conv, - char **wcsbuf); -SQLLEN bindcol_localize_exec(char *ldt, size_t n, BOOL lf_conv, char **wcsbuf); -SQLLEN bindpara_wchar_to_msg(const SQLWCHAR *utf16, char **wcsbuf, SQLLEN used); - -SQLLEN locale_to_sqlwchar(SQLWCHAR *utf16, const char *ldt, size_t n, - BOOL lf_conv); -#endif /* UNICODE_SUPPORT */ - -#endif /* __UNICODE_SUPPORT_H__ */ diff --git a/sql-odbc/src/sqlodbc/version.h b/sql-odbc/src/sqlodbc/version.h deleted file mode 100644 index baf7e67abc..0000000000 --- a/sql-odbc/src/sqlodbc/version.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __VERSION_H__ -#define __VERSION_H__ - -/* - * BuildAll may pass ELASTICDRIVERVERSION, ELASTIC_RESOURCE_VERSION - * and OPENSEARCH_DRVFILE_VERSION via winbuild/elasticodbc.vcxproj. 
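- *
- * Hedged sketch: passing the version macros on the compiler command
- * line, e.g.
- *
- *     cl /DOPENSEARCH_ODBC_VERSION=... /DOPENSEARCH_ODBC_DRVFILE_VERSION=...
- *
- * makes OPENSEARCHDRIVERVERSION and OPENSEARCH_RESOURCE_VERSION default
- * to OPENSEARCH_ODBC_VERSION below, while OPENSEARCH_DRVFILE_VERSION
- * defaults separately from OPENSEARCH_ODBC_DRVFILE_VERSION.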
- */ -#ifdef OPENSEARCH_ODBC_VERSION - -#ifndef OPENSEARCHDRIVERVERSION -#define OPENSEARCHDRIVERVERSION OPENSEARCH_ODBC_VERSION -#endif -#ifndef OPENSEARCH_RESOURCE_VERSION -#define OPENSEARCH_RESOURCE_VERSION OPENSEARCHDRIVERVERSION -#endif -#ifndef OPENSEARCH_DRVFILE_VERSION -#define OPENSEARCH_DRVFILE_VERSION OPENSEARCH_ODBC_DRVFILE_VERSION -#endif - -#endif // OPENSEARCH_ODBC_VERSION - -#endif diff --git a/sql-odbc/src/sqlodbc/win_setup.h b/sql-odbc/src/sqlodbc/win_setup.h deleted file mode 100644 index 9abae27d30..0000000000 --- a/sql-odbc/src/sqlodbc/win_setup.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _WIN_SETUP_H__ -#define _WIN_SETUP_H__ - -#ifndef INTFUNC -#define INTFUNC __stdcall -#endif /* INTFUNC */ -#define MAXDSNAME (32 + 1) /* Max data source name length */ -/* Globals */ -/* NOTE: All these are used by the dialog procedures */ -typedef struct tagSETUPDLG { - HWND hwndParent; /* Parent window handle */ - LPCSTR lpszDrvr; /* Driver description */ - ConnInfo ci; - char szDSN[MAXDSNAME]; /* Original data source name */ - BOOL fNewDSN; /* New data source flag */ - BOOL fDefault; /* Default data source flag */ - -} SETUPDLG, *LPSETUPDLG; - -/* Prototypes */ -INT_PTR CALLBACK ConfigDlgProc(HWND hdlg, UINT wMsg, WPARAM wParam, - LPARAM lParam); -BOOL INTFUNC ChangeDriverName(HWND hwnd, LPSETUPDLG lpsetupdlg, - LPCSTR driver_name); - -void test_connection(HANDLE hwnd, ConnInfo *ci, BOOL withDTC); - -#endif /* _WIN_SETUP_H__ */ diff --git a/sql-odbc/src/sqlodbc/win_unicode.c b/sql-odbc/src/sqlodbc/win_unicode.c deleted file mode 100644 index 706e86e53c..0000000000 --- a/sql-odbc/src/sqlodbc/win_unicode.c +++ /dev/null @@ -1,1091 +0,0 @@ -#ifdef UNICODE_SUPPORT - -#include -#include -#include -#include "unicode_support.h" - -#ifdef WIN32 -#define FORMAT_SIZE_T "%Iu" -#else -#define FORMAT_SIZE_T "%zu" -#endif - -#if (defined(__STDC_ISO_10646__) && defined(HAVE_MBSTOWCS) \ - && defined(HAVE_WCSTOMBS)) \ - || defined(WIN32) -#define __WCS_ISO10646__ -static BOOL use_wcs = FALSE; -#endif - -#if (defined(__STDC_UTF_16__) && defined(HAVE_UCHAR_H) \ - && defined(HAVE_MBRTOC16) && defined(HAVE_C16RTOMB)) -#define __CHAR16_UTF_16__ -#include -static BOOL use_c16 = FALSE; -#endif - -static int convtype = -1; - -int get_convtype(void) { - const UCHAR *cdt; - (void)(cdt); -#if defined(__WCS_ISO10646__) - if (convtype < 0) { - wchar_t *wdt = L"a"; - int sizeof_w = sizeof(wchar_t); - - cdt = (UCHAR *)wdt; - switch (sizeof_w) { - case 2: - if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] - && '\0' == cdt[3]) { - MYLOG(OPENSEARCH_DEBUG, " UTF-16LE detected\n"); - convtype = WCSTYPE_UTF16_LE; - use_wcs = TRUE; - } - break; - case 4: - if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] - && '\0' == cdt[3] && '\0' == cdt[4] && '\0' == cdt[5] - && '\0' == cdt[6] && '\0' == cdt[7]) { - MYLOG(OPENSEARCH_DEBUG, " UTF32-LE detected\n"); - convtype = WCSTYPE_UTF32_LE; - use_wcs = TRUE; - } - break; - } - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (convtype < 0) { - char16_t *c16dt = u"a"; - - cdt = (UCHAR *)c16dt; - if ('a' == cdt[0] && '\0' == cdt[1] && '\0' == cdt[2] - && '\0' == cdt[3]) { - MYLOG(OPENSEARCH_DEBUG, " C16_UTF-16LE detected\n"); - convtype = C16TYPE_UTF16_LE; - use_c16 = TRUE; - } - } -#endif /* __CHAR16_UTF_16__ */ - if (convtype < 0) - convtype = CONVTYPE_UNKNOWN; /* unknown */ - return convtype; -} - -#define byte3check 0xfffff800 -#define byte2_base 0x80c0 -#define byte2_mask1 0x07c0 -#define byte2_mask2 0x003f -#define byte3_base 0x8080e0 -#define 
byte3_mask1 0xf000 -#define byte3_mask2 0x0fc0 -#define byte3_mask3 0x003f - -#define surrog_check 0xfc00 -#define surrog1_bits 0xd800 -#define surrog2_bits 0xdc00 -#define byte4_base 0x808080f0 -#define byte4_sr1_mask1 0x0700 -#define byte4_sr1_mask2 0x00fc -#define byte4_sr1_mask3 0x0003 -#define byte4_sr2_mask1 0x03c0 -#define byte4_sr2_mask2 0x003f -#define surrogate_adjust (0x10000 >> 10) - -static int little_endian = -1; - -SQLULEN ucs2strlen(const SQLWCHAR *ucs2str) { - SQLULEN len; - for (len = 0; ucs2str[len]; len++) - ; - return len; -} -char *ucs2_to_utf8(const SQLWCHAR *ucs2str, SQLLEN ilen, SQLLEN *olen, - BOOL lower_identifier) { - char *utf8str; - int len = 0; - MYLOG(OPENSEARCH_DEBUG, "%p ilen=" FORMAT_LEN " ", ucs2str, ilen); - - if (!ucs2str) { - if (olen) - *olen = SQL_NULL_DATA; - return NULL; - } - if (little_endian < 0) { - int crt = 1; - little_endian = (0 != ((char *)&crt)[0]); - } - if (ilen < 0) - ilen = ucs2strlen(ucs2str); - MYPRINTF(0, " newlen=" FORMAT_LEN, ilen); - utf8str = (char *)malloc(ilen * 4 + 1); - if (utf8str) { - int i = 0; - UInt2 byte2code; - Int4 byte4code, surrd1, surrd2; - const SQLWCHAR *wstr; - - for (i = 0, wstr = ucs2str; i < ilen; i++, wstr++) { - if (!*wstr) - break; - else if (0 == (*wstr & 0xffffff80)) /* ASCII */ - { - if (lower_identifier) - utf8str[len++] = (char)tolower(*wstr); - else - utf8str[len++] = (char)*wstr; - } else if ((*wstr & byte3check) == 0) { - byte2code = byte2_base | ((byte2_mask1 & *wstr) >> 6) - | ((byte2_mask2 & *wstr) << 8); - if (little_endian) - memcpy(utf8str + len, (char *)&byte2code, - sizeof(byte2code)); - else { - utf8str[len] = ((char *)&byte2code)[1]; - utf8str[len + 1] = ((char *)&byte2code)[0]; - } - len += sizeof(byte2code); - } - /* surrogate pair check for non ucs-2 code */ - else if (surrog1_bits == (*wstr & surrog_check)) { - surrd1 = (*wstr & ~surrog_check) + surrogate_adjust; - wstr++; - i++; - surrd2 = (*wstr & ~surrog_check); - byte4code = byte4_base | ((byte4_sr1_mask1 & surrd1) >> 8) - | ((byte4_sr1_mask2 & surrd1) << 6) - | ((byte4_sr1_mask3 & surrd1) << 20) - | ((byte4_sr2_mask1 & surrd2) << 10) - | ((byte4_sr2_mask2 & surrd2) << 24); - if (little_endian) - memcpy(utf8str + len, (char *)&byte4code, - sizeof(byte4code)); - else { - utf8str[len] = ((char *)&byte4code)[3]; - utf8str[len + 1] = ((char *)&byte4code)[2]; - utf8str[len + 2] = ((char *)&byte4code)[1]; - utf8str[len + 3] = ((char *)&byte4code)[0]; - } - len += sizeof(byte4code); - } else { - byte4code = byte3_base | ((byte3_mask1 & *wstr) >> 12) - | ((byte3_mask2 & *wstr) << 2) - | ((byte3_mask3 & *wstr) << 16); - if (little_endian) - memcpy(utf8str + len, (char *)&byte4code, 3); - else { - utf8str[len] = ((char *)&byte4code)[3]; - utf8str[len + 1] = ((char *)&byte4code)[2]; - utf8str[len + 2] = ((char *)&byte4code)[1]; - } - len += 3; - } - } - utf8str[len] = '\0'; - if (olen) - *olen = len; - } - return utf8str; -} - -#define byte3_m1 0x0f -#define byte3_m2 0x3f -#define byte3_m3 0x3f -#define byte2_m1 0x1f -#define byte2_m2 0x3f -#define byte4_m1 0x07 -#define byte4_m2 0x3f -#define byte4_m31 0x30 -#define byte4_m32 0x0f -#define byte4_m4 0x3f - -/* - * Convert a string from UTF-8 encoding to UCS-2. 
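- * (Despite the name, 4-byte UTF-8 sequences are emitted as UTF-16
- * surrogate pairs by the code below, e.g. U+1F600 becomes the pair
- * 0xD83D 0xDE00, so the output is effectively UTF-16 rather than
- * strict UCS-2.)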
- * - * utf8str - input string in UTF-8 - * ilen - length of input string in bytes (or minus) - * lfconv - TRUE if line feeds (LF) should be converted to CR + LF - * ucs2str - output buffer - * bufcount - size of output buffer - * errcheck - if TRUE, check for invalidly encoded input characters - * - * Returns the number of SQLWCHARs copied to output buffer. If the output - * buffer is too small, the output is truncated. The output string is - * NULL-terminated, except when the output is truncated. - */ -SQLULEN -utf8_to_ucs2_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, - SQLWCHAR *ucs2str, SQLULEN bufcount, BOOL errcheck) { - int i; - SQLULEN rtn, ocount, wcode; - const UCHAR *str; - - MYLOG(OPENSEARCH_DEBUG, "ilen=" FORMAT_LEN " bufcount=" FORMAT_ULEN, ilen, - bufcount); - if (!utf8str) - return 0; - - if (!bufcount) - ucs2str = NULL; - else if (!ucs2str) - bufcount = 0; - if (ilen < 0) - ilen = strlen(utf8str); - for (i = 0, ocount = 0, str = (SQLCHAR *)utf8str; i < ilen && *str;) { - if ((*str & 0x80) == 0) { - if (lfconv && OPENSEARCH_LINEFEED == *str - && (i == 0 || OPENSEARCH_CARRIAGE_RETURN != str[-1])) { - if (ocount < bufcount) - ucs2str[ocount] = OPENSEARCH_CARRIAGE_RETURN; - ocount++; - } - if (ocount < bufcount) - ucs2str[ocount] = *str; - ocount++; - i++; - str++; - } else if (0xf8 == (*str & 0xf8)) /* more than 5 byte code */ - { - ocount = (SQLULEN)-1; - goto cleanup; - } else if (0xf0 == (*str & 0xf8)) /* 4 byte code */ - { - if (errcheck) { - if (i + 4 > ilen || 0 == (str[1] & 0x80) || 0 == (str[2] & 0x80) - || 0 == (str[3] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = (surrog1_bits | ((((UInt4)*str) & byte4_m1) << 8) - | ((((UInt4)str[1]) & byte4_m2) << 2) - | ((((UInt4)str[2]) & byte4_m31) >> 4)) - - surrogate_adjust; - ucs2str[ocount] = (SQLWCHAR)wcode; - } - ocount++; - if (ocount < bufcount) { - wcode = surrog2_bits | ((((UInt4)str[2]) & byte4_m32) << 6) - | (((UInt4)str[3]) & byte4_m4); - ucs2str[ocount] = (SQLWCHAR)wcode; - } - ocount++; - i += 4; - str += 4; - } else if (0xe0 == (*str & 0xf0)) /* 3 byte code */ - { - if (errcheck) { - if (i + 3 > ilen || 0 == (str[1] & 0x80) - || 0 == (str[2] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = ((((UInt4)*str) & byte3_m1) << 12) - | ((((UInt4)str[1]) & byte3_m2) << 6) - | (((UInt4)str[2]) & byte3_m3); - ucs2str[ocount] = (SQLWCHAR)wcode; - } - ocount++; - i += 3; - str += 3; - } else if (0xc0 == (*str & 0xe0)) /* 2 byte code */ - { - if (errcheck) { - if (i + 2 > ilen || 0 == (str[1] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = ((((UInt4)*str) & byte2_m1) << 6) - | (((UInt4)str[1]) & byte2_m2); - ucs2str[ocount] = (SQLWCHAR)wcode; - } - ocount++; - i += 2; - str += 2; - } else { - ocount = (SQLULEN)-1; - goto cleanup; - } - } -cleanup: - rtn = ocount; - if (ocount == (SQLULEN)-1) { - if (!errcheck) - rtn = 0; - ocount = 0; - } - if (ocount < bufcount && ucs2str) - ucs2str[ocount] = 0; - MYPRINTF(OPENSEARCH_ALL, " ocount=" FORMAT_ULEN "\n", ocount); - return rtn; -} - -#ifdef __WCS_ISO10646__ - -/* UCS4 => utf8 */ -#define byte4check 0xffff0000 -#define byte4_check 0x10000 -#define byte4_mask1 0x1c0000 -#define byte4_mask2 0x3f000 -#define byte4_mask3 0x0fc0 -#define byte4_mask4 0x003f - -#define byte4_m3 0x3f - -static SQLULEN ucs4strlen(const UInt4 *ucs4str) { - SQLULEN len; - for (len = 0; ucs4str[len]; len++) - ; - return len; -} - -static char *ucs4_to_utf8(const 
UInt4 *ucs4str, SQLLEN ilen, SQLLEN *olen, - BOOL lower_identifier) { - char *utf8str; - int len = 0; - MYLOG(OPENSEARCH_DEBUG, " %p ilen=" FORMAT_LEN "\n", ucs4str, ilen); - - if (!ucs4str) { - if (olen) - *olen = SQL_NULL_DATA; - return NULL; - } - if (little_endian < 0) { - int crt = 1; - little_endian = (0 != ((char *)&crt)[0]); - } - if (ilen < 0) - ilen = ucs4strlen(ucs4str); - MYLOG(OPENSEARCH_DEBUG, " newlen=" FORMAT_LEN "\n", ilen); - utf8str = (char *)malloc(ilen * 4 + 1); - if (utf8str) { - int i; - UInt2 byte2code; - Int4 byte4code; - const UInt4 *wstr; - - for (i = 0, wstr = ucs4str; i < ilen; i++, wstr++) { - if (!*wstr) - break; - else if (0 == (*wstr & 0xffffff80)) /* ASCII */ - { - if (lower_identifier) - utf8str[len++] = (char)tolower(*wstr); - else - utf8str[len++] = (char)*wstr; - } else if ((*wstr & byte3check) == 0) { - byte2code = byte2_base | ((byte2_mask1 & *wstr) >> 6) - | ((byte2_mask2 & *wstr) << 8); - if (little_endian) - memcpy(utf8str + len, (char *)&byte2code, - sizeof(byte2code)); - else { - utf8str[len] = ((char *)&byte2code)[1]; - utf8str[len + 1] = ((char *)&byte2code)[0]; - } - len += sizeof(byte2code); - } else if ((*wstr & byte4check) == 0) { - byte4code = byte3_base | ((byte3_mask1 & *wstr) >> 12) - | ((byte3_mask2 & *wstr) << 2) - | ((byte3_mask3 & *wstr) << 16); - if (little_endian) - memcpy(utf8str + len, (char *)&byte4code, 3); - else { - utf8str[len] = ((char *)&byte4code)[3]; - utf8str[len + 1] = ((char *)&byte4code)[2]; - utf8str[len + 2] = ((char *)&byte4code)[1]; - } - len += 3; - } else { - byte4code = byte4_base | ((byte4_mask1 & *wstr) >> 18) - | ((byte4_mask2 & *wstr) >> 4) - | ((byte4_mask3 & *wstr) << 10) - | ((byte4_mask4 & *wstr) << 24); - /* MYLOG(OPENSEARCH_DEBUG, " %08x->%08x\n", *wstr, byte4code); */ - if (little_endian) - memcpy(utf8str + len, (char *)&byte4code, - sizeof(byte4code)); - else { - utf8str[len] = ((char *)&byte4code)[3]; - utf8str[len + 1] = ((char *)&byte4code)[2]; - utf8str[len + 2] = ((char *)&byte4code)[1]; - utf8str[len + 3] = ((char *)&byte4code)[0]; - } - len += sizeof(byte4code); - } - } - utf8str[len] = '\0'; - if (olen) - *olen = len; - } - return utf8str; -} - -/* - * Convert a string from UTF-8 encoding to UTF-32. - * - * utf8str - input string in UTF-8 - * ilen - length of input string in bytes (or minus) - * lfconv - TRUE if line feeds (LF) should be converted to CR + LF - * ucs4str - output buffer - * bufcount - size of output buffer - * errcheck - if TRUE, check for invalidly encoded input characters - * - * Returns the number of UInt4s copied to output buffer. If the output - * buffer is too small, the output is truncated. The output string is - * NULL-terminated, except when the output is truncated. 
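- *
- * Hedged usage sketch of the two-pass pattern this contract allows:
- *
- *     SQLULEN n = utf8_to_ucs4_lf(s, -1, FALSE, NULL, 0, FALSE);
- *     UInt4 *buf = malloc((n + 1) * sizeof(UInt4));
- *     utf8_to_ucs4_lf(s, -1, FALSE, buf, n + 1, FALSE);  // NUL included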
- */ -static SQLULEN utf8_to_ucs4_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, - UInt4 *ucs4str, SQLULEN bufcount, - BOOL errcheck) { - int i; - SQLULEN rtn, ocount, wcode; - const UCHAR *str; - - MYLOG(OPENSEARCH_DEBUG, " ilen=" FORMAT_LEN " bufcount=" FORMAT_ULEN "\n", ilen, bufcount); - if (!utf8str) - return 0; - - if (!bufcount) - ucs4str = NULL; - else if (!ucs4str) - bufcount = 0; - if (ilen < 0) - ilen = strlen(utf8str); - for (i = 0, ocount = 0, str = (SQLCHAR *)utf8str; i < ilen && *str;) { - if ((*str & 0x80) == 0) { - if (lfconv && OPENSEARCH_LINEFEED == *str - && (i == 0 || OPENSEARCH_CARRIAGE_RETURN != str[-1])) { - if (ocount < bufcount) - ucs4str[ocount] = OPENSEARCH_CARRIAGE_RETURN; - ocount++; - } - if (ocount < bufcount) - ucs4str[ocount] = *str; - ocount++; - i++; - str++; - } else if (0xf8 == (*str & 0xf8)) /* more than 5 byte code */ - { - ocount = (SQLULEN)-1; - goto cleanup; - } else if (0xf0 == (*str & 0xf8)) /* 4 byte code */ - { - if (errcheck) { - if (i + 4 > ilen || 0 == (str[1] & 0x80) || 0 == (str[2] & 0x80) - || 0 == (str[3] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = (((((UInt4)*str) & byte4_m1) << 18) - | ((((UInt4)str[1]) & byte4_m2) << 12) - | ((((UInt4)str[2]) & byte4_m3) << 6)) - | (((UInt4)str[3]) & byte4_m4); - ucs4str[ocount] = (unsigned int)wcode; - } - ocount++; - i += 4; - str += 4; - } else if (0xe0 == (*str & 0xf0)) /* 3 byte code */ - { - if (errcheck) { - if (i + 3 > ilen || 0 == (str[1] & 0x80) - || 0 == (str[2] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = ((((UInt4)*str) & byte3_m1) << 12) - | ((((UInt4)str[1]) & byte3_m2) << 6) - | (((UInt4)str[2]) & byte3_m3); - ucs4str[ocount] = (unsigned int)wcode; - } - ocount++; - i += 3; - str += 3; - } else if (0xc0 == (*str & 0xe0)) /* 2 byte code */ - { - if (errcheck) { - if (i + 2 > ilen || 0 == (str[1] & 0x80)) { - ocount = (SQLULEN)-1; - goto cleanup; - } - } - if (ocount < bufcount) { - wcode = ((((UInt4)*str) & byte2_m1) << 6) - | (((UInt4)str[1]) & byte2_m2); - ucs4str[ocount] = (SQLWCHAR)wcode; - } - ocount++; - i += 2; - str += 2; - } else { - ocount = (SQLULEN)-1; - goto cleanup; - } - } -cleanup: - rtn = ocount; - if (ocount == (SQLULEN)-1) { - if (!errcheck) - rtn = 0; - ocount = 0; - } - if (ocount < bufcount && ucs4str) - ucs4str[ocount] = 0; - MYLOG(OPENSEARCH_DEBUG, " ocount=" FORMAT_ULEN "\n", ocount); - return rtn; -} - -#define SURROGATE_CHECK 0xfc -#define SURROG1_BYTE 0xd8 -#define SURROG2_BYTE 0xdc - -static int ucs4_to_ucs2_lf(const unsigned int *ucs4str, SQLLEN ilen, - SQLWCHAR *ucs2str, int bufcount, BOOL lfconv) { - int outlen = 0, i; - UCHAR *ucdt; - SQLWCHAR *sqlwdt, dmy_wchar; - UCHAR *const udt = (UCHAR *)&dmy_wchar; - unsigned int uintdt; - - MYLOG(OPENSEARCH_DEBUG, " ilen=" FORMAT_LEN " bufcount=%d\n", ilen, bufcount); - if (ilen < 0) - ilen = ucs4strlen(ucs4str); - for (i = 0; i < ilen && (uintdt = ucs4str[i], uintdt); i++) { - sqlwdt = (SQLWCHAR *)&uintdt; - ucdt = (UCHAR *)&uintdt; - if (0 == sqlwdt[1]) { - if (lfconv && OPENSEARCH_LINEFEED == ucdt[0] - && (i == 0 - || OPENSEARCH_CARRIAGE_RETURN != *((UCHAR *)&ucs4str[i - 1]))) { - if (outlen < bufcount) { - udt[0] = OPENSEARCH_CARRIAGE_RETURN; - udt[1] = 0; - ucs2str[outlen] = *((SQLWCHAR *)udt); - } - outlen++; - } - if (outlen < bufcount) - ucs2str[outlen] = sqlwdt[0]; - outlen++; - continue; - } - sqlwdt[1]--; - udt[0] = ((0xfc & ucdt[1]) >> 2) | ((0x3 & ucdt[2]) << 6); - // printf("%02x", udt[0]); - 
udt[1] = SURROG1_BYTE | ((0xc & ucdt[2]) >> 2); - // printf("%02x", udt[1]); - if (outlen < bufcount) - ucs2str[outlen] = *((SQLWCHAR *)udt); - outlen++; - udt[0] = ucdt[0]; - // printf("%02x", udt[0]); - udt[1] = SURROG2_BYTE | (0x3 & ucdt[1]); - // printf("%02x\n", udt[1]); - if (outlen < bufcount) - ucs2str[outlen] = *((SQLWCHAR *)udt); - outlen++; - } - if (outlen < bufcount) - ucs2str[outlen] = 0; - - return outlen; -} -static int ucs2_to_ucs4(const SQLWCHAR *ucs2str, SQLLEN ilen, - unsigned int *ucs4str, int bufcount) { - int outlen = 0, i; - UCHAR *ucdt; - SQLWCHAR sqlwdt; - unsigned int dmy_uint; - UCHAR *const udt = (UCHAR *)&dmy_uint; - - MYLOG(OPENSEARCH_DEBUG, " ilen=" FORMAT_LEN " bufcount=%d\n", ilen, bufcount); - if (ilen < 0) - ilen = ucs2strlen(ucs2str); - udt[3] = 0; /* always */ - for (i = 0; i < ilen && (sqlwdt = ucs2str[i], sqlwdt); i++) { - ucdt = (UCHAR *)(ucs2str + i); - // printf("IN=%x\n", sqlwdt); - if ((ucdt[1] & SURROGATE_CHECK) != SURROG1_BYTE) { - // printf("SURROG1=%2x\n", ucdt[1] & SURROG1_BYTE); - if (outlen < bufcount) { - udt[0] = ucdt[0]; - udt[1] = ucdt[1]; - udt[2] = 0; - ucs4str[outlen] = *((unsigned int *)udt); - } - outlen++; - continue; - } - /* surrogate pair */ - udt[0] = ucdt[2]; - udt[1] = (ucdt[3] & 0x3) | ((ucdt[0] & 0x3f) << 2); - udt[2] = (((ucdt[0] & 0xc0) >> 6) | ((ucdt[1] & 0x3) << 2)) + 1; - // udt[3] = 0; needless - if (outlen < bufcount) - ucs4str[outlen] = *((unsigned int *)udt); - outlen++; - i++; - } - if (outlen < bufcount) - ucs4str[outlen] = 0; - - return outlen; -} -#endif /* __WCS_ISO10646__ */ - -#if defined(__WCS_ISO10646__) - -static SQLULEN utf8_to_wcs_lf(const char *utf8str, SQLLEN ilen, BOOL lfconv, - wchar_t *wcsstr, SQLULEN bufcount, - BOOL errcheck) { - switch (get_convtype()) { - case WCSTYPE_UTF16_LE: - return utf8_to_ucs2_lf(utf8str, ilen, lfconv, (SQLWCHAR *)wcsstr, - bufcount, errcheck); - case WCSTYPE_UTF32_LE: - return utf8_to_ucs4_lf(utf8str, ilen, lfconv, (UInt4 *)wcsstr, - bufcount, errcheck); - } - return (SQLULEN)~0; -} - -static char *wcs_to_utf8(const wchar_t *wcsstr, SQLLEN ilen, SQLLEN *olen, - BOOL lower_identifier) { - switch (get_convtype()) { - case WCSTYPE_UTF16_LE: - return ucs2_to_utf8((const SQLWCHAR *)wcsstr, ilen, olen, - lower_identifier); - case WCSTYPE_UTF32_LE: - return ucs4_to_utf8((const UInt4 *)wcsstr, ilen, olen, - lower_identifier); - } - - return NULL; -} - -/* - * Input strings must be NULL terminated. - * Output wide character strings would be NULL terminated. The result - * outmsg would be truncated when the buflen is small. - * - * The output NULL terminator is counted as buflen. - * if outmsg is NULL or buflen is 0, only output length is returned. - * As for return values, NULL terminators aren't counted. 
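An illustrative aside before the locale-conversion helpers (not from this changeset): ucs4_to_ucs2_lf and ucs2_to_ucs4 above implement UTF-16 surrogate pairing with raw byte manipulation. The same arithmetic as a short Java sketch; the class name SurrogatePairs is invented, and the JDK equivalents are Character.toChars and Character.toCodePoint:

public class SurrogatePairs {
    public static void main(String[] args) {
        int cp = 0x1F600;                            // a code point above the BMP
        int v = cp - 0x10000;                        // the sqlwdt[1]-- step in the C code
        char hi = (char) (0xD800 | (v >> 10));       // high surrogate
        char lo = (char) (0xDC00 | (v & 0x3FF));     // low surrogate
        System.out.printf("%04X %04X%n", (int) hi, (int) lo);      // D83D DE00
        int back = 0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00); // ucs2_to_ucs4 direction
        System.out.println(back == cp);              // true
    }
}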
- */ -static int msgtowstr(const char *inmsg, wchar_t *outmsg, int buflen) { - int outlen = -1; - - MYLOG(OPENSEARCH_DEBUG, " inmsg=%p buflen=%d\n", inmsg, buflen); -#ifdef WIN32 - if (NULL == outmsg) - buflen = 0; - if ((outlen = - MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED | MB_ERR_INVALID_CHARS, - inmsg, -1, outmsg, buflen)) - > 0) - outlen--; - else if (ERROR_INSUFFICIENT_BUFFER == GetLastError()) - outlen = - MultiByteToWideChar(CP_ACP, MB_PRECOMPOSED | MB_ERR_INVALID_CHARS, - inmsg, -1, NULL, 0) - - 1; - else - outlen = -1; -#else - if (0 == buflen) - outmsg = NULL; - outlen = mbstowcs((wchar_t *)outmsg, inmsg, buflen); -#endif /* WIN32 */ - if (outmsg && outlen >= buflen) { - outmsg[buflen - 1] = 0; - MYLOG(OPENSEARCH_DEBUG, " out=%dchars truncated to %d\n", outlen, buflen - 1); - } - MYLOG(OPENSEARCH_DEBUG, " buf=%dchars out=%dchars\n", buflen, outlen); - - return outlen; -} - -/* - * Input wide character strings must be NULL terminated. - * Output strings would be NULL terminated. The result outmsg would be - * truncated when the buflen is small. - * - * The output NULL terminator is counted as buflen. - * if outmsg is NULL or buflen is 0, only output length is returned. - * As for return values, NULL terminators aren't counted. - */ -static int wstrtomsg(const wchar_t *wstr, char *outmsg, int buflen) { - int outlen = -1; - - MYLOG(OPENSEARCH_DEBUG, " wstr=%p buflen=%d\n", wstr, buflen); -#ifdef WIN32 - if (NULL == outmsg) - buflen = 0; - if ((outlen = WideCharToMultiByte(CP_ACP, 0, wstr, -1, outmsg, buflen, NULL, - NULL)) - > 0) - outlen--; - else if (ERROR_INSUFFICIENT_BUFFER == GetLastError()) - outlen = - WideCharToMultiByte(CP_ACP, 0, wstr, -1, NULL, 0, NULL, NULL) - 1; - else - outlen = -1; -#else - if (0 == buflen) - outmsg = NULL; - outlen = wcstombs(outmsg, wstr, buflen); -#endif /* WIN32 */ - if (outmsg && outlen >= buflen) { - outmsg[buflen - 1] = 0; - MYLOG(OPENSEARCH_DEBUG, " out=%dbytes truncated to %d\n", outlen, buflen - 1); - } - MYLOG(OPENSEARCH_DEBUG, " buf=%dbytes outlen=%dbytes\n", buflen, outlen); - - return outlen; -} -#endif /* __WCS_ISO10646__ */ - -#if defined(__CHAR16_UTF_16__) - -static mbstate_t initial_state; - -static SQLLEN mbstoc16_lf(char16_t *c16dt, const char *c8dt, size_t n, - BOOL lf_conv) { - int i; - size_t brtn; - const char *cdt; - mbstate_t mbst = initial_state; - - MYLOG(OPENSEARCH_DEBUG, " c16dt=%p size=" FORMAT_SIZE_T "\n", c16dt, n); - for (i = 0, cdt = c8dt; i < n || (!c16dt); i++) { - if (lf_conv && OPENSEARCH_LINEFEED == *cdt && i > 0 - && OPENSEARCH_CARRIAGE_RETURN != cdt[-1]) { - if (c16dt) - c16dt[i] = OPENSEARCH_CARRIAGE_RETURN; - i++; - } - brtn = mbrtoc16(c16dt ? 
c16dt + i : NULL, cdt, 4, &mbst); - if (0 == brtn) - break; - if (brtn == (size_t)-1 || brtn == (size_t)-2) - return -1; - if (brtn == (size_t)-3) - continue; - cdt += brtn; - } - if (c16dt && i >= n) - c16dt[n - 1] = 0; - - return i; -} - -static SQLLEN c16tombs(char *c8dt, const char16_t *c16dt, size_t n) { - int i; - SQLLEN result = 0; - size_t brtn; - char *cdt, c4byte[4]; - mbstate_t mbst = initial_state; - - MYLOG(OPENSEARCH_DEBUG, " c8dt=%p size=" FORMAT_SIZE_T "u\n", c8dt, n); - if (!c8dt) - n = 0; - for (i = 0, cdt = c8dt; c16dt[i] && (result < n || (!cdt)); i++) { - if (NULL != cdt && result + 4 < n) - brtn = c16rtomb(cdt, c16dt[i], &mbst); - else { - brtn = c16rtomb(c4byte, c16dt[i], &mbst); - if (brtn < 5) { - SQLLEN result_n = result + brtn; - - if (result_n < n) - memcpy(cdt, c4byte, brtn); - else { - if (cdt && n > 0) { - c8dt[result] = '\0'; /* truncate */ - return result_n; - } - } - } - } - /* - printf("c16dt=%04X brtn=%lu result=%ld cdt=%02X%02X%02X%02X\n", - c16dt[i], brtn, result, (UCHAR) cdt[0], (UCHAR) cdt[1], (UCHAR) - cdt[2], (UCHAR) cdt[3]); - */ - if (brtn == (size_t)-1) { - if (n > 0) - c8dt[n - 1] = '\0'; - return -1; - } - if (cdt) - cdt += brtn; - result += brtn; - } - if (cdt) - *cdt = '\0'; - - return result; -} -#endif /* __CHAR16_UTF_16__ */ - - -// -// SQLBindParameter hybrid case -// SQLWCHAR(UTF-16) => the current locale -// -SQLLEN bindpara_wchar_to_msg(const SQLWCHAR *utf16, char **wcsbuf, - SQLLEN used) { - SQLLEN l = (-2); - char *ldt = NULL; - SQLWCHAR *utf16_nts, *alloc_nts = NULL, ntsbuf[128]; - int count; - - if (SQL_NTS == used) { - count = (int)ucs2strlen(utf16); - utf16_nts = (SQLWCHAR *)utf16; - } else if (used < 0) - return -1; - else { - count = (int)(used / WCLEN); - if (used + WCLEN <= sizeof(ntsbuf)) - utf16_nts = ntsbuf; - else { - if (NULL == (alloc_nts = (SQLWCHAR *)malloc(used + WCLEN))) - return l; - utf16_nts = alloc_nts; - } - memcpy(utf16_nts, utf16, used); - utf16_nts[count] = 0; - } - - get_convtype(); - MYLOG(OPENSEARCH_DEBUG, "\n"); -#if defined(__WCS_ISO10646__) - if (use_wcs) { -#pragma warning(push) -#pragma warning(disable : 4127) - if (sizeof(SQLWCHAR) == sizeof(wchar_t)) -#pragma warning(pop) - { - ldt = (char *)malloc(2 * count + 1); - l = wstrtomsg((wchar_t *)utf16_nts, ldt, 2 * count + 1); - } else { - unsigned int *utf32 = - (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); - - l = ucs2_to_ucs4(utf16_nts, -1, utf32, count + 1); - if ((l = wstrtomsg((wchar_t *)utf32, NULL, 0)) >= 0) { - ldt = (char *)malloc(l + 1); - l = wstrtomsg((wchar_t *)utf32, ldt, (int)l + 1); - } - free(utf32); - } - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (use_c16) { - ldt = (char *)malloc(4 * count + 1); - l = c16tombs(ldt, (const char16_t *)utf16_nts, 4 * count + 1); - } -#endif /* __CHAR16_UTF_16__ */ - if (l < 0 && NULL != ldt) - free(ldt); - else - *wcsbuf = ldt; - - if (NULL != alloc_nts) - free(alloc_nts); - return l; -} - -size_t convert_linefeeds(const char *s, char *dst, size_t max, BOOL convlf, - BOOL *changed); -// -// SQLBindCol hybrid case -// the current locale => SQLWCHAR(UTF-16) -// -SQLLEN bindcol_hybrid_estimate(const char *ldt, BOOL lf_conv, char **wcsbuf) { - UNUSED(ldt, wcsbuf); - SQLLEN l = (-2); - - get_convtype(); - MYLOG(OPENSEARCH_DEBUG, " lf_conv=%d\n", lf_conv); -#if defined(__WCS_ISO10646__) - if (use_wcs) { - unsigned int *utf32 = NULL; - -#pragma warning(push) -#pragma warning(disable : 4127) - if (sizeof(SQLWCHAR) == sizeof(wchar_t)) -#pragma warning(pop) - { - l = 
msgtowstr(ldt, (wchar_t *)NULL, 0); - if (l >= 0 && lf_conv) { - BOOL changed; - size_t len; - - len = convert_linefeeds(ldt, NULL, 0, TRUE, &changed); - if (changed) { - l += (len - strlen(ldt)); - *wcsbuf = (char *)malloc(len + 1); - convert_linefeeds(ldt, *wcsbuf, len + 1, TRUE, NULL); - } - } - } else { - int count = (int)strlen(ldt); - - utf32 = (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); - if ((l = msgtowstr(ldt, (wchar_t *)utf32, count + 1)) >= 0) { - l = ucs4_to_ucs2_lf(utf32, -1, NULL, 0, lf_conv); - *wcsbuf = (char *)utf32; - } - } - if (l < 0 && NULL != utf32) - free(utf32); - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (use_c16) - l = mbstoc16_lf((char16_t *)NULL, ldt, 0, lf_conv); -#endif /* __CHAR16_UTF_16__ */ - - return l; -} - -SQLLEN bindcol_hybrid_exec(SQLWCHAR *utf16, const char *ldt, size_t n, - BOOL lf_conv, char **wcsbuf) { - UNUSED(ldt, utf16, wcsbuf); - SQLLEN l = (-2); - - get_convtype(); - MYLOG(OPENSEARCH_DEBUG, " size=" FORMAT_SIZE_T " lf_conv=%d\n", n, lf_conv); -#if defined(__WCS_ISO10646__) - if (use_wcs) { - unsigned int *utf32 = NULL; - BOOL midbuf = (wcsbuf && *wcsbuf); - -#pragma warning(push) -#pragma warning(disable : 4127) - if (sizeof(SQLWCHAR) == sizeof(wchar_t)) -#pragma warning(pop) - { - if (midbuf) - l = msgtowstr(*wcsbuf, (wchar_t *)utf16, (int)n); - else - l = msgtowstr(ldt, (wchar_t *)utf16, (int)n); - } else if (midbuf) { - utf32 = (unsigned int *)*wcsbuf; - l = ucs4_to_ucs2_lf(utf32, -1, utf16, (int)n, lf_conv); - } else { - int count = (int)strlen(ldt); - - utf32 = (unsigned int *)malloc((count + 1) * sizeof(unsigned int)); - if ((l = msgtowstr(ldt, (wchar_t *)utf32, count + 1)) >= 0) { - l = ucs4_to_ucs2_lf(utf32, -1, utf16, (int)n, lf_conv); - } - free(utf32); - } - if (midbuf) { - free(*wcsbuf); - *wcsbuf = NULL; - } - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (use_c16) { - l = mbstoc16_lf((char16_t *)utf16, ldt, n, lf_conv); - } -#endif /* __CHAR16_UTF_16__ */ - - return l; -} - -SQLLEN locale_to_sqlwchar(SQLWCHAR *utf16, const char *ldt, size_t n, - BOOL lf_conv) { - return bindcol_hybrid_exec(utf16, ldt, n, lf_conv, NULL); -} - -// -// SQLBindCol localize case -// UTF-8 => the current locale -// -SQLLEN bindcol_localize_estimate(const char *utf8dt, BOOL lf_conv, - char **wcsbuf) { - UNUSED(utf8dt); - SQLLEN l = (-2); - char *convalc = NULL; - - get_convtype(); - MYLOG(OPENSEARCH_DEBUG, " lf_conv=%d\n", lf_conv); -#if defined(__WCS_ISO10646__) - if (use_wcs) { - wchar_t *wcsalc = NULL; - - l = utf8_to_wcs_lf(utf8dt, -1, lf_conv, NULL, 0, FALSE); - wcsalc = (wchar_t *)malloc(sizeof(wchar_t) * (l + 1)); - convalc = (char *)wcsalc; - l = utf8_to_wcs_lf(utf8dt, -1, lf_conv, wcsalc, l + 1, FALSE); - l = wstrtomsg(wcsalc, NULL, 0); - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (use_c16) { - SQLWCHAR *wcsalc = NULL; - - l = utf8_to_ucs2_lf(utf8dt, -1, lf_conv, (SQLWCHAR *)NULL, 0, FALSE); - wcsalc = (SQLWCHAR *)malloc(sizeof(SQLWCHAR) * (l + 1)); - convalc = (char *)wcsalc; - l = utf8_to_ucs2_lf(utf8dt, -1, lf_conv, wcsalc, l + 1, FALSE); - l = c16tombs(NULL, (char16_t *)wcsalc, 0); - } -#endif /* __CHAR16_UTF_16__ */ - if (l < 0 && NULL != convalc) - free(convalc); - else if (NULL != convalc) - *wcsbuf = (char *)convalc; - - MYLOG(OPENSEARCH_DEBUG, " return=" FORMAT_LEN "\n", l); - return l; -} - -SQLLEN bindcol_localize_exec(char *ldt, size_t n, BOOL lf_conv, char **wcsbuf) { - UNUSED(ldt, lf_conv); - SQLLEN l = (-2); - - get_convtype(); - MYLOG(OPENSEARCH_DEBUG, 
" size=" FORMAT_SIZE_T "\n", n); -#if defined(__WCS_ISO10646__) - if (use_wcs) { - wchar_t *wcsalc = (wchar_t *)*wcsbuf; - - l = wstrtomsg(wcsalc, ldt, (int)n); - } -#endif /* __WCS_ISO10646__ */ -#ifdef __CHAR16_UTF_16__ - if (use_c16) { - char16_t *wcsalc = (char16_t *)*wcsbuf; - - l = c16tombs(ldt, (char16_t *)wcsalc, n); - } -#endif /* __CHAR16_UTF_16__ */ - free(*wcsbuf); - *wcsbuf = NULL; - - MYLOG(OPENSEARCH_DEBUG, " return=" FORMAT_LEN "\n", l); - return l; -} - -#endif /* UNICODE_SUPPORT */ diff --git a/sql-odbc/src/vcpkg.json b/sql-odbc/src/vcpkg.json deleted file mode 100644 index a5903fbb85..0000000000 --- a/sql-odbc/src/vcpkg.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "sql-odbc", - "version-string": "1.1.0.1", - "dependencies": ["aws-sdk-cpp", "rapidjson", "zlib", "gtest", "curl"], - "builtin-baseline": "6ca56aeb457f033d344a7106cb3f9f1abf8f4e98", - "overrides": [ - { "name": "aws-sdk-cpp", "version": "1.8.83#2" }, - { "name": "rapidjson", "version": "2022-06-28#1" }, - { "name": "zlib", "version": "1.2.12#1" }, - { "name": "gtest", "version": "1.11.0" } - ] -} diff --git a/sql/build.gradle b/sql/build.gradle index 13a7ceba53..d7509db099 100644 --- a/sql/build.gradle +++ b/sql/build.gradle @@ -27,6 +27,7 @@ plugins { id "io.freefair.lombok" id 'jacoco' id 'antlr' + id 'com.diffplug.spotless' version '6.22.0' } generateGrammarSource { @@ -45,18 +46,16 @@ dependencies { antlr "org.antlr:antlr4:4.7.1" implementation "org.antlr:antlr4-runtime:4.7.1" - implementation group: 'com.google.guava', name: 'guava', version: '31.0.1-jre' - implementation group: 'org.json', name: 'json', version:'20180813' - implementation group: 'org.springframework', name: 'spring-context', version: "${spring_version}" - implementation group: 'org.springframework', name: 'spring-beans', version: "${spring_version}" + implementation group: 'com.google.guava', name: 'guava', version: "${guava_version}" + implementation group: 'org.json', name: 'json', version:'20231013' implementation project(':common') implementation project(':core') api project(':protocol') - testImplementation('org.junit.jupiter:junit-jupiter:5.6.2') + testImplementation('org.junit.jupiter:junit-jupiter:5.9.3') testImplementation group: 'org.hamcrest', name: 'hamcrest-library', version: '2.1' - testImplementation group: 'org.mockito', name: 'mockito-core', version: '3.12.4' - testImplementation group: 'org.mockito', name: 'mockito-junit-jupiter', version: '3.12.4' + testImplementation group: 'org.mockito', name: 'mockito-core', version: '5.7.0' + testImplementation group: 'org.mockito', name: 'mockito-junit-jupiter', version: '5.7.0' testImplementation(testFixtures(project(":core"))) } @@ -70,8 +69,8 @@ test { jacocoTestReport { reports { - html.enabled true - xml.enabled true + html.required = true + xml.required = true } afterEvaluate { classDirectories.setFrom(files(classDirectories.files.collect { diff --git a/sql/src/main/antlr/OpenSearchSQLIdentifierParser.g4 b/sql/src/main/antlr/OpenSearchSQLIdentifierParser.g4 deleted file mode 100644 index cd65e5066c..0000000000 --- a/sql/src/main/antlr/OpenSearchSQLIdentifierParser.g4 +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -/* -MySQL (Positive Technologies) grammar -The MIT License (MIT). -Copyright (c) 2015-2017, Ivan Kochurkin (kvanttt@gmail.com), Positive Technologies. 
-Copyright (c) 2017, Ivan Khudyashev (IHudyashov@ptsecurity.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -parser grammar OpenSearchSQLIdentifierParser; - -options { tokenVocab=OpenSearchSQLLexer; } - - -// Identifiers - -tableName - : qualifiedName - ; - -columnName - : qualifiedName - ; - -alias - : ident - ; - -qualifiedName - : ident (DOT ident)* - ; - -ident - : DOT? ID - | BACKTICK_QUOTE_ID - | keywordsCanBeId - ; - -keywordsCanBeId - : FULL - | FIELD | D | T | TS // OD SQL and ODBC special - | COUNT | SUM | AVG | MAX | MIN - | TIMESTAMP | DATE | TIME | DAYOFWEEK - | FIRST | LAST - | CURRENT_DATE | CURRENT_TIME | CURRENT_TIMESTAMP | LOCALTIME | LOCALTIMESTAMP | UTC_TIMESTAMP | UTC_DATE | UTC_TIME - | CURDATE | CURTIME | NOW - ; diff --git a/sql/src/main/antlr/OpenSearchSQLLexer.g4 b/sql/src/main/antlr/OpenSearchSQLLexer.g4 index a359f48be3..ba7c5be85a 100644 --- a/sql/src/main/antlr/OpenSearchSQLLexer.g4 +++ b/sql/src/main/antlr/OpenSearchSQLLexer.g4 @@ -134,7 +134,6 @@ STDDEV_SAMP: 'STDDEV_SAMP'; SUBSTRING: 'SUBSTRING'; TRIM: 'TRIM'; - // Keywords, but can be ID // Common Keywords, but can be ID @@ -176,6 +175,7 @@ TABLES: 'TABLES'; ABS: 'ABS'; ACOS: 'ACOS'; ADD: 'ADD'; +ADDTIME: 'ADDTIME'; ASCII: 'ASCII'; ASIN: 'ASIN'; ATAN: 'ATAN'; @@ -200,20 +200,25 @@ DATE: 'DATE'; DATE_ADD: 'DATE_ADD'; DATE_FORMAT: 'DATE_FORMAT'; DATE_SUB: 'DATE_SUB'; +DATEDIFF: 'DATEDIFF'; DAYNAME: 'DAYNAME'; DAYOFMONTH: 'DAYOFMONTH'; DAYOFWEEK: 'DAYOFWEEK'; DAYOFYEAR: 'DAYOFYEAR'; DEGREES: 'DEGREES'; +DIVIDE: 'DIVIDE'; E: 'E'; EXP: 'EXP'; EXPM1: 'EXPM1'; +EXTRACT: 'EXTRACT'; FLOOR: 'FLOOR'; FROM_DAYS: 'FROM_DAYS'; FROM_UNIXTIME: 'FROM_UNIXTIME'; +GET_FORMAT: 'GET_FORMAT'; IF: 'IF'; IFNULL: 'IFNULL'; ISNULL: 'ISNULL'; +LAST_DAY: 'LAST_DAY'; LENGTH: 'LENGTH'; LN: 'LN'; LOCALTIME: 'LOCALTIME'; @@ -244,23 +249,29 @@ RINT: 'RINT'; ROUND: 'ROUND'; RTRIM: 'RTRIM'; REVERSE: 'REVERSE'; +SEC_TO_TIME: 'SEC_TO_TIME'; SIGN: 'SIGN'; SIGNUM: 'SIGNUM'; SIN: 'SIN'; SINH: 'SINH'; SQRT: 'SQRT'; +STR_TO_DATE: 'STR_TO_DATE'; SUBDATE: 'SUBDATE'; +SUBTIME: 'SUBTIME'; SUBTRACT: 'SUBTRACT'; SYSDATE: 'SYSDATE'; TAN: 'TAN'; TIME: 'TIME'; +TIMEDIFF: 'TIMEDIFF'; +TIME_FORMAT: 'TIME_FORMAT'; TIME_TO_SEC: 'TIME_TO_SEC'; TIMESTAMP: 'TIMESTAMP'; TRUNCATE: 'TRUNCATE'; TO_DAYS: 'TO_DAYS'; -UTC_DATE: 'UTC_DATE'; +TO_SECONDS: 'TO_SECONDS'; UNIX_TIMESTAMP: 'UNIX_TIMESTAMP'; UPPER: 'UPPER'; +UTC_DATE: 'UTC_DATE'; UTC_TIME: 'UTC_TIME'; UTC_TIMESTAMP: 'UTC_TIMESTAMP'; @@ -311,18 +322,26 @@ MULTI_MATCH: 'MULTI_MATCH'; MULTIMATCHQUERY: 'MULTIMATCHQUERY'; NESTED: 
'NESTED'; PERCENTILES: 'PERCENTILES'; +PERCENTILE: 'PERCENTILE'; +PERCENTILE_APPROX: 'PERCENTILE_APPROX'; REGEXP_QUERY: 'REGEXP_QUERY'; REVERSE_NESTED: 'REVERSE_NESTED'; QUERY: 'QUERY'; RANGE: 'RANGE'; SCORE: 'SCORE'; +SCOREQUERY: 'SCOREQUERY'; +SCORE_QUERY: 'SCORE_QUERY'; SECOND_OF_MINUTE: 'SECOND_OF_MINUTE'; STATS: 'STATS'; TERM: 'TERM'; TERMS: 'TERMS'; +TIMESTAMPADD: 'TIMESTAMPADD'; +TIMESTAMPDIFF: 'TIMESTAMPDIFF'; TOPHITS: 'TOPHITS'; TYPEOF: 'TYPEOF'; WEEK_OF_YEAR: 'WEEK_OF_YEAR'; +WEEKOFYEAR: 'WEEKOFYEAR'; +WEEKDAY: 'WEEKDAY'; WILDCARDQUERY: 'WILDCARDQUERY'; WILDCARD_QUERY: 'WILDCARD_QUERY'; @@ -332,6 +351,7 @@ STRCMP: 'STRCMP'; // DATE AND TIME FUNCTIONS ADDDATE: 'ADDDATE'; +YEARWEEK: 'YEARWEEK'; // RELEVANCE FUNCTIONS AND PARAMETERS ALLOW_LEADING_WILDCARD: 'ALLOW_LEADING_WILDCARD'; @@ -379,7 +399,7 @@ MATCH_BOOL_PREFIX: 'MATCH_BOOL_PREFIX'; // Operators. Arithmetics STAR: '*'; -DIVIDE: '/'; +SLASH: '/'; MODULE: '%'; PLUS: '+'; MINUS: '-'; @@ -448,7 +468,6 @@ BACKTICK_QUOTE_ID: BQUOTA_STRING; // Fragments for Literal primitives fragment EXPONENT_NUM_PART: 'E' [-+]? DEC_DIGIT+; -fragment ID_LITERAL: [@*A-Z]+?[*A-Z_\-0-9]*; fragment DQUOTA_STRING: '"' ( '\\'. | '""' | ~('"'| '\\') )* '"'; fragment SQUOTA_STRING: '\'' ('\\'. | '\'\'' | ~('\'' | '\\'))* '\''; fragment BQUOTA_STRING: '`' ( '\\'. | '``' | ~('`'|'\\'))* '`'; @@ -456,6 +475,10 @@ fragment HEX_DIGIT: [0-9A-F]; fragment DEC_DIGIT: [0-9]; fragment BIT_STRING_L: 'B' '\'' [01]+ '\''; +// Identifiers cannot start with a single '_' since this an OpenSearch reserved +// metadata field. Two underscores (or more) is acceptable, such as '__field'. +fragment ID_LITERAL: ([@*A-Z_])+?[*A-Z_\-0-9]*; + // Last tokens must generate Errors ERROR_RECOGNITION: . -> channel(ERRORCHANNEL); diff --git a/sql/src/main/antlr/OpenSearchSQLParser.g4 b/sql/src/main/antlr/OpenSearchSQLParser.g4 index 58d4be1813..4f67cc82c0 100644 --- a/sql/src/main/antlr/OpenSearchSQLParser.g4 +++ b/sql/src/main/antlr/OpenSearchSQLParser.g4 @@ -30,532 +30,820 @@ THE SOFTWARE. parser grammar OpenSearchSQLParser; -import OpenSearchSQLIdentifierParser; - -options { tokenVocab=OpenSearchSQLLexer; } - +options { tokenVocab = OpenSearchSQLLexer; } // Top Level Description // Root rule + root - : sqlStatement? SEMI? EOF - ; + : sqlStatement? SEMI? EOF + ; -// Only SELECT +// Only SELECT sqlStatement - : dmlStatement | adminStatement - ; + : dmlStatement + | adminStatement + ; dmlStatement - : selectStatement - ; - + : selectStatement + ; // Data Manipulation Language -// Primary DML Statements - +// Primary DML Statements selectStatement - : querySpecification #simpleSelect - ; + : querySpecification # simpleSelect + ; adminStatement - : showStatement - | describeStatement - ; + : showStatement + | describeStatement + ; showStatement - : SHOW TABLES tableFilter - ; + : SHOW TABLES tableFilter + ; describeStatement - : DESCRIBE TABLES tableFilter columnFilter? - ; + : DESCRIBE TABLES tableFilter columnFilter? + ; columnFilter - : COLUMNS LIKE showDescribePattern - ; + : COLUMNS LIKE showDescribePattern + ; tableFilter - : LIKE showDescribePattern - ; + : LIKE showDescribePattern + ; showDescribePattern - : oldID=compatibleID | stringLiteral - ; + : oldID=compatibleID | stringLiteral + ; compatibleID - : (MODULE | ID)+? - ; + : (MODULE | ID)+? + ; -// Select Statement's Details +// Select Statement's Details querySpecification - : selectClause - fromClause? - limitClause? - ; + : selectClause fromClause? limitClause? + ; selectClause - : SELECT selectSpec? 
selectElements - ; + : SELECT selectSpec? selectElements + ; selectSpec - : (ALL | DISTINCT) - ; + : (ALL | DISTINCT) + ; selectElements - : (star=STAR | selectElement) (COMMA selectElement)* - ; + : (star = STAR | selectElement) (COMMA selectElement)* + ; selectElement - : expression (AS? alias)? - ; + : expression (AS? alias)? + ; fromClause - : FROM relation - (whereClause)? - (groupByClause)? - (havingClause)? - (orderByClause)? // Place it under FROM for now but actually not necessary ex. A UNION B ORDER BY - ; + : FROM relation (whereClause)? (groupByClause)? (havingClause)? (orderByClause)? // Place it under FROM for now but actually not necessary ex. A UNION B ORDER BY + + ; relation - : tableName (AS? alias)? #tableAsRelation - | LR_BRACKET subquery=querySpecification RR_BRACKET AS? alias #subqueryAsRelation - ; + : tableName (AS? alias)? # tableAsRelation + | LR_BRACKET subquery = querySpecification RR_BRACKET AS? alias # subqueryAsRelation + ; whereClause - : WHERE expression - ; + : WHERE expression + ; groupByClause - : GROUP BY groupByElements - ; + : GROUP BY groupByElements + ; groupByElements - : groupByElement (COMMA groupByElement)* - ; + : groupByElement (COMMA groupByElement)* + ; groupByElement - : expression - ; + : expression + ; havingClause - : HAVING expression - ; + : HAVING expression + ; orderByClause - : ORDER BY orderByElement (COMMA orderByElement)* - ; + : ORDER BY orderByElement (COMMA orderByElement)* + ; orderByElement - : expression order=(ASC | DESC)? (NULLS (FIRST | LAST))? - ; + : expression order = (ASC | DESC)? (NULLS (FIRST | LAST))? + ; limitClause - : LIMIT (offset=decimalLiteral COMMA)? limit=decimalLiteral - | LIMIT limit=decimalLiteral OFFSET offset=decimalLiteral - ; + : LIMIT (offset = decimalLiteral COMMA)? limit = decimalLiteral + | LIMIT limit = decimalLiteral OFFSET offset = decimalLiteral + ; // Window Function's Details windowFunctionClause - : function=windowFunction overClause - ; + : function = windowFunction overClause + ; windowFunction - : functionName=(ROW_NUMBER | RANK | DENSE_RANK) - LR_BRACKET functionArgs? RR_BRACKET #scalarWindowFunction - | aggregateFunction #aggregateWindowFunction - ; + : functionName = (ROW_NUMBER | RANK | DENSE_RANK) LR_BRACKET functionArgs? RR_BRACKET # scalarWindowFunction + | aggregateFunction # aggregateWindowFunction + ; overClause - : OVER LR_BRACKET partitionByClause? orderByClause? RR_BRACKET - ; + : OVER LR_BRACKET partitionByClause? orderByClause? RR_BRACKET + ; partitionByClause - : PARTITION BY expression (COMMA expression)* - ; - - -// Literals + : PARTITION BY expression (COMMA expression)* + ; +// Literals constant - : stringLiteral #string - | sign? decimalLiteral #signedDecimal - | sign? realLiteral #signedReal - | booleanLiteral #boolean - | datetimeLiteral #datetime - | intervalLiteral #interval - | nullLiteral #null - // Doesn't support the following types for now - //| BIT_STRING - //| NOT? nullLiteral=(NULL_LITERAL | NULL_SPEC_LITERAL) - //| LEFT_BRACE dateType=(D | T | TS | DATE | TIME | TIMESTAMP) stringLiteral RIGHT_BRACE - ; + : stringLiteral # string + | sign? decimalLiteral # signedDecimal + | sign? realLiteral # signedReal + | booleanLiteral # boolean + | datetimeLiteral # datetime + | intervalLiteral # interval + | nullLiteral # null + // Doesn't support the following types for now + //| BIT_STRING + //| NOT? 
nullLiteral=(NULL_LITERAL | NULL_SPEC_LITERAL) + ; decimalLiteral - : DECIMAL_LITERAL | ZERO_DECIMAL | ONE_DECIMAL | TWO_DECIMAL - ; + : DECIMAL_LITERAL + | ZERO_DECIMAL + | ONE_DECIMAL + | TWO_DECIMAL + ; + +numericLiteral + : decimalLiteral + | realLiteral + ; stringLiteral - : STRING_LITERAL - | DOUBLE_QUOTE_ID - ; + : STRING_LITERAL + | DOUBLE_QUOTE_ID + ; booleanLiteral - : TRUE | FALSE - ; + : TRUE + | FALSE + ; realLiteral - : REAL_LITERAL - ; + : REAL_LITERAL + ; sign - : PLUS | MINUS - ; + : PLUS + | MINUS + ; nullLiteral - : NULL_LITERAL - ; + : NULL_LITERAL + ; // Date and Time Literal, follow ANSI 92 datetimeLiteral - : dateLiteral - | timeLiteral - | timestampLiteral - ; + : dateLiteral + | timeLiteral + | timestampLiteral + ; dateLiteral - : DATE date=stringLiteral - ; + : DATE date = stringLiteral + | LEFT_BRACE (DATE | D) date = stringLiteral RIGHT_BRACE + ; timeLiteral - : TIME time=stringLiteral - ; + : TIME time = stringLiteral + | LEFT_BRACE (TIME | T) time = stringLiteral RIGHT_BRACE + ; timestampLiteral - : TIMESTAMP timestamp=stringLiteral - ; + : TIMESTAMP timestamp = stringLiteral + | LEFT_BRACE (TIMESTAMP | TS) timestamp = stringLiteral RIGHT_BRACE + ; // Actually, these constants are shortcuts to the corresponding functions datetimeConstantLiteral - : CURRENT_DATE - | CURRENT_TIME - | CURRENT_TIMESTAMP - | DAY_OF_YEAR - | LOCALTIME - | LOCALTIMESTAMP - | MONTH_OF_YEAR - | UTC_TIMESTAMP - | UTC_DATE - | UTC_TIME - | WEEK_OF_YEAR - ; + : CURRENT_DATE + | CURRENT_TIME + | CURRENT_TIMESTAMP + | LOCALTIME + | LOCALTIMESTAMP + | UTC_TIMESTAMP + | UTC_DATE + | UTC_TIME + ; intervalLiteral - : INTERVAL expression intervalUnit - ; + : INTERVAL expression intervalUnit + ; intervalUnit - : MICROSECOND | SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR | SECOND_MICROSECOND - | MINUTE_MICROSECOND | MINUTE_SECOND | HOUR_MICROSECOND | HOUR_SECOND | HOUR_MINUTE | DAY_MICROSECOND - | DAY_SECOND | DAY_MINUTE | DAY_HOUR | YEAR_MONTH - ; + : MICROSECOND + | SECOND + | MINUTE + | HOUR + | DAY + | WEEK + | MONTH + | QUARTER + | YEAR + | SECOND_MICROSECOND + | MINUTE_MICROSECOND + | MINUTE_SECOND + | HOUR_MICROSECOND + | HOUR_SECOND + | HOUR_MINUTE + | DAY_MICROSECOND + | DAY_SECOND + | DAY_MINUTE + | DAY_HOUR + | YEAR_MONTH + ; // predicates // Simplified approach for expression expression - : NOT expression #notExpression - | left=expression AND right=expression #andExpression - | left=expression OR right=expression #orExpression - | predicate #predicateExpression - ; + : NOT expression # notExpression + | left = expression AND right = expression # andExpression + | left = expression OR right = expression # orExpression + | predicate # predicateExpression + ; predicate - : expressionAtom #expressionAtomPredicate - | left=predicate comparisonOperator right=predicate #binaryComparisonPredicate - | predicate IS nullNotnull #isNullPredicate - | left=predicate NOT? LIKE right=predicate #likePredicate - | left=predicate REGEXP right=predicate #regexpPredicate - | predicate NOT? IN '(' expressions ')' #inPredicate - ; + : expressionAtom # expressionAtomPredicate + | left = predicate comparisonOperator right = predicate # binaryComparisonPredicate + | predicate IS nullNotnull # isNullPredicate + | predicate NOT? BETWEEN predicate AND predicate # betweenPredicate + | left = predicate NOT? LIKE right = predicate # likePredicate + | left = predicate REGEXP right = predicate # regexpPredicate + | predicate NOT? 
IN '(' expressions ')' # inPredicate + ; expressions - : expression (',' expression)* - ; + : expression (',' expression)* + ; expressionAtom - : constant #constantExpressionAtom - | columnName #fullColumnNameExpressionAtom - | functionCall #functionCallExpressionAtom - | LR_BRACKET expression RR_BRACKET #nestedExpressionAtom - | left=expressionAtom mathOperator right=expressionAtom #mathExpressionAtom - ; - -mathOperator - : PLUS | MINUS | STAR | DIVIDE | MODULE - ; + : constant # constantExpressionAtom + | columnName # fullColumnNameExpressionAtom + | functionCall # functionCallExpressionAtom + | LR_BRACKET expression RR_BRACKET # nestedExpressionAtom + | left = expressionAtom mathOperator = (STAR | SLASH | MODULE) right = expressionAtom # mathExpressionAtom + | left = expressionAtom mathOperator = (PLUS | MINUS) right = expressionAtom # mathExpressionAtom + ; comparisonOperator - : '=' | '>' | '<' | '<' '=' | '>' '=' - | '<' '>' | '!' '=' - ; + : '=' + | '>' + | '<' + | '<' '=' + | '>' '=' + | '<' '>' + | '!' '=' + ; nullNotnull - : NOT? NULL_LITERAL - ; + : NOT? NULL_LITERAL + ; functionCall - : scalarFunctionName LR_BRACKET functionArgs RR_BRACKET #scalarFunctionCall - | specificFunction #specificFunctionCall - | windowFunctionClause #windowFunctionCall - | aggregateFunction #aggregateFunctionCall - | aggregateFunction (orderByClause)? filterClause #filteredAggregationFunctionCall - | relevanceFunction #relevanceFunctionCall - | highlightFunction #highlightFunctionCall - | positionFunction #positionFunctionCall - ; - + : nestedFunctionName LR_BRACKET allTupleFields RR_BRACKET # nestedAllFunctionCall + | scalarFunctionName LR_BRACKET functionArgs RR_BRACKET # scalarFunctionCall + | specificFunction # specificFunctionCall + | windowFunctionClause # windowFunctionCall + | aggregateFunction # aggregateFunctionCall + | aggregateFunction (orderByClause)? 
filterClause # filteredAggregationFunctionCall + | scoreRelevanceFunction # scoreRelevanceFunctionCall + | relevanceFunction # relevanceFunctionCall + | highlightFunction # highlightFunctionCall + | positionFunction # positionFunctionCall + | extractFunction # extractFunctionCall + | getFormatFunction # getFormatFunctionCall + | timestampFunction # timestampFunctionCall + ; + +timestampFunction + : timestampFunctionName LR_BRACKET simpleDateTimePart COMMA firstArg = functionArg COMMA secondArg = functionArg RR_BRACKET + ; + +timestampFunctionName + : TIMESTAMPADD + | TIMESTAMPDIFF + ; + +getFormatFunction + : GET_FORMAT LR_BRACKET getFormatType COMMA functionArg RR_BRACKET + ; + +getFormatType + : DATE + | DATETIME + | TIME + | TIMESTAMP + ; + +extractFunction + : EXTRACT LR_BRACKET datetimePart FROM functionArg RR_BRACKET + ; + +simpleDateTimePart + : MICROSECOND + | SECOND + | MINUTE + | HOUR + | DAY + | WEEK + | MONTH + | QUARTER + | YEAR + ; + +complexDateTimePart + : SECOND_MICROSECOND + | MINUTE_MICROSECOND + | MINUTE_SECOND + | HOUR_MICROSECOND + | HOUR_SECOND + | HOUR_MINUTE + | DAY_MICROSECOND + | DAY_SECOND + | DAY_MINUTE + | DAY_HOUR + | YEAR_MONTH + ; + +datetimePart + : simpleDateTimePart + | complexDateTimePart + ; highlightFunction - : HIGHLIGHT LR_BRACKET relevanceField (COMMA highlightArg)* RR_BRACKET - ; + : HIGHLIGHT LR_BRACKET relevanceField (COMMA highlightArg)* RR_BRACKET + ; positionFunction - : POSITION LR_BRACKET functionArg IN functionArg RR_BRACKET - ; + : POSITION LR_BRACKET functionArg IN functionArg RR_BRACKET + ; + +matchQueryAltSyntaxFunction + : field = relevanceField EQUAL_SYMBOL MATCH_QUERY LR_BRACKET query = relevanceQuery RR_BRACKET + ; scalarFunctionName - : mathematicalFunctionName - | dateTimeFunctionName - | textFunctionName - | flowControlFunctionName - | systemFunctionName - ; + : mathematicalFunctionName + | dateTimeFunctionName + | textFunctionName + | flowControlFunctionName + | systemFunctionName + | nestedFunctionName + ; specificFunction - : CASE expression caseFuncAlternative+ - (ELSE elseArg=functionArg)? END #caseFunctionCall - | CASE caseFuncAlternative+ - (ELSE elseArg=functionArg)? END #caseFunctionCall - | CAST '(' expression AS convertedDataType ')' #dataTypeFunctionCall - ; + : CASE expression caseFuncAlternative+ (ELSE elseArg = functionArg)? END # caseFunctionCall + | CASE caseFuncAlternative+ (ELSE elseArg = functionArg)? END # caseFunctionCall + | CAST '(' expression AS convertedDataType ')' # dataTypeFunctionCall + ; relevanceFunction - : noFieldRelevanceFunction | singleFieldRelevanceFunction | multiFieldRelevanceFunction - ; + : noFieldRelevanceFunction + | singleFieldRelevanceFunction + | multiFieldRelevanceFunction + | altSingleFieldRelevanceFunction + | altMultiFieldRelevanceFunction + ; + +scoreRelevanceFunction + : scoreRelevanceFunctionName LR_BRACKET relevanceFunction (COMMA weight = relevanceFieldWeight)? 
RR_BRACKET + ; noFieldRelevanceFunction - : noFieldRelevanceFunctionName LR_BRACKET query=relevanceQuery (COMMA relevanceArg)* RR_BRACKET - ; + : noFieldRelevanceFunctionName LR_BRACKET query = relevanceQuery (COMMA relevanceArg)* RR_BRACKET + ; // Field is a single column singleFieldRelevanceFunction - : singleFieldRelevanceFunctionName LR_BRACKET - field=relevanceField COMMA query=relevanceQuery - (COMMA relevanceArg)* RR_BRACKET; + : singleFieldRelevanceFunctionName LR_BRACKET field = relevanceField COMMA query = relevanceQuery (COMMA relevanceArg)* RR_BRACKET + ; // Field is a list of columns multiFieldRelevanceFunction - : multiFieldRelevanceFunctionName LR_BRACKET - LT_SQR_PRTHS field=relevanceFieldAndWeight (COMMA field=relevanceFieldAndWeight)* RT_SQR_PRTHS - COMMA query=relevanceQuery (COMMA relevanceArg)* RR_BRACKET - | multiFieldRelevanceFunctionName LR_BRACKET - alternateMultiMatchQuery COMMA alternateMultiMatchField (COMMA relevanceArg)* RR_BRACKET - ; + : multiFieldRelevanceFunctionName LR_BRACKET LT_SQR_PRTHS field = relevanceFieldAndWeight (COMMA field = relevanceFieldAndWeight)* RT_SQR_PRTHS COMMA query = relevanceQuery (COMMA relevanceArg)* RR_BRACKET + | multiFieldRelevanceFunctionName LR_BRACKET alternateMultiMatchQuery COMMA alternateMultiMatchField (COMMA relevanceArg)* RR_BRACKET + ; + +altSingleFieldRelevanceFunction + : field = relevanceField EQUAL_SYMBOL altSyntaxFunctionName = altSingleFieldRelevanceFunctionName LR_BRACKET query = relevanceQuery (COMMA relevanceArg)* RR_BRACKET + ; + +altMultiFieldRelevanceFunction + : field = relevanceField EQUAL_SYMBOL altSyntaxFunctionName = altMultiFieldRelevanceFunctionName LR_BRACKET query = relevanceQuery (COMMA relevanceArg)* RR_BRACKET + ; convertedDataType - : typeName=DATE - | typeName=TIME - | typeName=TIMESTAMP - | typeName=INT - | typeName=INTEGER - | typeName=DOUBLE - | typeName=LONG - | typeName=FLOAT - | typeName=STRING - | typeName=BOOLEAN - ; + : typeName = DATE + | typeName = TIME + | typeName = TIMESTAMP + | typeName = INT + | typeName = INTEGER + | typeName = DOUBLE + | typeName = LONG + | typeName = FLOAT + | typeName = STRING + | typeName = BOOLEAN + ; caseFuncAlternative - : WHEN condition=functionArg - THEN consequent=functionArg - ; + : WHEN condition = functionArg THEN consequent = functionArg + ; aggregateFunction - : functionName=aggregationFunctionName LR_BRACKET functionArg RR_BRACKET - #regularAggregateFunctionCall - | COUNT LR_BRACKET STAR RR_BRACKET #countStarFunctionCall - | COUNT LR_BRACKET DISTINCT functionArg RR_BRACKET #distinctCountFunctionCall - ; + : functionName = aggregationFunctionName LR_BRACKET functionArg RR_BRACKET # regularAggregateFunctionCall + | COUNT LR_BRACKET STAR RR_BRACKET # countStarFunctionCall + | COUNT LR_BRACKET DISTINCT functionArg RR_BRACKET # distinctCountFunctionCall + | percentileApproxFunction # percentileApproxFunctionCall + ; + +percentileApproxFunction + : (PERCENTILE | PERCENTILE_APPROX) LR_BRACKET aggField = functionArg + COMMA percent = numericLiteral (COMMA compression = numericLiteral)? 
RR_BRACKET + ; filterClause - : FILTER LR_BRACKET WHERE expression RR_BRACKET - ; + : FILTER LR_BRACKET WHERE expression RR_BRACKET + ; aggregationFunctionName - : AVG | COUNT | SUM | MIN | MAX | VAR_POP | VAR_SAMP | VARIANCE | STD | STDDEV | STDDEV_POP | STDDEV_SAMP - ; + : AVG + | COUNT + | SUM + | MIN + | MAX + | VAR_POP + | VAR_SAMP + | VARIANCE + | STD + | STDDEV + | STDDEV_POP + | STDDEV_SAMP + ; mathematicalFunctionName - : ABS | CBRT | CEIL | CEILING | CONV | CRC32 | E | EXP | FLOOR | LN | LOG | LOG10 | LOG2 | MOD | PI | POW | POWER - | RAND | ROUND | SIGN | SQRT | TRUNCATE - | trigonometricFunctionName - ; + : ABS + | CBRT + | CEIL + | CEILING + | CONV + | CRC32 + | E + | EXP + | EXPM1 + | FLOOR + | LN + | LOG + | LOG10 + | LOG2 + | MOD + | PI + | POW + | POWER + | RAND + | RINT + | ROUND + | SIGN + | SIGNUM + | SQRT + | TRUNCATE + | trigonometricFunctionName + | arithmeticFunctionName + ; trigonometricFunctionName - : ACOS | ASIN | ATAN | ATAN2 | COS | COT | DEGREES | RADIANS | SIN | TAN - ; + : ACOS + | ASIN + | ATAN + | ATAN2 + | COS + | COSH + | COT + | DEGREES + | RADIANS + | SIN + | SINH + | TAN + ; + +arithmeticFunctionName + : ADD + | SUBTRACT + | MULTIPLY + | DIVIDE + | MOD + | MODULUS + ; dateTimeFunctionName - : datetimeConstantLiteral - | ADDDATE - | CONVERT_TZ - | CURDATE - | CURTIME - | DATE - | DATE_ADD - | DATE_FORMAT - | DATE_SUB - | DATETIME - | DAY - | DAYNAME - | DAYOFMONTH - | DAYOFWEEK - | DAYOFYEAR - | FROM_DAYS - | FROM_UNIXTIME - | HOUR - | MAKEDATE - | MAKETIME - | MICROSECOND - | MINUTE - | MONTH - | MONTHNAME - | NOW - | PERIOD_ADD - | PERIOD_DIFF - | QUARTER - | SECOND - | SUBDATE - | SYSDATE - | TIME - | TIME_TO_SEC - | TIMESTAMP - | TO_DAYS - | UNIX_TIMESTAMP - | WEEK - | YEAR - ; + : datetimeConstantLiteral + | ADDDATE + | ADDTIME + | CONVERT_TZ + | CURDATE + | CURTIME + | DATE + | DATE_ADD + | DATE_FORMAT + | DATE_SUB + | DATEDIFF + | DATETIME + | DAY + | DAYNAME + | DAYOFMONTH + | DAY_OF_MONTH + | DAYOFWEEK + | DAYOFYEAR + | DAY_OF_YEAR + | DAY_OF_WEEK + | FROM_DAYS + | FROM_UNIXTIME + | HOUR + | HOUR_OF_DAY + | LAST_DAY + | MAKEDATE + | MAKETIME + | MICROSECOND + | MINUTE + | MINUTE_OF_DAY + | MINUTE_OF_HOUR + | MONTH + | MONTHNAME + | MONTH_OF_YEAR + | NOW + | PERIOD_ADD + | PERIOD_DIFF + | QUARTER + | SEC_TO_TIME + | SECOND + | SECOND_OF_MINUTE + | SUBDATE + | SUBTIME + | SYSDATE + | STR_TO_DATE + | TIME + | TIME_FORMAT + | TIME_TO_SEC + | TIMEDIFF + | TIMESTAMP + | TO_DAYS + | TO_SECONDS + | UNIX_TIMESTAMP + | WEEK + | WEEKDAY + | WEEK_OF_YEAR + | WEEKOFYEAR + | YEAR + | YEARWEEK + ; textFunctionName - : SUBSTR | SUBSTRING | TRIM | LTRIM | RTRIM | LOWER | UPPER - | CONCAT | CONCAT_WS | SUBSTR | LENGTH | STRCMP | RIGHT | LEFT - | ASCII | LOCATE | REPLACE | REVERSE - ; + : SUBSTR + | SUBSTRING + | TRIM + | LTRIM + | RTRIM + | LOWER + | UPPER + | CONCAT + | CONCAT_WS + | SUBSTR + | LENGTH + | STRCMP + | RIGHT + | LEFT + | ASCII + | LOCATE + | REPLACE + | REVERSE + ; flowControlFunctionName - : IF | IFNULL | NULLIF | ISNULL - ; + : IF + | IFNULL + | NULLIF + | ISNULL + ; noFieldRelevanceFunctionName - : QUERY - ; + : QUERY + ; systemFunctionName - : TYPEOF - ; + : TYPEOF + ; + +nestedFunctionName + : NESTED + ; + +scoreRelevanceFunctionName + : SCORE + | SCOREQUERY + | SCORE_QUERY + ; singleFieldRelevanceFunctionName - : MATCH | MATCHQUERY | MATCH_QUERY - | MATCH_PHRASE | MATCHPHRASE | MATCHPHRASEQUERY - | MATCH_BOOL_PREFIX | MATCH_PHRASE_PREFIX - | WILDCARD_QUERY | WILDCARDQUERY - ; + : MATCH + | MATCHQUERY + | MATCH_QUERY + | MATCH_PHRASE + | 
MATCHPHRASE + | MATCHPHRASEQUERY + | MATCH_BOOL_PREFIX + | MATCH_PHRASE_PREFIX + | WILDCARD_QUERY + | WILDCARDQUERY + ; multiFieldRelevanceFunctionName - : MULTI_MATCH - | MULTIMATCH - | MULTIMATCHQUERY - | SIMPLE_QUERY_STRING - | QUERY_STRING - ; + : MULTI_MATCH + | MULTIMATCH + | MULTIMATCHQUERY + | SIMPLE_QUERY_STRING + | QUERY_STRING + ; + +altSingleFieldRelevanceFunctionName + : MATCH_QUERY + | MATCHQUERY + | MATCH_PHRASE + | MATCHPHRASE + ; + +altMultiFieldRelevanceFunctionName + : MULTI_MATCH + | MULTIMATCH + ; functionArgs - : (functionArg (COMMA functionArg)*)? - ; + : (functionArg (COMMA functionArg)*)? + ; functionArg - : expression - ; + : expression + ; relevanceArg - : relevanceArgName EQUAL_SYMBOL relevanceArgValue - | argName=stringLiteral EQUAL_SYMBOL argVal=relevanceArgValue - ; + : relevanceArgName EQUAL_SYMBOL relevanceArgValue + | argName = stringLiteral EQUAL_SYMBOL argVal = relevanceArgValue + ; highlightArg - : highlightArgName EQUAL_SYMBOL highlightArgValue - ; + : highlightArgName EQUAL_SYMBOL highlightArgValue + ; relevanceArgName - : ALLOW_LEADING_WILDCARD | ANALYZER | ANALYZE_WILDCARD | AUTO_GENERATE_SYNONYMS_PHRASE_QUERY - | BOOST | CASE_INSENSITIVE | CUTOFF_FREQUENCY | DEFAULT_FIELD | DEFAULT_OPERATOR | ENABLE_POSITION_INCREMENTS - | ESCAPE | FIELDS | FLAGS | FUZZINESS | FUZZY_MAX_EXPANSIONS | FUZZY_PREFIX_LENGTH - | FUZZY_REWRITE | FUZZY_TRANSPOSITIONS | LENIENT | LOW_FREQ_OPERATOR | MAX_DETERMINIZED_STATES - | MAX_EXPANSIONS | MINIMUM_SHOULD_MATCH | OPERATOR | PHRASE_SLOP | PREFIX_LENGTH - | QUOTE_ANALYZER | QUOTE_FIELD_SUFFIX | REWRITE | SLOP | TIE_BREAKER | TIME_ZONE | TYPE - | ZERO_TERMS_QUERY - ; + : ALLOW_LEADING_WILDCARD + | ANALYZER + | ANALYZE_WILDCARD + | AUTO_GENERATE_SYNONYMS_PHRASE_QUERY + | BOOST + | CASE_INSENSITIVE + | CUTOFF_FREQUENCY + | DEFAULT_FIELD + | DEFAULT_OPERATOR + | ENABLE_POSITION_INCREMENTS + | ESCAPE + | FIELDS + | FLAGS + | FUZZINESS + | FUZZY_MAX_EXPANSIONS + | FUZZY_PREFIX_LENGTH + | FUZZY_REWRITE + | FUZZY_TRANSPOSITIONS + | LENIENT + | LOW_FREQ_OPERATOR + | MAX_DETERMINIZED_STATES + | MAX_EXPANSIONS + | MINIMUM_SHOULD_MATCH + | OPERATOR + | PHRASE_SLOP + | PREFIX_LENGTH + | QUOTE_ANALYZER + | QUOTE_FIELD_SUFFIX + | REWRITE + | SLOP + | TIE_BREAKER + | TIME_ZONE + | TYPE + | ZERO_TERMS_QUERY + ; highlightArgName - : HIGHLIGHT_POST_TAGS | HIGHLIGHT_PRE_TAGS - ; + : HIGHLIGHT_POST_TAGS + | HIGHLIGHT_PRE_TAGS + ; relevanceFieldAndWeight - : field=relevanceField - | field=relevanceField weight=relevanceFieldWeight - | field=relevanceField BIT_XOR_OP weight=relevanceFieldWeight - ; + : field = relevanceField + | field = relevanceField weight = relevanceFieldWeight + | field = relevanceField BIT_XOR_OP weight = relevanceFieldWeight + ; relevanceFieldWeight - : realLiteral - | decimalLiteral - ; + : numericLiteral + ; relevanceField - : qualifiedName - | stringLiteral - ; + : qualifiedName + | stringLiteral + ; relevanceQuery - : relevanceArgValue - ; + : relevanceArgValue + ; relevanceArgValue - : qualifiedName - | constant - ; + : qualifiedName + | constant + ; highlightArgValue - : stringLiteral - ; + : stringLiteral + ; alternateMultiMatchArgName - : FIELDS - | QUERY - | stringLiteral - ; + : FIELDS + | QUERY + | stringLiteral + ; alternateMultiMatchQuery - : argName=alternateMultiMatchArgName EQUAL_SYMBOL argVal=relevanceArgValue - ; + : argName = alternateMultiMatchArgName EQUAL_SYMBOL argVal = relevanceArgValue + ; alternateMultiMatchField - : argName=alternateMultiMatchArgName EQUAL_SYMBOL argVal=relevanceArgValue - | 
argName=alternateMultiMatchArgName EQUAL_SYMBOL - LT_SQR_PRTHS argVal=relevanceArgValue RT_SQR_PRTHS - ; + : argName = alternateMultiMatchArgName EQUAL_SYMBOL argVal = relevanceArgValue + | argName = alternateMultiMatchArgName EQUAL_SYMBOL LT_SQR_PRTHS argVal = relevanceArgValue RT_SQR_PRTHS + ; + +// Identifiers +tableName + : qualifiedName + ; + +columnName + : qualifiedName + ; + +allTupleFields + : path = qualifiedName DOT STAR + ; + +alias + : ident + ; + +qualifiedName + : ident (DOT ident)* + ; + +ident + : DOT? ID + | BACKTICK_QUOTE_ID + | keywordsCanBeId + | scalarFunctionName + ; + +keywordsCanBeId + : FULL + | FIELD + | D + | T + | TS // OD SQL and ODBC special + | COUNT + | SUM + | AVG + | MAX + | MIN + | FIRST + | LAST + | TYPE // TODO: Type is keyword required by relevancy function. Remove this when relevancy functions moved out + ; diff --git a/sql/src/main/java/org/opensearch/sql/sql/SQLService.java b/sql/src/main/java/org/opensearch/sql/sql/SQLService.java index 082a3e9581..e1ca778453 100644 --- a/sql/src/main/java/org/opensearch/sql/sql/SQLService.java +++ b/sql/src/main/java/org/opensearch/sql/sql/SQLService.java @@ -3,7 +3,6 @@ * SPDX-License-Identifier: Apache-2.0 */ - package org.opensearch.sql.sql; import java.util.Optional; @@ -21,9 +20,7 @@ import org.opensearch.sql.sql.parser.AstBuilder; import org.opensearch.sql.sql.parser.AstStatementBuilder; -/** - * SQL service. - */ +/** SQL service. */ @RequiredArgsConstructor public class SQLService { @@ -65,16 +62,36 @@ private AbstractPlan plan( SQLQueryRequest request, Optional> queryListener, Optional> explainListener) { - // 1.Parse query and convert parse tree (CST) to abstract syntax tree (AST) - ParseTree cst = parser.parse(request.getQuery()); - Statement statement = - cst.accept( - new AstStatementBuilder( - new AstBuilder(request.getQuery()), - AstStatementBuilder.StatementBuilderContext.builder() - .isExplain(request.isExplainRequest()) - .build())); + boolean isExplainRequest = request.isExplainRequest(); + if (request.getCursor().isPresent()) { + // Handle v2 cursor here -- legacy cursor was handled earlier. + if (isExplainRequest) { + throw new UnsupportedOperationException( + "Explain of a paged query continuation " + + "is not supported. 
Use `explain` for the initial query request."); + } + if (request.isCursorCloseRequest()) { + return queryExecutionFactory.createCloseCursor( + request.getCursor().get(), queryListener.orElse(null)); + } + return queryExecutionFactory.create( + request.getCursor().get(), + isExplainRequest, + queryListener.orElse(null), + explainListener.orElse(null)); + } else { + // 1.Parse query and convert parse tree (CST) to abstract syntax tree (AST) + ParseTree cst = parser.parse(request.getQuery()); + Statement statement = + cst.accept( + new AstStatementBuilder( + new AstBuilder(request.getQuery()), + AstStatementBuilder.StatementBuilderContext.builder() + .isExplain(isExplainRequest) + .fetchSize(request.getFetchSize()) + .build())); - return queryExecutionFactory.create(statement, queryListener, explainListener); + return queryExecutionFactory.create(statement, queryListener, explainListener); + } } } diff --git a/sql/src/main/java/org/opensearch/sql/sql/antlr/AnonymizerListener.java b/sql/src/main/java/org/opensearch/sql/sql/antlr/AnonymizerListener.java new file mode 100644 index 0000000000..0d1b89f7a9 --- /dev/null +++ b/sql/src/main/java/org/opensearch/sql/sql/antlr/AnonymizerListener.java @@ -0,0 +1,107 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.sql.sql.antlr; + +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.BACKTICK_QUOTE_ID; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.BOOLEAN; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.COMMA; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.DECIMAL_LITERAL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.DOT; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.EQUAL_SYMBOL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.EXCLAMATION_SYMBOL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.FALSE; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.FROM; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.GREATER_SYMBOL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.ID; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.LESS_SYMBOL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.ONE_DECIMAL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.REAL_LITERAL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.STRING_LITERAL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.TIMESTAMP; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.TRUE; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.TWO_DECIMAL; +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer.ZERO_DECIMAL; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.ErrorNode; +import org.antlr.v4.runtime.tree.ParseTreeListener; +import org.antlr.v4.runtime.tree.TerminalNode; + +/** Parse tree listener for anonymizing SQL requests. 
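A usage sketch for the new listener (illustrative, not from this changeset): it assumes the ANTLR-generated OpenSearchSQLLexer and OpenSearchSQLParser classes, wired up the same way SQLSyntaxParser does below; the class name AnonymizerDemo is invented:

import org.antlr.v4.runtime.CommonTokenStream;
import org.opensearch.sql.common.antlr.CaseInsensitiveCharStream;
import org.opensearch.sql.sql.antlr.AnonymizerListener;
import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer;
import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser;

public class AnonymizerDemo {
    public static void main(String[] args) {
        OpenSearchSQLLexer lexer = new OpenSearchSQLLexer(
                new CaseInsensitiveCharStream("SELECT name FROM accounts WHERE age > 30"));
        OpenSearchSQLParser parser = new OpenSearchSQLParser(new CommonTokenStream(lexer));
        AnonymizerListener anonymizer = new AnonymizerListener();
        parser.addParseListener(anonymizer);
        parser.root();
        // Expected shape: ( SELECT identifier FROM table WHERE identifier > number )
        System.out.println(anonymizer.getAnonymizedQueryString());
    }
}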
*/ +public class AnonymizerListener implements ParseTreeListener { + private String anonymizedQueryString = ""; + private static final int NO_TYPE = -1; + private int previousType = NO_TYPE; + + @Override + public void enterEveryRule(ParserRuleContext ctx) {} + + @Override + public void exitEveryRule(ParserRuleContext ctx) {} + + @Override + public void visitTerminal(TerminalNode node) { + // In these situations don't add a space prior: + // 1. a DOT between two identifiers + // 2. before a comma + // 3. between equal comparison tokens: e.g <= + // 4. between alt not equals: <> + int token = node.getSymbol().getType(); + boolean isDotIdentifiers = token == DOT || previousType == DOT; + boolean isComma = token == COMMA; + boolean isEqualComparison = + ((token == EQUAL_SYMBOL) + && (previousType == LESS_SYMBOL + || previousType == GREATER_SYMBOL + || previousType == EXCLAMATION_SYMBOL)); + boolean isNotEqualComparisonAlternative = + previousType == LESS_SYMBOL && token == GREATER_SYMBOL; + if (!isDotIdentifiers && !isComma && !isEqualComparison && !isNotEqualComparisonAlternative) { + anonymizedQueryString += " "; + } + + // anonymize the following tokens + switch (node.getSymbol().getType()) { + case ID: + case TIMESTAMP: + case BACKTICK_QUOTE_ID: + if (previousType == FROM) { + anonymizedQueryString += "table"; + } else { + anonymizedQueryString += "identifier"; + } + break; + case ZERO_DECIMAL: + case ONE_DECIMAL: + case TWO_DECIMAL: + case DECIMAL_LITERAL: + case REAL_LITERAL: + anonymizedQueryString += "number"; + break; + case STRING_LITERAL: + anonymizedQueryString += "'string_literal'"; + break; + case BOOLEAN: + case TRUE: + case FALSE: + anonymizedQueryString += "boolean_literal"; + break; + case NO_TYPE: + // end of file + break; + default: + anonymizedQueryString += node.getText().toUpperCase(); + } + previousType = node.getSymbol().getType(); + } + + @Override + public void visitErrorNode(ErrorNode node) {} + + public String getAnonymizedQueryString() { + return "(" + anonymizedQueryString + ")"; + } +} diff --git a/sql/src/main/java/org/opensearch/sql/sql/antlr/SQLSyntaxParser.java b/sql/src/main/java/org/opensearch/sql/sql/antlr/SQLSyntaxParser.java index ee1e991bd4..d1a6adc236 100644 --- a/sql/src/main/java/org/opensearch/sql/sql/antlr/SQLSyntaxParser.java +++ b/sql/src/main/java/org/opensearch/sql/sql/antlr/SQLSyntaxParser.java @@ -3,33 +3,40 @@ * SPDX-License-Identifier: Apache-2.0 */ - package org.opensearch.sql.sql.antlr; import org.antlr.v4.runtime.CommonTokenStream; import org.antlr.v4.runtime.tree.ParseTree; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.sql.common.antlr.CaseInsensitiveCharStream; import org.opensearch.sql.common.antlr.Parser; import org.opensearch.sql.common.antlr.SyntaxAnalysisErrorListener; import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer; import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser; -/** - * SQL syntax parser which encapsulates an ANTLR parser. - */ +/** SQL syntax parser which encapsulates an ANTLR parser. */ public class SQLSyntaxParser implements Parser { + private static final Logger LOG = LogManager.getLogger(SQLSyntaxParser.class); /** * Parse a SQL query by ANTLR parser. 
- * @param query a SQL query - * @return parse tree root + * + * @param query a SQL query + * @return parse tree root */ @Override public ParseTree parse(String query) { + AnonymizerListener anonymizer = new AnonymizerListener(); + OpenSearchSQLLexer lexer = new OpenSearchSQLLexer(new CaseInsensitiveCharStream(query)); OpenSearchSQLParser parser = new OpenSearchSQLParser(new CommonTokenStream(lexer)); parser.addErrorListener(new SyntaxAnalysisErrorListener()); - return parser.root(); - } + parser.addParseListener(anonymizer); + + ParseTree parseTree = parser.root(); + LOG.info("New Engine Request Query: {}", anonymizer.getAnonymizedQueryString()); + return parseTree; + } } diff --git a/sql/src/main/java/org/opensearch/sql/sql/config/SQLServiceConfig.java b/sql/src/main/java/org/opensearch/sql/sql/config/SQLServiceConfig.java deleted file mode 100644 index 4287883c34..0000000000 --- a/sql/src/main/java/org/opensearch/sql/sql/config/SQLServiceConfig.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - - -package org.opensearch.sql.sql.config; - -import org.opensearch.sql.executor.QueryManager; -import org.opensearch.sql.executor.execution.QueryPlanFactory; -import org.opensearch.sql.sql.SQLService; -import org.opensearch.sql.sql.antlr.SQLSyntaxParser; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.config.ConfigurableBeanFactory; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Scope; - -/** - * SQL service configuration for Spring container initialization. - */ -@Configuration -public class SQLServiceConfig { - - @Autowired - private QueryManager queryManager; - - @Autowired - private QueryPlanFactory queryExecutionFactory; - - /** - * The registration of OpenSearch storage engine happens here because - * OpenSearchStorageEngine is dependent on NodeClient. - * - * @return SQLService. - */ - @Bean - @Scope(value = ConfigurableBeanFactory.SCOPE_PROTOTYPE) - public SQLService sqlService() { - return new SQLService( - new SQLSyntaxParser(), - queryManager, - queryExecutionFactory); - } - -} - diff --git a/sql/src/main/java/org/opensearch/sql/sql/domain/SQLQueryRequest.java b/sql/src/main/java/org/opensearch/sql/sql/domain/SQLQueryRequest.java index 508f80cee4..8b7c450a87 100644 --- a/sql/src/main/java/org/opensearch/sql/sql/domain/SQLQueryRequest.java +++ b/sql/src/main/java/org/opensearch/sql/sql/domain/SQLQueryRequest.java @@ -3,16 +3,14 @@ * SPDX-License-Identifier: Apache-2.0 */ - package org.opensearch.sql.sql.domain; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableSet; import java.util.Collections; import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.stream.Stream; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.RequiredArgsConstructor; @@ -21,115 +19,129 @@ import org.json.JSONObject; import org.opensearch.sql.protocol.response.format.Format; -/** - * SQL query request. - */ +/** SQL query request. 
*/ @ToString @EqualsAndHashCode @RequiredArgsConstructor public class SQLQueryRequest { - - private static final Set SUPPORTED_FIELDS = ImmutableSet.of( - "query", "fetch_size", "parameters"); + private static final String QUERY_FIELD_CURSOR = "cursor"; + private static final Set SUPPORTED_FIELDS = + Set.of("query", "fetch_size", "parameters", QUERY_FIELD_CURSOR); private static final String QUERY_PARAMS_FORMAT = "format"; private static final String QUERY_PARAMS_SANITIZE = "sanitize"; + private static final String QUERY_PARAMS_PRETTY = "pretty"; - /** - * JSON payload in REST request. - */ + /** JSON payload in REST request. */ private final JSONObject jsonContent; - /** - * SQL query. - */ - @Getter - private final String query; + /** SQL query. */ + @Getter private final String query; - /** - * Request path. - */ + /** Request path. */ private final String path; - /** - * Request format. - */ + /** Request format. */ private final String format; - /** - * Request params. - */ + /** Request params. */ private Map params = Collections.emptyMap(); @Getter @Accessors(fluent = true) private boolean sanitize = true; - /** - * Constructor of SQLQueryRequest that passes request params. - */ + @Getter + @Accessors(fluent = true) + private boolean pretty = false; + + private String cursor; + + /** Constructor of SQLQueryRequest that passes request params. */ public SQLQueryRequest( - JSONObject jsonContent, String query, String path, Map params) { + JSONObject jsonContent, + String query, + String path, + Map params, + String cursor) { this.jsonContent = jsonContent; this.query = query; this.path = path; this.params = params; this.format = getFormat(params); this.sanitize = shouldSanitize(params); + this.pretty = shouldPretty(params); + this.cursor = cursor; } /** - * Pre-check if the request can be supported by meeting ALL the following criteria: - * 1.Only supported fields present in request body, ex. "filter" and "cursor" are not supported - * 2.No fetch_size or "fetch_size=0". In other word, it's not a cursor request - * 3.Response format is default or can be supported. + * Pre-check if the request can be supported by meeting ALL the following criteria: 1.Only + * supported fields present in request body, ex. "filter" and "cursor" are not supported + * 2.Response format is default or can be supported. * - * @return true if supported. + * @return true if supported. */ public boolean isSupported() { - return isOnlySupportedFieldInPayload() - && isFetchSizeZeroIfPresent() - && isSupportedFormat(); + var noCursor = !isCursor(); + var noQuery = query == null; + var noUnsupportedParams = + params.isEmpty() || (params.size() == 1 && params.containsKey(QUERY_PARAMS_FORMAT)); + var noContent = jsonContent == null || jsonContent.isEmpty(); + + return ((!noCursor + && noQuery + && noUnsupportedParams + && noContent) // if cursor is given, but other things + || (noCursor && !noQuery)) // or if cursor is not given, but query + && isOnlySupportedFieldInPayload() // and request has supported fields only + && isSupportedFormat(); // and request is in supported format + } + + private boolean isCursor() { + return cursor != null && !cursor.isEmpty(); } /** * Check if request is to explain rather than execute the query. - * @return true if it is a explain request + * + * @return true if it is an explain request */ public boolean isExplainRequest() { return path.endsWith("/_explain"); } - /** - * Decide on the formatter by the requested format. 
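// Illustrative note, not from this changeset, tracing the format handling below:
//   ?format=csv         -> Format.of("csv") resolves and the csv formatter is used
//   no format parameter -> getFormat() falls back to the "jdbc" default
//   ?format=xml         -> IllegalArgumentException: "response in xml format is not supported."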
+
+  private boolean isCursor() {
+    return cursor != null && !cursor.isEmpty();
+  }
 
   /**
    * Check if request is to explain rather than execute the query.
-   * @return true if it is a explain request
+   *
+   * @return true if it is an explain request
    */
   public boolean isExplainRequest() {
     return path.endsWith("/_explain");
   }
 
-  /**
-   * Decide on the formatter by the requested format.
-   */
+  public boolean isCursorCloseRequest() {
+    return path.endsWith("/close");
+  }
+
+  /** Decide on the formatter by the requested format. */
   public Format format() {
     Optional<Format> optionalFormat = Format.of(format);
     if (optionalFormat.isPresent()) {
       return optionalFormat.get();
     } else {
       throw new IllegalArgumentException(
-          String.format(Locale.ROOT,"response in %s format is not supported.", format));
+          String.format(Locale.ROOT, "response in %s format is not supported.", format));
     }
   }
 
   private boolean isOnlySupportedFieldInPayload() {
-    return SUPPORTED_FIELDS.containsAll(jsonContent.keySet());
+    return jsonContent == null || SUPPORTED_FIELDS.containsAll(jsonContent.keySet());
+  }
+
+  public Optional<String> getCursor() {
+    return Optional.ofNullable(cursor);
   }
 
-  private boolean isFetchSizeZeroIfPresent() {
-    return (jsonContent.optInt("fetch_size") == 0);
+  public int getFetchSize() {
+    return jsonContent.optInt("fetch_size");
   }
 
   private boolean isSupportedFormat() {
-    return Strings.isNullOrEmpty(format) || "jdbc".equalsIgnoreCase(format)
-        || "csv".equalsIgnoreCase(format) || "raw".equalsIgnoreCase(format);
+    return Stream.of("csv", "jdbc", "raw").anyMatch(format::equalsIgnoreCase);
   }
 
   private String getFormat(Map<String, String> params) {
-    if (params.containsKey(QUERY_PARAMS_FORMAT)) {
-      return params.get(QUERY_PARAMS_FORMAT);
-    }
-    return "jdbc";
+    return params.getOrDefault(QUERY_PARAMS_FORMAT, "jdbc");
   }
 
   private boolean shouldSanitize(Map<String, String> params) {
@@ -139,4 +151,10 @@ private boolean shouldSanitize(Map<String, String> params) {
     return true;
   }
 
+  private boolean shouldPretty(Map<String, String> params) {
+    if (params.containsKey(QUERY_PARAMS_PRETTY)) {
+      return Boolean.parseBoolean(params.get(QUERY_PARAMS_PRETTY));
+    }
+    return false;
+  }
 }
diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstAggregationBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstAggregationBuilder.java
index bd4464d00e..e46147b7a3 100644
--- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstAggregationBuilder.java
+++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstAggregationBuilder.java
@@ -3,7 +3,6 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-
 package org.opensearch.sql.sql.parser;
 
 import static java.util.Collections.emptyList;
@@ -27,6 +26,8 @@
 import org.opensearch.sql.sql.parser.context.QuerySpecification;
 
 /**
+ *
+ *
  * <pre>
      * AST aggregation builder that builds AST aggregation node for the following scenarios:
      *
    @@ -59,9 +60,7 @@
     @RequiredArgsConstructor
 public class AstAggregationBuilder extends OpenSearchSQLParserBaseVisitor<UnresolvedPlan> {
     
    -  /**
    -   * Query specification that contains info collected beforehand.
    -   */
    +  /** Query specification that contains info collected beforehand. */
       private final QuerySpecification querySpec;
     
       @Override
    @@ -78,10 +77,7 @@ public UnresolvedPlan visit(ParseTree groupByClause) {
     
       private UnresolvedPlan buildExplicitAggregation() {
     List<UnresolvedExpression> groupByItems = replaceGroupByItemIfAliasOrOrdinal();
    -    return new Aggregation(
    -        new ArrayList<>(querySpec.getAggregators()),
    -        emptyList(),
    -        groupByItems);
    +    return new Aggregation(new ArrayList<>(querySpec.getAggregators()), emptyList(), groupByItems);
       }
     
       private UnresolvedPlan buildImplicitAggregation() {
    @@ -89,33 +85,32 @@ private UnresolvedPlan buildImplicitAggregation() {
     
         if (invalidSelectItem.isPresent()) {
       // Report semantic error to avoid falling back to the old engine again
    -      throw new SemanticCheckException(StringUtils.format(
    -          "Explicit GROUP BY clause is required because expression [%s] "
    -              + "contains non-aggregated column", invalidSelectItem.get()));
    +      throw new SemanticCheckException(
    +          StringUtils.format(
    +              "Explicit GROUP BY clause is required because expression [%s] "
    +                  + "contains non-aggregated column",
    +              invalidSelectItem.get()));
         }
     
         return new Aggregation(
    -        new ArrayList<>(querySpec.getAggregators()),
    -        emptyList(),
    -        querySpec.getGroupByItems());
    +        new ArrayList<>(querySpec.getAggregators()), emptyList(), querySpec.getGroupByItems());
       }
     
   private List<UnresolvedExpression> replaceGroupByItemIfAliasOrOrdinal() {
    -    return querySpec.getGroupByItems()
    -                    .stream()
    -                    .map(querySpec::replaceIfAliasOrOrdinal)
    -                    .map(expr -> new Alias(expr.toString(), expr))
    -                    .collect(Collectors.toList());
    +    return querySpec.getGroupByItems().stream()
    +        .map(querySpec::replaceIfAliasOrOrdinal)
    +        .map(expr -> new Alias(expr.toString(), expr))
    +        .collect(Collectors.toList());
       }
     
       /**
    -   * Find non-aggregate item in SELECT clause. Note that literal is special which is not required
    -   * to be applied by aggregate function.
+   * Find non-aggregate item in SELECT clause. Note that a literal is special in that it is not
+   * required to be wrapped by an aggregate function.
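+   *
+   * <p>e.g. {@code SELECT name, AVG(age) FROM t} yields {@code name}, while {@code SELECT 1,
+   * AVG(age) FROM t} yields nothing because a literal needs no aggregation.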
        */
   private Optional<UnresolvedExpression> findNonAggregatedItemInSelect() {
         return querySpec.getSelectItems().stream()
    -                                     .filter(this::isNonAggregateOrLiteralExpression)
    -                                     .findFirst();
    +        .filter(this::isNonAggregateOrLiteralExpression)
    +        .findFirst();
       }
     
       private boolean isAggregatorNotFoundAnywhere() {
    @@ -132,8 +127,7 @@ private boolean isNonAggregateOrLiteralExpression(UnresolvedExpression expr) {
         }
     
     List<? extends Node> children = expr.getChild();
    -    return children.stream().anyMatch(child ->
    -        isNonAggregateOrLiteralExpression((UnresolvedExpression) child));
    +    return children.stream()
    +        .anyMatch(child -> isNonAggregateOrLiteralExpression((UnresolvedExpression) child));
       }
    -
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstBuilder.java
    index 6edce6eb15..ab96f16263 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstBuilder.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstBuilder.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static java.util.Collections.emptyList;
    @@ -37,27 +36,24 @@
     import org.opensearch.sql.ast.tree.Values;
     import org.opensearch.sql.common.antlr.SyntaxCheckException;
     import org.opensearch.sql.common.utils.StringUtils;
    +import org.opensearch.sql.expression.function.BuiltinFunctionName;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.QuerySpecificationContext;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParserBaseVisitor;
     import org.opensearch.sql.sql.parser.context.ParsingContext;
     
    -/**
    - * Abstract syntax tree (AST) builder.
    - */
    +/** Abstract syntax tree (AST) builder. */
     @RequiredArgsConstructor
 public class AstBuilder extends OpenSearchSQLParserBaseVisitor<UnresolvedPlan> {
     
       private final AstExpressionBuilder expressionBuilder = new AstExpressionBuilder();
     
    -  /**
    -   * Parsing context stack that contains context for current query parsing.
    -   */
    +  /** Parsing context stack that contains context for current query parsing. */
       private final ParsingContext context = new ParsingContext();
     
       /**
    -   * SQL query to get original token text. This is necessary because token.getText() returns
    -   * text without whitespaces or other characters discarded by lexer.
    +   * SQL query to get original token text. This is necessary because token.getText() returns text
    +   * without whitespaces or other characters discarded by lexer.
        */
       private final String query;
     
    @@ -90,8 +86,7 @@ public UnresolvedPlan visitQuerySpecification(QuerySpecificationContext queryCon
     
         if (queryContext.fromClause() == null) {
       Optional<UnresolvedExpression> allFields =
    -          project.getProjectList().stream().filter(node -> node instanceof AllFields)
    -              .findFirst();
    +          project.getProjectList().stream().filter(node -> node instanceof AllFields).findFirst();
           if (allFields.isPresent()) {
             throw new SyntaxCheckException("No FROM clause found for select all");
           }
    @@ -118,9 +113,8 @@ public UnresolvedPlan visitQuerySpecification(QuerySpecificationContext queryCon
     
       @Override
       public UnresolvedPlan visitSelectClause(SelectClauseContext ctx) {
-    ImmutableList.Builder<UnresolvedExpression> builder =
-        new ImmutableList.Builder<>();
-    if (ctx.selectElements().star != null) { //TODO: project operator should be required?
+    ImmutableList.Builder<UnresolvedExpression> builder = new ImmutableList.Builder<>();
    +    if (ctx.selectElements().star != null) { // TODO: project operator should be required?
           builder.add(AllFields.of());
         }
         ctx.selectElements().selectElement().forEach(field -> builder.add(visitSelectItem(field)));
    @@ -131,8 +125,7 @@ public UnresolvedPlan visitSelectClause(SelectClauseContext ctx) {
       public UnresolvedPlan visitLimitClause(OpenSearchSQLParser.LimitClauseContext ctx) {
         return new Limit(
             Integer.parseInt(ctx.limit.getText()),
    -        ctx.offset == null ? 0 : Integer.parseInt(ctx.offset.getText())
    -    );
    +        ctx.offset == null ? 0 : Integer.parseInt(ctx.offset.getText()));
       }
     
       @Override
    @@ -151,6 +144,8 @@ public UnresolvedPlan visitFromClause(FromClauseContext ctx) {
         }
     
         if (ctx.havingClause() != null) {
    +      UnresolvedPlan havingPlan = visit(ctx.havingClause());
    +      verifySupportsCondition(((Filter) havingPlan).getCondition());
           result = visit(ctx.havingClause()).attach(result);
         }
     
    @@ -161,16 +156,34 @@ public UnresolvedPlan visitFromClause(FromClauseContext ctx) {
         return result;
       }
     
    +  /**
+   * Ensure the NESTED function is not used in the HAVING clause, falling back to the legacy
+   * engine when it is. Can be removed once the NESTED function is supported in HAVING.
    +   *
    +   * @param func : Function in HAVING clause
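+   * @throws SyntaxCheckException e.g. for {@code ... HAVING nested(message.info) = 'a'}, which
+   *     the caller treats as a signal to retry the query on the legacy engine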
    +   */
    +  private void verifySupportsCondition(UnresolvedExpression func) {
    +    if (func instanceof Function) {
    +      if (((Function) func).getFuncName().equalsIgnoreCase(BuiltinFunctionName.NESTED.name())) {
    +        throw new SyntaxCheckException(
    +            "Falling back to legacy engine. Nested function is not supported in the HAVING"
    +                + " clause.");
    +      }
    +      ((Function) func).getFuncArgs().stream().forEach(e -> verifySupportsCondition(e));
    +    }
    +  }
    +
       @Override
       public UnresolvedPlan visitTableAsRelation(TableAsRelationContext ctx) {
    -    String tableAlias = (ctx.alias() == null) ? null
    -        : StringUtils.unquoteIdentifier(ctx.alias().getText());
    +    String tableAlias =
    +        (ctx.alias() == null) ? null : StringUtils.unquoteIdentifier(ctx.alias().getText());
         return new Relation(visitAstExpression(ctx.tableName()), tableAlias);
       }
     
       @Override
       public UnresolvedPlan visitSubqueryAsRelation(SubqueryAsRelationContext ctx) {
    -    return new RelationSubquery(visit(ctx.subquery), ctx.alias().getText());
    +    String subqueryAlias = StringUtils.unquoteIdentifier(ctx.alias().getText());
    +    return new RelationSubquery(visit(ctx.subquery), subqueryAlias);
       }
     
       @Override
    @@ -204,5 +217,4 @@ private UnresolvedExpression visitSelectItem(SelectElementContext ctx) {
           return new Alias(name, expr, alias);
         }
       }
    -
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstExpressionBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstExpressionBuilder.java
    index bae22595ca..59de306966 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstExpressionBuilder.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstExpressionBuilder.java
    @@ -3,9 +3,10 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
    +import static org.opensearch.sql.ast.dsl.AstDSL.between;
    +import static org.opensearch.sql.ast.dsl.AstDSL.not;
     import static org.opensearch.sql.ast.dsl.AstDSL.qualifiedName;
     import static org.opensearch.sql.ast.dsl.AstDSL.stringLiteral;
     import static org.opensearch.sql.expression.function.BuiltinFunctionName.IS_NOT_NULL;
    @@ -14,6 +15,10 @@
     import static org.opensearch.sql.expression.function.BuiltinFunctionName.NOT_LIKE;
     import static org.opensearch.sql.expression.function.BuiltinFunctionName.POSITION;
     import static org.opensearch.sql.expression.function.BuiltinFunctionName.REGEXP;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.AltMultiFieldRelevanceFunctionContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.AltSingleFieldRelevanceFunctionContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.AlternateMultiMatchFieldContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.BetweenPredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.BinaryComparisonPredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.BooleanContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.CaseFuncAlternativeContext;
    @@ -24,18 +29,31 @@
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.DataTypeFunctionCallContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.DateLiteralContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.DistinctCountFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ExtractFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.FilterClauseContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.FilteredAggregationFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.FunctionArgContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.GetFormatFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.HighlightFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.InPredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.IsNullPredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.LikePredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.MathExpressionAtomContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.MultiFieldRelevanceFunctionContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.NestedAllFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.NoFieldRelevanceFunctionContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.NotExpressionContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.NullLiteralContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.OverClauseContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.PositionFunctionContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.QualifiedNameContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.RegexpPredicateContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.RegularAggregateFunctionCallContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.RelevanceArgContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.RelevanceFieldAndWeightContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ScalarFunctionCallContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ScalarWindowFunctionContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ScoreRelevanceFunctionContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ShowDescribePatternContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.SignedDecimalContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.SignedRealContext;
    @@ -44,6 +62,7 @@
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.StringLiteralContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.TableFilterContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.TimeLiteralContext;
    +import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.TimestampFunctionCallContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.TimestampLiteralContext;
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.WindowFunctionClauseContext;
     import static org.opensearch.sql.sql.parser.ParserUtils.createSortOption;
    @@ -60,29 +79,12 @@
     import org.apache.commons.lang3.tuple.ImmutablePair;
     import org.apache.commons.lang3.tuple.Pair;
     import org.opensearch.sql.ast.dsl.AstDSL;
    -import org.opensearch.sql.ast.expression.AggregateFunction;
    -import org.opensearch.sql.ast.expression.AllFields;
    -import org.opensearch.sql.ast.expression.And;
    -import org.opensearch.sql.ast.expression.Case;
    -import org.opensearch.sql.ast.expression.Cast;
    -import org.opensearch.sql.ast.expression.DataType;
    -import org.opensearch.sql.ast.expression.Function;
    -import org.opensearch.sql.ast.expression.HighlightFunction;
    -import org.opensearch.sql.ast.expression.Interval;
    -import org.opensearch.sql.ast.expression.IntervalUnit;
    -import org.opensearch.sql.ast.expression.Literal;
    -import org.opensearch.sql.ast.expression.Not;
    -import org.opensearch.sql.ast.expression.Or;
    -import org.opensearch.sql.ast.expression.QualifiedName;
    -import org.opensearch.sql.ast.expression.RelevanceFieldList;
    -import org.opensearch.sql.ast.expression.UnresolvedArgument;
    -import org.opensearch.sql.ast.expression.UnresolvedExpression;
    -import org.opensearch.sql.ast.expression.When;
    -import org.opensearch.sql.ast.expression.WindowFunction;
    +import org.opensearch.sql.ast.expression.*;
     import org.opensearch.sql.ast.tree.Sort.SortOption;
     import org.opensearch.sql.common.utils.StringUtils;
     import org.opensearch.sql.expression.function.BuiltinFunctionName;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser;
    +import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.AlternateMultiMatchQueryContext;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.AndExpressionContext;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.ColumnNameContext;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.IdentContext;
    @@ -92,9 +94,7 @@
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.TableNameContext;
     import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParserBaseVisitor;
     
    -/**
    - * Expression builder to parse text to expression in AST.
    - */
    +/** Expression builder to parse text to expression in AST. */
     public class AstExpressionBuilder extends OpenSearchSQLParserBaseVisitor {
     
       @Override
    @@ -120,9 +120,7 @@ public UnresolvedExpression visitQualifiedName(QualifiedNameContext ctx) {
       @Override
       public UnresolvedExpression visitMathExpressionAtom(MathExpressionAtomContext ctx) {
         return new Function(
    -        ctx.mathOperator().getText(),
    -        Arrays.asList(visit(ctx.left), visit(ctx.right))
    -    );
    +        ctx.mathOperator.getText(), Arrays.asList(visit(ctx.left), visit(ctx.right)));
       }
     
       @Override
    @@ -130,32 +128,49 @@ public UnresolvedExpression visitNestedExpressionAtom(NestedExpressionAtomContex
         return visit(ctx.expression()); // Discard parenthesis around
       }
     
    +  @Override
    +  public UnresolvedExpression visitNestedAllFunctionCall(NestedAllFunctionCallContext ctx) {
    +    return new NestedAllTupleFields(visitQualifiedName(ctx.allTupleFields().path).toString());
    +  }
    +
       @Override
       public UnresolvedExpression visitScalarFunctionCall(ScalarFunctionCallContext ctx) {
         return buildFunction(ctx.scalarFunctionName().getText(), ctx.functionArgs().functionArg());
       }
     
       @Override
    -  public UnresolvedExpression visitHighlightFunctionCall(
    -      OpenSearchSQLParser.HighlightFunctionCallContext ctx) {
    +  public UnresolvedExpression visitGetFormatFunctionCall(GetFormatFunctionCallContext ctx) {
    +    return new Function(
    +        ctx.getFormatFunction().GET_FORMAT().toString(), getFormatFunctionArguments(ctx));
    +  }
    +
    +  @Override
    +  public UnresolvedExpression visitHighlightFunctionCall(HighlightFunctionCallContext ctx) {
     ImmutableMap.Builder<String, Literal> builder = ImmutableMap.builder();
    -    ctx.highlightFunction().highlightArg().forEach(v -> builder.put(
    -        v.highlightArgName().getText().toLowerCase(),
    -        new Literal(StringUtils.unquoteText(v.highlightArgValue().getText()),
    -            DataType.STRING))
    -    );
    +    ctx.highlightFunction()
    +        .highlightArg()
    +        .forEach(
    +            v ->
    +                builder.put(
    +                    v.highlightArgName().getText().toLowerCase(),
    +                    new Literal(
    +                        StringUtils.unquoteText(v.highlightArgValue().getText()),
    +                        DataType.STRING)));
     
    -    return new HighlightFunction(visit(ctx.highlightFunction().relevanceField()),
    -        builder.build());
    +    return new HighlightFunction(visit(ctx.highlightFunction().relevanceField()), builder.build());
       }
     
       @Override
    -  public UnresolvedExpression visitPositionFunction(
    -          OpenSearchSQLParser.PositionFunctionContext ctx) {
    +  public UnresolvedExpression visitTimestampFunctionCall(TimestampFunctionCallContext ctx) {
         return new Function(
    -            POSITION.getName().getFunctionName(),
    -            Arrays.asList(visitFunctionArg(ctx.functionArg(0)),
    -                    visitFunctionArg(ctx.functionArg(1))));
    +        ctx.timestampFunction().timestampFunctionName().getText(), timestampFunctionArguments(ctx));
    +  }
    +
    +  @Override
    +  public UnresolvedExpression visitPositionFunction(PositionFunctionContext ctx) {
    +    return new Function(
    +        POSITION.getName().getFunctionName(),
    +        Arrays.asList(visitFunctionArg(ctx.functionArg(0)), visitFunctionArg(ctx.functionArg(1))));
       }
     
       @Override
    @@ -173,8 +188,7 @@ public UnresolvedExpression visitColumnFilter(ColumnFilterContext ctx) {
       }
     
       @Override
    -  public UnresolvedExpression visitShowDescribePattern(
    -      ShowDescribePatternContext ctx) {
    +  public UnresolvedExpression visitShowDescribePattern(ShowDescribePatternContext ctx) {
         if (ctx.compatibleID() != null) {
           return stringLiteral(ctx.compatibleID().getText());
         } else {
    @@ -184,7 +198,7 @@ public UnresolvedExpression visitShowDescribePattern(
     
       @Override
       public UnresolvedExpression visitFilteredAggregationFunctionCall(
    -      OpenSearchSQLParser.FilteredAggregationFunctionCallContext ctx) {
    +      FilteredAggregationFunctionCallContext ctx) {
         AggregateFunction agg = (AggregateFunction) visit(ctx.aggregateFunction());
         return agg.condition(visit(ctx.filterClause()));
       }
    @@ -195,21 +209,18 @@ public UnresolvedExpression visitWindowFunctionClause(WindowFunctionClauseContex
     
     List<UnresolvedExpression> partitionByList = Collections.emptyList();
         if (overClause.partitionByClause() != null) {
    -      partitionByList = overClause.partitionByClause()
    -                                  .expression()
    -                                  .stream()
    -                                  .map(this::visit)
    -                                  .collect(Collectors.toList());
    +      partitionByList =
    +          overClause.partitionByClause().expression().stream()
    +              .map(this::visit)
    +              .collect(Collectors.toList());
         }
     
     List<Pair<SortOption, UnresolvedExpression>> sortList = Collections.emptyList();
         if (overClause.orderByClause() != null) {
    -      sortList = overClause.orderByClause()
    -                           .orderByElement()
    -                           .stream()
    -                           .map(item -> ImmutablePair.of(
    -                               createSortOption(item), visit(item.expression())))
    -                           .collect(Collectors.toList());
    +      sortList =
    +          overClause.orderByClause().orderByElement().stream()
    +              .map(item -> ImmutablePair.of(createSortOption(item), visit(item.expression())))
    +              .collect(Collectors.toList());
         }
         return new WindowFunction(visit(ctx.function), partitionByList, sortList);
       }
    @@ -222,17 +233,12 @@ public UnresolvedExpression visitScalarWindowFunction(ScalarWindowFunctionContex
       @Override
       public UnresolvedExpression visitRegularAggregateFunctionCall(
           RegularAggregateFunctionCallContext ctx) {
    -    return new AggregateFunction(
    -        ctx.functionName.getText(),
    -        visitFunctionArg(ctx.functionArg()));
    +    return new AggregateFunction(ctx.functionName.getText(), visitFunctionArg(ctx.functionArg()));
       }
     
       @Override
       public UnresolvedExpression visitDistinctCountFunctionCall(DistinctCountFunctionCallContext ctx) {
    -    return new AggregateFunction(
    -        ctx.COUNT().getText(),
    -        visitFunctionArg(ctx.functionArg()),
    -        true);
    +    return new AggregateFunction(ctx.COUNT().getText(), visitFunctionArg(ctx.functionArg()), true);
       }
     
       @Override
    @@ -241,41 +247,48 @@ public UnresolvedExpression visitCountStarFunctionCall(CountStarFunctionCallCont
       }
     
       @Override
    -  public UnresolvedExpression visitFilterClause(OpenSearchSQLParser.FilterClauseContext ctx) {
    +  public UnresolvedExpression visitFilterClause(FilterClauseContext ctx) {
         return visit(ctx.expression());
       }
     
       @Override
       public UnresolvedExpression visitIsNullPredicate(IsNullPredicateContext ctx) {
         return new Function(
    -        ctx.nullNotnull().NOT() == null ? IS_NULL.getName().getFunctionName() :
    -            IS_NOT_NULL.getName().getFunctionName(),
    +        ctx.nullNotnull().NOT() == null
    +            ? IS_NULL.getName().getFunctionName()
    +            : IS_NOT_NULL.getName().getFunctionName(),
             Arrays.asList(visit(ctx.predicate())));
       }
     
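+  /** Builds BETWEEN, e.g. {@code age BETWEEN 10 AND 30}; with NOT the result is wrapped in not(). */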
    +  @Override
    +  public UnresolvedExpression visitBetweenPredicate(BetweenPredicateContext ctx) {
    +    UnresolvedExpression func =
    +        between(visit(ctx.predicate(0)), visit(ctx.predicate(1)), visit(ctx.predicate(2)));
    +
    +    if (ctx.NOT() != null) {
    +      func = not(func);
    +    }
    +    return func;
    +  }
    +
       @Override
       public UnresolvedExpression visitLikePredicate(LikePredicateContext ctx) {
         return new Function(
    -        ctx.NOT() == null ? LIKE.getName().getFunctionName() :
    -            NOT_LIKE.getName().getFunctionName(),
    +        ctx.NOT() == null ? LIKE.getName().getFunctionName() : NOT_LIKE.getName().getFunctionName(),
             Arrays.asList(visit(ctx.left), visit(ctx.right)));
       }
     
       @Override
       public UnresolvedExpression visitRegexpPredicate(RegexpPredicateContext ctx) {
    -    return new Function(REGEXP.getName().getFunctionName(),
    -            Arrays.asList(visit(ctx.left), visit(ctx.right)));
    +    return new Function(
    +        REGEXP.getName().getFunctionName(), Arrays.asList(visit(ctx.left), visit(ctx.right)));
       }
     
       @Override
    -  public UnresolvedExpression visitInPredicate(OpenSearchSQLParser.InPredicateContext ctx) {
    +  public UnresolvedExpression visitInPredicate(InPredicateContext ctx) {
         UnresolvedExpression field = visit(ctx.predicate());
-    List<UnresolvedExpression> inLists = ctx
    -        .expressions()
    -        .expression()
    -        .stream()
    -        .map(this::visit)
    -        .collect(Collectors.toList());
+    List<UnresolvedExpression> inLists =
    +        ctx.expressions().expression().stream().map(this::visit).collect(Collectors.toList());
         UnresolvedExpression in = AstDSL.in(field, inLists);
         return ctx.NOT() != null ? AstDSL.not(in) : in;
       }
    @@ -340,34 +353,30 @@ public UnresolvedExpression visitTimeLiteral(TimeLiteralContext ctx) {
       }
     
       @Override
    -  public UnresolvedExpression visitTimestampLiteral(
    -      TimestampLiteralContext ctx) {
    +  public UnresolvedExpression visitTimestampLiteral(TimestampLiteralContext ctx) {
         return AstDSL.timestampLiteral(StringUtils.unquoteText(ctx.timestamp.getText()));
       }
     
       @Override
       public UnresolvedExpression visitIntervalLiteral(IntervalLiteralContext ctx) {
    -    return new Interval(
    -        visit(ctx.expression()), IntervalUnit.of(ctx.intervalUnit().getText()));
    +    return new Interval(visit(ctx.expression()), IntervalUnit.of(ctx.intervalUnit().getText()));
       }
     
       @Override
    -  public UnresolvedExpression visitBinaryComparisonPredicate(
    -      BinaryComparisonPredicateContext ctx) {
    +  public UnresolvedExpression visitBinaryComparisonPredicate(BinaryComparisonPredicateContext ctx) {
         String functionName = ctx.comparisonOperator().getText();
         return new Function(
             functionName.equals("<>") ? "!=" : functionName,
    -        Arrays.asList(visit(ctx.left), visit(ctx.right))
    -    );
    +        Arrays.asList(visit(ctx.left), visit(ctx.right)));
       }
     
       @Override
       public UnresolvedExpression visitCaseFunctionCall(CaseFunctionCallContext ctx) {
         UnresolvedExpression caseValue = (ctx.expression() == null) ? null : visit(ctx.expression());
-    List<When> whenStatements = ctx.caseFuncAlternative()
    -                                   .stream()
    -                                   .map(when -> (When) visit(when))
    -                                   .collect(Collectors.toList());
+    List<When> whenStatements =
    +        ctx.caseFuncAlternative().stream()
    +            .map(when -> (When) visit(when))
    +            .collect(Collectors.toList());
         UnresolvedExpression elseStatement = (ctx.elseArg == null) ? null : visit(ctx.elseArg);
     
         return new Case(caseValue, whenStatements, elseStatement);
    @@ -379,23 +388,39 @@ public UnresolvedExpression visitCaseFuncAlternative(CaseFuncAlternativeContext
       }
     
       @Override
    -  public UnresolvedExpression visitDataTypeFunctionCall(
    -      DataTypeFunctionCallContext ctx) {
    +  public UnresolvedExpression visitDataTypeFunctionCall(DataTypeFunctionCallContext ctx) {
         return new Cast(visit(ctx.expression()), visit(ctx.convertedDataType()));
       }
     
       @Override
    -  public UnresolvedExpression visitConvertedDataType(
    -      ConvertedDataTypeContext ctx) {
    +  public UnresolvedExpression visitConvertedDataType(ConvertedDataTypeContext ctx) {
         return AstDSL.stringLiteral(ctx.getText());
       }
     
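+  /**
+   * Build percentile aggregation, e.g. {@code PERCENTILE_APPROX(age, 50)} with an optional third
+   * compression argument (example syntax assumed from the grammar's percent/compression labels).
+   */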
       @Override
    -  public UnresolvedExpression visitNoFieldRelevanceFunction(
    -          OpenSearchSQLParser.NoFieldRelevanceFunctionContext ctx) {
    +  public UnresolvedExpression visitPercentileApproxFunctionCall(
    +      OpenSearchSQLParser.PercentileApproxFunctionCallContext ctx) {
+    ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    +    builder.add(
    +        new UnresolvedArgument(
    +            "percent",
    +            AstDSL.doubleLiteral(
    +                Double.valueOf(ctx.percentileApproxFunction().percent.getText()))));
    +    if (ctx.percentileApproxFunction().compression != null) {
    +      builder.add(
    +          new UnresolvedArgument(
    +              "compression",
    +              AstDSL.doubleLiteral(
    +                  Double.valueOf(ctx.percentileApproxFunction().compression.getText()))));
    +    }
    +    return new AggregateFunction(
    +        "percentile", visit(ctx.percentileApproxFunction().aggField), builder.build());
    +  }
    +
    +  @Override
    +  public UnresolvedExpression visitNoFieldRelevanceFunction(NoFieldRelevanceFunctionContext ctx) {
         return new Function(
    -            ctx.noFieldRelevanceFunctionName().getText().toLowerCase(),
    -            noFieldRelevanceArguments(ctx));
    +        ctx.noFieldRelevanceFunctionName().getText().toLowerCase(), noFieldRelevanceArguments(ctx));
       }
     
       @Override
    @@ -406,6 +431,14 @@ public UnresolvedExpression visitSingleFieldRelevanceFunction(
             singleFieldRelevanceArguments(ctx));
       }
     
    +  @Override
    +  public UnresolvedExpression visitAltSingleFieldRelevanceFunction(
    +      AltSingleFieldRelevanceFunctionContext ctx) {
    +    return new Function(
    +        ctx.altSyntaxFunctionName.getText().toLowerCase(),
    +        altSingleFieldRelevanceFunctionArguments(ctx));
    +  }
    +
       @Override
       public UnresolvedExpression visitMultiFieldRelevanceFunction(
           MultiFieldRelevanceFunctionContext ctx) {
    @@ -413,10 +446,9 @@ public UnresolvedExpression visitMultiFieldRelevanceFunction(
         // 'MULTI_MATCH('query'='query_val', 'fields'='*fields_val')'
         String funcName = StringUtils.unquoteText(ctx.multiFieldRelevanceFunctionName().getText());
         if ((funcName.equalsIgnoreCase(BuiltinFunctionName.MULTI_MATCH.toString())
    -        || funcName.equalsIgnoreCase(BuiltinFunctionName.MULTIMATCH.toString())
    -        || funcName.equalsIgnoreCase(BuiltinFunctionName.MULTIMATCHQUERY.toString()))
    -        && ! ctx.getRuleContexts(OpenSearchSQLParser.AlternateMultiMatchQueryContext.class)
    -        .isEmpty()) {
    +            || funcName.equalsIgnoreCase(BuiltinFunctionName.MULTIMATCH.toString())
    +            || funcName.equalsIgnoreCase(BuiltinFunctionName.MULTIMATCHQUERY.toString()))
    +        && !ctx.getRuleContexts(AlternateMultiMatchQueryContext.class).isEmpty()) {
           return new Function(
               ctx.multiFieldRelevanceFunctionName().getText().toLowerCase(),
               alternateMultiMatchArguments(ctx));
    @@ -427,115 +459,211 @@ public UnresolvedExpression visitMultiFieldRelevanceFunction(
         }
       }
     
-  private Function buildFunction(String functionName,
-                                 List<FunctionArgContext> arg) {
    +  @Override
    +  public UnresolvedExpression visitAltMultiFieldRelevanceFunction(
    +      AltMultiFieldRelevanceFunctionContext ctx) {
         return new Function(
    -        functionName,
    -        arg
    -            .stream()
    -            .map(this::visitFunctionArg)
    -            .collect(Collectors.toList())
    -    );
    +        ctx.altSyntaxFunctionName.getText().toLowerCase(),
    +        altMultiFieldRelevanceFunctionArguments(ctx));
    +  }
    +
    +  /**
    +   * Visit score-relevance function and collect children.
    +   *
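+   * <p>e.g. {@code SCORE(MATCH(title, 'hello'), 1.5)} boosts the wrapped relevance query by 1.5;
+   * the weight defaults to 1.0 when omitted (example syntax assumed from this grammar rule).
+   *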
    +   * @param ctx the parse tree
    +   * @return children
    +   */
+  @Override
+  public UnresolvedExpression visitScoreRelevanceFunction(ScoreRelevanceFunctionContext ctx) {
    +    Literal weight =
    +        ctx.weight == null
    +            ? new Literal(Double.valueOf(1.0), DataType.DOUBLE)
    +            : new Literal(Double.parseDouble(ctx.weight.getText()), DataType.DOUBLE);
    +    return new ScoreFunction(visit(ctx.relevanceFunction()), weight);
    +  }
    +
+  private Function buildFunction(String functionName, List<FunctionArgContext> arg) {
    +    return new Function(
    +        functionName, arg.stream().map(this::visitFunctionArg).collect(Collectors.toList()));
    +  }
    +
    +  @Override
    +  public UnresolvedExpression visitExtractFunctionCall(ExtractFunctionCallContext ctx) {
    +    return new Function(
    +        ctx.extractFunction().EXTRACT().toString(), getExtractFunctionArguments(ctx));
       }
     
   private QualifiedName visitIdentifiers(List<IdentContext> identifiers) {
         return new QualifiedName(
             identifiers.stream()
    -                   .map(RuleContext::getText)
    -                   .map(StringUtils::unquoteIdentifier)
    -                   .collect(Collectors.toList())
    -    );
    +            .map(RuleContext::getText)
    +            .map(StringUtils::unquoteIdentifier)
    +            .collect(Collectors.toList()));
       }
     
-  private void fillRelevanceArgs(List<RelevanceArgContext> args,
-                                 ImmutableList.Builder<UnresolvedExpression> builder) {
+  private void fillRelevanceArgs(
+      List<RelevanceArgContext> args, ImmutableList.Builder<UnresolvedExpression> builder) {
         // To support old syntax we must support argument keys as quoted strings.
    -    args.forEach(v -> builder.add(v.argName == null
    -        ? new UnresolvedArgument(v.relevanceArgName().getText().toLowerCase(),
    -            new Literal(StringUtils.unquoteText(v.relevanceArgValue().getText()),
    -            DataType.STRING))
    -        : new UnresolvedArgument(StringUtils.unquoteText(v.argName.getText()).toLowerCase(),
    -            new Literal(StringUtils.unquoteText(v.argVal.getText()), DataType.STRING))));
    +    args.forEach(
    +        v ->
    +            builder.add(
    +                v.argName == null
    +                    ? new UnresolvedArgument(
    +                        v.relevanceArgName().getText().toLowerCase(),
    +                        new Literal(
    +                            StringUtils.unquoteText(v.relevanceArgValue().getText()),
    +                            DataType.STRING))
    +                    : new UnresolvedArgument(
    +                        StringUtils.unquoteText(v.argName.getText()).toLowerCase(),
    +                        new Literal(
    +                            StringUtils.unquoteText(v.argVal.getText()), DataType.STRING))));
       }
     
   private List<UnresolvedExpression> noFieldRelevanceArguments(
    -          OpenSearchSQLParser.NoFieldRelevanceFunctionContext ctx) {
    +      NoFieldRelevanceFunctionContext ctx) {
         // all the arguments are defaulted to string values
         // to skip environment resolving and function signature resolving
     ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    -    builder.add(new UnresolvedArgument("query",
    -            new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "query", new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
         fillRelevanceArgs(ctx.relevanceArg(), builder);
         return builder.build();
       }
     
   private List<UnresolvedExpression> singleFieldRelevanceArguments(
    -        OpenSearchSQLParser.SingleFieldRelevanceFunctionContext ctx) {
    +      SingleFieldRelevanceFunctionContext ctx) {
         // all the arguments are defaulted to string values
         // to skip environment resolving and function signature resolving
     ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    -    builder.add(new UnresolvedArgument("field",
    -        new Literal(StringUtils.unquoteText(ctx.field.getText()), DataType.STRING)));
    -    builder.add(new UnresolvedArgument("query",
    -        new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "field", new QualifiedName(StringUtils.unquoteText(ctx.field.getText()))));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "query", new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
         fillRelevanceArgs(ctx.relevanceArg(), builder);
         return builder.build();
       }
     
    -
+  private List<UnresolvedExpression> altSingleFieldRelevanceFunctionArguments(
    +      AltSingleFieldRelevanceFunctionContext ctx) {
    +    // all the arguments are defaulted to string values
    +    // to skip environment resolving and function signature resolving
+    ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    +    builder.add(
    +        new UnresolvedArgument(
    +            "field", new QualifiedName(StringUtils.unquoteText(ctx.field.getText()))));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "query", new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
    +    fillRelevanceArgs(ctx.relevanceArg(), builder);
    +    return builder.build();
    +  }
     
   private List<UnresolvedExpression> multiFieldRelevanceArguments(
    -      OpenSearchSQLParser.MultiFieldRelevanceFunctionContext ctx) {
    +      MultiFieldRelevanceFunctionContext ctx) {
         // all the arguments are defaulted to string values
         // to skip environment resolving and function signature resolving
     ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    -    var fields = new RelevanceFieldList(ctx
    -        .getRuleContexts(OpenSearchSQLParser.RelevanceFieldAndWeightContext.class)
    -        .stream()
    -        .collect(Collectors.toMap(
    -            f -> StringUtils.unquoteText(f.field.getText()),
    -            f -> (f.weight == null) ? 1F : Float.parseFloat(f.weight.getText()))));
    +    var fields =
    +        new RelevanceFieldList(
    +            ctx.getRuleContexts(RelevanceFieldAndWeightContext.class).stream()
    +                .collect(
    +                    Collectors.toMap(
    +                        f -> StringUtils.unquoteText(f.field.getText()),
    +                        f -> (f.weight == null) ? 1F : Float.parseFloat(f.weight.getText()))));
         builder.add(new UnresolvedArgument("fields", fields));
    -    builder.add(new UnresolvedArgument("query",
    -        new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "query", new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
         fillRelevanceArgs(ctx.relevanceArg(), builder);
         return builder.build();
       }
     
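+  /**
+   * Arguments for GET_FORMAT, e.g. {@code GET_FORMAT(DATE, 'USA')}: the format type literal plus
+   * the format-string argument.
+   */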
+  private List<UnresolvedExpression> getFormatFunctionArguments(GetFormatFunctionCallContext ctx) {
+    List<UnresolvedExpression> args =
    +        Arrays.asList(
    +            new Literal(ctx.getFormatFunction().getFormatType().getText(), DataType.STRING),
    +            visitFunctionArg(ctx.getFormatFunction().functionArg()));
    +    return args;
    +  }
    +
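+  /**
+   * Arguments for TIMESTAMPADD/TIMESTAMPDIFF, e.g. {@code TIMESTAMPADD(MINUTE, 1, '2003-01-02')}:
+   * the date-time unit literal plus the two remaining arguments.
+   */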
+  private List<UnresolvedExpression> timestampFunctionArguments(TimestampFunctionCallContext ctx) {
+    List<UnresolvedExpression> args =
    +        Arrays.asList(
    +            new Literal(ctx.timestampFunction().simpleDateTimePart().getText(), DataType.STRING),
    +            visitFunctionArg(ctx.timestampFunction().firstArg),
    +            visitFunctionArg(ctx.timestampFunction().secondArg));
    +    return args;
    +  }
    +
       /**
    -   * Adds support for multi_match alternate syntax like
    -   * MULTI_MATCH('query'='Dale', 'fields'='*name').
    +   * Adds support for multi_match alternate syntax like MULTI_MATCH('query'='Dale',
    +   * 'fields'='*name').
    +   *
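+   * <p>Fields may carry boosts split on '^', e.g. {@code 'fields'='title^2,body'} maps title to
+   * weight 2.0 and body to the default 1.0.
+   *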
        * @param ctx : Context for multi field relevance function.
        * @return : Returns list of all arguments for relevance function.
        */
   private List<UnresolvedExpression> alternateMultiMatchArguments(
    -      OpenSearchSQLParser.MultiFieldRelevanceFunctionContext ctx) {
    +      MultiFieldRelevanceFunctionContext ctx) {
         // all the arguments are defaulted to string values
         // to skip environment resolving and function signature resolving
     ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
     Map<String, Float> fieldAndWeightMap = new HashMap<>();
     
    -    String[] fieldAndWeights = StringUtils.unquoteText(
    -        ctx.getRuleContexts(OpenSearchSQLParser.AlternateMultiMatchFieldContext.class)
    -        .stream().findFirst().get().argVal.getText()).split(",");
    +    String[] fieldAndWeights =
    +        StringUtils.unquoteText(
    +                ctx.getRuleContexts(AlternateMultiMatchFieldContext.class).stream()
    +                    .findFirst()
    +                    .get()
    +                    .argVal
    +                    .getText())
    +            .split(",");
     
         for (var fieldAndWeight : fieldAndWeights) {
           String[] splitFieldAndWeights = fieldAndWeight.split("\\^");
    -      fieldAndWeightMap.put(splitFieldAndWeights[0],
    +      fieldAndWeightMap.put(
    +          splitFieldAndWeights[0],
               splitFieldAndWeights.length > 1 ? Float.parseFloat(splitFieldAndWeights[1]) : 1F);
         }
    -    builder.add(new UnresolvedArgument("fields",
    -        new RelevanceFieldList(fieldAndWeightMap)));
    -
    -    ctx.getRuleContexts(OpenSearchSQLParser.AlternateMultiMatchQueryContext.class)
    -        .stream().findFirst().ifPresent(
    -          arg ->
    -            builder.add(new UnresolvedArgument("query",
    -                new Literal(StringUtils.unquoteText(arg.argVal.getText()), DataType.STRING)))
    -        );
    +    builder.add(new UnresolvedArgument("fields", new RelevanceFieldList(fieldAndWeightMap)));
    +
    +    ctx.getRuleContexts(AlternateMultiMatchQueryContext.class).stream()
    +        .findFirst()
    +        .ifPresent(
    +            arg ->
    +                builder.add(
    +                    new UnresolvedArgument(
    +                        "query",
    +                        new Literal(
    +                            StringUtils.unquoteText(arg.argVal.getText()), DataType.STRING))));
     
         fillRelevanceArgs(ctx.relevanceArg(), builder);
     
         return builder.build();
       }
    +
+  private List<UnresolvedExpression> altMultiFieldRelevanceFunctionArguments(
    +      AltMultiFieldRelevanceFunctionContext ctx) {
    +    // all the arguments are defaulted to string values
    +    // to skip environment resolving and function signature resolving
+    var map = new HashMap<String, Float>();
    +    map.put(ctx.field.getText(), 1F);
+    ImmutableList.Builder<UnresolvedExpression> builder = ImmutableList.builder();
    +    var fields = new RelevanceFieldList(map);
    +    builder.add(new UnresolvedArgument("fields", fields));
    +    builder.add(
    +        new UnresolvedArgument(
    +            "query", new Literal(StringUtils.unquoteText(ctx.query.getText()), DataType.STRING)));
    +    fillRelevanceArgs(ctx.relevanceArg(), builder);
    +    return builder.build();
    +  }
    +
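+  /**
+   * Arguments for EXTRACT, e.g. {@code EXTRACT(YEAR FROM dt)}: the date-time part literal plus
+   * the source argument.
+   */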
+  private List<UnresolvedExpression> getExtractFunctionArguments(ExtractFunctionCallContext ctx) {
+    List<UnresolvedExpression> args =
    +        Arrays.asList(
    +            new Literal(ctx.extractFunction().datetimePart().getText(), DataType.STRING),
    +            visitFunctionArg(ctx.extractFunction().functionArg()));
    +    return args;
    +  }
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilder.java
    index f90ea2f991..94c11d05af 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilder.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilder.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.QualifiedNameContext;
    @@ -13,10 +12,9 @@
     import org.opensearch.sql.sql.parser.context.QuerySpecification;
     
     /**
    - * AST Having filter builder that builds HAVING clause condition expressions
    - * and replace alias by original expression in SELECT clause.
    - * The reason for this is it's hard to replace afterwards since UnresolvedExpression
    - * is immutable.
+ * AST Having filter builder that builds HAVING clause condition expressions and replaces aliases
+ * by the original expressions from the SELECT clause. The reason is that it is hard to replace
+ * them afterwards since UnresolvedExpression is immutable.
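+ *
+ * <p>e.g. in {@code SELECT AVG(age) AS a FROM t GROUP BY city HAVING a > 30} the alias {@code a}
+ * in the condition is replaced by {@code AVG(age)}.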
      */
     @RequiredArgsConstructor
     public class AstHavingFilterBuilder extends AstExpressionBuilder {
    @@ -34,5 +32,4 @@ private UnresolvedExpression replaceAlias(UnresolvedExpression expr) {
         }
         return expr;
       }
    -
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstSortBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstSortBuilder.java
    index 1b872dce54..2594709f4f 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstSortBuilder.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstSortBuilder.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.opensearch.sql.ast.dsl.AstDSL.booleanLiteral;
    @@ -27,8 +26,8 @@
     import org.opensearch.sql.sql.parser.context.QuerySpecification;
     
     /**
    - * AST sort builder that builds Sort AST node from ORDER BY clause. During this process, the item
    - * in order by may be replaced by item in project list if it's an alias or ordinal. This is same as
    + * AST sort builder that builds Sort AST node from ORDER BY clause. During this process, the item in
+ * order by may be replaced by an item in the project list if it's an alias or ordinal. This is the same as
      * GROUP BY building process.
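+ * For example, {@code SELECT name AS n FROM t ORDER BY n} and {@code SELECT name FROM t ORDER
+ * BY 1} both sort by {@code name}.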
      */
     @RequiredArgsConstructor
@@ -38,9 +37,7 @@ public class AstSortBuilder extends OpenSearchSQLParserBaseVisitor<UnresolvedPlan> {
 
   @Override
   public UnresolvedPlan visitOrderByClause(OrderByClauseContext ctx) {
-    return new Sort(
-        createSortFields()
-    );
+    return new Sort(createSortFields());
   }
 
   private List<Field> createSortFields() {
@@ -57,8 +54,8 @@ private List<Field> createSortFields() {
       }
     
       /**
    -   * Argument "asc" is required.
    -   * Argument "nullFirst" is optional and determined by Analyzer later if absent.
    +   * Argument "asc" is required. Argument "nullFirst" is optional and determined by Analyzer later
    +   * if absent.
        */
   private List<Argument> createSortArguments(SortOption option) {
         SortOrder sortOrder = option.getSortOrder();
    @@ -71,5 +68,4 @@ private List createSortArguments(SortOption option) {
         }
         return args.build();
       }
    -
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/AstStatementBuilder.java b/sql/src/main/java/org/opensearch/sql/sql/parser/AstStatementBuilder.java
    index 40d549764a..593e7b51ff 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/AstStatementBuilder.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/AstStatementBuilder.java
@@ -26,7 +26,7 @@ public class AstStatementBuilder extends OpenSearchSQLParserBaseVisitor<Statement>
diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/context/ParsingContext.java b/sql/src/main/java/org/opensearch/sql/sql/parser/context/ParsingContext.java
--- a/sql/src/main/java/org/opensearch/sql/sql/parser/context/ParsingContext.java
+++ b/sql/src/main/java/org/opensearch/sql/sql/parser/context/ParsingContext.java
   private final Deque<QuerySpecification> contexts = new ArrayDeque<>();
     
    @@ -31,10 +30,10 @@ public QuerySpecification peek() {
     
       /**
        * Pop up query context.
    -   * @return  query context after popup.
    +   *
    +   * @return query context after popup.
        */
       public QuerySpecification pop() {
         return contexts.pop();
       }
    -
     }
    diff --git a/sql/src/main/java/org/opensearch/sql/sql/parser/context/QuerySpecification.java b/sql/src/main/java/org/opensearch/sql/sql/parser/context/QuerySpecification.java
    index 21dddde2b9..abcd4f2073 100644
    --- a/sql/src/main/java/org/opensearch/sql/sql/parser/context/QuerySpecification.java
    +++ b/sql/src/main/java/org/opensearch/sql/sql/parser/context/QuerySpecification.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser.context;
     
     import static org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser.FilteredAggregationFunctionCallContext;
    @@ -42,6 +41,7 @@
     
     /**
      * Query specification domain that collects basic info for a simple query.
    + *
  * <pre>
      * (I) What is the impact of this new abstraction?
      *  This abstraction and collecting process turns AST building process into two phases:
    @@ -61,10 +61,9 @@
     @ToString
     public class QuerySpecification {
     
    -  /**
    -   * Items in SELECT clause and mapping from alias to select item.
    -   */
    +  /** Items in SELECT clause and mapping from alias to select item. */
   private final List<UnresolvedExpression> selectItems = new ArrayList<>();
    +
   private final Map<String, UnresolvedExpression> selectItemsByAlias = new HashMap<>();
     
       /**
    @@ -74,31 +73,30 @@ public class QuerySpecification {
   private final Set<UnresolvedExpression> aggregators = new LinkedHashSet<>();
     
       /**
    -   * Items in GROUP BY clause that may be:
    -   *  1) Simple field name
    -   *  2) Field nested in scalar function call
    -   *  3) Ordinal that points to expression in SELECT
    -   *  4) Alias that points to expression in SELECT.
    +   * Items in GROUP BY clause that may be: 1) Simple field name 2) Field nested in scalar function
    +   * call 3) Ordinal that points to expression in SELECT 4) Alias that points to expression in
    +   * SELECT.
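+   *
+   * <p>e.g. {@code GROUP BY city, ABS(age), 1, cnt} uses all four forms when {@code cnt} aliases
+   * a select item.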
        */
   private final List<UnresolvedExpression> groupByItems = new ArrayList<>();
     
    -  /**
    -   * Items in ORDER BY clause that may be different forms as above and its options.
    -   */
    +  /** Items in ORDER BY clause that may be different forms as above and its options. */
   private final List<UnresolvedExpression> orderByItems = new ArrayList<>();
    +
   private final List<SortOption> orderByOptions = new ArrayList<>();
     
       /**
   * Collect all query information in the parse tree (excluding info in sub-query).
    -   * @param query   query spec node in parse tree
    +   *
    +   * @param query query spec node in parse tree
        */
       public void collect(QuerySpecificationContext query, String queryString) {
         query.accept(new QuerySpecificationCollector(queryString));
       }
     
       /**
    -   * Replace unresolved expression if it's an alias or ordinal that represents
    -   * an actual expression in SELECT list.
    +   * Replace unresolved expression if it's an alias or ordinal that represents an actual expression
    +   * in SELECT list.
    +   *
        * @param expr item to be replaced
        * @return select item that the given expr represents
        */
    @@ -118,8 +116,8 @@ private boolean isIntegerLiteral(UnresolvedExpression expr) {
         }
     
         if (((Literal) expr).getType() != DataType.INTEGER) {
    -      throw new SemanticCheckException(StringUtils.format(
    -          "Non-integer constant [%s] found in ordinal", expr));
    +      throw new SemanticCheckException(
    +          StringUtils.format("Non-integer constant [%s] found in ordinal", expr));
         }
         return true;
       }
    @@ -127,25 +125,26 @@ private boolean isIntegerLiteral(UnresolvedExpression expr) {
       private UnresolvedExpression getSelectItemByOrdinal(UnresolvedExpression expr) {
         int ordinal = (Integer) ((Literal) expr).getValue();
         if (ordinal <= 0 || ordinal > selectItems.size()) {
    -      throw new SemanticCheckException(StringUtils.format(
    -          "Ordinal [%d] is out of bound of select item list", ordinal));
    +      throw new SemanticCheckException(
    +          StringUtils.format("Ordinal [%d] is out of bound of select item list", ordinal));
         }
         return selectItems.get(ordinal - 1);
       }
     
       /**
        * Check if an expression is a select alias.
    -   * @param expr  expression
    +   *
    +   * @param expr expression
        * @return true if it's an alias
        */
       public boolean isSelectAlias(UnresolvedExpression expr) {
    -    return (expr instanceof QualifiedName)
    -        && (selectItemsByAlias.containsKey(expr.toString()));
    +    return (expr instanceof QualifiedName) && (selectItemsByAlias.containsKey(expr.toString()));
       }
     
       /**
        * Get original expression aliased in SELECT clause.
    -   * @param expr  alias
    +   *
    +   * @param expr alias
        * @return expression in SELECT
        */
       public UnresolvedExpression getSelectItemByAlias(UnresolvedExpression expr) {
    @@ -223,8 +222,7 @@ public Void visitAggregateFunctionCall(AggregateFunctionCallContext ctx) {
         @Override
         public Void visitFilteredAggregationFunctionCall(FilteredAggregationFunctionCallContext ctx) {
           UnresolvedExpression aggregateFunction = visitAstExpression(ctx);
    -      aggregators.add(
    -          AstDSL.alias(getTextInQuery(ctx, queryString), aggregateFunction));
    +      aggregators.add(AstDSL.alias(getTextInQuery(ctx, queryString), aggregateFunction));
           return super.visitFilteredAggregationFunctionCall(ctx);
         }
     
    @@ -236,5 +234,4 @@ private UnresolvedExpression visitAstExpression(ParseTree tree) {
           return expressionBuilder.visit(tree);
         }
       }
    -
     }
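
    For readers skimming the reformatted javadoc above: GROUP BY and ORDER BY items may arrive
    as plain fields, 1-based ordinals, or SELECT aliases, and QuerySpecification resolves the
    latter two back to the underlying SELECT expression. A minimal sketch of that resolution
    rule, using hypothetical stand-in types (String instead of the plugin's UnresolvedExpression):

    import java.util.List;
    import java.util.Map;

    class OrdinalAliasResolutionSketch {
      // Stand-ins for selectItems / selectItemsByAlias collected from the parse tree.
      static String resolve(String item, List<String> selectItems, Map<String, String> byAlias) {
        if (byAlias.containsKey(item)) {
          return byAlias.get(item); // alias case: SELECT age AS a ... GROUP BY a
        }
        try {
          int ordinal = Integer.parseInt(item); // ordinal case: GROUP BY 1
          if (ordinal <= 0 || ordinal > selectItems.size()) {
            throw new IllegalArgumentException(
                "Ordinal [" + ordinal + "] is out of bound of select item list");
          }
          return selectItems.get(ordinal - 1); // ordinals are 1-based
        } catch (NumberFormatException notAnOrdinal) {
          return item; // plain field or scalar function call: keep as-is
        }
      }
    }
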
    diff --git a/sql/src/test/java/org/opensearch/sql/common/antlr/SyntaxParserTestBase.java b/sql/src/test/java/org/opensearch/sql/common/antlr/SyntaxParserTestBase.java
    index 526dc4e816..87f2083774 100644
    --- a/sql/src/test/java/org/opensearch/sql/common/antlr/SyntaxParserTestBase.java
    +++ b/sql/src/test/java/org/opensearch/sql/common/antlr/SyntaxParserTestBase.java
    @@ -1,24 +1,20 @@
     package org.opensearch.sql.common.antlr;
     
     import static org.junit.jupiter.api.Assertions.assertNotNull;
    -import static org.junit.jupiter.api.Assertions.assertNull;
     import static org.junit.jupiter.api.Assertions.assertThrows;
     
     import lombok.AccessLevel;
     import lombok.Getter;
     import lombok.RequiredArgsConstructor;
    -import org.opensearch.sql.sql.antlr.SQLSyntaxParser;
     
    -/**
    - * A base class for tests for SQL or PPL parser.
    - */
    +/** A base class for tests for SQL or PPL parser. */
     @RequiredArgsConstructor(access = AccessLevel.PROTECTED)
     public abstract class SyntaxParserTestBase {
    -  @Getter
    -  private final Parser parser;
    +  @Getter private final Parser parser;
     
       /**
        * A helper function that fails a test if the parser rejects a given query.
    +   *
        * @param query Query to test.
        */
       protected void acceptQuery(String query) {
    @@ -27,6 +23,7 @@ protected void acceptQuery(String query) {
     
       /**
        * A helper function that fails a test if the parser accepts a given query.
    +   *
        * @param query Query to test.
        */
       protected void rejectQuery(String query) {
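
    A usage sketch of the test base above, assuming (as SQLParserTest further below does) that a
    concrete Parser implementation is passed to the constructor; the subclass and its test bodies
    are hypothetical:

    import org.junit.jupiter.api.Test;
    import org.opensearch.sql.common.antlr.SyntaxParserTestBase;
    import org.opensearch.sql.sql.antlr.SQLSyntaxParser;

    class SmokeParserTest extends SyntaxParserTestBase {
      SmokeParserTest() {
        super(new SQLSyntaxParser()); // a PPL parser would be wired the same way
      }

      @Test
      void accepts_trivial_select() {
        acceptQuery("SELECT 1"); // fails the test if the parser rejects the query
      }

      @Test
      void rejects_malformed_query() {
        rejectQuery("SELECT FROM WHERE"); // fails the test if the parser accepts the query
      }
    }
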
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/SQLServiceTest.java b/sql/src/test/java/org/opensearch/sql/sql/SQLServiceTest.java
    index 9abe37cd06..b124757069 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/SQLServiceTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/SQLServiceTest.java
    @@ -3,63 +3,54 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql;
     
    +import static org.junit.jupiter.api.Assertions.assertEquals;
     import static org.junit.jupiter.api.Assertions.assertNotNull;
     import static org.junit.jupiter.api.Assertions.fail;
     import static org.mockito.ArgumentMatchers.any;
     import static org.mockito.Mockito.doAnswer;
     import static org.opensearch.sql.executor.ExecutionEngine.QueryResponse;
     
    -import java.util.Collections;
    +import java.util.Map;
     import java.util.concurrent.TimeUnit;
     import org.json.JSONObject;
     import org.junit.jupiter.api.AfterEach;
     import org.junit.jupiter.api.BeforeEach;
    +import org.junit.jupiter.api.DisplayNameGeneration;
    +import org.junit.jupiter.api.DisplayNameGenerator;
     import org.junit.jupiter.api.Test;
     import org.junit.jupiter.api.extension.ExtendWith;
     import org.mockito.Mock;
     import org.mockito.junit.jupiter.MockitoExtension;
     import org.opensearch.sql.common.response.ResponseListener;
     import org.opensearch.sql.executor.DefaultQueryManager;
    -import org.opensearch.sql.executor.ExecutionEngine;
     import org.opensearch.sql.executor.ExecutionEngine.ExplainResponse;
     import org.opensearch.sql.executor.ExecutionEngine.ExplainResponseNode;
    -import org.opensearch.sql.executor.QueryManager;
     import org.opensearch.sql.executor.QueryService;
     import org.opensearch.sql.executor.execution.QueryPlanFactory;
    -import org.opensearch.sql.sql.config.SQLServiceConfig;
    +import org.opensearch.sql.sql.antlr.SQLSyntaxParser;
     import org.opensearch.sql.sql.domain.SQLQueryRequest;
    -import org.springframework.context.annotation.AnnotationConfigApplicationContext;
     
     @ExtendWith(MockitoExtension.class)
    +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class)
     class SQLServiceTest {
     
       private static String QUERY = "/_plugins/_sql";
     
       private static String EXPLAIN = "/_plugins/_sql/_explain";
     
    -  private AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
    -
       private SQLService sqlService;
     
       private DefaultQueryManager queryManager;
     
    -  @Mock
    -  private QueryService queryService;
    -
    -  @Mock
    -  private ExecutionEngine.Schema schema;
    +  @Mock private QueryService queryService;
     
       @BeforeEach
       public void setUp() {
         queryManager = DefaultQueryManager.defaultQueryManager();
    -    context.registerBean(QueryManager.class, () -> queryManager);
    -    context.registerBean(QueryPlanFactory.class, () -> new QueryPlanFactory(queryService));
    -    context.register(SQLServiceConfig.class);
    -    context.refresh();
    -    sqlService = context.getBean(SQLService.class);
    +    sqlService =
    +        new SQLService(new SQLSyntaxParser(), queryManager, new QueryPlanFactory(queryService));
       }
     
       @AfterEach
    @@ -68,16 +59,10 @@ public void cleanup() throws InterruptedException {
       }
     
       @Test
    -  public void canExecuteSqlQuery() {
    -    doAnswer(invocation -> {
    -      ResponseListener<QueryResponse> listener = invocation.getArgument(1);
    -      listener.onResponse(new QueryResponse(schema, Collections.emptyList()));
    -      return null;
    -    }).when(queryService).execute(any(), any());
    -
    +  public void can_execute_sql_query() {
         sqlService.execute(
             new SQLQueryRequest(new JSONObject(), "SELECT 123", QUERY, "jdbc"),
    -        new ResponseListener<QueryResponse>() {
    +        new ResponseListener<>() {
               @Override
               public void onResponse(QueryResponse response) {
                 assertNotNull(response);
    @@ -91,13 +76,42 @@ public void onFailure(Exception e) {
       }
     
       @Test
    -  public void canExecuteCsvFormatRequest() {
    -    doAnswer(invocation -> {
    -      ResponseListener<QueryResponse> listener = invocation.getArgument(1);
    -      listener.onResponse(new QueryResponse(schema, Collections.emptyList()));
    -      return null;
    -    }).when(queryService).execute(any(), any());
    +  public void can_execute_cursor_query() {
    +    sqlService.execute(
    +        new SQLQueryRequest(new JSONObject(), null, QUERY, Map.of("format", "jdbc"), "n:cursor"),
    +        new ResponseListener<>() {
    +          @Override
    +          public void onResponse(QueryResponse response) {
    +            assertNotNull(response);
    +          }
     
    +          @Override
    +          public void onFailure(Exception e) {
    +            fail(e);
    +          }
    +        });
    +  }
    +
    +  @Test
    +  public void can_execute_close_cursor_query() {
    +    sqlService.execute(
    +        new SQLQueryRequest(
    +            new JSONObject(), null, QUERY + "/close", Map.of("format", "jdbc"), "n:cursor"),
    +        new ResponseListener<>() {
    +          @Override
    +          public void onResponse(QueryResponse response) {
    +            assertNotNull(response);
    +          }
    +
    +          @Override
    +          public void onFailure(Exception e) {
    +            fail(e);
    +          }
    +        });
    +  }
    +
    +  @Test
    +  public void can_execute_csv_format_request() {
         sqlService.execute(
             new SQLQueryRequest(new JSONObject(), "SELECT 123", QUERY, "csv"),
             new ResponseListener<QueryResponse>() {
    @@ -114,14 +128,57 @@ public void onFailure(Exception e) {
       }
     
       @Test
    -  public void canExplainSqlQuery() {
    -    doAnswer(invocation -> {
    -      ResponseListener<ExplainResponse> listener = invocation.getArgument(1);
    -      listener.onResponse(new ExplainResponse(new ExplainResponseNode("Test")));
    -      return null;
    -    }).when(queryService).explain(any(), any());
    -
    -    sqlService.explain(new SQLQueryRequest(new JSONObject(), "SELECT 123", EXPLAIN, "csv"),
    +  public void can_execute_raw_format_request() {
    +    sqlService.execute(
    +        new SQLQueryRequest(new JSONObject(), "SELECT 123", QUERY, "raw"),
    +        new ResponseListener<QueryResponse>() {
    +          @Override
    +          public void onResponse(QueryResponse response) {
    +            assertNotNull(response);
    +          }
    +
    +          @Override
    +          public void onFailure(Exception e) {
    +            fail(e);
    +          }
    +        });
    +  }
    +
    +  @Test
    +  public void can_execute_pretty_raw_format_request() {
    +    sqlService.execute(
    +        new SQLQueryRequest(
    +            new JSONObject(),
    +            "SELECT 123",
    +            QUERY,
    +            Map.of("format", "jdbc", "pretty", "true"),
    +            "n:cursor"),
    +        new ResponseListener<QueryResponse>() {
    +          @Override
    +          public void onResponse(QueryResponse response) {
    +            assertNotNull(response);
    +          }
    +
    +          @Override
    +          public void onFailure(Exception e) {
    +            fail(e);
    +          }
    +        });
    +  }
    +
    +  @Test
    +  public void can_explain_sql_query() {
    +    doAnswer(
    +            invocation -> {
    +              ResponseListener<ExplainResponse> listener = invocation.getArgument(1);
    +              listener.onResponse(new ExplainResponse(new ExplainResponseNode("Test")));
    +              return null;
    +            })
    +        .when(queryService)
    +        .explain(any(), any());
    +
    +    sqlService.explain(
    +        new SQLQueryRequest(new JSONObject(), "SELECT 123", EXPLAIN, "csv"),
             new ResponseListener<ExplainResponse>() {
               @Override
               public void onResponse(ExplainResponse response) {
    @@ -136,7 +193,27 @@ public void onFailure(Exception e) {
       }
     
       @Test
    -  public void canCaptureErrorDuringExecution() {
    +  public void cannot_explain_cursor_query() {
    +    sqlService.explain(
    +        new SQLQueryRequest(new JSONObject(), null, EXPLAIN, Map.of("format", "jdbc"), "n:cursor"),
    +        new ResponseListener<ExplainResponse>() {
    +          @Override
    +          public void onResponse(ExplainResponse response) {
    +            fail(response.toString());
    +          }
    +
    +          @Override
    +          public void onFailure(Exception e) {
    +            assertEquals(
    +                "Explain of a paged query continuation is not supported."
    +                    + " Use `explain` for the initial query request.",
    +                e.getMessage());
    +          }
    +        });
    +  }
    +
    +  @Test
    +  public void can_capture_error_during_execution() {
         sqlService.execute(
             new SQLQueryRequest(new JSONObject(), "SELECT", QUERY, ""),
             new ResponseListener<QueryResponse>() {
    @@ -153,7 +230,7 @@ public void onFailure(Exception e) {
       }
     
       @Test
    -  public void canCaptureErrorDuringExplain() {
    +  public void can_capture_error_during_explain() {
         sqlService.explain(
             new SQLQueryRequest(new JSONObject(), "SELECT", EXPLAIN, ""),
             new ResponseListener<ExplainResponse>() {
    @@ -168,5 +245,4 @@ public void onFailure(Exception e) {
               }
             });
       }
    -
     }
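
    The test above now wires SQLService by hand instead of through a Spring
    AnnotationConfigApplicationContext. A sketch of the same wiring (constructor arguments and
    import paths taken from the diff; the wrapper class and listener bodies are illustrative):

    import org.json.JSONObject;
    import org.opensearch.sql.common.response.ResponseListener;
    import org.opensearch.sql.executor.DefaultQueryManager;
    import org.opensearch.sql.executor.ExecutionEngine.QueryResponse;
    import org.opensearch.sql.executor.QueryService;
    import org.opensearch.sql.executor.execution.QueryPlanFactory;
    import org.opensearch.sql.sql.SQLService;
    import org.opensearch.sql.sql.antlr.SQLSyntaxParser;
    import org.opensearch.sql.sql.domain.SQLQueryRequest;

    class SqlServiceWiringSketch {
      // queryService would be an engine-backed QueryService in production code.
      static SQLService wire(QueryService queryService) {
        DefaultQueryManager queryManager = DefaultQueryManager.defaultQueryManager();
        return new SQLService(
            new SQLSyntaxParser(), queryManager, new QueryPlanFactory(queryService));
      }

      static void run(SQLService sqlService) {
        sqlService.execute(
            new SQLQueryRequest(new JSONObject(), "SELECT 123", "/_plugins/_sql", "jdbc"),
            new ResponseListener<QueryResponse>() {
              @Override
              public void onResponse(QueryResponse response) {
                // consume schema and rows here
              }

              @Override
              public void onFailure(Exception e) {
                // surface the error to the caller
              }
            });
      }
    }
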
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/antlr/BracketedTimestampTest.java b/sql/src/test/java/org/opensearch/sql/sql/antlr/BracketedTimestampTest.java
    new file mode 100644
    index 0000000000..120cd233fc
    --- /dev/null
    +++ b/sql/src/test/java/org/opensearch/sql/sql/antlr/BracketedTimestampTest.java
    @@ -0,0 +1,40 @@
    +/*
    + * Copyright OpenSearch Contributors
    + * SPDX-License-Identifier: Apache-2.0
    + */
    +
    +package org.opensearch.sql.sql.antlr;
    +
    +import org.junit.jupiter.api.Test;
    +
    +public class BracketedTimestampTest extends SQLParserTest {
    +  @Test
    +  void date_shortened_test() {
    +    acceptQuery("SELECT {d '2001-05-07'}");
    +  }
    +
    +  @Test
    +  void date_test() {
    +    acceptQuery("SELECT {date '2001-05-07'}");
    +  }
    +
    +  @Test
    +  void time_shortened_test() {
    +    acceptQuery("SELECT {t '10:11:12'}");
    +  }
    +
    +  @Test
    +  void time_test() {
    +    acceptQuery("SELECT {time '10:11:12'}");
    +  }
    +
    +  @Test
    +  void timestamp_shortened_test() {
    +    acceptQuery("SELECT {ts '2001-05-07 10:11:12'}");
    +  }
    +
    +  @Test
    +  void timestamp_test() {
    +    acceptQuery("SELECT {timestamp '2001-05-07 10:11:12'}");
    +  }
    +}
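
    The bracketed literals exercised above are standard JDBC escape sequences ({d ...}, {t ...},
    {ts ...}). A sketch of the same forms issued through plain JDBC; the escapes come from the
    JDBC spec, while the connection URL below is a placeholder, not a documented endpoint:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    class JdbcEscapeSyntaxDemo {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:example://localhost:9200");
            Statement stmt = conn.createStatement();
            ResultSet rs =
                stmt.executeQuery(
                    "SELECT {d '2001-05-07'}, {t '10:11:12'}, {ts '2001-05-07 10:11:12'}")) {
          while (rs.next()) {
            System.out.println(rs.getString(1)); // driver rewrites escapes before sending
          }
        }
      }
    }
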
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/antlr/HighlightTest.java b/sql/src/test/java/org/opensearch/sql/sql/antlr/HighlightTest.java
    index 6826a37c0b..ae1e418357 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/antlr/HighlightTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/antlr/HighlightTest.java
    @@ -15,14 +15,14 @@ void single_field_test() {
     
       @Test
       void multiple_highlights_test() {
    -    acceptQuery("SELECT HIGHLIGHT(Tags), HIGHLIGHT(Body) FROM Index "
    -        + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
    +    acceptQuery(
    +        "SELECT HIGHLIGHT(Tags), HIGHLIGHT(Body) FROM Index "
    +            + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
       }
     
       @Test
       void wildcard_test() {
    -    acceptQuery("SELECT HIGHLIGHT('T*') FROM Index "
    -        + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
    +    acceptQuery("SELECT HIGHLIGHT('T*') FROM Index " + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
       }
     
       @Test
    @@ -33,13 +33,12 @@ void highlight_all_test() {
     
       @Test
       void multiple_parameters_failure_test() {
    -    rejectQuery("SELECT HIGHLIGHT(Tags1, Tags2) FROM Index "
    -        + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
    +    rejectQuery(
    +        "SELECT HIGHLIGHT(Tags1, Tags2) FROM Index " + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
       }
     
       @Test
       void no_parameters_failure_test() {
    -    rejectQuery("SELECT HIGHLIGHT() FROM Index "
    -        + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
    +    rejectQuery("SELECT HIGHLIGHT() FROM Index " + "WHERE MULTI_MATCH([Tags, Body], 'Time')");
       }
     }
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/antlr/MatchBoolPrefixParserTest.java b/sql/src/test/java/org/opensearch/sql/sql/antlr/MatchBoolPrefixParserTest.java
    index 66c4d5be9d..db5ce18edb 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/antlr/MatchBoolPrefixParserTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/antlr/MatchBoolPrefixParserTest.java
    @@ -25,14 +25,13 @@ static Stream<String> generateValidArguments() {
             new String("max_expansions=50"),
             new String("fuzzy_transpositions=true"),
             new String("fuzzy_rewrite=constant_score"),
    -        new String("boost=1")
    -    );
    +        new String("boost=1"));
       }
     
       @ParameterizedTest
       @MethodSource("generateValidArguments")
       public void testValidArguments(String arg) {
    -    acceptQuery("SELECT * FROM T WHERE MATCH_BOOL_PREFIX(message, 'query', " + arg  + ")");
    +    acceptQuery("SELECT * FROM T WHERE MATCH_BOOL_PREFIX(message, 'query', " + arg + ")");
       }
     
       @Test
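
    The pattern above, a static factory method feeding @ParameterizedTest through @MethodSource,
    is how the argument matrices in these parser tests are built. A self-contained sketch with
    hypothetical values (real tests would call acceptQuery/rejectQuery instead of the assertion):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.stream.Stream;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class MethodSourceSketchTest {
      // Each stream element becomes one invocation of the test method below.
      static Stream<String> optionClauses() {
        return Stream.of("boost=1", "fuzziness=2", "lenient=true");
      }

      @ParameterizedTest
      @MethodSource("optionClauses")
      void option_clause_is_well_formed(String clause) {
        assertTrue(clause.contains("="));
      }
    }
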
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLParserTest.java b/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLParserTest.java
    index 7b8b415ee7..db091a4932 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLParserTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLParserTest.java
    @@ -1,3 +1,8 @@
    +/*
    + * Copyright OpenSearch Contributors
    + * SPDX-License-Identifier: Apache-2.0
    + */
    +
     package org.opensearch.sql.sql.antlr;
     
     import org.opensearch.sql.common.antlr.SyntaxParserTestBase;
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLSyntaxParserTest.java b/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLSyntaxParserTest.java
    index bfd0f93ec9..a1a6923bf1 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLSyntaxParserTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/antlr/SQLSyntaxParserTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.antlr;
     
     import static org.junit.jupiter.api.Assertions.assertNotNull;
    @@ -72,8 +71,7 @@ public void canParseHiddenIndexName() {
     
       @Test
       public void canNotParseIndexNameWithSpecialChar() {
    -    assertThrows(SyntaxCheckException.class,
    -        () -> parser.parse("SELECT * FROM hello+world"));
    +    assertThrows(SyntaxCheckException.class, () -> parser.parse("SELECT * FROM hello+world"));
       }
     
       @Test
    @@ -83,14 +81,12 @@ public void canParseIndexNameWithSpecialCharQuoted() {
     
       @Test
       public void canNotParseIndexNameStartingWithNumber() {
    -    assertThrows(SyntaxCheckException.class,
    -        () -> parser.parse("SELECT * FROM 123test"));
    +    assertThrows(SyntaxCheckException.class, () -> parser.parse("SELECT * FROM 123test"));
       }
     
       @Test
       public void canNotParseIndexNameSingleQuoted() {
    -    assertThrows(SyntaxCheckException.class,
    -        () -> parser.parse("SELECT * FROM 'test'"));
    +    assertThrows(SyntaxCheckException.class, () -> parser.parse("SELECT * FROM 'test'"));
       }
     
       @Test
    @@ -100,14 +96,15 @@ public void canParseWhereClause() {
     
       @Test
       public void canParseSelectClauseWithLogicalOperator() {
    -    assertNotNull(parser.parse(
    -        "SELECT age = 10 AND name = 'John' OR NOT (balance > 1000) FROM test"));
    +    assertNotNull(
    +        parser.parse("SELECT age = 10 AND name = 'John' OR NOT (balance > 1000) FROM test"));
       }
     
       @Test
       public void canParseWhereClauseWithLogicalOperator() {
    -    assertNotNull(parser.parse("SELECT name FROM test "
    -        + "WHERE age = 10 AND name = 'John' OR NOT (balance > 1000)"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT name FROM test " + "WHERE age = 10 AND name = 'John' OR NOT (balance > 1000)"));
       }
     
       @Test
    @@ -127,9 +124,11 @@ public void canParseDistinctClause() {
       @Test
       public void canParseCaseStatement() {
         assertNotNull(parser.parse("SELECT CASE WHEN age > 30 THEN 'age1' ELSE 'age2' END FROM test"));
    -    assertNotNull(parser.parse("SELECT CASE WHEN age > 30 THEN 'age1' "
    -                                        + " WHEN age < 50 THEN 'age2' "
    -                                        + " ELSE 'age3' END FROM test"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT CASE WHEN age > 30 THEN 'age1' "
    +                + " WHEN age < 50 THEN 'age2' "
    +                + " ELSE 'age3' END FROM test"));
         assertNotNull(parser.parse("SELECT CASE age WHEN 30 THEN 'age1' ELSE 'age2' END FROM test"));
         assertNotNull(parser.parse("SELECT CASE age WHEN 30 THEN 'age1' END FROM test"));
       }
    @@ -146,10 +145,11 @@ public void canNotParseAggregateFunctionWithWrongArgument() {
       public void canParseOrderByClause() {
         assertNotNull(parser.parse("SELECT name, age FROM test ORDER BY name, age"));
         assertNotNull(parser.parse("SELECT name, age FROM test ORDER BY name ASC, age DESC"));
    -    assertNotNull(parser.parse(
    -        "SELECT name, age FROM test ORDER BY name NULLS LAST, age NULLS FIRST"));
    -    assertNotNull(parser.parse(
    -        "SELECT name, age FROM test ORDER BY name ASC NULLS FIRST, age DESC NULLS LAST"));
    +    assertNotNull(
    +        parser.parse("SELECT name, age FROM test ORDER BY name NULLS LAST, age NULLS FIRST"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT name, age FROM test ORDER BY name ASC NULLS FIRST, age DESC NULLS LAST"));
       }
     
       @Test
    @@ -167,16 +167,69 @@ private static Stream<Arguments> nowLikeFunctionsData() {
             Arguments.of("curtime", true, false),
             Arguments.of("current_time", true, true),
             Arguments.of("curdate", false, false),
    -        Arguments.of("current_date", false, true)
    -    );
    +        Arguments.of("current_date", false, true),
    +        Arguments.of("utc_date", false, true),
    +        Arguments.of("utc_time", false, true),
    +        Arguments.of("utc_timestamp", false, true));
    +  }
    +
    +  private static Stream<Arguments> getPartForExtractFunction() {
    +    return Stream.of(
    +        Arguments.of("MICROSECOND"),
    +        Arguments.of("SECOND"),
    +        Arguments.of("MINUTE"),
    +        Arguments.of("HOUR"),
    +        Arguments.of("DAY"),
    +        Arguments.of("WEEK"),
    +        Arguments.of("MONTH"),
    +        Arguments.of("QUARTER"),
    +        Arguments.of("YEAR"),
    +        Arguments.of("SECOND_MICROSECOND"),
    +        Arguments.of("MINUTE_MICROSECOND"),
    +        Arguments.of("MINUTE_SECOND"),
    +        Arguments.of("HOUR_MICROSECOND"),
    +        Arguments.of("HOUR_SECOND"),
    +        Arguments.of("HOUR_MINUTE"),
    +        Arguments.of("DAY_MICROSECOND"),
    +        Arguments.of("DAY_SECOND"),
    +        Arguments.of("DAY_MINUTE"),
    +        Arguments.of("DAY_HOUR"),
    +        Arguments.of("YEAR_MONTH"));
    +  }
    +
    +  @ParameterizedTest(name = "{0}")
    +  @MethodSource("getPartForExtractFunction")
    +  public void can_parse_extract_function(String part) {
    +    assertNotNull(parser.parse(String.format("SELECT extract(%s FROM \"2023-02-06\")", part)));
    +  }
    +
    +  private static Stream<Arguments> getInvalidPartForExtractFunction() {
    +    return Stream.of(Arguments.of("INVALID"), Arguments.of("\"SECOND\""), Arguments.of("123"));
    +  }
    +
    +  @ParameterizedTest(name = "{0}")
    +  @MethodSource("getInvalidPartForExtractFunction")
    +  public void cannot_parse_extract_function_invalid_part(String part) {
    +    assertThrows(
    +        SyntaxCheckException.class,
    +        () -> parser.parse(String.format("SELECT extract(%s FROM \"2023-02-06\")", part)));
    +  }
    +
    +  @Test
    +  public void can_parse_weekday_function() {
    +    assertNotNull(parser.parse("SELECT weekday('2022-11-18')"));
    +    assertNotNull(parser.parse("SELECT day_of_week('2022-11-18')"));
       }
     
       @ParameterizedTest(name = "{0}")
       @MethodSource("nowLikeFunctionsData")
       public void can_parse_now_like_functions(String name, Boolean hasFsp, Boolean hasShortcut) {
    -    var calls = new ArrayList<String>() {{
    -        add(name + "()");
    -      }};
    +    var calls =
    +        new ArrayList<String>() {
    +          {
    +            add(name + "()");
    +          }
    +        };
         if (hasShortcut) {
           calls.add(name);
         }
    @@ -190,6 +243,37 @@ public void can_parse_now_like_functions(String name, Boolean hasFsp, Boolean ha
         assertNotNull(parser.parse("SELECT id FROM test WHERE " + String.join(" AND ", calls)));
       }
     
    +  private static Stream<Arguments> get_format_arguments() {
    +    Stream.Builder<Arguments> args = Stream.builder();
    +    String[] types = {"DATE", "DATETIME", "TIME", "TIMESTAMP"};
    +    String[] formats = {"'USA'", "'JIS'", "'ISO'", "'EUR'", "'INTERNAL'"};
    +
    +    for (String type : types) {
    +      for (String format : formats) {
    +        args.add(Arguments.of(type, format));
    +      }
    +    }
    +
    +    return args.build();
    +  }
    +
    +  @ParameterizedTest(name = "{0}")
    +  @MethodSource("get_format_arguments")
    +  public void can_parse_get_format_function(String type, String format) {
    +    assertNotNull(parser.parse(String.format("SELECT GET_FORMAT(%s, %s)", type, format)));
    +  }
    +
    +  @Test
    +  public void cannot_parse_get_format_function_with_bad_arg() {
    +    assertThrows(
    +        SyntaxCheckException.class, () -> parser.parse("SELECT GET_FORMAT(NONSENSE_ARG,'INTERNAL')"));
    +  }
    +
    +  @Test
    +  public void can_parse_hour_functions() {
    +    assertNotNull(parser.parse("SELECT hour('2022-11-18 12:23:34')"));
    +    assertNotNull(parser.parse("SELECT hour_of_day('12:23:34')"));
    +  }
     
       @Test
       public void can_parse_week_of_year_functions() {
    @@ -197,12 +281,33 @@ public void can_parse_week_of_year_functions() {
         assertNotNull(parser.parse("SELECT week_of_year('2022-11-18')"));
       }
     
    +  @Test
    +  public void can_parse_dayofmonth_functions() {
    +    assertNotNull(parser.parse("SELECT dayofmonth('2022-11-18')"));
    +    assertNotNull(parser.parse("SELECT day_of_month('2022-11-18')"));
    +  }
    +
    +  @Test
    +  public void can_parse_day_of_week_functions() {
    +    assertNotNull(parser.parse("SELECT dayofweek('2022-11-18')"));
    +    assertNotNull(parser.parse("SELECT day_of_week('2022-11-18')"));
    +  }
    +
       @Test
       public void can_parse_dayofyear_functions() {
         assertNotNull(parser.parse("SELECT dayofyear('2022-11-18')"));
         assertNotNull(parser.parse("SELECT day_of_year('2022-11-18')"));
       }
     
    +  @Test
    +  public void can_parse_minute_functions() {
    +    assertNotNull(parser.parse("SELECT minute('12:23:34')"));
    +    assertNotNull(parser.parse("SELECT minute_of_hour('12:23:34')"));
    +
    +    assertNotNull(parser.parse("SELECT minute('2022-12-20 12:23:34')"));
    +    assertNotNull(parser.parse("SELECT minute_of_hour('2022-12-20 12:23:34')"));
    +  }
    +
       @Test
       public void can_parse_month_of_year_function() {
         assertNotNull(parser.parse("SELECT month('2022-11-18')"));
    @@ -216,196 +321,198 @@ public void can_parse_month_of_year_function() {
     
         assertNotNull(parser.parse("SELECT month(timestamp('2022-11-18 00:00:00'))"));
         assertNotNull(parser.parse("SELECT month_of_year(timestamp('2022-11-18 00:00:00'))"));
    -
       }
     
       @Test
       public void can_parse_multi_match_relevance_function() {
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multimatch(\"fields\"=\"field\", query=\"query\")"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multimatchquery(fields=\"field\", \"query\"=\"query\")"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(\"fields\"=\"field\", \"query\"=\"query\")"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(\'fields\'=\'field\', \'query\'=\'query\')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(fields=\'field\', query=\'query\')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address', 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match([\"*\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match([\"address\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match([`address`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match([address], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " multi_match(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address' ^ 1.1, 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address', 'notes' ^ 1.5], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address', 'notes' 3], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE multi_match(['address' ^ .3, 'notes' 3], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " multi_match([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " multi_match([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query', analyzer=keyword,"
    -            + "operator='AND', tie_breaker=0.3, type = \"most_fields\", fuzziness = \"AUTO\")"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multimatch(\"fields\"=\"field\", query=\"query\")"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multimatchquery(fields=\"field\", \"query\"=\"query\")"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multi_match(\"fields\"=\"field\", \"query\"=\"query\")"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multi_match(\'fields\'=\'field\', \'query\'=\'query\')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE multi_match(fields=\'field\', query=\'query\')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE multi_match(['address'], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE multi_match(['address', 'notes'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE multi_match([\"*\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE multi_match([\"address\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE multi_match([`address`], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE multi_match([address], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " multi_match(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE multi_match(['address' ^ 1.1, 'notes'], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE multi_match(['address', 'notes' ^ 1.5], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE multi_match(['address', 'notes' 3], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multi_match(['address' ^ .3, 'notes' 3], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " multi_match([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE multi_match([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query',"
    +                + " analyzer=keyword,operator='AND', tie_breaker=0.3, type = \"most_fields\","
    +                + " fuzziness = \"AUTO\")"));
    +  }
    +
    +  @Test
    +  public void can_parse_second_functions() {
    +    assertNotNull(parser.parse("SELECT second('12:23:34')"));
    +    assertNotNull(parser.parse("SELECT second_of_minute('2022-11-18')"));
    +    assertNotNull(parser.parse("SELECT second('2022-11-18 12:23:34')"));
    +    assertNotNull(parser.parse("SELECT second_of_minute('2022-11-18 12:23:34')"));
       }
     
       @Test
       public void can_parse_simple_query_string_relevance_function() {
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address', 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string([\"*\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string([\"address\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string([`address`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string([address], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " simple_query_string(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address' ^ 1.1, 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address', 'notes' ^ 1.5], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address', 'notes' 3], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE simple_query_string(['address' ^ .3, 'notes' 3], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " simple_query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " simple_query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query', analyzer=keyword,"
    -            + "flags='AND', quote_field_suffix=\".exact\", fuzzy_prefix_length = 4)"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE simple_query_string(['address'], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string(['address', 'notes'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE simple_query_string([\"*\"], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE simple_query_string([\"address\"], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE simple_query_string([`address`], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE simple_query_string([address], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " simple_query_string(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string(['address' ^ 1.1, 'notes'], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string(['address', 'notes' ^ 1.5], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string(['address', 'notes' 3], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string(['address' ^ .3, 'notes' 3], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " simple_query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE simple_query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2],"
    +                + " 'query', analyzer=keyword,flags='AND', quote_field_suffix=\".exact\","
    +                + " fuzzy_prefix_length = 4)"));
       }
     
       @Test
    -  public void can_parse_query_string_relevance_function() {
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['*'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['add*'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['*ess'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address', 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([\"*\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([\"address\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([\"ad*\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([\"*s\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([\"address\", \"notes\"], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([`*`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([`address`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([`ad*`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([`*ss`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([`address`, `notes`], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([address], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([addr*], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([*ss], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string([address, notes], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " query_string(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address' ^ 1.1, 'notes'], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address', 'notes' ^ 1.5], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address', 'notes' 3], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE query_string(['address' ^ .3, 'notes' 3], 'query')"));
    -
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    -    assertNotNull(parser.parse(
    -        "SELECT id FROM test WHERE"
    -            + " query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query', analyzer=keyword,"
    -            + "operator='AND', tie_breaker=0.3, type = \"most_fields\", fuzziness = 4)"));
    +  public void can_parse_str_to_date() {
    +    assertNotNull(parser.parse("SELECT STR_TO_DATE('01,5,2013','%d,%m,%Y')"));
    +
    +    assertNotNull(parser.parse("SELECT STR_TO_DATE('a09:30:17','a%h:%i:%s')"));
    +
    +    assertNotNull(parser.parse("SELECT STR_TO_DATE('abc','abc');"));
       }
     
    +  @Test
    +  public void can_parse_query_string_relevance_function() {
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string(['*'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string(['address'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string(['add*'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string(['*ess'], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query_string(['address', 'notes'], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([\"*\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([\"address\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([\"ad*\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([\"*s\"], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query_string([\"address\", \"notes\"], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([`*`], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([`address`], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([`ad*`], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([`*ss`], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query_string([`address`, `notes`], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([address], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([addr*], 'query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query_string([*ss], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query_string([address, notes], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " query_string(['address' ^ 1.0, 'notes' ^ 2.2], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE query_string(['address' ^ 1.1, 'notes'], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE query_string(['address', 'notes' ^ 1.5], 'query')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query_string(['address', 'notes' 3], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE query_string(['address' ^ .3, 'notes' 3], 'query')"));
    +
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query')"));
    +    assertNotNull(
    +        parser.parse(
    +            "SELECT id FROM test WHERE"
    +                + " query_string([\"Tags\" ^ 1.5, Title, `Body` 4.2], 'query', analyzer=keyword,"
    +                + "operator='AND', tie_breaker=0.3, type = \"most_fields\", fuzziness = 4)"));
    +  }
     
       @Test
       public void can_parse_query_relevance_function() {
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('address:query')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('address:query OR notes:query')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"address:query\")"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"address:query OR notes:query\")"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(`address:query`)"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(`address:query OR notes:query`)"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('*:query')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"*:query\")"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(`*:query`)"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('address:*uery OR notes:?uery')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"address:*uery OR notes:?uery\")"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(`address:*uery OR notes:?uery`)"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('address:qu*ry OR notes:qu?ry')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"address:qu*ry OR notes:qu?ry\")"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(`address:qu*ry OR notes:qu?ry`)"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query('address:query notes:query')"));
    -    assertNotNull(parser.parse(
    -            "SELECT id FROM test WHERE query(\"address:query notes:query\")"));
    -    assertNotNull(parser.parse(
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('address:query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('address:query OR notes:query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(\"address:query\")"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query(\"address:query OR notes:query\")"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(`address:query`)"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(`address:query OR notes:query`)"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('*:query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(\"*:query\")"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(`*:query`)"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('address:*uery OR notes:?uery')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query(\"address:*uery OR notes:?uery\")"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(`address:*uery OR notes:?uery`)"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('address:qu*ry OR notes:qu?ry')"));
    +    assertNotNull(
    +        parser.parse("SELECT id FROM test WHERE query(\"address:qu*ry OR notes:qu?ry\")"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(`address:qu*ry OR notes:qu?ry`)"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query('address:query notes:query')"));
    +    assertNotNull(parser.parse("SELECT id FROM test WHERE query(\"address:query notes:query\")"));
    +    assertNotNull(
    +        parser.parse(
                 "SELECT id FROM test WHERE "
    -                    + "query(\"Body:\'taste beer\' Tags:\'taste beer\'  Title:\'taste beer\'\")"));
    +                + "query(\"Body:\'taste beer\' Tags:\'taste beer\'  Title:\'taste beer\'\")"));
       }
     
    -
       @Test
       public void can_parse_match_relevance_function() {
         assertNotNull(parser.parse("SELECT * FROM test WHERE match(column, \"this is a test\")"));
    @@ -419,19 +526,18 @@ public void can_parse_match_relevance_function() {
       public void can_parse_matchquery_relevance_function() {
         assertNotNull(parser.parse("SELECT * FROM test WHERE matchquery(column, \"this is a test\")"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE matchquery(column, 'this is a test')"));
    -    assertNotNull(parser.parse(
    -        "SELECT * FROM test WHERE matchquery(`column`, \"this is a test\")"));
    +    assertNotNull(
    +        parser.parse("SELECT * FROM test WHERE matchquery(`column`, \"this is a test\")"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE matchquery(`column`, 'this is a test')"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE matchquery(column, 100500)"));
       }
     
       @Test
       public void can_parse_match_query_relevance_function() {
    -    assertNotNull(parser.parse(
    -        "SELECT * FROM test WHERE match_query(column, \"this is a test\")"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE match_query(column, \"this is a test\")"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE match_query(column, 'this is a test')"));
    -    assertNotNull(parser.parse(
    -        "SELECT * FROM test WHERE match_query(`column`, \"this is a test\")"));
    +    assertNotNull(
    +        parser.parse("SELECT * FROM test WHERE match_query(`column`, \"this is a test\")"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE match_query(`column`, 'this is a test')"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE match_query(column, 100500)"));
       }
    @@ -439,15 +545,57 @@ public void can_parse_match_query_relevance_function() {
       @Test
       public void can_parse_match_phrase_relevance_function() {
         assertNotNull(
    -            parser.parse("SELECT * FROM test WHERE match_phrase(column, \"this is a test\")"));
    +        parser.parse("SELECT * FROM test WHERE match_phrase(column, \"this is a test\")"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE match_phrase(column, 'this is a test')"));
         assertNotNull(
    -            parser.parse("SELECT * FROM test WHERE match_phrase(`column`, \"this is a test\")"));
    +        parser.parse("SELECT * FROM test WHERE match_phrase(`column`, \"this is a test\")"));
         assertNotNull(
    -            parser.parse("SELECT * FROM test WHERE match_phrase(`column`, 'this is a test')"));
    +        parser.parse("SELECT * FROM test WHERE match_phrase(`column`, 'this is a test')"));
         assertNotNull(parser.parse("SELECT * FROM test WHERE match_phrase(column, 100500)"));
       }
     
    +  @Test
    +  public void can_parse_minute_of_day_function() {
    +    assertNotNull(parser.parse("SELECT minute_of_day(\"12:23:34\");"));
    +    assertNotNull(parser.parse("SELECT minute_of_day('12:23:34');"));
    +    assertNotNull(parser.parse("SELECT minute_of_day(\"2022-12-14 12:23:34\");"));
    +    assertNotNull(parser.parse("SELECT minute_of_day('2022-12-14 12:23:34');"));
    +  }
    +
    +  @Test
    +  public void can_parse_sec_to_time_function() {
    +    assertNotNull(parser.parse("SELECT sec_to_time(-6897)"));
    +    assertNotNull(parser.parse("SELECT sec_to_time(6897)"));
    +    assertNotNull(parser.parse("SELECT sec_to_time(6897.123)"));
    +  }
    +
    +  @Test
    +  public void can_parse_last_day_function() {
    +    assertNotNull(parser.parse("SELECT last_day(\"2017-06-20\")"));
    +    assertNotNull(parser.parse("SELECT last_day('2004-01-01 01:01:01')"));
    +  }
    +
    +  @Test
    +  public void can_parse_timestampadd_function() {
    +    assertNotNull(parser.parse("SELECT TIMESTAMPADD(MINUTE, 1, '2003-01-02')"));
    +    assertNotNull(parser.parse("SELECT TIMESTAMPADD(WEEK,1,'2003-01-02')"));
    +  }
    +
    +  @Test
    +  public void can_parse_timestampdiff_function() {
    +    assertNotNull(parser.parse("SELECT TIMESTAMPDIFF(MINUTE, '2003-01-02', '2003-01-02')"));
    +    assertNotNull(parser.parse("SELECT TIMESTAMPDIFF(WEEK,'2003-01-02','2003-01-02')"));
    +  }
    +
    +  @Test
    +  public void can_parse_to_seconds_function() {
    +    assertNotNull(parser.parse("SELECT to_seconds(\"2023-02-20\")"));
    +    assertNotNull(parser.parse("SELECT to_seconds(950501)"));
    +  }
    +
       @Test
       public void can_parse_wildcard_query_relevance_function() {
         assertNotNull(
    @@ -459,43 +607,82 @@ public void can_parse_wildcard_query_relevance_function() {
         assertNotNull(
             parser.parse("SELECT * FROM test WHERE wildcard_query(`column`, 'this is a test*')"));
         assertNotNull(
    -        parser.parse("SELECT * FROM test WHERE wildcard_query(`column`, 'this is a test*', "
    -            + "boost=1.5, case_insensitive=true, rewrite=\"scoring_boolean\")"));
    +        parser.parse(
    +            "SELECT * FROM test WHERE wildcard_query(`column`, 'this is a test*', "
    +                + "boost=1.5, case_insensitive=true, rewrite=\"scoring_boolean\")"));
    +  }
    +
    +  @Test
    +  public void can_parse_nested_function() {
    +    assertNotNull(parser.parse("SELECT NESTED(PATH.INNER_FIELD) FROM TEST"));
    +    assertNotNull(parser.parse("SELECT NESTED('PATH.INNER_FIELD') FROM TEST"));
    +    assertNotNull(parser.parse("SELECT SUM(NESTED(PATH.INNER_FIELD)) FROM TEST"));
    +    assertNotNull(parser.parse("SELECT NESTED(PATH.INNER_FIELD, PATH) FROM TEST"));
    +    assertNotNull(parser.parse("SELECT * FROM TEST WHERE NESTED(PATH.INNER_FIELDS) = 'A'"));
    +    assertNotNull(parser.parse("SELECT * FROM TEST WHERE NESTED(PATH.INNER_FIELDS, PATH) = 'A'"));
    +    assertNotNull(parser.parse("SELECT FIELD FROM TEST ORDER BY nested(PATH.INNER_FIELD, PATH)"));
    +  }
    +
    +  @Test
    +  public void can_parse_yearweek_function() {
    +    assertNotNull(parser.parse("SELECT yearweek('1987-01-01')"));
    +    assertNotNull(parser.parse("SELECT yearweek('1987-01-01', 1)"));
       }
     
       @ParameterizedTest
       @MethodSource({
    -      "matchPhraseComplexQueries",
    -      "matchPhraseGeneratedQueries",
    -      "generateMatchPhraseQueries",
    -      "matchPhraseQueryComplexQueries"
    +    "matchPhraseComplexQueries",
    +    "matchPhraseGeneratedQueries",
    +    "generateMatchPhraseQueries",
    +    "matchPhraseQueryComplexQueries"
       })
       public void canParseComplexMatchPhraseArgsTest(String query) {
         assertNotNull(parser.parse(query));
       }
     
       @ParameterizedTest
    -  @MethodSource({
    -      "generateMatchPhrasePrefixQueries"
    -  })
    +  @MethodSource({"generateMatchPhrasePrefixQueries"})
       public void canParseComplexMatchPhrasePrefixQueries(String query) {
         assertNotNull(parser.parse(query));
       }
     
       private static Stream<String> matchPhraseComplexQueries() {
         return Stream.of(
    -      "SELECT * FROM t WHERE match_phrase(c, 3)",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, fuzziness=AUTO)",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, zero_terms_query=\"all\")",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, lenient=true)",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, lenient='true')",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, operator=xor)",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, cutoff_frequency=0.04)",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, cutoff_frequency=0.04, analyzer = english, "
    -              + "prefix_length=34, fuzziness='auto', minimum_should_match='2<-25% 9<-3')",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, minimum_should_match='2<-25% 9<-3')",
    -      "SELECT * FROM t WHERE match_phrase(c, 3, operator='AUTO')"
    -    );
    +        "SELECT * FROM t WHERE match_phrase(c, 3)",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, fuzziness=AUTO)",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, zero_terms_query=\"all\")",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, lenient=true)",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, lenient='true')",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, operator=xor)",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, cutoff_frequency=0.04)",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, cutoff_frequency=0.04, analyzer = english, "
    +            + "prefix_length=34, fuzziness='auto', minimum_should_match='2<-25% 9<-3')",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, minimum_should_match='2<-25% 9<-3')",
    +        "SELECT * FROM t WHERE match_phrase(c, 3, operator='AUTO')");
    +  }
    +
    +  @Test
    +  public void canParseMatchQueryAlternateSyntax() {
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = matchquery('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = matchquery(\"query\")"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = match_query('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = match_query(\"query\")"));
    +  }
    +
    +  @Test
    +  public void canParseMatchPhraseAlternateSyntax() {
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = match_phrase('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = match_phrase(\"query\")"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = matchphrase('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = matchphrase(\"query\")"));
    +  }
    +
    +  @Test
    +  public void canParseMultiMatchAlternateSyntax() {
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = multi_match('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = multi_match(\"query\")"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = multimatch('query')"));
    +    assertNotNull(parser.parse("SELECT * FROM test WHERE Field = multimatch(\"query\")"));
       }
     
   private static Stream<String> matchPhraseQueryComplexQueries() {
@@ -510,50 +697,51 @@ private static Stream<String> matchPhraseQueryComplexQueries() {
             "SELECT * FROM t WHERE matchphrasequery(c, 3, cutoff_frequency=0.04, analyzer = english, "
                 + "prefix_length=34, fuzziness='auto', minimum_should_match='2<-25% 9<-3')",
             "SELECT * FROM t WHERE matchphrasequery(c, 3, minimum_should_match='2<-25% 9<-3')",
    -        "SELECT * FROM t WHERE matchphrasequery(c, 3, operator='AUTO')"
    -    );
    +        "SELECT * FROM t WHERE matchphrasequery(c, 3, operator='AUTO')");
       }
     
   private static Stream<String> matchPhraseGeneratedQueries() {
     var matchArgs = new HashMap<String, Object[]>();
    -    matchArgs.put("fuzziness", new String[]{ "AUTO", "AUTO:1,5", "1" });
    -    matchArgs.put("fuzzy_transpositions", new Boolean[]{ true, false });
    -    matchArgs.put("operator", new String[]{ "and", "or" });
    -    matchArgs.put("minimum_should_match",
    -            new String[]{ "3", "-2", "75%", "-25%", "3<90%", "2<-25% 9<-3" });
    -    matchArgs.put("analyzer", new String[]{ "standard", "stop", "english" });
    -    matchArgs.put("zero_terms_query", new String[]{ "none", "all" });
    -    matchArgs.put("lenient", new Boolean[]{ true, false });
    +    matchArgs.put("fuzziness", new String[] {"AUTO", "AUTO:1,5", "1"});
    +    matchArgs.put("fuzzy_transpositions", new Boolean[] {true, false});
    +    matchArgs.put("operator", new String[] {"and", "or"});
    +    matchArgs.put(
    +        "minimum_should_match", new String[] {"3", "-2", "75%", "-25%", "3<90%", "2<-25% 9<-3"});
    +    matchArgs.put("analyzer", new String[] {"standard", "stop", "english"});
    +    matchArgs.put("zero_terms_query", new String[] {"none", "all"});
    +    matchArgs.put("lenient", new Boolean[] {true, false});
         // deprecated
    -    matchArgs.put("cutoff_frequency", new Double[]{ .0, 0.001, 1., 42. });
    -    matchArgs.put("prefix_length", new Integer[]{ 0, 2, 5 });
    -    matchArgs.put("max_expansions", new Integer[]{ 0, 5, 20 });
    -    matchArgs.put("boost", new Double[]{ .5, 1., 2.3 });
    +    matchArgs.put("cutoff_frequency", new Double[] {.0, 0.001, 1., 42.});
    +    matchArgs.put("prefix_length", new Integer[] {0, 2, 5});
    +    matchArgs.put("max_expansions", new Integer[] {0, 5, 20});
    +    matchArgs.put("boost", new Double[] {.5, 1., 2.3});
     
         return generateQueries("match", matchArgs);
       }
     
   private static Stream<String> generateMatchPhraseQueries() {
     var matchPhraseArgs = new HashMap<String, Object[]>();
    -    matchPhraseArgs.put("analyzer", new String[]{ "standard", "stop", "english" });
    -    matchPhraseArgs.put("max_expansions", new Integer[]{ 0, 5, 20 });
    -    matchPhraseArgs.put("slop", new Integer[]{ 0, 1, 2 });
    +    matchPhraseArgs.put("analyzer", new String[] {"standard", "stop", "english"});
    +    matchPhraseArgs.put("max_expansions", new Integer[] {0, 5, 20});
    +    matchPhraseArgs.put("slop", new Integer[] {0, 1, 2});
     
         return generateQueries("match_phrase", matchPhraseArgs);
       }
     
   private static Stream<String> generateMatchPhrasePrefixQueries() {
-    return generateQueries("match_phrase_prefix", ImmutableMap.<String, Object[]>builder()
    -        .put("analyzer", new String[] {"standard", "stop", "english"})
    -        .put("slop", new Integer[] {0, 1, 2})
    -        .put("max_expansions", new Integer[] {0, 3, 10})
    -        .put("zero_terms_query", new String[] {"NONE", "ALL", "NULL"})
    -        .put("boost", new Float[] {-0.5f, 1.0f, 1.2f})
    -        .build());
    -  }
    -
-  private static Stream<String> generateQueries(String function,
-                                                Map<String, Object[]> functionArgs) {
+    return generateQueries(
+        "match_phrase_prefix",
+        ImmutableMap.<String, Object[]>builder()
    +            .put("analyzer", new String[] {"standard", "stop", "english"})
    +            .put("slop", new Integer[] {0, 1, 2})
    +            .put("max_expansions", new Integer[] {0, 3, 10})
    +            .put("zero_terms_query", new String[] {"NONE", "ALL", "NULL"})
    +            .put("boost", new Float[] {-0.5f, 1.0f, 1.2f})
    +            .build());
    +  }
    +
+  private static Stream<String> generateQueries(
+      String function, Map<String, Object[]> functionArgs) {
         var rand = new Random(0);
     
     class QueryGenerator implements Iterator<String> {
@@ -561,7 +749,7 @@ class QueryGenerator implements Iterator<String> {
           private int currentQuery = 0;
     
           private String randomIdentifier() {
    -        return RandomStringUtils.random(10, 0, 0,true, false, null, rand);
    +        return RandomStringUtils.random(10, 0, 0, true, false, null, rand);
           }
     
           @Override
    @@ -575,16 +763,17 @@ public String next() {
             currentQuery += 1;
     
             StringBuilder query = new StringBuilder();
    -        query.append(String.format("SELECT * FROM test WHERE %s(%s, %s", function,
    -            randomIdentifier(),
    -            randomIdentifier()));
    +        query.append(
    +            String.format(
    +                "SELECT * FROM test WHERE %s(%s, %s",
    +                function, randomIdentifier(), randomIdentifier()));
         var args = new ArrayList<String>();
             for (var pair : functionArgs.entrySet()) {
               if (rand.nextBoolean()) {
                 var arg = new StringBuilder();
                 arg.append(rand.nextBoolean() ? "," : ", ");
    -            arg.append(rand.nextBoolean() ? pair.getKey().toLowerCase()
    -                    : pair.getKey().toUpperCase());
    +            arg.append(
    +                rand.nextBoolean() ? pair.getKey().toLowerCase() : pair.getKey().toUpperCase());
                 arg.append(rand.nextBoolean() ? "=" : " = ");
                 if (pair.getValue() instanceof String[] || rand.nextBoolean()) {
                   var quoteSymbol = rand.nextBoolean() ? '\'' : '"';
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/domain/SQLQueryRequestTest.java b/sql/src/test/java/org/opensearch/sql/sql/domain/SQLQueryRequestTest.java
    index 52a1f534e9..2b64b13b35 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/domain/SQLQueryRequestTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/domain/SQLQueryRequestTest.java
    @@ -3,66 +3,77 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.domain;
     
    +import static org.junit.jupiter.api.Assertions.assertAll;
     import static org.junit.jupiter.api.Assertions.assertEquals;
     import static org.junit.jupiter.api.Assertions.assertFalse;
     import static org.junit.jupiter.api.Assertions.assertThrows;
     import static org.junit.jupiter.api.Assertions.assertTrue;
     
     import com.google.common.collect.ImmutableMap;
    +import java.util.HashMap;
     import java.util.Map;
     import org.json.JSONObject;
    +import org.junit.jupiter.api.DisplayNameGeneration;
    +import org.junit.jupiter.api.DisplayNameGenerator;
     import org.junit.jupiter.api.Test;
     import org.opensearch.sql.protocol.response.format.Format;
     
    +@DisplayNameGeneration(DisplayNameGenerator.ReplaceUnderscores.class)
     public class SQLQueryRequestTest {
     
       @Test
    -  public void shouldSupportQuery() {
    +  public void should_support_query() {
         SQLQueryRequest request = SQLQueryRequestBuilder.request("SELECT 1").build();
         assertTrue(request.isSupported());
       }
     
       @Test
    -  public void shouldSupportQueryWithJDBCFormat() {
    -    SQLQueryRequest request = SQLQueryRequestBuilder.request("SELECT 1")
    -                                                    .format("jdbc")
    -                                                    .build();
    -    assertTrue(request.isSupported());
    -    assertEquals(request.format(), Format.JDBC);
    +  public void should_support_query_with_JDBC_format() {
    +    SQLQueryRequest request = SQLQueryRequestBuilder.request("SELECT 1").format("jdbc").build();
    +    assertAll(
    +        () -> assertTrue(request.isSupported()), () -> assertEquals(request.format(), Format.JDBC));
       }
     
       @Test
    -  public void shouldSupportQueryWithQueryFieldOnly() {
    +  public void should_support_query_with_query_field_only() {
         SQLQueryRequest request =
    -        SQLQueryRequestBuilder.request("SELECT 1")
    -                              .jsonContent("{\"query\": \"SELECT 1\"}")
    -                              .build();
    +        SQLQueryRequestBuilder.request("SELECT 1").jsonContent("{\"query\": \"SELECT 1\"}").build();
         assertTrue(request.isSupported());
       }
     
       @Test
    -  public void shouldSupportQueryWithParameters() {
    -    SQLQueryRequest request =
    +  public void should_support_query_with_parameters() {
    +    SQLQueryRequest requestWithContent =
             SQLQueryRequestBuilder.request("SELECT 1")
                 .jsonContent("{\"query\": \"SELECT 1\", \"parameters\":[]}")
                 .build();
    -    assertTrue(request.isSupported());
    +    SQLQueryRequest requestWithParams =
    +        SQLQueryRequestBuilder.request("SELECT 1").params(Map.of("one", "two")).build();
    +    assertAll(
    +        () -> assertTrue(requestWithContent.isSupported()),
    +        () -> assertTrue(requestWithParams.isSupported()));
       }
     
       @Test
    -  public void shouldSupportQueryWithZeroFetchSize() {
    +  public void should_support_query_without_parameters() {
    +    SQLQueryRequest requestWithNoParams =
    +        SQLQueryRequestBuilder.request("SELECT 1").params(Map.of()).build();
    +    assertTrue(requestWithNoParams.isSupported());
    +  }
    +
    +  @Test
    +  public void should_support_query_with_zero_fetch_size() {
         SQLQueryRequest request =
             SQLQueryRequestBuilder.request("SELECT 1")
    -                              .jsonContent("{\"query\": \"SELECT 1\", \"fetch_size\": 0}")
    -                              .build();
    +            .jsonContent("{\"query\": \"SELECT 1\", \"fetch_size\": 0}")
    +            .build();
         assertTrue(request.isSupported());
       }
     
       @Test
    -  public void shouldSupportQueryWithParametersAndZeroFetchSize() {
    +  public void should_support_query_with_parameters_and_zero_fetch_size() {
         SQLQueryRequest request =
             SQLQueryRequestBuilder.request("SELECT 1")
                 .jsonContent("{\"query\": \"SELECT 1\", \"fetch_size\": 0, \"parameters\":[]}")
    @@ -71,86 +82,154 @@ public void shouldSupportQueryWithParametersAndZeroFetchSize() {
       }
     
       @Test
    -  public void shouldSupportExplain() {
    +  public void should_support_explain() {
         SQLQueryRequest explainRequest =
    -        SQLQueryRequestBuilder.request("SELECT 1")
    -                              .path("_plugins/_sql/_explain")
    -                              .build();
    -    assertTrue(explainRequest.isExplainRequest());
    -    assertTrue(explainRequest.isSupported());
    +        SQLQueryRequestBuilder.request("SELECT 1").path("_plugins/_sql/_explain").build();
    +
    +    assertAll(
    +        () -> assertTrue(explainRequest.isExplainRequest()),
    +        () -> assertTrue(explainRequest.isSupported()));
       }
     
       @Test
    -  public void shouldNotSupportCursorRequest() {
    +  public void should_support_cursor_request() {
         SQLQueryRequest fetchSizeRequest =
             SQLQueryRequestBuilder.request("SELECT 1")
    -                              .jsonContent("{\"query\": \"SELECT 1\", \"fetch_size\": 5}")
    -                              .build();
    -    assertFalse(fetchSizeRequest.isSupported());
    +            .jsonContent("{\"query\": \"SELECT 1\", \"fetch_size\": 5}")
    +            .build();
     
         SQLQueryRequest cursorRequest =
    -        SQLQueryRequestBuilder.request("SELECT 1")
    -                              .jsonContent("{\"cursor\": \"abcdefgh...\"}")
    -                              .build();
    -    assertFalse(cursorRequest.isSupported());
    +        SQLQueryRequestBuilder.request(null).cursor("abcdefgh...").build();
    +
    +    assertAll(
    +        () -> assertTrue(fetchSizeRequest.isSupported()),
    +        () -> assertTrue(cursorRequest.isSupported()));
    +  }
    +
    +  @Test
    +  public void should_support_cursor_close_request() {
    +    SQLQueryRequest closeRequest =
    +        SQLQueryRequestBuilder.request(null).cursor("pewpew").path("_plugins/_sql/close").build();
    +
    +    SQLQueryRequest emptyCloseRequest =
    +        SQLQueryRequestBuilder.request(null).cursor("").path("_plugins/_sql/close").build();
    +
    +    SQLQueryRequest pagingRequest = SQLQueryRequestBuilder.request(null).cursor("pewpew").build();
    +
    +    assertAll(
    +        () -> assertTrue(closeRequest.isSupported()),
    +        () -> assertTrue(closeRequest.isCursorCloseRequest()),
    +        () -> assertTrue(pagingRequest.isSupported()),
    +        () -> assertFalse(pagingRequest.isCursorCloseRequest()),
    +        () -> assertFalse(emptyCloseRequest.isSupported()),
    +        () -> assertTrue(emptyCloseRequest.isCursorCloseRequest()));
    +  }
    +
    +  @Test
    +  public void should_not_support_request_with_empty_cursor() {
    +    SQLQueryRequest requestWithEmptyCursor =
    +        SQLQueryRequestBuilder.request(null).cursor("").build();
    +    SQLQueryRequest requestWithNullCursor =
    +        SQLQueryRequestBuilder.request(null).cursor(null).build();
    +    assertAll(
    +        () -> assertFalse(requestWithEmptyCursor.isSupported()),
    +        () -> assertFalse(requestWithNullCursor.isSupported()));
    +  }
    +
    +  @Test
    +  public void should_not_support_request_with_unknown_field() {
    +    SQLQueryRequest request =
    +        SQLQueryRequestBuilder.request("SELECT 1").jsonContent("{\"pewpew\": 42}").build();
    +    assertFalse(request.isSupported());
    +  }
    +
    +  @Test
    +  public void should_not_support_request_with_cursor_and_something_else() {
    +    SQLQueryRequest requestWithQuery =
    +        SQLQueryRequestBuilder.request("SELECT 1").cursor("n:12356").build();
    +    SQLQueryRequest requestWithParams =
    +        SQLQueryRequestBuilder.request(null).cursor("n:12356").params(Map.of("one", "two")).build();
    +    SQLQueryRequest requestWithParamsWithFormat =
    +        SQLQueryRequestBuilder.request(null)
    +            .cursor("n:12356")
    +            .params(Map.of("format", "jdbc"))
    +            .build();
    +    SQLQueryRequest requestWithParamsWithFormatAnd =
    +        SQLQueryRequestBuilder.request(null)
    +            .cursor("n:12356")
    +            .params(Map.of("format", "jdbc", "something", "else"))
    +            .build();
    +    SQLQueryRequest requestWithFetchSize =
    +        SQLQueryRequestBuilder.request(null)
    +            .cursor("n:12356")
    +            .jsonContent("{\"fetch_size\": 5}")
    +            .build();
    +    SQLQueryRequest requestWithNoParams =
    +        SQLQueryRequestBuilder.request(null).cursor("n:12356").params(Map.of()).build();
    +    SQLQueryRequest requestWithNoContent =
    +        SQLQueryRequestBuilder.request(null).cursor("n:12356").jsonContent("{}").build();
    +    assertAll(
    +        () -> assertFalse(requestWithQuery.isSupported()),
    +        () -> assertFalse(requestWithParams.isSupported()),
    +        () -> assertFalse(requestWithFetchSize.isSupported()),
    +        () -> assertTrue(requestWithNoParams.isSupported()),
    +        () -> assertTrue(requestWithParamsWithFormat.isSupported()),
    +        () -> assertFalse(requestWithParamsWithFormatAnd.isSupported()),
    +        () -> assertTrue(requestWithNoContent.isSupported()));
       }
     
       @Test
    -  public void shouldUseJDBCFormatByDefault() {
    +  public void should_use_JDBC_format_by_default() {
         SQLQueryRequest request =
             SQLQueryRequestBuilder.request("SELECT 1").params(ImmutableMap.of()).build();
         assertEquals(request.format(), Format.JDBC);
       }
     
       @Test
    -  public void shouldSupportCSVFormatAndSanitize() {
    -    SQLQueryRequest csvRequest =
    -        SQLQueryRequestBuilder.request("SELECT 1")
    -                              .format("csv")
    -                              .build();
    -    assertTrue(csvRequest.isSupported());
    -    assertEquals(csvRequest.format(), Format.CSV);
    -    assertTrue(csvRequest.sanitize());
    +  public void should_support_CSV_format_and_sanitize() {
    +    SQLQueryRequest csvRequest = SQLQueryRequestBuilder.request("SELECT 1").format("csv").build();
    +    assertAll(
    +        () -> assertTrue(csvRequest.isSupported()),
    +        () -> assertEquals(csvRequest.format(), Format.CSV),
    +        () -> assertTrue(csvRequest.sanitize()));
       }
     
       @Test
    -  public void shouldSkipSanitizeIfSetFalse() {
    +  public void should_skip_sanitize_if_set_false() {
     ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
     Map<String, String> params = builder.put("format", "csv").put("sanitize", "false").build();
         SQLQueryRequest csvRequest = SQLQueryRequestBuilder.request("SELECT 1").params(params).build();
    -    assertEquals(csvRequest.format(), Format.CSV);
    -    assertFalse(csvRequest.sanitize());
    +    assertAll(
    +        () -> assertEquals(csvRequest.format(), Format.CSV),
    +        () -> assertFalse(csvRequest.sanitize()));
       }
     
       @Test
    -  public void shouldNotSupportOtherFormat() {
    -    SQLQueryRequest csvRequest =
    -        SQLQueryRequestBuilder.request("SELECT 1")
    -            .format("other")
    -            .build();
    -    assertFalse(csvRequest.isSupported());
    -    assertThrows(IllegalArgumentException.class, csvRequest::format,
    -        "response in other format is not supported.");
    +  public void should_not_support_other_format() {
    +    SQLQueryRequest csvRequest = SQLQueryRequestBuilder.request("SELECT 1").format("other").build();
    +
    +    assertAll(
    +        () -> assertFalse(csvRequest.isSupported()),
    +        () ->
    +            assertEquals(
    +                "response in other format is not supported.",
    +                assertThrows(IllegalArgumentException.class, csvRequest::format).getMessage()));
       }
     
       @Test
    -  public void shouldSupportRawFormat() {
    -    SQLQueryRequest csvRequest =
    -            SQLQueryRequestBuilder.request("SELECT 1")
    -                    .format("raw")
    -                    .build();
    +  public void should_support_raw_format() {
    +    SQLQueryRequest csvRequest = SQLQueryRequestBuilder.request("SELECT 1").format("raw").build();
         assertTrue(csvRequest.isSupported());
       }
     
    -  /**
    -   * SQL query request build helper to improve test data setup readability.
    -   */
    +  /** SQL query request build helper to improve test data setup readability. */
       private static class SQLQueryRequestBuilder {
         private String jsonContent;
         private String query;
         private String path = "_plugins/_sql";
         private String format;
-    private Map<String, String> params;
+    private String cursor;
+    private Map<String, String> params = new HashMap<>();
     
         static SQLQueryRequestBuilder request(String query) {
           SQLQueryRequestBuilder builder = new SQLQueryRequestBuilder();
@@ -178,15 +257,17 @@ SQLQueryRequestBuilder params(Map<String, String> params) {
           return this;
         }
     
    +    SQLQueryRequestBuilder cursor(String cursor) {
    +      this.cursor = cursor;
    +      return this;
    +    }
    +
         SQLQueryRequest build() {
    -      if (jsonContent == null) {
    -        jsonContent = "{\"query\": \"" + query + "\"}";
    +      if (format != null) {
    +        params.put("format", format);
           }
    -      if (params != null) {
    -        return new SQLQueryRequest(new JSONObject(jsonContent), query, path, params);
    -      }
    -      return new SQLQueryRequest(new JSONObject(jsonContent), query, path, format);
    +      return new SQLQueryRequest(
    +          jsonContent == null ? null : new JSONObject(jsonContent), query, path, params, cursor);
         }
       }
    -
     }
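Aside on the pattern used throughout the SQLQueryRequestTest changes above: the refactor replaces runs of sequential assertions with JUnit 5's assertAll, which executes every grouped assertion and reports all failures together (as one MultipleFailuresError) instead of stopping at the first. A minimal sketch of the idiom; the class and variable names here are illustrative, not from the plugin:

    import static org.junit.jupiter.api.Assertions.assertAll;
    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class GroupedAssertionSketch {
      @Test
      void reports_all_failures_together() {
        boolean supported = true;
        String format = "jdbc";
        // assertAll invokes every lambda even if an earlier one fails,
        // then aggregates all failures into a single error.
        assertAll(
            () -> assertTrue(supported),
            () -> assertEquals("jdbc", format));
      }
    }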
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AnonymizerListenerTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AnonymizerListenerTest.java
    new file mode 100644
    index 0000000000..4d2addf3d3
    --- /dev/null
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AnonymizerListenerTest.java
    @@ -0,0 +1,210 @@
    +/*
    + * Copyright OpenSearch Contributors
    + * SPDX-License-Identifier: Apache-2.0
    + */
    +
    +package org.opensearch.sql.sql.parser;
    +
    +import static org.junit.jupiter.api.Assertions.assertEquals;
    +import static org.mockito.Mockito.mock;
    +
    +import org.antlr.v4.runtime.CommonTokenStream;
    +import org.antlr.v4.runtime.tree.ErrorNode;
    +import org.junit.jupiter.api.Test;
    +import org.opensearch.sql.common.antlr.CaseInsensitiveCharStream;
    +import org.opensearch.sql.sql.antlr.AnonymizerListener;
    +import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLLexer;
    +import org.opensearch.sql.sql.antlr.parser.OpenSearchSQLParser;
    +
    +public class AnonymizerListenerTest {
    +
    +  private final AnonymizerListener anonymizerListener = new AnonymizerListener();
    +
    +  /**
+   * Helper function to parse SQL queries for testing purposes.
    +   *
    +   * @param query SQL query to be anonymized.
    +   */
    +  private void parse(String query) {
    +    OpenSearchSQLLexer lexer = new OpenSearchSQLLexer(new CaseInsensitiveCharStream(query));
    +    OpenSearchSQLParser parser = new OpenSearchSQLParser(new CommonTokenStream(lexer));
    +    parser.addParseListener(anonymizerListener);
    +
    +    parser.root();
    +  }
    +
    +  @Test
    +  public void queriesShouldHaveAnonymousFieldAndIndex() {
    +    String query = "SELECT ABS(balance) FROM accounts WHERE age > 30 GROUP BY ABS(balance)";
    +    String expectedQuery =
    +        "( SELECT ABS ( identifier ) FROM table "
    +            + "WHERE identifier > number GROUP BY ABS ( identifier ) )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesShouldAnonymousNumbers() {
    +    String query = "SELECT ABS(20), LOG(20.20) FROM accounts";
    +    String expectedQuery = "( SELECT ABS ( number ), LOG ( number ) FROM table )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesShouldHaveAnonymousBooleanLiterals() {
    +    String query = "SELECT TRUE FROM accounts";
    +    String expectedQuery = "( SELECT boolean_literal FROM table )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesShouldHaveAnonymousInputStrings() {
    +    String query = "SELECT * FROM accounts WHERE name = 'Oliver'";
    +    String expectedQuery = "( SELECT * FROM table WHERE identifier = 'string_literal' )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithAliasesShouldAnonymizeSensitiveData() {
    +    String query = "SELECT balance AS b FROM accounts AS a";
    +    String expectedQuery = "( SELECT identifier AS identifier FROM table AS identifier )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithFunctionsShouldAnonymizeSensitiveData() {
    +    String query = "SELECT LTRIM(firstname) FROM accounts";
    +    String expectedQuery = "( SELECT LTRIM ( identifier ) FROM table )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithAggregatesShouldAnonymizeSensitiveData() {
    +    String query = "SELECT MAX(price) - MIN(price) from tickets";
    +    String expectedQuery = "( SELECT MAX ( identifier ) - MIN ( identifier ) FROM table )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithSubqueriesShouldAnonymizeSensitiveData() {
    +    String query =
    +        "SELECT a.f, a.l, a.a FROM "
    +            + "(SELECT firstname AS f, lastname AS l, age AS a FROM accounts WHERE age > 30) a";
    +    String expectedQuery =
    +        "( SELECT identifier.identifier, identifier.identifier, identifier.identifier FROM ( SELECT"
    +            + " identifier AS identifier, identifier AS identifier, identifier AS identifier FROM"
    +            + " table WHERE identifier > number ) identifier )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithLimitShouldAnonymizeSensitiveData() {
    +    String query = "SELECT balance FROM accounts LIMIT 5";
    +    String expectedQuery = "( SELECT identifier FROM table LIMIT number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithOrderByShouldAnonymizeSensitiveData() {
    +    String query = "SELECT firstname FROM accounts ORDER BY lastname";
    +    String expectedQuery = "( SELECT identifier FROM table ORDER BY identifier )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithHavingShouldAnonymizeSensitiveData() {
    +    String query = "SELECT SUM(balance) FROM accounts GROUP BY lastname HAVING COUNT(balance) > 2";
    +    String expectedQuery =
    +        "( SELECT SUM ( identifier ) FROM table "
    +            + "GROUP BY identifier HAVING COUNT ( identifier ) > number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithHighlightShouldAnonymizeSensitiveData() {
    +    String query = "SELECT HIGHLIGHT(str0) FROM CALCS WHERE QUERY_STRING(['str0'], 'FURNITURE')";
    +    String expectedQuery =
    +        "( SELECT HIGHLIGHT ( identifier ) FROM table WHERE "
    +            + "QUERY_STRING ( [ 'string_literal' ], 'string_literal' ) )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithMatchShouldAnonymizeSensitiveData() {
    +    String query = "SELECT str0 FROM CALCS WHERE MATCH(str0, 'FURNITURE')";
    +    String expectedQuery =
    +        "( SELECT identifier FROM table " + "WHERE MATCH ( identifier, 'string_literal' ) )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithPositionShouldAnonymizeSensitiveData() {
    +    String query = "SELECT POSITION('world' IN 'helloworld')";
    +    String expectedQuery = "( SELECT POSITION ( 'string_literal' IN 'string_literal' ) )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithMatch_Bool_Prefix_ShouldAnonymizeSensitiveData() {
    +    String query =
    +        "SELECT firstname, address FROM accounts WHERE "
    +            + "match_bool_prefix(address, 'Bristol Street', minimum_should_match=2)";
    +    String expectedQuery =
    +        "( SELECT identifier, identifier FROM table WHERE MATCH_BOOL_PREFIX "
    +            + "( identifier, 'string_literal', MINIMUM_SHOULD_MATCH = number ) )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithGreaterOrEqualShouldAnonymizeSensitiveData() {
    +    String query = "SELECT int0 FROM accounts WHERE int0 >= 0";
    +    String expectedQuery = "( SELECT identifier FROM table WHERE identifier >= number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithLessOrEqualShouldAnonymizeSensitiveData() {
    +    String query = "SELECT int0 FROM accounts WHERE int0 <= 0";
    +    String expectedQuery = "( SELECT identifier FROM table WHERE identifier <= number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithNotEqualShouldAnonymizeSensitiveData() {
    +    String query = "SELECT int0 FROM accounts WHERE int0 != 0";
    +    String expectedQuery = "( SELECT identifier FROM table WHERE identifier != number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  @Test
    +  public void queriesWithNotEqualAlternateShouldAnonymizeSensitiveData() {
    +    String query = "SELECT int0 FROM calcs WHERE int0 <> 0";
    +    String expectedQuery = "( SELECT identifier FROM table WHERE identifier <> number )";
    +    parse(query);
    +    assertEquals(expectedQuery, anonymizerListener.getAnonymizedQueryString());
    +  }
    +
    +  /** Test added for coverage, but the errorNode will not be hit normally. */
    +  @Test
+  public void enterErrorNode() {
    +    ErrorNode node = mock(ErrorNode.class);
    +    anonymizerListener.visitErrorNode(node);
    +  }
    +}
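The new AnonymizerListenerTest above exercises AnonymizerListener as an ANTLR parse listener: the listener is attached to the generated parser, receives tokens as grammar rules are matched, and rebuilds the statement with identifiers, table names, and literals replaced by placeholders. A condensed usage sketch under that reading, using only types and an expected value that appear in the test file itself:

    OpenSearchSQLLexer lexer =
        new OpenSearchSQLLexer(new CaseInsensitiveCharStream("SELECT balance FROM accounts LIMIT 5"));
    OpenSearchSQLParser parser = new OpenSearchSQLParser(new CommonTokenStream(lexer));
    AnonymizerListener anonymizer = new AnonymizerListener();
    parser.addParseListener(anonymizer); // fires as parse rules are matched
    parser.root();                       // parse the whole statement
    // anonymizer.getAnonymizedQueryString() now returns
    // "( SELECT identifier FROM table LIMIT number )"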
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstAggregationBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstAggregationBuilderTest.java
    index 79896d9400..95188e20b6 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstAggregationBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstAggregationBuilderTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static java.util.Collections.emptyList;
    @@ -30,7 +29,6 @@
     import org.junit.jupiter.api.DisplayNameGeneration;
     import org.junit.jupiter.api.DisplayNameGenerator;
     import org.junit.jupiter.api.Test;
    -import org.opensearch.sql.ast.expression.AllFields;
     import org.opensearch.sql.ast.expression.UnresolvedExpression;
     import org.opensearch.sql.ast.tree.Aggregation;
     import org.opensearch.sql.ast.tree.UnresolvedPlan;
    @@ -60,10 +58,9 @@ void can_build_group_by_clause_with_scalar_expression() {
             buildAggregation("SELECT ABS(age + 1) FROM test GROUP BY ABS(age + 1)"),
             allOf(
                 hasGroupByItems(
    -                alias("ABS(+(age, 1))", function("ABS",
    -                    function("+",
    -                        qualifiedName("age"),
    -                        intLiteral(1))))),
    +                alias(
    +                    "ABS(+(age, 1))",
    +                    function("ABS", function("+", qualifiedName("age"), intLiteral(1))))),
                 hasAggregators()));
       }
     
    @@ -80,9 +77,7 @@ void can_build_group_by_clause_with_complicated_aggregators() {
       void can_build_group_by_clause_without_aggregators() {
         assertThat(
             buildAggregation("SELECT state FROM test GROUP BY state"),
    -        allOf(
    -            hasGroupByItems(alias("state", qualifiedName("state"))),
    -            hasAggregators()));
    +        allOf(hasGroupByItems(alias("state", qualifiedName("state"))), hasAggregators()));
       }
     
       @Test
    @@ -102,50 +97,43 @@ void can_build_implicit_group_by_for_aggregator_in_having_clause() {
             buildAggregation("SELECT true FROM test HAVING AVG(age) > 30"),
             allOf(
                 hasGroupByItems(),
    -            hasAggregators(
    -                alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
    -            buildAggregation("SELECT PI() FROM test HAVING AVG(age) > 30"),
    -            allOf(
    -                    hasGroupByItems(),
    -                    hasAggregators(
    -                            alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +        buildAggregation("SELECT PI() FROM test HAVING AVG(age) > 30"),
    +        allOf(
    +            hasGroupByItems(),
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
    -            buildAggregation("SELECT ABS(1.5) FROM test HAVING AVG(age) > 30"),
    -            allOf(
    -                    hasGroupByItems(),
    -                    hasAggregators(
    -                            alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +        buildAggregation("SELECT ABS(1.5) FROM test HAVING AVG(age) > 30"),
    +        allOf(
    +            hasGroupByItems(),
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
    -            buildAggregation("SELECT ABS(ABS(1.5)) FROM test HAVING AVG(age) > 30"),
    -            allOf(
    -                    hasGroupByItems(),
    -                    hasAggregators(
    -                            alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +        buildAggregation("SELECT ABS(ABS(1.5)) FROM test HAVING AVG(age) > 30"),
    +        allOf(
    +            hasGroupByItems(),
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
             buildAggregation("SELECT INTERVAL 1 DAY FROM test HAVING AVG(age) > 30"),
             allOf(
                 hasGroupByItems(),
    -            hasAggregators(
    -                alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
             buildAggregation("SELECT CAST(1 AS LONG) FROM test HAVING AVG(age) > 30"),
             allOf(
                 hasGroupByItems(),
    -            hasAggregators(
    -                alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
     
         assertThat(
             buildAggregation("SELECT CASE WHEN true THEN 1 ELSE 2 END FROM test HAVING AVG(age) > 30"),
             allOf(
                 hasGroupByItems(),
    -            hasAggregators(
    -                alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
    +            hasAggregators(alias("AVG(age)", aggregate("AVG", qualifiedName("age"))))));
       }
     
       @Test
    @@ -155,8 +143,7 @@ void can_build_distinct_aggregator() {
             allOf(
                 hasGroupByItems(alias("age", qualifiedName("age"))),
                 hasAggregators(
    -                alias("COUNT(DISTINCT name)", distinctAggregate("COUNT", qualifiedName(
    -                    "name"))))));
    +                alias("COUNT(DISTINCT name)", distinctAggregate("COUNT", qualifiedName("name"))))));
       }
     
       @Test
    @@ -168,8 +155,8 @@ void should_build_nothing_if_no_group_by_and_no_aggregators_in_select() {
       void should_replace_group_by_alias_by_expression_in_select_clause() {
         assertThat(
             buildAggregation("SELECT state AS s, name FROM test GROUP BY s, name"),
    -        hasGroupByItems(alias("state", qualifiedName("state")),
    -            alias("name", qualifiedName("name"))));
    +        hasGroupByItems(
    +            alias("state", qualifiedName("state")), alias("name", qualifiedName("name"))));
     
         assertThat(
             buildAggregation("SELECT ABS(age) AS a FROM test GROUP BY a"),
    @@ -191,25 +178,30 @@ void should_replace_group_by_ordinal_by_expression_in_select_clause() {
     
       @Test
       void should_report_error_for_non_integer_ordinal_in_group_by() {
    -    SemanticCheckException error = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT state AS s FROM test GROUP BY 1.5"));
    -    assertEquals(
    -        "Non-integer constant [1.5] found in ordinal",
    -        error.getMessage());
    +    SemanticCheckException error =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT state AS s FROM test GROUP BY 1.5"));
    +    assertEquals("Non-integer constant [1.5] found in ordinal", error.getMessage());
       }
     
    -  @Disabled("This validation is supposed to be in analyzing phase. This test should be enabled "
    +  @Disabled(
    +      "This validation is supposed to be in analyzing phase. This test should be enabled "
               + "once https://github.com/opensearch-project/sql/issues/910 has been resolved")
       @Test
       void should_report_error_for_mismatch_between_select_and_group_by_items() {
    -    SemanticCheckException error1 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT name FROM test GROUP BY state"));
    +    SemanticCheckException error1 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT name FROM test GROUP BY state"));
         assertEquals(
             "Expression [name] that contains non-aggregated column is not present in group by clause",
             error1.getMessage());
     
    -    SemanticCheckException error2 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT ABS(name + 1) FROM test GROUP BY name"));
    +    SemanticCheckException error2 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT ABS(name + 1) FROM test GROUP BY name"));
         assertEquals(
             "Expression [Function(funcName=ABS, funcArgs=[Function(funcName=+, "
                 + "funcArgs=[name, Literal(value=1, type=INTEGER)])])] that contains "
    @@ -219,15 +211,19 @@ void should_report_error_for_mismatch_between_select_and_group_by_items() {
     
       @Test
       void should_report_error_for_non_aggregated_item_in_select_if_no_group_by() {
    -    SemanticCheckException error1 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT age, AVG(balance) FROM tests"));
    +    SemanticCheckException error1 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT age, AVG(balance) FROM tests"));
         assertEquals(
             "Explicit GROUP BY clause is required because expression [age] "
                 + "contains non-aggregated column",
             error1.getMessage());
     
    -    SemanticCheckException error2 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT ABS(age + 1), AVG(balance) FROM tests"));
    +    SemanticCheckException error2 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT ABS(age + 1), AVG(balance) FROM tests"));
         assertEquals(
             "Explicit GROUP BY clause is required because expression [ABS(+(age, 1))] "
                 + "contains non-aggregated column",
    @@ -236,19 +232,25 @@ void should_report_error_for_non_aggregated_item_in_select_if_no_group_by() {
     
       @Test
       void should_report_error_for_group_by_ordinal_out_of_bound_of_select_list() {
    -    SemanticCheckException error1 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT age, AVG(balance) FROM tests GROUP BY 0"));
    +    SemanticCheckException error1 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT age, AVG(balance) FROM tests GROUP BY 0"));
         assertEquals("Ordinal [0] is out of bound of select item list", error1.getMessage());
     
    -    SemanticCheckException error2 = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT age, AVG(balance) FROM tests GROUP BY 3"));
    +    SemanticCheckException error2 =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT age, AVG(balance) FROM tests GROUP BY 3"));
         assertEquals("Ordinal [3] is out of bound of select item list", error2.getMessage());
       }
     
       @Test
       void should_report_error_for_non_aggregated_item_in_select_if_only_having() {
    -    SemanticCheckException error = assertThrows(SemanticCheckException.class, () ->
    -        buildAggregation("SELECT age FROM tests HAVING AVG(balance) > 30"));
    +    SemanticCheckException error =
    +        assertThrows(
    +            SemanticCheckException.class,
    +            () -> buildAggregation("SELECT age FROM tests HAVING AVG(balance) > 30"));
         assertEquals(
             "Explicit GROUP BY clause is required because expression [age] "
                 + "contains non-aggregated column",
@@ -263,10 +265,10 @@ private Matcher<UnresolvedPlan> hasAggregators(UnresolvedExpression... exprs) {
         return featureValueOf("aggregators", Aggregation::getAggExprList, exprs);
       }
     
-  private Matcher<UnresolvedPlan> featureValueOf(String name,
-                                                 Function<Aggregation, List<UnresolvedExpression>> getter,
-                                                 UnresolvedExpression... exprs) {
+  private Matcher<UnresolvedPlan> featureValueOf(
+      String name,
+      Function<Aggregation, List<UnresolvedExpression>> getter,
+      UnresolvedExpression... exprs) {
     Matcher<Iterable<? extends UnresolvedExpression>> subMatcher =
         (exprs.length == 0) ? equalTo(emptyList()) : equalTo(Arrays.asList(exprs));
     return new FeatureMatcher<UnresolvedPlan, List<UnresolvedExpression>>(subMatcher, name, "") {
    @@ -296,5 +298,4 @@ private QuerySpecificationContext parse(String query) {
         parser.addErrorListener(new SyntaxAnalysisErrorListener());
         return parser.querySpecification();
       }
    -
     }
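The featureValueOf helper reformatted above builds on Hamcrest's FeatureMatcher, which turns a matcher over an extracted "feature" of an object into a matcher over the object itself. A self-contained sketch of the idiom; the Person type and hasName helper are illustrative, not from the test:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    import org.hamcrest.FeatureMatcher;
    import org.hamcrest.Matcher;

    class FeatureMatcherSketch {
      static final class Person {
        final String name;

        Person(String name) {
          this.name = name;
        }
      }

      // Adapt a Matcher<String> over the name into a Matcher<Person>.
      static Matcher<Person> hasName(String expected) {
        return new FeatureMatcher<Person, String>(equalTo(expected), "name", "name") {
          @Override
          protected String featureValueOf(Person actual) {
            return actual.name; // extract the feature the sub-matcher checks
          }
        };
      }

      public static void main(String[] args) {
        assertThat(new Person("John"), hasName("John"));
      }
    }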
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTest.java
    index 2aed4f2834..edee692500 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static java.util.Collections.emptyList;
    @@ -33,21 +32,14 @@
     
     import com.google.common.collect.ImmutableList;
     import java.util.HashMap;
    -import java.util.List;
     import java.util.Map;
    -import java.util.stream.Stream;
    -import org.antlr.v4.runtime.tree.ParseTree;
     import org.junit.jupiter.api.Test;
    -import org.junit.jupiter.params.ParameterizedTest;
    -import org.junit.jupiter.params.provider.Arguments;
    -import org.junit.jupiter.params.provider.MethodSource;
     import org.opensearch.sql.ast.dsl.AstDSL;
     import org.opensearch.sql.ast.expression.AllFields;
     import org.opensearch.sql.ast.expression.DataType;
     import org.opensearch.sql.ast.expression.Literal;
    -import org.opensearch.sql.ast.tree.UnresolvedPlan;
    +import org.opensearch.sql.ast.expression.NestedAllTupleFields;
     import org.opensearch.sql.common.antlr.SyntaxCheckException;
    -import org.opensearch.sql.sql.antlr.SQLSyntaxParser;
     
     class AstBuilderTest extends AstBuilderTestBase {
     
    @@ -60,40 +52,31 @@ public void can_build_select_literals() {
                 alias("'hello'", stringLiteral("hello")),
                 alias("\"world\"", stringLiteral("world")),
                 alias("false", booleanLiteral(false)),
    -            alias("-4.567", doubleLiteral(-4.567))
    -        ),
    -        buildAST("SELECT 123, 'hello', \"world\", false, -4.567")
    -    );
    +            alias("-4.567", doubleLiteral(-4.567))),
    +        buildAST("SELECT 123, 'hello', \"world\", false, -4.567"));
       }
     
       @Test
       public void can_build_select_function_call_with_alias() {
         assertEquals(
    -        project(
    -            relation("test"),
    -            alias(
    -                "ABS(age)",
    -                function("ABS", qualifiedName("age")),
    -                "a"
    -            )
    -        ),
    -        buildAST("SELECT ABS(age) AS a FROM test")
    -    );
    +        project(relation("test"), alias("ABS(age)", function("ABS", qualifiedName("age")), "a")),
    +        buildAST("SELECT ABS(age) AS a FROM test"));
       }
     
       @Test
       public void can_build_select_all_from_index() {
    -    assertEquals(
    -        project(
    -            relation("test"),
    -            AllFields.of()
    -        ),
    -        buildAST("SELECT * FROM test")
    -    );
    +    assertEquals(project(relation("test"), AllFields.of()), buildAST("SELECT * FROM test"));
     
         assertThrows(SyntaxCheckException.class, () -> buildAST("SELECT *"));
       }
     
    +  @Test
    +  public void can_build_nested_select_all() {
    +    assertEquals(
    +        project(relation("test"), alias("nested(field.*)", new NestedAllTupleFields("field"))),
    +        buildAST("SELECT nested(field.*) FROM test"));
    +  }
    +
       @Test
       public void can_build_select_all_and_fields_from_index() {
         assertEquals(
    @@ -101,32 +84,22 @@ public void can_build_select_all_and_fields_from_index() {
                 relation("test"),
                 AllFields.of(),
                 alias("age", qualifiedName("age")),
    -            alias("age", qualifiedName("age"), "a")
    -        ),
    -        buildAST("SELECT *, age, age as a FROM test")
    -    );
    +            alias("age", qualifiedName("age"), "a")),
    +        buildAST("SELECT *, age, age as a FROM test"));
       }
     
       @Test
       public void can_build_select_fields_from_index() {
         assertEquals(
    -        project(
    -            relation("test"),
    -            alias("age", qualifiedName("age"))
    -        ),
    -        buildAST("SELECT age FROM test")
    -    );
    +        project(relation("test"), alias("age", qualifiedName("age"))),
    +        buildAST("SELECT age FROM test"));
       }
     
       @Test
       public void can_build_select_fields_with_alias() {
         assertEquals(
    -        project(
    -            relation("test"),
    -            alias("age", qualifiedName("age"), "a")
    -        ),
    -        buildAST("SELECT age AS a FROM test")
    -    );
    +        project(relation("test"), alias("age", qualifiedName("age"), "a")),
    +        buildAST("SELECT age AS a FROM test"));
       }
     
       @Test
    @@ -134,17 +107,8 @@ public void can_build_select_fields_with_alias_quoted() {
         assertEquals(
             project(
                 relation("test"),
    -            alias(
    -                "(age + 10)",
    -                function("+", qualifiedName("age"), intLiteral(10)),
    -                "Age_Expr"
    -            )
    -        ),
    -        buildAST("SELECT"
    -                + " (age + 10) AS `Age_Expr` "
    -                + "FROM test"
    -        )
    -    );
    +            alias("(age + 10)", function("+", qualifiedName("age"), intLiteral(10)), "Age_Expr")),
    +        buildAST("SELECT" + " (age + 10) AS `Age_Expr` " + "FROM test"));
       }
     
       @Test
    @@ -152,42 +116,27 @@ public void can_build_from_index_with_alias() {
         assertEquals(
             project(
                 filter(
    -                relation("test", "tt"),
    -                function("=", qualifiedName("tt", "age"), intLiteral(30))),
    -            alias("tt.name", qualifiedName("tt", "name"))
    -        ),
    -        buildAST("SELECT tt.name FROM test AS tt WHERE tt.age = 30")
    -    );
    +                relation("test", "tt"), function("=", qualifiedName("tt", "age"), intLiteral(30))),
    +            alias("tt.name", qualifiedName("tt", "name"))),
    +        buildAST("SELECT tt.name FROM test AS tt WHERE tt.age = 30"));
       }
     
       @Test
       public void can_build_from_index_with_alias_quoted() {
         assertEquals(
             project(
    -            filter(
    -                relation("test", "t"),
    -                function("=", qualifiedName("t", "age"), intLiteral(30))),
    -            alias("`t`.name", qualifiedName("t", "name"))
    -        ),
    -        buildAST("SELECT `t`.name FROM test `t` WHERE `t`.age = 30")
    -    );
    +            filter(relation("test", "t"), function("=", qualifiedName("t", "age"), intLiteral(30))),
    +            alias("`t`.name", qualifiedName("t", "name"))),
    +        buildAST("SELECT `t`.name FROM test `t` WHERE `t`.age = 30"));
       }
     
       @Test
       public void can_build_where_clause() {
         assertEquals(
             project(
    -            filter(
    -                relation("test"),
    -                function(
    -                    "=",
    -                    qualifiedName("name"),
    -                    stringLiteral("John"))
    -            ),
    -            alias("name", qualifiedName("name"))
    -        ),
    -        buildAST("SELECT name FROM test WHERE name = 'John'")
    -    );
    +            filter(relation("test"), function("=", qualifiedName("name"), stringLiteral("John"))),
    +            alias("name", qualifiedName("name"))),
    +        buildAST("SELECT name FROM test WHERE name = 'John'"));
       }
     
       @Test
    @@ -196,8 +145,7 @@ public void can_build_count_literal() {
             project(
                 agg(
                     relation("test"),
    -                ImmutableList.of(
    -                    alias("COUNT(1)", aggregate("COUNT", intLiteral(1)))),
    +                ImmutableList.of(alias("COUNT(1)", aggregate("COUNT", intLiteral(1)))),
                     emptyList(),
                     emptyList(),
                     emptyList()),
    @@ -211,8 +159,7 @@ public void can_build_count_star() {
             project(
                 agg(
                     relation("test"),
    -                ImmutableList.of(
    -                    alias("COUNT(*)", aggregate("COUNT", AllFields.of()))),
    +                ImmutableList.of(alias("COUNT(*)", aggregate("COUNT", AllFields.of()))),
                     emptyList(),
                     emptyList(),
                     emptyList()),
    @@ -322,9 +269,7 @@ public void can_build_having_clause() {
                         emptyList(),
                         ImmutableList.of(alias("name", qualifiedName("name"))),
                         emptyList()),
    -                function(">",
    -                    aggregate("MIN", qualifiedName("balance")),
    -                    intLiteral(1000))),
    +                function(">", aggregate("MIN", qualifiedName("balance")), intLiteral(1000))),
                 alias("name", qualifiedName("name")),
                 alias("AVG(age)", aggregate("AVG", qualifiedName("age")))),
             buildAST("SELECT name, AVG(age) FROM test GROUP BY name HAVING MIN(balance) > 1000"));
    @@ -337,14 +282,11 @@ public void can_build_having_condition_using_alias() {
                 filter(
                     agg(
                         relation("test"),
    -                    ImmutableList.of(
    -                        alias("AVG(age)", aggregate("AVG", qualifiedName("age")))),
    +                    ImmutableList.of(alias("AVG(age)", aggregate("AVG", qualifiedName("age")))),
                         emptyList(),
                         ImmutableList.of(alias("name", qualifiedName("name"))),
                         emptyList()),
    -                function(">",
    -                    aggregate("AVG", qualifiedName("age")),
    -                    intLiteral(1000))),
    +                function(">", aggregate("AVG", qualifiedName("age")), intLiteral(1000))),
                 alias("name", qualifiedName("name")),
                 alias("AVG(age)", aggregate("AVG", qualifiedName("age")), "a")),
             buildAST("SELECT name, AVG(age) AS a FROM test GROUP BY name HAVING a > 1000"));
    @@ -354,9 +296,7 @@ public void can_build_having_condition_using_alias() {
       public void can_build_order_by_field_name() {
         assertEquals(
             project(
    -            sort(
    -                relation("test"),
    -                field("name", argument("asc", booleanLiteral(true)))),
    +            sort(relation("test"), field("name", argument("asc", booleanLiteral(true)))),
                 alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test ORDER BY name"));
       }
    @@ -368,8 +308,7 @@ public void can_build_order_by_function() {
                 sort(
                     relation("test"),
                     field(
    -                    function("ABS", qualifiedName("name")),
    -                    argument("asc", booleanLiteral(true)))),
    +                    function("ABS", qualifiedName("name")), argument("asc", booleanLiteral(true)))),
                 alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test ORDER BY ABS(name)"));
       }
    @@ -378,9 +317,7 @@ public void can_build_order_by_function() {
       public void can_build_order_by_alias() {
         assertEquals(
             project(
    -            sort(
    -                relation("test"),
    -                field("name", argument("asc", booleanLiteral(true)))),
    +            sort(relation("test"), field("name", argument("asc", booleanLiteral(true)))),
                 alias("name", qualifiedName("name"), "n")),
             buildAST("SELECT name AS n FROM test ORDER BY n ASC"));
       }
    @@ -389,9 +326,7 @@ public void can_build_order_by_alias() {
       public void can_build_order_by_ordinal() {
         assertEquals(
             project(
    -            sort(
    -                relation("test"),
    -                field("name", argument("asc", booleanLiteral(false)))),
    +            sort(relation("test"), field("name", argument("asc", booleanLiteral(false)))),
                 alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test ORDER BY 1 DESC"));
       }
    @@ -418,8 +353,7 @@ public void can_build_select_distinct_clause() {
                     emptyList(),
                     emptyList(),
                     ImmutableList.of(
    -                    alias("name", qualifiedName("name")),
    -                    alias("age", qualifiedName("age"))),
    +                    alias("name", qualifiedName("name")), alias("age", qualifiedName("age"))),
                     emptyList()),
                 alias("name", qualifiedName("name")),
                 alias("age", qualifiedName("age"))),
    @@ -435,26 +369,21 @@ public void can_build_select_distinct_clause_with_function() {
                     emptyList(),
                     emptyList(),
                     ImmutableList.of(
    -                    alias("SUBSTRING(name, 1, 2)",
    +                    alias(
    +                        "SUBSTRING(name, 1, 2)",
                             function(
    -                            "SUBSTRING",
    -                            qualifiedName("name"),
    -                            intLiteral(1), intLiteral(2)))),
    +                            "SUBSTRING", qualifiedName("name"), intLiteral(1), intLiteral(2)))),
                     emptyList()),
    -            alias("SUBSTRING(name, 1, 2)",
    -                function(
    -                    "SUBSTRING",
    -                    qualifiedName("name"),
    -                    intLiteral(1), intLiteral(2)))),
    +            alias(
    +                "SUBSTRING(name, 1, 2)",
    +                function("SUBSTRING", qualifiedName("name"), intLiteral(1), intLiteral(2)))),
             buildAST("SELECT DISTINCT SUBSTRING(name, 1, 2) FROM test"));
       }
     
       @Test
       public void can_build_select_all_clause() {
         assertEquals(
    -        buildAST("SELECT name, age FROM test"),
    -        buildAST("SELECT ALL name, age FROM test")
    -    );
    +        buildAST("SELECT name, age FROM test"), buildAST("SELECT ALL name, age FROM test"));
       }
     
       @Test
    @@ -463,34 +392,43 @@ public void can_build_order_by_null_option() {
             project(
                 sort(
                     relation("test"),
    -                field("name",
    +                field(
    +                    "name",
                         argument("asc", booleanLiteral(true)),
                         argument("nullFirst", booleanLiteral(false)))),
    -        alias("name", qualifiedName("name"))),
    +            alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test ORDER BY name NULLS LAST"));
       }
     
    +  /**
     +   * Ensure the Nested function falls back to the legacy engine when used in a HAVING clause.
     +   * TODO: Remove this test when support is added.
    +   */
    +  @Test
    +  public void nested_in_having_clause_throws_exception() {
    +    SyntaxCheckException exception =
    +        assertThrows(
    +            SyntaxCheckException.class,
    +            () -> buildAST("SELECT count(*) FROM test HAVING nested(message.info)"));
    +
    +    assertEquals(
    +        "Falling back to legacy engine. Nested function is not supported in the HAVING clause.",
    +        exception.getMessage());
    +  }
    +
       @Test
       public void can_build_order_by_sort_order_keyword_insensitive() {
         assertEquals(
             project(
    -            sort(
    -                relation("test"),
    -                field("age",
    -                    argument("asc", booleanLiteral(true)))),
    +            sort(relation("test"), field("age", argument("asc", booleanLiteral(true)))),
                 alias("age", qualifiedName("age"))),
    -        buildAST("SELECT age FROM test ORDER BY age ASC")
    -    );
    +        buildAST("SELECT age FROM test ORDER BY age ASC"));
     
         assertEquals(
             project(
    -            sort(
    -                relation("test"),
    -                field("age",
    -                    argument("asc", booleanLiteral(true)))),
    +            sort(relation("test"), field("age", argument("asc", booleanLiteral(true)))),
                 alias("age", qualifiedName("age"))),
    -        buildAST("SELECT age FROM test ORDER BY age asc")
    -    );
    +        buildAST("SELECT age FROM test ORDER BY age asc"));
       }
     
       @Test
    @@ -502,20 +440,32 @@ public void can_build_from_subquery() {
                         project(
                             relation("test"),
                             alias("firstname", qualifiedName("firstname"), "firstName"),
    -                        alias("lastname", qualifiedName("lastname"), "lastName")
    -                    ),
    -                    "a"
    -                ),
    -                function(">", qualifiedName("age"), intLiteral(20))
    -            ),
    +                        alias("lastname", qualifiedName("lastname"), "lastName")),
    +                    "a"),
    +                function(">", qualifiedName("age"), intLiteral(20))),
                 alias("a.firstName", qualifiedName("a", "firstName")),
                 alias("lastName", qualifiedName("lastName"))),
             buildAST(
                 "SELECT a.firstName, lastName FROM ("
                     + "SELECT firstname AS firstName, lastname AS lastName FROM test"
    -                + ") AS a where age > 20"
    -        )
    -    );
    +                + ") AS a where age > 20"));
    +  }
    +
    +  @Test
    +  public void can_build_from_subquery_with_backquoted_alias() {
    +    assertEquals(
    +        project(
    +            relationSubquery(
    +                project(
    +                    relation("test"), alias("firstname", qualifiedName("firstname"), "firstName")),
    +                "a"),
    +            alias("a.firstName", qualifiedName("a", "firstName"))),
    +        buildAST(
    +            "SELECT a.firstName "
    +                + "FROM ( "
    +                + " SELECT `firstname` AS `firstName` "
    +                + " FROM `test` "
    +                + ") AS `a`"));
       }
     
       @Test
    @@ -524,12 +474,9 @@ public void can_build_show_all_tables() {
             project(
                 filter(
                     relation(TABLE_INFO),
    -                function("like", qualifiedName("TABLE_NAME"), stringLiteral("%"))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("SHOW TABLES LIKE '%'")
    -    );
    +                function("like", qualifiedName("TABLE_NAME"), stringLiteral("%"))),
    +            AllFields.of()),
    +        buildAST("SHOW TABLES LIKE '%'"));
       }
     
       @Test
    @@ -538,17 +485,14 @@ public void can_build_show_selected_tables() {
             project(
                 filter(
                     relation(TABLE_INFO),
    -                function("like", qualifiedName("TABLE_NAME"), stringLiteral("a_c%"))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("SHOW TABLES LIKE 'a_c%'")
    -    );
    +                function("like", qualifiedName("TABLE_NAME"), stringLiteral("a_c%"))),
    +            AllFields.of()),
    +        buildAST("SHOW TABLES LIKE 'a_c%'"));
       }
     
       /**
    -   * Todo, ideally the identifier (%) couldn't be used in LIKE operator, only the string literal
    -   * is allowed.
     +   * TODO: ideally the identifier (%) shouldn't be usable with the LIKE operator; only a string
     +   * literal should be allowed.
        */
       @Test
       public void show_compatible_with_old_engine_syntax() {
    @@ -556,34 +500,23 @@ public void show_compatible_with_old_engine_syntax() {
             project(
                 filter(
                     relation(TABLE_INFO),
    -                function("like", qualifiedName("TABLE_NAME"), stringLiteral("%"))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("SHOW TABLES LIKE %")
    -    );
    +                function("like", qualifiedName("TABLE_NAME"), stringLiteral("%"))),
    +            AllFields.of()),
    +        buildAST("SHOW TABLES LIKE %"));
       }
     
       @Test
       public void describe_compatible_with_old_engine_syntax() {
         assertEquals(
    -        project(
    -            relation(mappingTable("a_c%")),
    -            AllFields.of()
    -        ),
    -        buildAST("DESCRIBE TABLES LIKE a_c%")
    -    );
    +        project(relation(mappingTable("a_c%")), AllFields.of()),
    +        buildAST("DESCRIBE TABLES LIKE a_c%"));
       }
     
       @Test
       public void can_build_describe_selected_tables() {
         assertEquals(
    -        project(
    -            relation(mappingTable("a_c%")),
    -            AllFields.of()
    -        ),
    -        buildAST("DESCRIBE TABLES LIKE 'a_c%'")
    -    );
    +        project(relation(mappingTable("a_c%")), AllFields.of()),
    +        buildAST("DESCRIBE TABLES LIKE 'a_c%'"));
       }
     
       @Test
    @@ -592,17 +525,14 @@ public void can_build_describe_selected_tables_field_filter() {
             project(
                 filter(
                     relation(mappingTable("a_c%")),
    -                function("like", qualifiedName("COLUMN_NAME"), stringLiteral("name%"))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("DESCRIBE TABLES LIKE 'a_c%' COLUMNS LIKE 'name%'")
    -    );
    +                function("like", qualifiedName("COLUMN_NAME"), stringLiteral("name%"))),
    +            AllFields.of()),
    +        buildAST("DESCRIBE TABLES LIKE 'a_c%' COLUMNS LIKE 'name%'"));
       }
     
       /**
    -   * Todo, ideally the identifier (%) couldn't be used in LIKE operator, only the string literal
    -   * is allowed.
     +   * TODO: ideally the identifier (%) shouldn't be usable with the LIKE operator; only a string
     +   * literal should be allowed.
        */
       @Test
       public void describe_and_column_compatible_with_old_engine_syntax() {
    @@ -610,23 +540,16 @@ public void describe_and_column_compatible_with_old_engine_syntax() {
             project(
                 filter(
                     relation(mappingTable("a_c%")),
    -                function("like", qualifiedName("COLUMN_NAME"), stringLiteral("name%"))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("DESCRIBE TABLES LIKE a_c% COLUMNS LIKE name%")
    -    );
    +                function("like", qualifiedName("COLUMN_NAME"), stringLiteral("name%"))),
    +            AllFields.of()),
    +        buildAST("DESCRIBE TABLES LIKE a_c% COLUMNS LIKE name%"));
       }
     
       @Test
       public void can_build_alias_by_keywords() {
         assertEquals(
    -        project(
    -            relation("test"),
    -            alias("avg_age", qualifiedName("avg_age"), "avg")
    -        ),
    -        buildAST("SELECT avg_age AS avg FROM test")
    -    );
    +        project(relation("test"), alias("avg_age", qualifiedName("avg_age"), "avg")),
    +        buildAST("SELECT avg_age AS avg FROM test"));
       }
     
       @Test
    @@ -634,42 +557,20 @@ public void can_build_limit_clause() {
         assertEquals(
             project(
                 limit(
    -                sort(
    -                    relation("test"),
    -                    field("age", argument("asc", booleanLiteral(true)))
    -                ),
    -                10,
    -                0
    -            ),
    +                sort(relation("test"), field("age", argument("asc", booleanLiteral(true)))), 10, 0),
                 alias("name", qualifiedName("name")),
    -            alias("age", qualifiedName("age"))
    -        ),
    -        buildAST("SELECT name, age FROM test ORDER BY age LIMIT 10")
    -    );
    +            alias("age", qualifiedName("age"))),
    +        buildAST("SELECT name, age FROM test ORDER BY age LIMIT 10"));
       }
     
       @Test
       public void can_build_limit_clause_with_offset() {
         assertEquals(
    -        project(
    -            limit(
    -                relation("test"),
    -                10,
    -                5
    -            ),
    -            alias("name", qualifiedName("name"))
    -        ),
    +        project(limit(relation("test"), 10, 5), alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test LIMIT 10 OFFSET 5"));
     
         assertEquals(
    -        project(
    -            limit(
    -                relation("test"),
    -                10,
    -                5
    -            ),
    -            alias("name", qualifiedName("name"))
    -        ),
    +        project(limit(relation("test"), 10, 5), alias("name", qualifiedName("name"))),
             buildAST("SELECT name FROM test LIMIT 5, 10"));
       }
     
    @@ -677,11 +578,10 @@ public void can_build_limit_clause_with_offset() {
       public void can_build_qualified_name_highlight() {
          Map<String, Literal> args = new HashMap<>();
         assertEquals(
    -        project(relation("test"),
    -            alias("highlight(fieldA)",
    -                highlight(AstDSL.qualifiedName("fieldA"), args))),
    -        buildAST("SELECT highlight(fieldA) FROM test")
    -    );
    +        project(
    +            relation("test"),
    +            alias("highlight(fieldA)", highlight(AstDSL.qualifiedName("fieldA"), args))),
    +        buildAST("SELECT highlight(fieldA) FROM test"));
       }
     
       @Test
    @@ -690,23 +590,22 @@ public void can_build_qualified_highlight_with_arguments() {
         args.put("pre_tags", new Literal("", DataType.STRING));
         args.put("post_tags", new Literal("", DataType.STRING));
         assertEquals(
    -        project(relation("test"),
    -            alias("highlight(fieldA, pre_tags='', post_tags='')",
    +        project(
    +            relation("test"),
    +            alias(
    +                "highlight(fieldA, pre_tags='', post_tags='')",
                     highlight(AstDSL.qualifiedName("fieldA"), args))),
    -        buildAST("SELECT highlight(fieldA, pre_tags='', post_tags='') "
    -            + "FROM test")
    -    );
    +        buildAST(
    +            "SELECT highlight(fieldA, pre_tags='', post_tags='') " + "FROM test"));
       }
     
       @Test
       public void can_build_string_literal_highlight() {
          Map<String, Literal> args = new HashMap<>();
         assertEquals(
    -        project(relation("test"),
    -            alias("highlight(\"fieldA\")",
    -                highlight(AstDSL.stringLiteral("fieldA"), args))),
    -        buildAST("SELECT highlight(\"fieldA\") FROM test")
    -    );
    +        project(
    +            relation("test"),
    +            alias("highlight(\"fieldA\")", highlight(AstDSL.stringLiteral("fieldA"), args))),
    +        buildAST("SELECT highlight(\"fieldA\") FROM test"));
       }
    -
     }
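
For orientation, every test in the file above follows the same round-trip pattern: parse a SQL
string via the buildAST helper and assert equality against an AST assembled with the AstDSL static
factory methods. A minimal, self-contained sketch of that pattern, assuming only the
AstBuilderTestBase and AstDSL imports already visible in this diff (the class and method names here
are illustrative, not part of the change):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.opensearch.sql.ast.dsl.AstDSL.alias;
import static org.opensearch.sql.ast.dsl.AstDSL.project;
import static org.opensearch.sql.ast.dsl.AstDSL.qualifiedName;
import static org.opensearch.sql.ast.dsl.AstDSL.relation;

import org.junit.jupiter.api.Test;

public class AstBuilderPatternSketch extends AstBuilderTestBase {
  @Test
  public void select_single_field_builds_project_over_relation() {
    // Expected AST: Project(Relation("test"), Alias("name", QualifiedName("name")))
    assertEquals(
        project(relation("test"), alias("name", qualifiedName("name"))),
        buildAST("SELECT name FROM test"));
  }
}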
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTestBase.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTestBase.java
    index 2161eb5b1a..602f17ce85 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTestBase.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstBuilderTestBase.java
    @@ -10,9 +10,7 @@
     import org.opensearch.sql.sql.antlr.SQLSyntaxParser;
     
     public class AstBuilderTestBase {
    -  /**
    -   * SQL syntax parser that helps prepare parse tree as AstBuilder input.
    -   */
     +  /** SQL syntax parser that prepares the parse tree used as AstBuilder input. */
       private final SQLSyntaxParser parser = new SQLSyntaxParser();
     
       protected UnresolvedPlan buildAST(String query) {
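
The buildAST helper above glues the two parsing stages together: SQLSyntaxParser turns the query
text into an ANTLR parse tree, and an AstBuilder visitor converts that tree into an UnresolvedPlan.
The method body lies outside this hunk, so the following is a hedged sketch of the wiring rather
than the verbatim implementation:

protected UnresolvedPlan buildAST(String query) {
  // Stage 1: lex and parse the SQL text into an ANTLR parse tree.
  ParseTree parseTree = parser.parse(query);
  // Stage 2 (assumed): visit the parse tree to build the unresolved AST,
  // passing the original query string through to the AstBuilder.
  return parseTree.accept(new AstBuilder(query));
}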
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstExpressionBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstExpressionBuilderTest.java
    index 9af4119fdf..e89f2af9b0 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstExpressionBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstExpressionBuilderTest.java
    @@ -3,12 +3,12 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
     import static org.opensearch.sql.ast.dsl.AstDSL.aggregate;
     import static org.opensearch.sql.ast.dsl.AstDSL.and;
    +import static org.opensearch.sql.ast.dsl.AstDSL.between;
     import static org.opensearch.sql.ast.dsl.AstDSL.booleanLiteral;
     import static org.opensearch.sql.ast.dsl.AstDSL.caseWhen;
     import static org.opensearch.sql.ast.dsl.AstDSL.dateLiteral;
    @@ -35,6 +35,7 @@
     import com.google.common.collect.ImmutableList;
     import com.google.common.collect.ImmutableMap;
     import java.util.HashMap;
    +import java.util.stream.Stream;
     import org.antlr.v4.runtime.CommonTokenStream;
     import org.apache.commons.lang3.tuple.ImmutablePair;
     import org.junit.jupiter.api.Test;
    @@ -55,228 +56,195 @@ class AstExpressionBuilderTest {
     
       @Test
       public void canBuildStringLiteral() {
    -    assertEquals(
    -        stringLiteral("hello"),
    -        buildExprAst("'hello'")
    -    );
    -    assertEquals(
    -        stringLiteral("hello"),
    -        buildExprAst("\"hello\"")
    -    );
    +    assertEquals(stringLiteral("hello"), buildExprAst("'hello'"));
    +    assertEquals(stringLiteral("hello"), buildExprAst("\"hello\""));
       }
     
       @Test
       public void canBuildIntegerLiteral() {
    -    assertEquals(
    -        intLiteral(123),
    -        buildExprAst("123")
    -    );
    -    assertEquals(
    -        intLiteral(Integer.MAX_VALUE),
    -        buildExprAst(String.valueOf(Integer.MAX_VALUE))
    -    );
    -    assertEquals(
    -        intLiteral(Integer.MIN_VALUE),
    -        buildExprAst(String.valueOf(Integer.MIN_VALUE))
    -    );
    +    assertEquals(intLiteral(123), buildExprAst("123"));
    +    assertEquals(intLiteral(Integer.MAX_VALUE), buildExprAst(String.valueOf(Integer.MAX_VALUE)));
    +    assertEquals(intLiteral(Integer.MIN_VALUE), buildExprAst(String.valueOf(Integer.MIN_VALUE)));
       }
     
       @Test
       public void canBuildLongLiteral() {
    +    assertEquals(longLiteral(1234567890123L), buildExprAst("1234567890123"));
         assertEquals(
    -        longLiteral(1234567890123L),
    -        buildExprAst("1234567890123")
    -    );
    -    assertEquals(
    -        longLiteral(Integer.MAX_VALUE + 1L),
    -        buildExprAst(String.valueOf(Integer.MAX_VALUE + 1L))
    -    );
    +        longLiteral(Integer.MAX_VALUE + 1L), buildExprAst(String.valueOf(Integer.MAX_VALUE + 1L)));
         assertEquals(
    -        longLiteral(Integer.MIN_VALUE - 1L),
    -        buildExprAst(String.valueOf(Integer.MIN_VALUE - 1L))
    -    );
    +        longLiteral(Integer.MIN_VALUE - 1L), buildExprAst(String.valueOf(Integer.MIN_VALUE - 1L)));
       }
     
       @Test
       public void canBuildNegativeRealLiteral() {
    -    assertEquals(
    -        doubleLiteral(-4.567),
    -        buildExprAst("-4.567")
    -    );
    +    assertEquals(doubleLiteral(-4.567), buildExprAst("-4.567"));
       }
     
       @Test
       public void canBuildBooleanLiteral() {
    -    assertEquals(
    -        booleanLiteral(true),
    -        buildExprAst("true")
    -    );
    +    assertEquals(booleanLiteral(true), buildExprAst("true"));
       }
     
       @Test
       public void canBuildDateLiteral() {
    -    assertEquals(
    -        dateLiteral("2020-07-07"),
    -        buildExprAst("DATE '2020-07-07'")
    -    );
    +    assertEquals(dateLiteral("2020-07-07"), buildExprAst("DATE '2020-07-07'"));
       }
     
       @Test
       public void canBuildTimeLiteral() {
    -    assertEquals(
    -        timeLiteral("11:30:45"),
    -        buildExprAst("TIME '11:30:45'")
    -    );
    +    assertEquals(timeLiteral("11:30:45"), buildExprAst("TIME '11:30:45'"));
       }
     
       @Test
       public void canBuildTimestampLiteral() {
         assertEquals(
    -        timestampLiteral("2020-07-07 11:30:45"),
    -        buildExprAst("TIMESTAMP '2020-07-07 11:30:45'")
    -    );
    +        timestampLiteral("2020-07-07 11:30:45"), buildExprAst("TIMESTAMP '2020-07-07 11:30:45'"));
       }
     
       @Test
       public void canBuildIntervalLiteral() {
    -    assertEquals(
    -        intervalLiteral(1, DataType.INTEGER, "day"),
    -        buildExprAst("interval 1 day")
    -    );
    +    assertEquals(intervalLiteral(1, DataType.INTEGER, "day"), buildExprAst("interval 1 day"));
       }
     
       @Test
       public void canBuildArithmeticExpression() {
    +    assertEquals(function("+", intLiteral(1), intLiteral(2)), buildExprAst("1 + 2"));
    +  }
    +
    +  @Test
    +  public void canBuildArithmeticExpressionPrecedence() {
         assertEquals(
    -        function("+", intLiteral(1), intLiteral(2)),
    -        buildExprAst("1 + 2")
    -    );
    +        function("+", intLiteral(1), function("*", intLiteral(2), intLiteral(3))),
    +        buildExprAst("1 + 2 * 3"));
       }
     
       @Test
       public void canBuildFunctionWithoutArguments() {
    -    assertEquals(
    -        function("PI"),
    -        buildExprAst("PI()")
    -    );
    +    assertEquals(function("PI"), buildExprAst("PI()"));
       }
     
       @Test
       public void canBuildExpressionWithParentheses() {
         assertEquals(
    -        function("*",
    +        function(
    +            "*",
                 function("+", doubleLiteral(-1.0), doubleLiteral(2.3)),
    -            function("-", intLiteral(3), intLiteral(1))
    -        ),
    -        buildExprAst("(-1.0 + 2.3) * (3 - 1)")
    -    );
    +            function("-", intLiteral(3), intLiteral(1))),
    +        buildExprAst("(-1.0 + 2.3) * (3 - 1)"));
       }
     
       @Test
       public void canBuildFunctionCall() {
    +    assertEquals(function("abs", intLiteral(-1)), buildExprAst("abs(-1)"));
    +  }
    +
    +  @Test
    +  public void canBuildExtractFunctionCall() {
    +    assertEquals(
    +        function("extract", stringLiteral("DAY"), dateLiteral("2023-02-09")).toString(),
    +        buildExprAst("extract(DAY FROM \"2023-02-09\")").toString());
    +  }
    +
    +  @Test
    +  public void canBuildGetFormatFunctionCall() {
         assertEquals(
    -        function("abs", intLiteral(-1)),
    -        buildExprAst("abs(-1)")
    -    );
    +        function("get_format", stringLiteral("DATE"), stringLiteral("USA")),
    +        buildExprAst("get_format(DATE,\"USA\")"));
       }
     
       @Test
       public void canBuildNestedFunctionCall() {
         assertEquals(
    -        function("abs",
    -            function("*",
    -              function("abs", intLiteral(-5)),
    -              intLiteral(-1)
    -            )
    -        ),
    -        buildExprAst("abs(abs(-5) * -1)")
    -    );
    +        function("abs", function("*", function("abs", intLiteral(-5)), intLiteral(-1))),
    +        buildExprAst("abs(abs(-5) * -1)"));
       }
     
       @Test
       public void canBuildDateAndTimeFunctionCall() {
         assertEquals(
             function("dayofmonth", dateLiteral("2020-07-07")),
    -        buildExprAst("dayofmonth(DATE '2020-07-07')")
    -    );
    +        buildExprAst("dayofmonth(DATE '2020-07-07')"));
       }
     
       @Test
    -  public void canBuildComparisonExpression() {
    +  public void canBuildTimestampAddFunctionCall() {
         assertEquals(
    -        function("!=", intLiteral(1), intLiteral(2)),
    -        buildExprAst("1 != 2")
    -    );
    +        function("timestampadd", stringLiteral("WEEK"), intLiteral(1), dateLiteral("2023-03-14")),
    +        buildExprAst("timestampadd(WEEK, 1, DATE '2023-03-14')"));
    +  }
     
    +  @Test
     +  public void canBuildTimestampDiffFunctionCall() {
         assertEquals(
    -        function("!=", intLiteral(1), intLiteral(2)),
    -        buildExprAst("1 <> 2")
    -    );
    +        function(
    +            "timestampdiff",
    +            stringLiteral("WEEK"),
    +            timestampLiteral("2023-03-15 00:00:01"),
    +            dateLiteral("2023-03-14")),
    +        buildExprAst("timestampdiff(WEEK, TIMESTAMP '2023-03-15 00:00:01', DATE '2023-03-14')"));
    +  }
    +
    +  @Test
    +  public void canBuildComparisonExpression() {
    +    assertEquals(function("!=", intLiteral(1), intLiteral(2)), buildExprAst("1 != 2"));
    +
    +    assertEquals(function("!=", intLiteral(1), intLiteral(2)), buildExprAst("1 <> 2"));
       }
     
       @Test
       public void canBuildNullTestExpression() {
    -    assertEquals(
    -        function("is null", intLiteral(1)),
    -        buildExprAst("1 is NULL")
    -    );
    +    assertEquals(function("is null", intLiteral(1)), buildExprAst("1 is NULL"));
     
    -    assertEquals(
    -        function("is not null", intLiteral(1)),
    -        buildExprAst("1 IS NOT null")
    -    );
    +    assertEquals(function("is not null", intLiteral(1)), buildExprAst("1 IS NOT null"));
       }
     
       @Test
       public void canBuildNullTestExpressionWithNULLLiteral() {
    -    assertEquals(
    -        function("is null", nullLiteral()),
    -        buildExprAst("NULL is NULL")
    -    );
    +    assertEquals(function("is null", nullLiteral()), buildExprAst("NULL is NULL"));
     
    -    assertEquals(
    -        function("is not null", nullLiteral()),
    -        buildExprAst("NULL IS NOT null")
    -    );
    +    assertEquals(function("is not null", nullLiteral()), buildExprAst("NULL IS NOT null"));
       }
     
       @Test
       public void canBuildLikeExpression() {
         assertEquals(
             function("like", stringLiteral("str"), stringLiteral("st%")),
    -        buildExprAst("'str' like 'st%'")
    -    );
    +        buildExprAst("'str' like 'st%'"));
     
         assertEquals(
             function("not like", stringLiteral("str"), stringLiteral("st%")),
    -        buildExprAst("'str' not like 'st%'")
    -    );
    +        buildExprAst("'str' not like 'st%'"));
       }
     
       @Test
       public void canBuildRegexpExpression() {
         assertEquals(
             function("regexp", stringLiteral("str"), stringLiteral(".*")),
    -        buildExprAst("'str' regexp '.*'")
    -    );
    +        buildExprAst("'str' regexp '.*'"));
       }
     
       @Test
    -  public void canBuildLogicalExpression() {
    +  public void canBuildBetweenExpression() {
         assertEquals(
    -        and(booleanLiteral(true), booleanLiteral(false)),
    -        buildExprAst("true AND false")
    -    );
    +        between(qualifiedName("age"), intLiteral(10), intLiteral(30)),
    +        buildExprAst("age BETWEEN 10 AND 30"));
    +  }
     
    +  @Test
    +  public void canBuildNotBetweenExpression() {
         assertEquals(
    -        or(booleanLiteral(true), booleanLiteral(false)),
    -        buildExprAst("true OR false")
    -    );
    +        not(between(qualifiedName("age"), intLiteral(10), intLiteral(30))),
    +        buildExprAst("age NOT BETWEEN 10 AND 30"));
    +  }
     
    -    assertEquals(
    -        not(booleanLiteral(false)),
    -        buildExprAst("NOT false")
    -    );
    +  @Test
    +  public void canBuildLogicalExpression() {
    +    assertEquals(and(booleanLiteral(true), booleanLiteral(false)), buildExprAst("true AND false"));
    +
    +    assertEquals(or(booleanLiteral(true), booleanLiteral(false)), buildExprAst("true OR false"));
    +
    +    assertEquals(not(booleanLiteral(false)), buildExprAst("NOT false"));
       }
     
       @Test
    @@ -305,8 +273,8 @@ public void canBuildWindowFunctionWithNullOrderSpecified() {
             window(
                 function("DENSE_RANK"),
                 ImmutableList.of(),
    -            ImmutableList.of(ImmutablePair.of(
    -                new SortOption(ASC, NULL_LAST), qualifiedName("age")))),
    +            ImmutableList.of(
    +                ImmutablePair.of(new SortOption(ASC, NULL_LAST), qualifiedName("age")))),
             buildExprAst("DENSE_RANK() OVER (ORDER BY age ASC NULLS LAST)"));
       }
     
    @@ -314,35 +282,27 @@ public void canBuildWindowFunctionWithNullOrderSpecified() {
       public void canBuildStringLiteralHighlightFunction() {
          HashMap<String, Literal> args = new HashMap<>();
         assertEquals(
    -        highlight(AstDSL.stringLiteral("fieldA"), args),
    -        buildExprAst("highlight(\"fieldA\")")
    -    );
    +        highlight(AstDSL.stringLiteral("fieldA"), args), buildExprAst("highlight(\"fieldA\")"));
       }
     
       @Test
       public void canBuildQualifiedNameHighlightFunction() {
          HashMap<String, Literal> args = new HashMap<>();
         assertEquals(
    -        highlight(AstDSL.qualifiedName("fieldA"), args),
    -        buildExprAst("highlight(fieldA)")
    -    );
    +        highlight(AstDSL.qualifiedName("fieldA"), args), buildExprAst("highlight(fieldA)"));
       }
     
       @Test
       public void canBuildStringLiteralPositionFunction() {
         assertEquals(
    -            function("position", stringLiteral("substr"), stringLiteral("str")),
    -            buildExprAst("position(\"substr\" IN \"str\")")
    -    );
    +        function("position", stringLiteral("substr"), stringLiteral("str")),
    +        buildExprAst("position(\"substr\" IN \"str\")"));
       }
     
       @Test
       public void canBuildWindowFunctionWithoutOrderBy() {
         assertEquals(
    -        window(
    -            function("RANK"),
    -            ImmutableList.of(qualifiedName("state")),
    -            ImmutableList.of()),
    +        window(function("RANK"), ImmutableList.of(qualifiedName("state")), ImmutableList.of()),
             buildExprAst("RANK() OVER (PARTITION BY state)"));
       }
     
    @@ -352,8 +312,7 @@ public void canBuildAggregateWindowFunction() {
             window(
                 aggregate("AVG", qualifiedName("age")),
                 ImmutableList.of(qualifiedName("state")),
    -            ImmutableList.of(ImmutablePair.of(
    -                new SortOption(null, null), qualifiedName("age")))),
    +            ImmutableList.of(ImmutablePair.of(new SortOption(null, null), qualifiedName("age")))),
             buildExprAst("AVG(age) OVER (PARTITION BY state ORDER BY age)"));
       }
     
    @@ -362,11 +321,8 @@ public void canBuildCaseConditionStatement() {
         assertEquals(
             caseWhen(
                 null, // no else statement
    -            when(
    -                function(">", qualifiedName("age"), intLiteral(30)),
    -                stringLiteral("age1"))),
    -        buildExprAst("CASE WHEN age > 30 THEN 'age1' END")
    -    );
    +            when(function(">", qualifiedName("age"), intLiteral(30)), stringLiteral("age1"))),
    +        buildExprAst("CASE WHEN age > 30 THEN 'age1' END"));
       }
     
       @Test
    @@ -376,149 +332,168 @@ public void canBuildCaseValueStatement() {
                 qualifiedName("age"),
                 stringLiteral("age2"),
                 when(intLiteral(30), stringLiteral("age1"))),
    -        buildExprAst("CASE age WHEN 30 THEN 'age1' ELSE 'age2' END")
    -    );
    +        buildExprAst("CASE age WHEN 30 THEN 'age1' ELSE 'age2' END"));
       }
     
       @Test
       public void canBuildKeywordsAsIdentifiers() {
    -    assertEquals(
    -        qualifiedName("timestamp"),
    -        buildExprAst("timestamp")
    -    );
    +    assertEquals(qualifiedName("timestamp"), buildExprAst("timestamp"));
       }
     
       @Test
       public void canBuildKeywordsAsIdentInQualifiedName() {
    -    assertEquals(
    -        qualifiedName("test", "timestamp"),
    -        buildExprAst("test.timestamp")
    -    );
    +    assertEquals(qualifiedName("test", "timestamp"), buildExprAst("test.timestamp"));
    +  }
    +
    +  @Test
    +  public void canBuildMetaDataFieldAsQualifiedName() {
    +    Stream.of("_id", "_index", "_sort", "_score", "_maxscore")
    +        .forEach(field -> assertEquals(qualifiedName(field), buildExprAst(field)));
    +  }
    +
    +  @Test
    +  public void canBuildNonMetaDataFieldAsQualifiedName() {
    +    Stream.of("id", "__id", "_routing", "___field")
    +        .forEach(field -> assertEquals(qualifiedName(field), buildExprAst(field)));
       }
     
       @Test
       public void canCastFieldAsString() {
         assertEquals(
             AstDSL.cast(qualifiedName("state"), stringLiteral("string")),
    -        buildExprAst("cast(state as string)")
    -    );
    +        buildExprAst("cast(state as string)"));
       }
     
       @Test
       public void canCastValueAsString() {
         assertEquals(
    -        AstDSL.cast(intLiteral(1), stringLiteral("string")),
    -        buildExprAst("cast(1 as string)")
    -    );
    +        AstDSL.cast(intLiteral(1), stringLiteral("string")), buildExprAst("cast(1 as string)"));
       }
     
       @Test
       public void filteredAggregation() {
         assertEquals(
    -        AstDSL.filteredAggregate("avg", qualifiedName("age"),
    -            function(">", qualifiedName("age"), intLiteral(20))),
    -        buildExprAst("avg(age) filter(where age > 20)")
    -    );
    +        AstDSL.filteredAggregate(
    +            "avg", qualifiedName("age"), function(">", qualifiedName("age"), intLiteral(20))),
    +        buildExprAst("avg(age) filter(where age > 20)"));
       }
     
       @Test
       public void canBuildVarSamp() {
    -    assertEquals(
    -        aggregate("var_samp", qualifiedName("age")),
    -        buildExprAst("var_samp(age)"));
    +    assertEquals(aggregate("var_samp", qualifiedName("age")), buildExprAst("var_samp(age)"));
       }
     
       @Test
       public void canBuildVarPop() {
    -    assertEquals(
    -        aggregate("var_pop", qualifiedName("age")),
    -        buildExprAst("var_pop(age)"));
    +    assertEquals(aggregate("var_pop", qualifiedName("age")), buildExprAst("var_pop(age)"));
       }
     
       @Test
       public void canBuildVariance() {
    -    assertEquals(
    -        aggregate("variance", qualifiedName("age")),
    -        buildExprAst("variance(age)"));
    +    assertEquals(aggregate("variance", qualifiedName("age")), buildExprAst("variance(age)"));
       }
     
       @Test
       public void distinctCount() {
         assertEquals(
             AstDSL.distinctAggregate("count", qualifiedName("name")),
    -        buildExprAst("count(distinct name)")
    -    );
    +        buildExprAst("count(distinct name)"));
       }
     
       @Test
       public void filteredDistinctCount() {
         assertEquals(
    -        AstDSL.filteredDistinctCount("count", qualifiedName("name"), function(
    -            ">", qualifiedName("age"), intLiteral(30))),
    -        buildExprAst("count(distinct name) filter(where age > 30)")
    -    );
    +        AstDSL.filteredDistinctCount(
    +            "count", qualifiedName("name"), function(">", qualifiedName("age"), intLiteral(30))),
    +        buildExprAst("count(distinct name) filter(where age > 30)"));
    +  }
    +
    +  @Test
    +  public void canBuildPercentile() {
    +    Object expected =
    +        aggregate("percentile", qualifiedName("age"), unresolvedArg("percent", doubleLiteral(50D)));
    +    assertEquals(expected, buildExprAst("percentile(age, 50)"));
    +    assertEquals(expected, buildExprAst("percentile(age, 50.0)"));
    +  }
    +
    +  @Test
    +  public void canBuildPercentileWithCompression() {
    +    Object expected =
    +        aggregate(
    +            "percentile",
    +            qualifiedName("age"),
    +            unresolvedArg("percent", doubleLiteral(50D)),
    +            unresolvedArg("compression", doubleLiteral(100D)));
    +    assertEquals(expected, buildExprAst("percentile(age, 50, 100)"));
    +    assertEquals(expected, buildExprAst("percentile(age, 50.0, 100.0)"));
       }
     
       @Test
       public void matchPhraseQueryAllParameters() {
         assertEquals(
    -        AstDSL.function("matchphrasequery",
    -            unresolvedArg("field", stringLiteral("test")),
    +        AstDSL.function(
    +            "matchphrasequery",
    +            unresolvedArg("field", qualifiedName("test")),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("slop", stringLiteral("3")),
                 unresolvedArg("analyzer", stringLiteral("standard")),
    -            unresolvedArg("zero_terms_query", stringLiteral("NONE"))
    -        ),
    -        buildExprAst("matchphrasequery(test, 'search query', slop = 3"
    -            + ", analyzer = 'standard', zero_terms_query='NONE'"
    -            + ")")
    -    );
    +            unresolvedArg("zero_terms_query", stringLiteral("NONE"))),
    +        buildExprAst(
    +            "matchphrasequery(test, 'search query', slop = 3"
    +                + ", analyzer = 'standard', zero_terms_query='NONE'"
    +                + ")"));
       }
     
       @Test
       public void matchPhrasePrefixAllParameters() {
         assertEquals(
    -        AstDSL.function("match_phrase_prefix",
    -          unresolvedArg("field", stringLiteral("test")),
    -          unresolvedArg("query", stringLiteral("search query")),
    -          unresolvedArg("slop", stringLiteral("3")),
    -          unresolvedArg("boost", stringLiteral("1.5")),
    -          unresolvedArg("analyzer", stringLiteral("standard")),
    -          unresolvedArg("max_expansions", stringLiteral("4")),
    -          unresolvedArg("zero_terms_query", stringLiteral("NONE"))
    -          ),
    -        buildExprAst("match_phrase_prefix(test, 'search query', slop = 3, boost = 1.5"
    -            + ", analyzer = 'standard', max_expansions = 4, zero_terms_query='NONE'"
    -            + ")")
    -    );
    +        AstDSL.function(
    +            "match_phrase_prefix",
    +            unresolvedArg("field", qualifiedName("test")),
    +            unresolvedArg("query", stringLiteral("search query")),
    +            unresolvedArg("slop", stringLiteral("3")),
    +            unresolvedArg("boost", stringLiteral("1.5")),
    +            unresolvedArg("analyzer", stringLiteral("standard")),
    +            unresolvedArg("max_expansions", stringLiteral("4")),
    +            unresolvedArg("zero_terms_query", stringLiteral("NONE"))),
    +        buildExprAst(
    +            "match_phrase_prefix(test, 'search query', slop = 3, boost = 1.5"
    +                + ", analyzer = 'standard', max_expansions = 4, zero_terms_query='NONE'"
    +                + ")"));
       }
     
       @Test
       public void relevanceMatch() {
    -    assertEquals(AstDSL.function("match",
    -        unresolvedArg("field", stringLiteral("message")),
    -        unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("match('message', 'search query')")
    -    );
    -
    -    assertEquals(AstDSL.function("match",
    -        unresolvedArg("field", stringLiteral("message")),
    -        unresolvedArg("query", stringLiteral("search query")),
    -        unresolvedArg("analyzer", stringLiteral("keyword")),
    -        unresolvedArg("operator", stringLiteral("AND"))),
    +    assertEquals(
    +        AstDSL.function(
    +            "match",
    +            unresolvedArg("field", qualifiedName("message")),
    +            unresolvedArg("query", stringLiteral("search query"))),
    +        buildExprAst("match('message', 'search query')"));
    +
    +    assertEquals(
    +        AstDSL.function(
    +            "match",
    +            unresolvedArg("field", qualifiedName("message")),
    +            unresolvedArg("query", stringLiteral("search query")),
    +            unresolvedArg("analyzer", stringLiteral("keyword")),
    +            unresolvedArg("operator", stringLiteral("AND"))),
             buildExprAst("match('message', 'search query', analyzer='keyword', operator='AND')"));
       }
     
       @Test
       public void relevanceMatchQuery() {
    -    assertEquals(AstDSL.function("matchquery",
    -            unresolvedArg("field", stringLiteral("message")),
    +    assertEquals(
    +        AstDSL.function(
    +            "matchquery",
    +            unresolvedArg("field", qualifiedName("message")),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("matchquery('message', 'search query')")
    -    );
    +        buildExprAst("matchquery('message', 'search query')"));
     
    -    assertEquals(AstDSL.function("matchquery",
    -            unresolvedArg("field", stringLiteral("message")),
    +    assertEquals(
    +        AstDSL.function(
    +            "matchquery",
    +            unresolvedArg("field", qualifiedName("message")),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
    @@ -527,143 +502,298 @@ public void relevanceMatchQuery() {
     
       @Test
       public void relevanceMatch_Query() {
    -    assertEquals(AstDSL.function("match_query",
    -            unresolvedArg("field", stringLiteral("message")),
    +    assertEquals(
    +        AstDSL.function(
    +            "match_query",
    +            unresolvedArg("field", qualifiedName("message")),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("match_query('message', 'search query')")
    -    );
    +        buildExprAst("match_query('message', 'search query')"));
     
    -    assertEquals(AstDSL.function("match_query",
    -            unresolvedArg("field", stringLiteral("message")),
    +    assertEquals(
    +        AstDSL.function(
    +            "match_query",
    +            unresolvedArg("field", qualifiedName("message")),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
             buildExprAst("match_query('message', 'search query', analyzer='keyword', operator='AND')"));
       }
     
    +  @Test
    +  public void relevanceMatchQueryAltSyntax() {
    +    assertEquals(
    +        AstDSL.function(
    +                "match_query",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = match_query('search query')").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "match_query",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = match_query(\"search query\")").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "matchquery",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = matchquery('search query')").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "matchquery",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = matchquery(\"search query\")").toString());
    +  }
    +
    +  @Test
    +  public void relevanceMatchPhraseAltSyntax() {
    +    assertEquals(
    +        AstDSL.function(
    +                "match_phrase",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = match_phrase('search query')").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "match_phrase",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = match_phrase(\"search query\")").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "matchphrase",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = matchphrase('search query')").toString());
    +
    +    assertEquals(
    +        AstDSL.function(
    +                "matchphrase",
    +                unresolvedArg("field", stringLiteral("message")),
    +                unresolvedArg("query", stringLiteral("search query")))
    +            .toString(),
    +        buildExprAst("message = matchphrase(\"search query\")").toString());
    +  }
    +
    +  @Test
    +  public void relevanceMultiMatchAltSyntax() {
    +    assertEquals(
    +        AstDSL.function(
    +            "multi_match",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field1", 1.F))),
    +            unresolvedArg("query", stringLiteral("search query"))),
    +        buildExprAst("field1 = multi_match('search query')"));
    +
    +    assertEquals(
    +        AstDSL.function(
    +            "multi_match",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field1", 1.F))),
    +            unresolvedArg("query", stringLiteral("search query"))),
    +        buildExprAst("field1 = multi_match(\"search query\")"));
    +
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatch",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field1", 1.F))),
    +            unresolvedArg("query", stringLiteral("search query"))),
    +        buildExprAst("field1 = multimatch('search query')"));
    +
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatch",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field1", 1.F))),
    +            unresolvedArg("query", stringLiteral("search query"))),
    +        buildExprAst("field1 = multimatch(\"search query\")"));
    +  }
    +
       @Test
       public void relevanceMulti_match() {
    -    assertEquals(AstDSL.function("multi_match",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multi_match",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("multi_match(['field1', 'field2' ^ 3.2], 'search query')")
    -    );
    +        buildExprAst("multi_match(['field1', 'field2' ^ 3.2], 'search query')"));
     
    -    assertEquals(AstDSL.function("multi_match",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multi_match",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
    -        buildExprAst("multi_match(['field1', 'field2' ^ 3.2], 'search query',"
    -            + "analyzer='keyword', 'operator'='AND')"));
    +        buildExprAst(
    +            "multi_match(['field1', 'field2' ^ 3.2], 'search query',"
    +                + "analyzer='keyword', 'operator'='AND')"));
       }
     
       @Test
       public void relevanceMultimatch_alternate_parameter_syntax() {
    -    assertEquals(AstDSL.function("multimatch",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field1", 1F, "field2", 2F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatch",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field1", 1F, "field2", 2F))),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("multimatch(query='search query', fields=['field1^1.0,field2^2.0'])")
    -    );
    +        buildExprAst("multimatch(query='search query', fields=['field1^1.0,field2^2.0'])"));
     
    -    assertEquals(AstDSL.function("multimatch",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field1", 1F, "field2", 2F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatch",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field1", 1F, "field2", 2F))),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
    -        buildExprAst("multimatch(query='search query', fields=['field1^1.0,field2^2.0'],"
    -            + "analyzer='keyword', operator='AND')"));
    +        buildExprAst(
    +            "multimatch(query='search query', fields=['field1^1.0,field2^2.0'],"
    +                + "analyzer='keyword', operator='AND')"));
       }
     
       @Test
       public void relevanceMultimatchquery_alternate_parameter_syntax() {
    -    assertEquals(AstDSL.function("multimatchquery",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field", 1F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatchquery",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field", 1F))),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("multimatchquery(query='search query', fields='field')")
    -    );
    +        buildExprAst("multimatchquery(query='search query', fields='field')"));
     
    -    assertEquals(AstDSL.function("multimatchquery",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field", 1F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "multimatchquery",
    +            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of("field", 1F))),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
    -        buildExprAst("multimatchquery(query='search query', fields='field',"
    -            + "analyzer='keyword', 'operator'='AND')"));
    +        buildExprAst(
    +            "multimatchquery(query='search query', fields='field',"
    +                + "analyzer='keyword', 'operator'='AND')"));
       }
     
       @Test
       public void relevanceSimple_query_string() {
    -    assertEquals(AstDSL.function("simple_query_string",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "simple_query_string",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("simple_query_string(['field1', 'field2' ^ 3.2], 'search query')")
    -    );
    +        buildExprAst("simple_query_string(['field1', 'field2' ^ 3.2], 'search query')"));
     
    -    assertEquals(AstDSL.function("simple_query_string",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "simple_query_string",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("operator", stringLiteral("AND"))),
    -        buildExprAst("simple_query_string(['field1', 'field2' ^ 3.2], 'search query',"
    -            + "analyzer='keyword', operator='AND')"));
    +        buildExprAst(
    +            "simple_query_string(['field1', 'field2' ^ 3.2], 'search query',"
    +                + "analyzer='keyword', operator='AND')"));
       }
     
       @Test
       public void relevanceQuery_string() {
    -    assertEquals(AstDSL.function("query_string",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "query_string",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query"))),
    -        buildExprAst("query_string(['field1', 'field2' ^ 3.2], 'search query')")
    -    );
    +        buildExprAst("query_string(['field1', 'field2' ^ 3.2], 'search query')"));
     
    -    assertEquals(AstDSL.function("query_string",
    -            unresolvedArg("fields", new RelevanceFieldList(ImmutableMap.of(
    -                "field2", 3.2F, "field1", 1.F))),
    +    assertEquals(
    +        AstDSL.function(
    +            "query_string",
    +            unresolvedArg(
    +                "fields", new RelevanceFieldList(ImmutableMap.of("field2", 3.2F, "field1", 1.F))),
                 unresolvedArg("query", stringLiteral("search query")),
                 unresolvedArg("analyzer", stringLiteral("keyword")),
                 unresolvedArg("time_zone", stringLiteral("Canada/Pacific")),
                 unresolvedArg("tie_breaker", stringLiteral("1.3"))),
    -        buildExprAst("query_string(['field1', 'field2' ^ 3.2], 'search query',"
    -            + "analyzer='keyword', time_zone='Canada/Pacific', tie_breaker='1.3')"));
    +        buildExprAst(
    +            "query_string(['field1', 'field2' ^ 3.2], 'search query',"
    +                + "analyzer='keyword', time_zone='Canada/Pacific', tie_breaker='1.3')"));
       }
     
       @Test
       public void relevanceWildcard_query() {
    -    assertEquals(AstDSL.function("wildcard_query",
    -            unresolvedArg("field", stringLiteral("field")),
    +    assertEquals(
    +        AstDSL.function(
    +            "wildcard_query",
    +            unresolvedArg("field", qualifiedName("field")),
                 unresolvedArg("query", stringLiteral("search query*")),
                 unresolvedArg("boost", stringLiteral("1.5")),
                 unresolvedArg("case_insensitive", stringLiteral("true")),
                 unresolvedArg("rewrite", stringLiteral("scoring_boolean"))),
    -        buildExprAst("wildcard_query(field, 'search query*', boost=1.5,"
    -            + "case_insensitive=true, rewrite='scoring_boolean'))")
    -    );
    +        buildExprAst(
    +            "wildcard_query(field, 'search query*', boost=1.5,"
    +                + "case_insensitive=true, rewrite='scoring_boolean'))"));
    +  }
    +
    +  @Test
    +  public void relevanceScore_query() {
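     +    // score(...) without an explicit boost argument should default to a boost of 1.0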
    +    assertEquals(
    +        AstDSL.score(
    +            AstDSL.function(
    +                "query_string",
    +                unresolvedArg(
    +                    "fields",
    +                    new RelevanceFieldList(ImmutableMap.of("field1", 1.F, "field2", 3.2F))),
    +                unresolvedArg("query", stringLiteral("search query"))),
    +            AstDSL.doubleLiteral(1.0)),
    +        buildExprAst("score(query_string(['field1', 'field2' ^ 3.2], 'search query'))"));
    +  }
    +
    +  @Test
    +  public void relevanceScore_withBoost_query() {
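     +    // an explicit trailing argument (1.0 here) supplies the boost for the Score node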
    +    assertEquals(
    +        AstDSL.score(
    +            AstDSL.function(
    +                "query_string",
    +                unresolvedArg(
    +                    "fields",
    +                    new RelevanceFieldList(ImmutableMap.of("field1", 1.F, "field2", 3.2F))),
    +                unresolvedArg("query", stringLiteral("search query"))),
    +            doubleLiteral(1.0)),
    +        buildExprAst("score(query_string(['field1', 'field2' ^ 3.2], 'search query'), 1.0)"));
       }
     
       @Test
       public void relevanceQuery() {
    -    assertEquals(AstDSL.function("query",
    -                    unresolvedArg("query", stringLiteral("field1:query OR field2:query"))),
    -            buildExprAst("query('field1:query OR field2:query')")
    -    );
    +    assertEquals(
    +        AstDSL.function(
    +            "query", unresolvedArg("query", stringLiteral("field1:query OR field2:query"))),
    +        buildExprAst("query('field1:query OR field2:query')"));
     
    -    assertEquals(AstDSL.function("query",
    -                    unresolvedArg("query", stringLiteral("search query")),
    -                    unresolvedArg("analyzer", stringLiteral("keyword")),
    -                    unresolvedArg("time_zone", stringLiteral("Canada/Pacific")),
    -                    unresolvedArg("tie_breaker", stringLiteral("1.3"))),
    -            buildExprAst("query('search query',"
    -                    + "analyzer='keyword', time_zone='Canada/Pacific', tie_breaker='1.3')"));
    +    assertEquals(
    +        AstDSL.function(
    +            "query",
    +            unresolvedArg("query", stringLiteral("search query")),
    +            unresolvedArg("analyzer", stringLiteral("keyword")),
    +            unresolvedArg("time_zone", stringLiteral("Canada/Pacific")),
    +            unresolvedArg("tie_breaker", stringLiteral("1.3"))),
    +        buildExprAst(
    +            "query('search query',"
    +                + "analyzer='keyword', time_zone='Canada/Pacific', tie_breaker='1.3')"));
       }
     
       @Test
    @@ -677,7 +807,8 @@ public void canBuildInClause() {
             buildExprAst("age not in (20, 30)"));
     
         assertEquals(
    -        AstDSL.in(qualifiedName("age"),
    +        AstDSL.in(
    +            qualifiedName("age"),
                 AstDSL.function("abs", AstDSL.intLiteral(20)),
                 AstDSL.function("abs", AstDSL.intLiteral(30))),
             buildExprAst("age in (abs(20), abs(30))"));
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilderTest.java
    index 1cb1ab5f8b..b2e4c54160 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstHavingFilterBuilderTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
    @@ -30,8 +29,7 @@
     @ExtendWith(MockitoExtension.class)
     class AstHavingFilterBuilderTest {
     
    -  @Mock
    -  private QuerySpecification querySpec;
    +  @Mock private QuerySpecification querySpec;
     
       private AstHavingFilterBuilder builder;
     
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstNowLikeFunctionTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstNowLikeFunctionTest.java
    index 19b48ca0bd..639d73e419 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstNowLikeFunctionTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstNowLikeFunctionTest.java
    @@ -25,29 +25,29 @@
     class AstNowLikeFunctionTest extends AstBuilderTestBase {
     
        private static Stream<Arguments> allFunctions() {
    -    return Stream.of("curdate",
    -        "current_date",
    -        "current_time",
    -        "current_timestamp",
    -        "curtime",
    -        "localtimestamp",
    -        "localtime",
    -         "now",
    -         "sysdate")
    +    return Stream.of(
    +            "curdate",
    +            "current_date",
    +            "current_time",
    +            "current_timestamp",
    +            "curtime",
    +            "localtimestamp",
    +            "localtime",
    +            "now",
    +            "sysdate",
    +            "utc_date",
    +            "utc_time",
    +            "utc_timestamp")
             .map(Arguments::of);
       }
     
        private static Stream<Arguments> supportFsp() {
    -    return Stream.of("sysdate")
    -        .map(Arguments::of);
    +    return Stream.of("sysdate").map(Arguments::of);
       }
     
        private static Stream<Arguments> supportShortcut() {
    -    return Stream.of("current_date",
    -            "current_time",
    -            "current_timestamp",
    -            "localtimestamp",
    -            "localtime")
    +    return Stream.of(
    +            "current_date", "current_time", "current_timestamp", "localtimestamp", "localtime")
             .map(Arguments::of);
       }
     
     @@ -56,12 +56,7 @@ private static Stream<Arguments> supportShortcut() {
       void project_call(String name) {
         String call = name + "()";
         assertEquals(
    -        project(
    -            values(emptyList()),
    -            alias(call, function(name))
    -        ),
    -        buildAST("SELECT " + call)
    -    );
    +        project(values(emptyList()), alias(call, function(name))), buildAST("SELECT " + call));
       }
     
       @ParameterizedTest
    @@ -70,29 +65,16 @@ void filter_call(String name) {
         String call = name + "()";
         assertEquals(
             project(
    -            filter(
    -                relation("test"),
    -                function(
    -                    "=",
    -                    qualifiedName("data"),
    -                    function(name))
    -            ),
    -            AllFields.of()
    -        ),
    -        buildAST("SELECT * FROM test WHERE data = " + call)
    -    );
    +            filter(relation("test"), function("=", qualifiedName("data"), function(name))),
    +            AllFields.of()),
    +        buildAST("SELECT * FROM test WHERE data = " + call));
       }
     
    -
       @ParameterizedTest
       @MethodSource("supportFsp")
       void fsp(String name) {
         assertEquals(
    -        project(
    -            values(emptyList()),
    -            alias(name + "(0)", function(name, intLiteral(0)))
    -        ),
    -        buildAST("SELECT " + name + "(0)")
    -    );
    +        project(values(emptyList()), alias(name + "(0)", function(name, intLiteral(0)))),
    +        buildAST("SELECT " + name + "(0)"));
       }
     }
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstQualifiedNameBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstQualifiedNameBuilderTest.java
    index 92b535144f..b0a7592990 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstQualifiedNameBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstQualifiedNameBuilderTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
    @@ -51,6 +50,10 @@ public void canBuildQualifiedIdentifier() {
         buildFromQualifiers("account.location.city").expectQualifiedName("account", "location", "city");
       }
     
    +  @Test
    +  public void commonKeywordCanBeUsedAsIdentifier() {
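     +    // 'type' is a keyword in the grammar but should still parse as a plain identifier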
    +    buildFromIdentifier("type").expectQualifiedName("type");
    +  }
     
       @Test
       public void functionNameCanBeUsedAsIdentifier() {
    @@ -62,9 +65,10 @@ public void functionNameCanBeUsedAsIdentifier() {
     
       void assertFunctionNameCouldBeId(String antlrFunctionName) {
          List<String> functionList =
    -        Arrays.stream(antlrFunctionName.split("\\|")).map(String::stripLeading)
    -            .map(String::stripTrailing).collect(
    -            Collectors.toList());
    +        Arrays.stream(antlrFunctionName.split("\\|"))
    +            .map(String::stripLeading)
    +            .map(String::stripTrailing)
    +            .collect(Collectors.toList());
     
         assertFalse(functionList.isEmpty());
         for (String functionName : functionList) {
    @@ -105,5 +109,4 @@ private OpenSearchSQLParser createParser(String expr) {
           return parser;
         }
       }
    -
     }
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/AstSortBuilderTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/AstSortBuilderTest.java
    index 3c8d155e65..f72f1ba0ff 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/AstSortBuilderTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/AstSortBuilderTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
    @@ -40,14 +39,11 @@
     @ExtendWith(MockitoExtension.class)
     class AstSortBuilderTest {
     
    -  @Mock
    -  private QuerySpecification querySpec;
    +  @Mock private QuerySpecification querySpec;
     
    -  @Mock
    -  private OrderByClauseContext orderByClause;
    +  @Mock private OrderByClauseContext orderByClause;
     
    -  @Mock
    -  private UnresolvedPlan child;
    +  @Mock private UnresolvedPlan child;
     
       @Test
       void can_build_sort_node() {
    @@ -56,32 +52,35 @@ void can_build_sort_node() {
     
          ImmutableMap<SortOption, List<Argument>> expects =
              ImmutableMap.<SortOption, List<Argument>>builder()
    -            .put(new SortOption(null, null),
    -                ImmutableList.of(argument("asc", booleanLiteral(true))))
    -            .put(new SortOption(ASC, null),
    -                ImmutableList.of(argument("asc", booleanLiteral(true))))
    -            .put(new SortOption(DESC, null),
    +            .put(
    +                new SortOption(null, null), ImmutableList.of(argument("asc", booleanLiteral(true))))
    +            .put(new SortOption(ASC, null), ImmutableList.of(argument("asc", booleanLiteral(true))))
    +            .put(
    +                new SortOption(DESC, null),
                     ImmutableList.of(argument("asc", booleanLiteral(false))))
    -            .put(new SortOption(null, NULL_LAST),
    +            .put(
    +                new SortOption(null, NULL_LAST),
                     ImmutableList.of(
                         argument("asc", booleanLiteral(true)),
                         argument("nullFirst", booleanLiteral(false))))
    -            .put(new SortOption(DESC, NULL_FIRST),
    +            .put(
    +                new SortOption(DESC, NULL_FIRST),
                     ImmutableList.of(
                         argument("asc", booleanLiteral(false)),
                         argument("nullFirst", booleanLiteral(true))))
                 .build();
     
    -    expects.forEach((option, expect) -> {
    -      when(querySpec.getOrderByOptions()).thenReturn(ImmutableList.of(option));
    +    expects.forEach(
    +        (option, expect) -> {
    +          when(querySpec.getOrderByOptions()).thenReturn(ImmutableList.of(option));
     
    -      AstSortBuilder sortBuilder = new AstSortBuilder(querySpec);
    -      assertEquals(
    -          new Sort(
    -              child, // has to mock and attach child otherwise Guava ImmutableList NPE in getChild()
    -              ImmutableList.of(field("name", expect))),
    -          sortBuilder.visitOrderByClause(orderByClause).attach(child));
    -    });
    +          AstSortBuilder sortBuilder = new AstSortBuilder(querySpec);
    +          assertEquals(
    +              new Sort(
    +                  child, // has to mock and attach child otherwise Guava ImmutableList NPE in
    +                  // getChild()
    +                  ImmutableList.of(field("name", expect))),
    +              sortBuilder.visitOrderByClause(orderByClause).attach(child));
    +        });
       }
    -
     }
    diff --git a/sql/src/test/java/org/opensearch/sql/sql/parser/context/QuerySpecificationTest.java b/sql/src/test/java/org/opensearch/sql/sql/parser/context/QuerySpecificationTest.java
    index 2f75e89002..ed18b3e692 100644
    --- a/sql/src/test/java/org/opensearch/sql/sql/parser/context/QuerySpecificationTest.java
    +++ b/sql/src/test/java/org/opensearch/sql/sql/parser/context/QuerySpecificationTest.java
    @@ -3,7 +3,6 @@
      * SPDX-License-Identifier: Apache-2.0
      */
     
    -
     package org.opensearch.sql.sql.parser.context;
     
     import static org.junit.jupiter.api.Assertions.assertEquals;
    @@ -35,32 +34,27 @@ class QuerySpecificationTest {
     
       @Test
       void can_collect_group_by_items_in_group_by_clause() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT name, ABS(age) FROM test GROUP BY name, ABS(age)");
    +    QuerySpecification querySpec =
    +        collect("SELECT name, ABS(age) FROM test GROUP BY name, ABS(age)");
     
         assertEquals(
    -        ImmutableList.of(
    -            qualifiedName("name"),
    -            function("ABS", qualifiedName("age"))),
    +        ImmutableList.of(qualifiedName("name"), function("ABS", qualifiedName("age"))),
             querySpec.getGroupByItems());
       }
     
       @Test
       void can_collect_select_items_in_select_clause() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT name, ABS(age) FROM test");
    +    QuerySpecification querySpec = collect("SELECT name, ABS(age) FROM test");
     
         assertEquals(
    -        ImmutableList.of(
    -            qualifiedName("name"),
    -            function("ABS", qualifiedName("age"))),
    +        ImmutableList.of(qualifiedName("name"), function("ABS", qualifiedName("age"))),
             querySpec.getSelectItems());
       }
     
       @Test
       void can_collect_aggregators_in_select_clause() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT name, AVG(age), SUM(balance) FROM test GROUP BY name");
    +    QuerySpecification querySpec =
    +        collect("SELECT name, AVG(age), SUM(balance) FROM test GROUP BY name");
     
         assertEquals(
             ImmutableSet.of(
    @@ -71,29 +65,25 @@ void can_collect_aggregators_in_select_clause() {
     
       @Test
       void can_collect_nested_aggregators_in_select_clause() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT name, ABS(1 + AVG(age)) FROM test GROUP BY name");
    +    QuerySpecification querySpec =
    +        collect("SELECT name, ABS(1 + AVG(age)) FROM test GROUP BY name");
     
         assertEquals(
    -        ImmutableSet.of(
    -            alias("AVG(age)", aggregate("AVG", qualifiedName("age")))),
    +        ImmutableSet.of(alias("AVG(age)", aggregate("AVG", qualifiedName("age")))),
             querySpec.getAggregators());
       }
     
       @Test
       void can_collect_alias_in_select_clause() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT name AS n FROM test GROUP BY n");
    +    QuerySpecification querySpec = collect("SELECT name AS n FROM test GROUP BY n");
     
    -    assertEquals(
    -        ImmutableMap.of("n", qualifiedName("name")),
    -        querySpec.getSelectItemsByAlias());
    +    assertEquals(ImmutableMap.of("n", qualifiedName("name")), querySpec.getSelectItemsByAlias());
       }
     
       @Test
       void should_deduplicate_same_aggregators() {
    -    QuerySpecification querySpec = collect(
    -        "SELECT AVG(age), AVG(balance), AVG(age) FROM test GROUP BY name");
    +    QuerySpecification querySpec =
    +        collect("SELECT AVG(age), AVG(balance), AVG(age) FROM test GROUP BY name");
     
         assertEquals(
             ImmutableSet.of(
    @@ -119,20 +109,24 @@ void can_collect_sort_options_in_order_by_clause() {
     
       @Test
       void should_skip_sort_items_in_window_function() {
    -    assertEquals(1,
    -        collect("SELECT name, RANK() OVER(ORDER BY age) "
    -            + "FROM test ORDER BY name"
    -        ).getOrderByOptions().size());
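     +    // only the query-level ORDER BY should count; the ORDER BY inside OVER(...) is skipped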
    +    assertEquals(
    +        1,
    +        collect("SELECT name, RANK() OVER(ORDER BY age) " + "FROM test ORDER BY name")
    +            .getOrderByOptions()
    +            .size());
       }
     
       @Test
       void can_collect_filtered_aggregation() {
         assertEquals(
    -        ImmutableSet.of(alias("AVG(age) FILTER(WHERE age > 20)",
    -            filteredAggregate("AVG", qualifiedName("age"),
    -                function(">", qualifiedName("age"), intLiteral(20))))),
    -        collect("SELECT AVG(age) FILTER(WHERE age > 20) FROM test").getAggregators()
    -    );
    +        ImmutableSet.of(
    +            alias(
    +                "AVG(age) FILTER(WHERE age > 20)",
    +                filteredAggregate(
    +                    "AVG",
    +                    qualifiedName("age"),
    +                    function(">", qualifiedName("age"), intLiteral(20))))),
    +        collect("SELECT AVG(age) FILTER(WHERE age > 20) FROM test").getAggregators());
       }
     
       private QuerySpecification collect(String query) {
    @@ -147,5 +141,4 @@ private QuerySpecificationContext parse(String query) {
         parser.addErrorListener(new SyntaxAnalysisErrorListener());
         return parser.querySpecification();
       }
    -
     }
    diff --git a/workbench/.cypress/integration/ui.spec.js b/workbench/.cypress/integration/ui.spec.js
    deleted file mode 100644
    index b552ba24cd..0000000000
    --- a/workbench/.cypress/integration/ui.spec.js
    +++ /dev/null
    @@ -1,247 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
     -/// <reference types="cypress" />
    -
    -import { edit } from 'brace';
    -import { delay, files, testDataSet, testQueries, verifyDownloadData } from '../utils/constants';
    -
    -describe('Dump test data', () => {
    -  it('Indexes test data for SQL and PPL', () => {
    -    const dumpDataSet = (url, index) =>
    -      cy.request(url).then((response) => {
    -        cy.request({
    -          method: 'POST',
    -          form: true,
    -          url: 'api/console/proxy',
    -          headers: {
    -            'content-type': 'application/json;charset=UTF-8',
    -            'osd-xsrf': true,
    -          },
    -          qs: {
    -            path: `${index}/_bulk`,
    -            method: 'POST',
    -          },
    -          body: response.body,
    -        });
    -      });
    -
    -    testDataSet.forEach(({url, index}) => dumpDataSet(url, index));
    -  });
    -});
    -
    -describe('Test PPL UI', () => {
    -  beforeEach(() => {
    -    cy.visit('app/opensearch-query-workbench');
    -    cy.wait(delay);
    -    cy.get('.euiToggle__input[title=PPL]').click({ force: true });
    -    cy.wait(delay);
    -  });
    -
    -  it('Confirm results are empty', () => {
    -    cy.get('.euiTextAlign')
    -      .contains('Enter a query in the query editor above to see results.')
    -      .should('have.length', 1);
    -  });
    -
    -  it('Test Run button', () => {
    -    cy.get('textarea.ace_text-input').eq(0).focus().type('source=accounts', { force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiTab__content').contains('Events').click({ force: true });
    -
    -    cy.get('span.euiTableCellContent__text')
    -      .eq(19)
    -      .should((employer) => {
    -        expect(employer).to.contain('Pyrami');
    -      });
    -  });
    -
    -  it('Test Clear button', () => {
    -    cy.get('textarea.ace_text-input').eq(0).focus().type('source=accounts', { force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiTab__content').contains('Events').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Clear').click({ force: true });
    -    cy.wait(delay);
    -
    -    cy.get('.euiTextAlign')
    -      .contains('Enter a query in the query editor above to see results.')
    -      .should('have.length', 1);
    -    cy.get('.ace_content')
    -      .eq(0)
    -      .then((queryEditor) => {
    -        const editor = edit(queryEditor[0]);
    -        expect(editor.getValue()).to.equal('');
    -      });
    -  });
    -
    -  it('Test full screen view', () => {
    -    cy.get('.euiButton__text').contains('Full screen view').should('not.exist');
    -    cy.get('.euiTitle').contains('Query Workbench').should('exist');
    -
    -    cy.get('textarea.ace_text-input').eq(0).focus().type('source=accounts', { force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Full screen view').click({ force: true });
    -
    -    cy.get('.euiTitle').should('not.exist');
    -
    -    cy.get('button#exit-fullscreen-button').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Full screen view').should('exist');
    -    cy.get('.euiTitle').contains('Query Workbench').should('exist');
    -  });
    -});
    -
    -describe('Test SQL UI', () => {
    -  beforeEach(() => {
    -    cy.visit('app/opensearch-query-workbench');
    -    cy.wait(delay);
    -    cy.get('.euiToggle__input[title=SQL]').click({ force: true });
    -    cy.wait(delay);
    -  });
    -
    -  it('Confirm results are empty', () => {
    -    cy.get('.euiTextAlign')
    -      .contains('Enter a query in the query editor above to see results.')
    -      .should('have.length', 1);
    -  });
    -
    -  it('Test Run button and field search', () => {
    -    cy.get('textarea.ace_text-input')
    -      .eq(0)
    -      .focus()
    -      .type('{enter}select * from accounts where balance > 49500;', { force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiTab__content').contains('accounts').click({ force: true });
    -
    -    cy.get('input.euiFieldSearch').type('marissa');
    -    cy.get('span.euiTableCellContent__text')
    -      .eq(13)
    -      .should((account_number) => {
    -        expect(account_number).to.contain('803');
    -      });
    -  });
    -
    -  it('Test Translate button', () => {
    -    cy.get('textarea.ace_text-input').eq(0).focus().type('{selectall}{backspace}', { force: true });
    -    cy.wait(delay);
    -    cy.get('textarea.ace_text-input')
    -      .eq(0)
    -      .focus()
    -      .type('{selectall}{backspace}select log(balance) from accounts where abs(age) > 20;', {
    -        force: true,
    -      });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Explain').click({ force: true });
    -    cy.wait(delay);
    -
    -    // hard to get euiCodeBlock content, check length instead
    -    cy.get('.euiCodeBlock__code').children().should('have.length', 13);
    -  });
    -
    -  it('Test Clear button', () => {
    -    cy.get('.euiButton__text').contains('Clear').click({ force: true });
    -    cy.wait(delay);
    -
    -    cy.get('.ace_content')
    -      .eq(0)
    -      .then((queryEditor) => {
    -        const editor = edit(queryEditor[0]);
    -        expect(editor.getValue()).to.equal('');
    -      });
    -  });
    -
    -  it('Test full screen view', () => {
    -    cy.get('.euiButton__text').contains('Full screen view').should('not.exist');
    -    cy.get('.euiTitle').contains('Query Workbench').should('exist');
    -
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Full screen view').click({ force: true });
    -
    -    cy.get('.euiTitle').should('not.exist');
    -  });
    -});
    -
    -describe('Test and verify SQL downloads', () => {
    -  verifyDownloadData.map(({ title, url, file }) => {
    -    it(title, () => {
    -      cy.request({
    -        method: 'POST',
    -        form: true,
    -        url: url,
    -        headers: {
    -          'content-type': 'application/json;charset=UTF-8',
    -          'osd-xsrf': true,
    -        },
    -        body: {
    -          query: 'select * from accounts where balance > 49500',
    -        },
    -      }).then((response) => {
    -        if (title === 'Download and verify CSV' || title === 'Download and verify Text') {
    -          expect(response.body.data.body).to.have.string(files[file]);
    -        } else {
    -          expect(response.body.data.resp).to.have.string(files[file]);
    -        }
    -      });
    -    });
    -  });
    -});
    -
    -describe('Test table display', () => {
    -  beforeEach(() => {
    -    cy.visit('app/opensearch-query-workbench');
    -    cy.wait(delay);
    -    cy.get('.euiToggle__input[title=SQL]').click({ force: true });
    -    cy.wait(delay);
    -    cy.get('textarea.ace_text-input').eq(0).focus().type('{selectall}{backspace}', { force: true });
    -    cy.wait(delay);
    -  });
    -
    -  testQueries.map(({ title, query, cell_idx, expected_string }) => {
    -    it(title, () => {
    -      cy.get('textarea.ace_text-input')
    -        .eq(0)
    -        .focus()
    -        .type(`{selectall}{backspace}${query}`, { force: true });
    -      cy.wait(delay);
    -      cy.get('.euiButton__text').contains('Run').click({ force: true });
    -      cy.wait(delay);
    -
    -      cy.get('span.euiTableCellContent__text')
    -        .eq(cell_idx)
    -        .should((cell) => {
    -          expect(cell).to.contain(expected_string);
    -        });
    -    });
    -  });
    -
    -  it('Test nested fields display', () => {
    -    cy.get('textarea.ace_text-input')
    -      .eq(0)
    -      .focus()
    -      .type(`{selectall}{backspace}select * from employee_nested;`, { force: true });
    -    cy.wait(delay);
    -    cy.get('.euiButton__text').contains('Run').click({ force: true });
    -    cy.wait(delay);
    -
    -    cy.get('button.euiLink').eq(2).click({ force: true });
    -    cy.wait(delay);
    -    cy.get('span.euiTableCellContent__text')
    -      .eq(24)
    -      .should((cell) => {
    -        expect(cell).to.contain('comment_2_1');
    -      });
    -  });
    -});
    diff --git a/workbench/.cypress/plugins/index.js b/workbench/.cypress/plugins/index.js
    deleted file mode 100644
    index da3b59f2b6..0000000000
    --- a/workbench/.cypress/plugins/index.js
    +++ /dev/null
    @@ -1,28 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
     -/// <reference types="cypress" />
    -
    -// ***********************************************************
    -// This example plugins/index.js can be used to load plugins
    -//
    -// You can change the location of this file or turn off loading
    -// the plugins file with the 'pluginsFile' configuration option.
    -//
    -// You can read more here:
    -// https://on.cypress.io/plugins-guide
    -// ***********************************************************
    -
    -// This function is called when a project is opened or re-opened (e.g. due to
    -// the project's config changing)
    -
    -/**
    - * @type {Cypress.PluginConfig}
    - */
    -module.exports = (on, config) => {
    -  // `on` is used to hook into various events Cypress emits
    -  // `config` is the resolved Cypress config
    -};
    diff --git a/workbench/.cypress/support/commands.js b/workbench/.cypress/support/commands.js
    deleted file mode 100644
    index abc0af8632..0000000000
    --- a/workbench/.cypress/support/commands.js
    +++ /dev/null
    @@ -1,72 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -// ***********************************************
    -// This example commands.js shows you how to
    -// create various custom commands and overwrite
    -// existing commands.
    -//
    -// For more comprehensive examples of custom
    -// commands please read more here:
    -// https://on.cypress.io/custom-commands
    -// ***********************************************
    -//
    -//
    -// -- This is a parent command --
    -// Cypress.Commands.add("login", (email, password) => { ... })
    -//
    -//
    -// -- This is a child command --
    -// Cypress.Commands.add("drag", { prevSubject: 'element'}, (subject, options) => { ... })
    -//
    -//
    -// -- This is a dual command --
    -// Cypress.Commands.add("dismiss", { prevSubject: 'optional'}, (subject, options) => { ... })
    -//
    -//
    -// -- This will overwrite an existing command --
    -// Cypress.Commands.overwrite("visit", (originalFn, url, options) => { ... })
    -
    -const { ADMIN_AUTH } = require('./constants');
    -
    -Cypress.Commands.overwrite('visit', (originalFn, url, options) => {
    -  // Add the basic auth header when security enabled in the OpenSearch cluster
    -  // https://github.com/cypress-io/cypress/issues/1288
    -  if (Cypress.env('security_enabled')) {
    -    if (options) {
    -      options.auth = ADMIN_AUTH;
    -    } else {
    -      options = { auth: ADMIN_AUTH };
    -    }
    -    // Add query parameters - select the default OpenSearch Dashboards tenant
    -    options.qs = { security_tenant: 'private' };
    -    return originalFn(url, options);
    -  } else {
    -    return originalFn(url, options);
    -  }
    -});
    -
    -// Be able to add default options to cy.request(), https://github.com/cypress-io/cypress/issues/726
    -Cypress.Commands.overwrite('request', (originalFn, ...args) => {
    -  let defaults = {};
    -  // Add the basic authentication header when security enabled in the OpenSearch cluster
    -  if (Cypress.env('security_enabled')) {
    -    defaults.auth = ADMIN_AUTH;
    -  }
    -
    -  let options = {};
    -  if (typeof args[0] === 'object' && args[0] !== null) {
    -    options = Object.assign({}, args[0]);
    -  } else if (args.length === 1) {
    -    [options.url] = args;
    -  } else if (args.length === 2) {
    -    [options.method, options.url] = args;
    -  } else if (args.length === 3) {
    -    [options.method, options.url, options.body] = args;
    -  }
    -
    -  return originalFn(Object.assign({}, defaults, options));
    -});
    diff --git a/workbench/.cypress/support/constants.js b/workbench/.cypress/support/constants.js
    deleted file mode 100644
    index 1001fd49d4..0000000000
    --- a/workbench/.cypress/support/constants.js
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -export const ADMIN_AUTH = {
    -  username: 'admin',
    -  password: 'admin',
    -};
    diff --git a/workbench/.cypress/support/index.js b/workbench/.cypress/support/index.js
    deleted file mode 100644
    index 929137ae3f..0000000000
    --- a/workbench/.cypress/support/index.js
    +++ /dev/null
    @@ -1,31 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -// ***********************************************************
    -// This example support/index.js is processed and
    -// loaded automatically before your test files.
    -//
    -// This is a great place to put global configuration and
    -// behavior that modifies Cypress.
    -//
    -// You can change the location of this file or turn off
    -// automatically serving support files with the
    -// 'supportFile' configuration option.
    -//
    -// You can read more here:
    -// https://on.cypress.io/configuration
    -// ***********************************************************
    -
    -// Import commands.js using ES2015 syntax:
    -import './commands';
    -
    -// Alternatively you can use CommonJS syntax:
    -// require('./commands')
    -
    -// Switch the base URL of OpenSearch when security enabled in the cluster
    -if (Cypress.env('security_enabled')) {
    -  Cypress.env('opensearch', 'https://localhost:9200');
    -}
    diff --git a/workbench/.cypress/tsconfig.json b/workbench/.cypress/tsconfig.json
    deleted file mode 100644
    index 36de33deef..0000000000
    --- a/workbench/.cypress/tsconfig.json
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -{
    -  "compilerOptions": {
    -    "allowJs": true,
    -    "baseUrl": "../node_modules",
    -    "types": ["cypress"]
    -  },
    -  "include": ["**/*.*"]
    -}
    diff --git a/workbench/.cypress/utils/constants.js b/workbench/.cypress/utils/constants.js
    deleted file mode 100644
    index 226b7121db..0000000000
    --- a/workbench/.cypress/utils/constants.js
    +++ /dev/null
    @@ -1,99 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -export const delay = 1000;
    -
    -export const testDataSet = [
    -  {
    -    url: 'https://raw.githubusercontent.com/opensearch-project/sql/main/integ-test/src/test/resources/accounts.json',
    -    index: 'accounts',
    -  },
    -  {
    -    url: 'https://raw.githubusercontent.com/opensearch-project/sql/main/integ-test/src/test/resources/employee_nested.json',
    -    index: 'employee_nested'
    -  }
    -]
    -
    -export const verifyDownloadData = [
    -  {
    -    title: 'Download and verify JSON',
    -    url: 'api/sql_console/sqljson',
    -    file: 'JSONFile'
    -  },
    -  {
    -    title: 'Download and verify JDBC',
    -    url: 'api/sql_console/sqlquery',
    -    file: 'JDBCFile'
    -  },
    -  {
    -    title: 'Download and verify CSV',
    -    url: 'api/sql_console/sqlcsv',
    -    file: 'CSVFile'
    -  },
    -  {
    -    title: 'Download and verify Text',
    -    url: 'api/sql_console/sqltext',
    -    file: 'TextFile'
    -  },
    -];
    -
    -export const testQueries = [
    -  {
    -    title: 'Test GROUP BY',
    -    query: 'select count(*) from accounts group by gender;',
    -    cell_idx: 3,
    -    expected_string: '493'
    -  },
    -  {
    -    title: 'Test GROUP BY with aliases and scalar function',
    -    query: 'SELECT ABS(age) AS a FROM accounts GROUP BY ABS(age);',
    -    cell_idx: 17,
    -    expected_string: '27'
    -  },
    -  {
    -    title: 'Test GROUP BY and HAVING',
    -    query: 'SELECT age, MAX(balance) FROM accounts GROUP BY age HAVING MIN(balance) > 3000;',
    -    cell_idx: 5,
    -    expected_string: '49119'
    -  },
    -  {
    -    title: 'Test ORDER BY',
    -    query: 'SELECT account_number FROM accounts ORDER BY account_number DESC;',
    -    cell_idx: 5,
    -    expected_string: '998'
    -  },
    -  {
    -    title: 'Test JOIN',
    -    query: 'select a.account_number, a.firstname, a.lastname, e.id, e.name from accounts a join employee_nested e order by a.account_number;',
    -    cell_idx: 45,
    -    expected_string: 'Duke'
    -  },
    -];
    -
    -export const files = {
    -  JSONFile:
    -    `"hits":[{"_index":"accounts","_type":"_doc","_id":"842","_score":0,"_source":{"account_number":842,"balance":49587,"firstname":"Meagan","lastname":"Buckner","age":23,"gender":"F","address":"833 Bushwick Court","employer":"Biospan","email":"meaganbuckner@biospan.com","city":"Craig","state":"TX"}},{"_index":"accounts","_type":"_doc","_id":"854","_score":0,"_source":{"account_number":854,"balance":49795,"firstname":"Jimenez","lastname":"Barry","age":25,"gender":"F","address":"603 Cooper Street","employer":"Verton","email":"jimenezbarry@verton.com","city":"Moscow","state":"AL"}},{"_index":"accounts","_type":"_doc","_id":"97","_score":0,"_source":{"account_number":97,"balance":49671,"firstname":"Karen","lastname":"Trujillo","age":40,"gender":"F","address":"512 Cumberland Walk","employer":"Tsunamia","email":"karentrujillo@tsunamia.com","city":"Fredericktown","state":"MO"}},{"_index":"accounts","_type":"_doc","_id":"168","_score":0,"_source":{"account_number":168,"balance":49568,"firstname":"Carissa","lastname":"Simon","age":20,"gender":"M","address":"975 Flatbush Avenue","employer":"Zillacom","email":"carissasimon@zillacom.com","city":"Neibert","state":"IL"}},{"_index":"accounts","_type":"_doc","_id":"240","_score":0,"_source":{"account_number":240,"balance":49741,"firstname":"Oconnor","lastname":"Clay","age":35,"gender":"F","address":"659 Highland Boulevard","employer":"Franscene","email":"oconnorclay@franscene.com","city":"Kilbourne","state":"NH"}},{"_index":"accounts","_type":"_doc","_id":"803","_score":0,"_source":{"account_number":803,"balance":49567,"firstname":"Marissa","lastname":"Spears","age":25,"gender":"M","address":"963 Highland Avenue","employer":"Centregy","email":"marissaspears@centregy.com","city":"Bloomington","state":"MS"}},{"_index":"accounts","_type":"_doc","_id":"248","_score":0,"_source":{"account_number":248,"balance":49989,"firstname":"West","lastname":"England","age":36,"gender":"M","address":"717 Hendrickson Place","employer":"Obliq","email":"westengland@obliq.com","city":"Maury","state":"WA"}}]`,
    -  JDBCFile:
    -    `{"schema":[{"name":"account_number","type":"long"},{"name":"firstname","type":"text"},{"name":"address","type":"text"},{"name":"balance","type":"long"},{"name":"gender","type":"text"},{"name":"city","type":"text"},{"name":"employer","type":"text"},{"name":"state","type":"text"},{"name":"age","type":"long"},{"name":"email","type":"text"},{"name":"lastname","type":"text"}],"datarows":[[842,"Meagan","833 Bushwick Court",49587,"F","Craig","Biospan","TX",23,"meaganbuckner@biospan.com","Buckner"],[854,"Jimenez","603 Cooper Street",49795,"F","Moscow","Verton","AL",25,"jimenezbarry@verton.com","Barry"],[97,"Karen","512 Cumberland Walk",49671,"F","Fredericktown","Tsunamia","MO",40,"karentrujillo@tsunamia.com","Trujillo"],[168,"Carissa","975 Flatbush Avenue",49568,"M","Neibert","Zillacom","IL",20,"carissasimon@zillacom.com","Simon"],[240,"Oconnor","659 Highland Boulevard",49741,"F","Kilbourne","Franscene","NH",35,"oconnorclay@franscene.com","Clay"],[803,"Marissa","963 Highland Avenue",49567,"M","Bloomington","Centregy","MS",25,"marissaspears@centregy.com","Spears"],[248,"West","717 Hendrickson Place",49989,"M","Maury","Obliq","WA",36,"westengland@obliq.com","England"]],"total":7,"size":7,"status":200}`,
    -  CSVFile:
    -    `account_number,firstname,address,balance,gender,city,employer,state,age,email,lastname
    -842,Meagan,833 Bushwick Court,49587,F,Craig,Biospan,TX,23,meaganbuckner@biospan.com,Buckner
    -854,Jimenez,603 Cooper Street,49795,F,Moscow,Verton,AL,25,jimenezbarry@verton.com,Barry
    -97,Karen,512 Cumberland Walk,49671,F,Fredericktown,Tsunamia,MO,40,karentrujillo@tsunamia.com,Trujillo
    -168,Carissa,975 Flatbush Avenue,49568,M,Neibert,Zillacom,IL,20,carissasimon@zillacom.com,Simon
    -240,Oconnor,659 Highland Boulevard,49741,F,Kilbourne,Franscene,NH,35,oconnorclay@franscene.com,Clay
    -803,Marissa,963 Highland Avenue,49567,M,Bloomington,Centregy,MS,25,marissaspears@centregy.com,Spears
    -248,West,717 Hendrickson Place,49989,M,Maury,Obliq,WA,36,westengland@obliq.com,England`,
    -  TextFile:
    -    `account_number|firstname|address|balance|gender|city|employer|state|age|email|lastname
    -842|Meagan|833 Bushwick Court|49587|F|Craig|Biospan|TX|23|meaganbuckner@biospan.com|Buckner
    -854|Jimenez|603 Cooper Street|49795|F|Moscow|Verton|AL|25|jimenezbarry@verton.com|Barry
    -97|Karen|512 Cumberland Walk|49671|F|Fredericktown|Tsunamia|MO|40|karentrujillo@tsunamia.com|Trujillo
    -168|Carissa|975 Flatbush Avenue|49568|M|Neibert|Zillacom|IL|20|carissasimon@zillacom.com|Simon
    -240|Oconnor|659 Highland Boulevard|49741|F|Kilbourne|Franscene|NH|35|oconnorclay@franscene.com|Clay
    -803|Marissa|963 Highland Avenue|49567|M|Bloomington|Centregy|MS|25|marissaspears@centregy.com|Spears
    -248|West|717 Hendrickson Place|49989|M|Maury|Obliq|WA|36|westengland@obliq.com|England`,
    -};
    diff --git a/workbench/.gitignore b/workbench/.gitignore
    deleted file mode 100644
    index d3fce84065..0000000000
    --- a/workbench/.gitignore
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -node_modules/
    -/build/
    -.cypress/screenshots
    -.cypress/videos
    -/target/*
    diff --git a/workbench/.opensearch_dashboards-plugin-helpers.json b/workbench/.opensearch_dashboards-plugin-helpers.json
    deleted file mode 100644
    index 340532a661..0000000000
    --- a/workbench/.opensearch_dashboards-plugin-helpers.json
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -{
    -  "serverSourcePatterns": [
    -    "package.json",
    -    "tsconfig.json",
    -    "yarn.lock",
    -    ".yarnrc",
    -    "index.js",
    -    "{lib,public,server,test}/**/*",
    -    "!__tests__"
    -  ]
    -}
    diff --git a/workbench/CODE_OF_CONDUCT.md b/workbench/CODE_OF_CONDUCT.md
    deleted file mode 100644
    index ed4f094f6d..0000000000
    --- a/workbench/CODE_OF_CONDUCT.md
    +++ /dev/null
    @@ -1,3 +0,0 @@
    -## Code of Conduct
    -
    -This project has adopted an [Open Source Code of Conduct](https://github.com/opensearch-project/project-website/blob/main/CONTRIBUTING.md#code-of-conduct).
    diff --git a/workbench/CONTRIBUTING.md b/workbench/CONTRIBUTING.md
    deleted file mode 100644
    index ce761c2eac..0000000000
    --- a/workbench/CONTRIBUTING.md
    +++ /dev/null
    @@ -1,59 +0,0 @@
    -# Contributing Guidelines
    -
    -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
    -documentation, we greatly value feedback and contributions from our community.
    -
    -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
    -information to effectively respond to your bug report or contribution.
    -
    -
    -## Reporting Bugs/Feature Requests
    -
    -We welcome you to use the GitHub issue tracker to report bugs or suggest features.
    -
    -When filing an issue, please check [existing open](https://github.com/opensearch-project/sql/issues?q=is%3Aissue+is%3Aopen+workbench), or [recently closed](https://github.com/opensearch-project/sql/issues?q=is%3Aissue+workbench+is%3Aclosed+), issues to make sure somebody else hasn't already
    -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
    -
    -* A reproducible test case or series of steps
    -* The version of our code being used
    -* Any modifications you've made relevant to the bug
    -* Anything unusual about your environment or deployment
    -
    -
    -## Contributing via Pull Requests
    -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
    -
    -1. You are working against the latest source on the *main* branch.
    -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
    -3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
    -
    -To send us a pull request, please:
    -
    -1. Fork the repository.
    -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
    -3. Ensure local tests pass.
    -4. Commit to your fork using clear commit messages.
    -5. Send us a pull request, answering any default questions in the pull request interface.
    -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
    -
     -GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
    -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
    -
    -
    -## Finding contributions to work on
     -Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any issue tagged ["good first issue"](https://github.com/opensearch-project/sql/issues?q=is%3Aopen+is%3Aissue+label%3Aworkbench+label%3A%22good+first+issue%22) is a great place to start.
    -
    -
    -## Code of Conduct
    -
    -This project has adopted an [Open Source Code of Conduct](https://github.com/opensearch-project/project-website/blob/main/CONTRIBUTING.md#code-of-conduct).
    -
    -
    -## Security issue notifications
    -
    -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
    -
    -
    -## Licensing
    -
    -See the [LICENSE](LICENSE.TXT) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
    diff --git a/workbench/CONTRIBUTORS.md b/workbench/CONTRIBUTORS.md
    deleted file mode 100644
    index 4299ae5e4e..0000000000
    --- a/workbench/CONTRIBUTORS.md
    +++ /dev/null
    @@ -1,15 +0,0 @@
    -Contributors in order of last name:
    -
    -Peng Huo
    -
    -Abbas Hussain
    -
    -Anirudh Jadhav
    -
    -Joshua Li
    -
    -Francesca Paoletti
    -
    -Alolita Sharma
    -
    -Chloe Zhang
    diff --git a/workbench/DEVELOPER_GUIDE.md b/workbench/DEVELOPER_GUIDE.md
    deleted file mode 100644
    index 9d564a3cd1..0000000000
    --- a/workbench/DEVELOPER_GUIDE.md
    +++ /dev/null
    @@ -1,55 +0,0 @@
    -## Developer Guide
    -
    -So you want to contribute code to this project? Excellent! We're glad you're here. Here's what you need to do.
    -
    -### Setup
    -
    -1. Download OpenSearch for the version that matches the [OpenSearch Dashboards version specified in package.json](./package.json#L8).
    -1. Download and install the most recent version of [OpenSearch SQL plugin](https://github.com/opensearch-project/sql).
    -1. Download the OpenSearch Dashboards source code for the [version specified in package.json](./package.json#L8) you want to set up.
    -
    -   See the [OpenSearch Dashboards contributing guide](https://github.com/opensearch-project/OpenSearch-Dashboards/blob/main/CONTRIBUTING.md) to get started.
    -   
    -1. Change your node version to the version specified in `.node-version` inside the OpenSearch Dashboards root directory.
    -1. cd into the OpenSearch Dashboards source code directory.
    -1. Check out this package from version control into the `plugins` directory.
    -```
    -git clone git@github.com:opensearch-project/sql.git plugins --no-checkout
    -cd plugins
    -echo 'workbench/*' >> .git/info/sparse-checkout
    -git config core.sparseCheckout true
    -git checkout main
    -```
    -6. Run `yarn osd bootstrap` inside `OpenSearch-Dashboards/plugins/workbench`.
    -
    -Ultimately, your directory structure should look like this:
    -
    -```md
    -.
    -├── OpenSearch-Dashboards
    -│   └── plugins
    -│       └── workbench
    -```
    -
    -
    -## Build
    -
     -To build the plugin's distributable zip, simply run `yarn build`.
    -
    -Example output: `./build/query-workbench-dashboards*.zip`
    -
    -
    -## Run
    -
    -- `yarn start`
    -
    -  Starts OpenSearch Dashboards and includes this plugin. OpenSearch Dashboards will be available on `localhost:5601`.
    -
    -- `NODE_PATH=../../node_modules yarn test:jest`
    -
    -  Runs the plugin tests.
    -
    -
    -### Submitting Changes
    -
    -See [CONTRIBUTING](CONTRIBUTING.md).
    \ No newline at end of file
    diff --git a/workbench/LICENSE.TXT b/workbench/LICENSE.TXT
    deleted file mode 100644
    index 67db858821..0000000000
    --- a/workbench/LICENSE.TXT
    +++ /dev/null
    @@ -1,175 +0,0 @@
    -
    -                                 Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    diff --git a/workbench/NOTICE b/workbench/NOTICE
    deleted file mode 100644
    index f42ba2537e..0000000000
    --- a/workbench/NOTICE
    +++ /dev/null
    @@ -1,2 +0,0 @@
    -OpenSearch Dashboards SQL Plugin
    -Copyright OpenSearch Contributors
    diff --git a/workbench/README.md b/workbench/README.md
    deleted file mode 100644
    index 870ee43df8..0000000000
    --- a/workbench/README.md
    +++ /dev/null
    @@ -1,35 +0,0 @@
    -# OpenSearch Dashboards Query Workbench
    -
    -The OpenSearch Dashboards Query Workbench enables you to query your OpenSearch data using either SQL or PPL syntax from a dedicated OpenSearch Dashboards UI. You can download your query results data in JSON, JDBC, CSV and raw text formats.
    -
    -
    -## Documentation
    -
    -Please see our technical [documentation](https://opensearch.org/docs/latest/search-plugins/sql/workbench/) to learn more about its features.
    -
    -
    -## Contributing
    -
    -See [developer guide](DEVELOPER_GUIDE.md) and [how to contribute to this project](CONTRIBUTING.md). 
    -
    -## Getting Help
    -
    -If you find a bug, or have a feature request, please don't hesitate to open an issue in this repository.
    -
    -For more information, see [project website](https://opensearch.org/) and [documentation](https://opensearch.org/docs/latest/). If you need help and are unsure where to open an issue, try [forums](https://discuss.opendistrocommunity.dev/).
    -
    -## Code of Conduct
    -
    -This project has adopted the [Amazon Open Source Code of Conduct](CODE_OF_CONDUCT.md). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq), or contact [opensource-codeofconduct@amazon.com](mailto:opensource-codeofconduct@amazon.com) with any additional questions or comments.
    -
    -## Security
    -
    -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
    -
    -## License
    -
    -This project is licensed under the [Apache v2.0 License](../LICENSE.txt).
    -
    -## Copyright
    -
    -Copyright OpenSearch Contributors. See [NOTICE](./NOTICE) for details.
    diff --git a/workbench/THIRD-PARTY b/workbench/THIRD-PARTY
    deleted file mode 100644
    index 1496b14e1c..0000000000
    --- a/workbench/THIRD-PARTY
    +++ /dev/null
    @@ -1,397 +0,0 @@
    -** @elastic/eui; version 23.1.0 -- https://elastic.github.io/eui/#/
    -Copyright 2020 Elasticsearch BV
    -** TSLint; version 6.1.2 -- https://github.com/palantir/tslint
    -Copyright 2017 Palantir Technologies, Inc.
    -** typescript; version 3.0.3 -- https://github.com/Microsoft/TypeScript
    -/*!
    -*****************************************************************************
    -Copyright (c) Microsoft Corporation. All rights reserved.
    -Licensed under the Apache License, Version 2.0 (the "License"); you may not use
    -this file except in compliance with the License. You may obtain a copy of the
    -License at http://www.apache.org/licenses/LICENSE-2.0
    -
    -THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
    -ANY
    -KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
    -WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
    -MERCHANTABLITY OR NON-INFRINGEMENT.
    -
    -See the Apache Version 2.0 License for specific language governing permissions
    -and limitations under the License.
    -*****************************************************************************
    -*/
    -
    -Apache License
    -
    -Version 2.0, January 2004
    -
    -http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND
    -DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction, and
    -      distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by the
    -      copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all other
    -      entities that control, are controlled by, or are under common control
    -      with that entity. For the purposes of this definition, "control" means
    -      (i) the power, direct or indirect, to cause the direction or management
    -      of such entity, whether by contract or otherwise, or (ii) ownership of
    -      fifty percent (50%) or more of the outstanding shares, or (iii)
    -      beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity exercising
    -      permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation source,
    -      and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but not limited
    -      to compiled object code, generated documentation, and conversions to
    -      other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or Object
    -      form, made available under the License, as indicated by a copyright
    -      notice that is included in or attached to the work (an example is
    -      provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object form,
    -      that is based on (or derived from) the Work and for which the editorial
    -      revisions, annotations, elaborations, or other modifications represent,
    -      as a whole, an original work of authorship. For the purposes of this
    -      License, Derivative Works shall not include works that remain separable
    -      from, or merely link (or bind by name) to the interfaces of, the Work and
    -      Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including the original
    -      version of the Work and any modifications or additions to that Work or
    -      Derivative Works thereof, that is intentionally submitted to Licensor for
    -      inclusion in the Work by the copyright owner or by an individual or Legal
    -      Entity authorized to submit on behalf of the copyright owner. For the
    -      purposes of this definition, "submitted" means any form of electronic,
    -      verbal, or written communication sent to the Licensor or its
    -      representatives, including but not limited to communication on electronic
    -      mailing lists, source code control systems, and issue tracking systems
    -      that are managed by, or on behalf of, the Licensor for the purpose of
    -      discussing and improving the Work, but excluding communication that is
    -      conspicuously marked or otherwise designated in writing by the copyright
    -      owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity on
    -      behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of this
    -   License, each Contributor hereby grants to You a perpetual, worldwide,
    -   non-exclusive, no-charge, royalty-free, irrevocable copyright license to
    -   reproduce, prepare Derivative Works of, publicly display, publicly perform,
    -   sublicense, and distribute the Work and such Derivative Works in Source or
    -   Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of this
    -   License, each Contributor hereby grants to You a perpetual, worldwide,
    -   non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
    -   this section) patent license to make, have made, use, offer to sell, sell,
    -   import, and otherwise transfer the Work, where such license applies only to
    -   those patent claims licensable by such Contributor that are necessarily
    -   infringed by their Contribution(s) alone or by combination of their
    -   Contribution(s) with the Work to which such Contribution(s) was submitted.
    -   If You institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work or a
    -   Contribution incorporated within the Work constitutes direct or contributory
    -   patent infringement, then any patent licenses granted to You under this
    -   License for that Work shall terminate as of the date such litigation is
    -   filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the Work or
    -   Derivative Works thereof in any medium, with or without modifications, and
    -   in Source or Object form, provided that You meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or Derivative Works a
    -      copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices stating
    -      that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works that You
    -      distribute, all copyright, patent, trademark, and attribution notices
    -      from the Source form of the Work, excluding those notices that do not
    -      pertain to any part of the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -      distribution, then any Derivative Works that You distribute must include
    -      a readable copy of the attribution notices contained within such NOTICE
    -      file, excluding those notices that do not pertain to any part of the
    -      Derivative Works, in at least one of the following places: within a
    -      NOTICE text file distributed as part of the Derivative Works; within the
    -      Source form or documentation, if provided along with the Derivative
    -      Works; or, within a display generated by the Derivative Works, if and
    -      wherever such third-party notices normally appear. The contents of the
    -      NOTICE file are for informational purposes only and do not modify the
    -      License. You may add Your own attribution notices within Derivative Works
    -      that You distribute, alongside or as an addendum to the NOTICE text from
    -      the Work, provided that such additional attribution notices cannot be
    -      construed as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and may
    -      provide additional or different license terms and conditions for use,
    -      reproduction, or distribution of Your modifications, or for any such
    -      Derivative Works as a whole, provided Your use, reproduction, and
    -      distribution of the Work otherwise complies with the conditions stated in
    -      this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise, any
    -   Contribution intentionally submitted for inclusion in the Work by You to the
    -   Licensor shall be under the terms and conditions of this License, without
    -   any additional terms or conditions. Notwithstanding the above, nothing
    -   herein shall supersede or modify the terms of any separate license agreement
    -   you may have executed with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor, except
    -   as required for reasonable and customary use in describing the origin of the
    -   Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
    -   writing, Licensor provides the Work (and each Contributor provides its
    -   Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    -   KIND, either express or implied, including, without limitation, any
    -   warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
    -   FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining
    -   the appropriateness of using or redistributing the Work and assume any risks
    -   associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory, whether
    -   in tort (including negligence), contract, or otherwise, unless required by
    -   applicable law (such as deliberate and grossly negligent acts) or agreed to
    -   in writing, shall any Contributor be liable to You for damages, including
    -   any direct, indirect, special, incidental, or consequential damages of any
    -   character arising as a result of this License or out of the use or inability
    -   to use the Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all other
    -   commercial damages or losses), even if such Contributor has been advised of
    -   the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing the Work
    -   or Derivative Works thereof, You may choose to offer, and charge a fee for,
    -   acceptance of support, warranty, indemnity, or other liability obligations
    -   and/or rights consistent with this License. However, in accepting such
    -   obligations, You may act only on Your own behalf and on Your sole
    -   responsibility, not on behalf of any other Contributor, and only if You
    -   agree to indemnify, defend, and hold each Contributor harmless for any
    -   liability incurred by, or claims asserted against, such Contributor by
    -   reason of your accepting any such warranty or additional liability. END OF
    -   TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -To apply the Apache License to your work, attach the following boilerplate
    -notice, with the fields enclosed by brackets "[]" replaced with your own
    -identifying information. (Don't include the brackets!) The text should be
    -enclosed in the appropriate comment syntax for the file format. We also
    -recommend that a file or class name and description of purpose be included on
    -the same "printed page" as the copyright notice for easier identification
    -within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -
    -you may not use this file except in compliance with the License.
    -
    -You may obtain a copy of the License at
    -
    -http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -
    -distributed under the License is distributed on an "AS IS" BASIS,
    -
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -
    -See the License for the specific language governing permissions and
    -
    -limitations under the License.
    -
    -* For @elastic/eui see also this required NOTICE:
    -    Copyright 2017 Elasticsearch BV
    -* For TSLint see also this required NOTICE:
    -    Copyright 2017 Palantir Technologies, Inc.
    -* For typescript see also this required NOTICE:
    -    /*!
    -    *****************************************************************************
    -    Copyright (c) Microsoft Corporation. All rights reserved.
    -    Licensed under the Apache License, Version 2.0 (the "License"); you may not
    -    use
    -    this file except in compliance with the License. You may obtain a copy of
    -    the
    -    License at http://www.apache.org/licenses/LICENSE-2.0
    -
    -    THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
    -    OF ANY
    -    KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
    -    WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
    -    MERCHANTABLITY OR NON-INFRINGEMENT.
    -
    -    See the Apache Version 2.0 License for specific language governing
    -    permissions
    -    and limitations under the License.
    -    *****************************************************************************
    -    */
    -
    -------
    -
    -** enzyme; version 3.1.0 -- http://airbnb.io/enzyme/
    -Copyright (c) 2015 Airbnb, Inc.
    -
    -The MIT License (MIT)
    -
    -Copyright (c) 2015 Airbnb, Inc.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy of
    -this software and associated documentation files (the "Software"), to deal in
    -the Software without restriction, including without limitation the rights to
    -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    -of the Software, and to permit persons to whom the Software is furnished to do
    -so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -------
    -
    -** react; version 16.3.0 -- https://reactjs.org/
    -Copyright (c) Facebook, Inc. and its affiliates.
    -
    -MIT License
    -
    -Copyright (c) Facebook, Inc. and its affiliates.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -------
    -
    -** expect.js; version 0.3.1 -- https://github.com/Automattic/expect.js/
-Copyright (c) 2011 Guillermo Rauch <guillermo@learnboost.com>
    -Heavily borrows from should.js by TJ Holowaychuck - MIT.
    -
    -(The MIT License)
    -
-Copyright (c) 2011 Guillermo Rauch <guillermo@learnboost.com>
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy of
    -this software and associated documentation files (the 'Software'), to deal in
    -the Software without restriction, including without limitation the rights to
    -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    -of the Software, and to permit persons to whom the Software is furnished to do
    -so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -------
    -
    -** jest; version 23.6.0 -- https://jestjs.io/
    -Copyright (c) 2014-present, Facebook, Inc.
    -
    -MIT License
    -
    -For Jest software
    -
    -Copyright (c) 2014-present, Facebook, Inc.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -------
    -
    -** @types/react; version 16.3.14 -- https://www.npmjs.com/package/@types/react
    -These definitions were written by Asana https://asana.com, AssureSign
    -http://www.assuresign.com, Microsoft https://microsoft.com, John Reilly
    -https://github.com/johnnyreilly, Benoit Benezech https://github.com/bbenezech,
    -Patricio Zavolinsky https://github.com/pzavolinsky, Digiguru
    -https://github.com/digiguru, Eric Anderson https://github.com/ericanderson,
    -Tanguy Krotoff https://github.com/tkrotoff, Dovydas Navickas
    -https://github.com/DovydasNavickas, Stéphane Goetz https://github.com/onigoetz,
    -Josh Rutherford https://github.com/theruther4d, Guilherme Hübner
    -https://github.com/guilhermehubner, Ferdy Budhidharma
    -https://github.com/ferdaber, Johann Rakotoharisoa
    -https://github.com/jrakotoharisoa, Olivier Pascal
    -https://github.com/pascaloliv, Martin Hochel https://github.com/hotell, Frank
    -Li https://github.com/franklixuefei, Jessica Franco
    -https://github.com/Kovensky, Paul Sherman https://github.com/pshrmn.
    -
    -This project is licensed under the MIT license.
    -Copyrights are respective of each contributor listed at the beginning of each
    -definition file.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy of
    -this software and associated documentation files (the "Software"), to deal in
    -the Software without restriction, including without limitation the rights to
    -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    -of the Software, and to permit persons to whom the Software is furnished to do
    -so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    diff --git a/workbench/babel.config.js b/workbench/babel.config.js
    deleted file mode 100644
    index 19139b6863..0000000000
    --- a/workbench/babel.config.js
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -// babelrc doesn't respect NODE_PATH anymore but using require does.
    -// Alternative to install them locally in node_modules
    -module.exports = {
    -  presets: [require("@babel/preset-env"), require("@babel/preset-react"), require("@babel/preset-typescript")],
    -  plugins: [require("@babel/plugin-proposal-class-properties"), require("@babel/plugin-proposal-object-rest-spread"), ["@babel/transform-runtime"]]
    -};
    diff --git a/workbench/common/index.ts b/workbench/common/index.ts
    deleted file mode 100644
    index a3d39b178b..0000000000
    --- a/workbench/common/index.ts
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -export const PLUGIN_ID = 'queryWorkbenchDashboards';
    -export const PLUGIN_NAME = 'Query Workbench';
    diff --git a/workbench/cypress.json b/workbench/cypress.json
    deleted file mode 100644
    index 53c4ba96d8..0000000000
    --- a/workbench/cypress.json
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -{
    -  "baseUrl": "http://localhost:5601",
    -  "video": true,
    -  "fixturesFolder": ".cypress/fixtures",
    -  "integrationFolder": ".cypress/integration",
    -  "pluginsFile": ".cypress/plugins/index.js",
    -  "screenshotsFolder": ".cypress/screenshots",
    -  "supportFile": ".cypress/support/index.js",
    -  "videosFolder": ".cypress/videos",
    -  "requestTimeout": 60000,
    -  "responseTimeout": 60000,
    -  "defaultCommandTimeout": 60000,
    -  "env": {
    -    "opensearch": "localhost:9200",
    -    "opensearchDashboards": "localhost:5601",
    -    "security_enabled": true
    -  }
    -}
    diff --git a/workbench/opensearch_dashboards.json b/workbench/opensearch_dashboards.json
    deleted file mode 100644
    index 1bd7b74c8e..0000000000
    --- a/workbench/opensearch_dashboards.json
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -{
    -  "id": "queryWorkbenchDashboards",
    -  "version": "2.5.0.0",
    -  "opensearchDashboardsVersion": "2.5.0",
    -  "server": true,
    -  "ui": true,
    -  "requiredPlugins": ["navigation"],
    -  "optionalPlugins": []
    -}
    diff --git a/workbench/package.json b/workbench/package.json
    deleted file mode 100644
    index 08fd7dcb02..0000000000
    --- a/workbench/package.json
    +++ /dev/null
    @@ -1,57 +0,0 @@
    -{
    -  "name": "opensearch-query-workbench",
    -  "version": "2.5.0.0",
    -  "description": "Query Workbench",
    -  "main": "index.js",
    -  "license": "Apache-2.0",
    -  "homepage": "https://github.com/opensearch-project/sql/tree/main/workbench",
    -  "repository": {
    -    "type": "git",
    -    "url": "https://github.com/opensearch-project/sql/tree/main/workbench"
    -  },
    -  "scripts": {
    -    "osd": "node ../../scripts/osd",
    -    "opensearch": "node ../../scripts/opensearch",
    -    "lint": "tslint .",
    -    "start": "plugin-helpers start",
    -    "test:server": "plugin-helpers test:server",
    -    "test:browser": "plugin-helpers test:browser",
    -    "test:jest": "../../node_modules/.bin/jest --config ./test/jest.config.js",
    -    "build": "yarn plugin_helpers build",
    -    "plugin_helpers": "node ../../scripts/plugin_helpers"
    -  },
    -  "dependencies": {
    -    "brace": "0.11.1",
    -    "react-double-scrollbar": "^0.0.15"
    -  },
    -  "devDependencies": {
    -    "@testing-library/user-event": "^13.1.9",
    -    "@types/hapi-latest": "npm:@types/hapi@18.0.3",
    -    "@types/react-router-dom": "^5.3.2",
    -    "cypress": "^5.0.0",
    -    "eslint": "^6.8.0",
    -    "eslint-plugin-no-unsanitized": "^3.0.2",
    -    "eslint-plugin-prefer-object-spread": "^1.2.1",
    -    "husky": "^4.2.5",
    -    "jest-raw-loader": "^1.0.1",
    -    "lint-staged": "^10.2.0",
    -    "mutationobserver-shim": "^0.3.3",
    -    "ts-jest": "^26.1.0",
    -    "ts-node": "^8.9.1",
    -    "tslint": "^6.1.2",
    -    "tslint-config-prettier": "^1.18.0",
    -    "tslint-plugin-prettier": "^2.0.1"
    -  },
    -  "resolutions": {
    -    "**/@types/node": "^10.12.27",
    -    "@types/react": "^16.3.14",
    -    "**/@types/angular": "^1.6.50",
    -    "**/@types/jest": "^24.0.9",
    -    "**/@types/react-dom": "^16.0.5",
    -    "**/@types/react-router-dom": "^4.3.1",
    -    "eslint-utils": "^2.0.0",
    -    "json-schema": "^0.4.0",
    -    "**/@types/react": "^16.3.14",
    -    "ansi-regex": "^5.0.1"
    -  }
    -}
    diff --git a/workbench/public/ace-themes/sql_console.js b/workbench/public/ace-themes/sql_console.js
    deleted file mode 100644
    index c841db28da..0000000000
    --- a/workbench/public/ace-themes/sql_console.js
    +++ /dev/null
    @@ -1,16 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -import * as ace from 'brace';
    -
    -ace.define('ace/theme/sql_console', ['require', 'exports', 'module', 'ace/lib/dom'], function (acequire, exports, module) {
    -  exports.isDark = false;
    -  exports.cssClass = 'ace-sql-console';
    -  exports.cssText = require('../index.scss');
    -
    -  const dom = acequire('../lib/dom');
    -  dom.importCssString(exports.cssText, exports.cssClass);
    -});
    diff --git a/workbench/public/app.scss b/workbench/public/app.scss
    deleted file mode 100644
    index 1f9a0be739..0000000000
    --- a/workbench/public/app.scss
    +++ /dev/null
    @@ -1,103 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -@import '../node_modules/@elastic/eui/src/global_styling/variables/colors';
    -@import '../node_modules/@elastic/eui/src/global_styling/variables/size';
    -
    -.sql-console-query-container {
    -  padding: $euiSizeL;
    -}
    -
    -.sql-console-query-editor {
    -  .sql-query-panel-header {
    -    margin-top: -10px;
    -  }
    -  .resize-panel {
    -    resize: vertical;
    -    overflow: auto;
    -    cursor: row-resize;
    -  }
    -  .sql-editor-link:visited {
    -    background-color: rgb(255,255,255);
    -  }
    -}
    -
    -.sql-console-query-result{
    -  .query-result-panel-header {
    -    color: #3f3f3f;
    -  }
    -  scroll-behavior: smooth;
    -  .tabs-container {
    -    overflow: hidden;
    -    margin: 3px;
    -  }
    -  .table-name {
    -    color: #3f3f3f;
    -    margin-bottom: 7px;
    -  }
    -  .table-item-count {
    -    color: #8a9596;
    -    font-weight: 370;
    -  }
    -  .sideNav-table {
    -    border: solid 1px rgb(217, 217, 217);
    -    border-collapse: separate;
    -  }
    -  .search-panel {
    -    display: inline-flex;
    -    width: 70%;
    -  }
    -  .search-bar {
    -    width:80%;
    -  }
    -  .pagination-container {
    -    padding: 10px;
    -    padding-top: 15px;
    -    padding-bottom: 15px;
    -  }
    -
    -  .tab-arrow-down-container {
    -    padding: 25px;
    -    height: 56px;
    -    vertical-align: middle;
    -    color: rgb(0, 121, 165);
    -  }
    -
    -  .toggleContainer {
    -    margin: 20px;
    -  }
    -}
    -
    -.sql-console-results-container {
    -  margin-top: -17px;
    -}
    -
    -.expanded-row {
    -  /*background-color: rgb(232,243,246);
    -  /*background-color: rgb(245,247,250);*/
    -  border-collapse: separate;
    -}
    -
    -.expanded-row:hover {
    -  background-color: rgb(232,243,246);
    -}
    -
    -.no-background {
    -  background-color: rgba(0, 0, 0, 0);
    -}
    -
    -/* Message Tab */
    -.code-editor {
    -  color: #006BB4;
    -}
    -
    -.error-message {
    -  color: red;
    -}
    -
    -.successful-message{
    -  color: #006BB4;
    -}
    diff --git a/workbench/public/application.tsx b/workbench/public/application.tsx
    deleted file mode 100644
    index 8957c22b4e..0000000000
    --- a/workbench/public/application.tsx
    +++ /dev/null
    @@ -1,30 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -import React from 'react';
    -import ReactDOM from 'react-dom';
    -import { AppMountParameters, CoreStart } from '../../../src/core/public';
    -import { AppPluginStartDependencies } from './types';
    -import { WorkbenchApp } from './components/app';
    -
    -export const renderApp = (
    -  { notifications, http, chrome }: CoreStart,
    -  { navigation }: AppPluginStartDependencies,
    -  { appBasePath, element }: AppMountParameters
    -) => {
    -  ReactDOM.render(
-    <WorkbenchApp
-      basename={appBasePath}
-      notifications={notifications}
-      http={http}
-      navigation={navigation}
-      chrome={chrome}
-    />,
    -    element
    -  );
    -
    -  return () => ReactDOM.unmountComponentAtNode(element);
    -};
    diff --git a/workbench/public/components/Header/Header.test.tsx b/workbench/public/components/Header/Header.test.tsx
    deleted file mode 100644
    index 7fc409b067..0000000000
    --- a/workbench/public/components/Header/Header.test.tsx
    +++ /dev/null
    @@ -1,18 +0,0 @@
    -/*
    - * Copyright OpenSearch Contributors
    - * SPDX-License-Identifier: Apache-2.0
    - */
    -
    -
    -import React from "react";
    -import "@testing-library/jest-dom/extend-expect";
    -import { render } from "@testing-library/react";
    -import Header from "./Header";
    -
    -
    -describe("
    spec", () => { - it("renders the component", () => { - render(
    ); - expect(document.body.children[0]).toMatchSnapshot(); - }); -}); diff --git a/workbench/public/components/Header/Header.tsx b/workbench/public/components/Header/Header.tsx deleted file mode 100644 index beb82c1f2a..0000000000 --- a/workbench/public/components/Header/Header.tsx +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -import React from 'react'; -import { EuiHorizontalRule } from '@elastic/eui'; - -const Header = () => { - return ( -
    - -
    - ); -}; - -export default Header; diff --git a/workbench/public/components/Header/__snapshots__/Header.test.tsx.snap b/workbench/public/components/Header/__snapshots__/Header.test.tsx.snap deleted file mode 100644 index 34fa845af8..0000000000 --- a/workbench/public/components/Header/__snapshots__/Header.test.tsx.snap +++ /dev/null @@ -1,11 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`
    spec renders the component 1`] = ` -
    -
    -
    -
    -
    -`; diff --git a/workbench/public/components/Main/__snapshots__/main.test.tsx.snap b/workbench/public/components/Main/__snapshots__/main.test.tsx.snap deleted file mode 100644 index defafbdc77..0000000000 --- a/workbench/public/components/Main/__snapshots__/main.test.tsx.snap +++ /dev/null @@ -1,2688 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`
    spec click clear button 1`] = ` -
    -
    -
    -
    -
    -
    -

    - Query Workbench -

    -
    -
    -
    - - query-language-swtich - -
    - - -
    -
    -
    - -
    -
    -
    -
    -
    -
    -

    - Query editor -

    -
    -
    -
    - -
    -