diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index b7c49490ab1..70baea28baa 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -99,6 +99,19 @@ jobs: 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip hdf5 shell: pwsh + - name: Publish msi binary (Windows) + id: publish-ctest-msi-binary + run: | + mkdir "${{ runner.workspace }}/build" + mkdir "${{ runner.workspace }}/build/hdf5" + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC/README.md -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC/* -Destination ${{ runner.workspace }}/build/hdf5/ -Include *.msi + cd "${{ runner.workspace }}/build" + 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip hdf5 + shell: pwsh + - name: List files in the space (Windows) run: | Get-ChildItem -Path ${{ github.workspace }} @@ -113,6 +126,13 @@ jobs: path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + - name: Save published msi binary (Windows) + uses: actions/upload-artifact@v4 + with: + name: msi-vs2022_cl-binary + path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip + if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + build_and_test_linux: # Linux (Ubuntu) w/ gcc + CMake # @@ -470,6 +490,19 @@ jobs: 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip hdf5 shell: pwsh + - name: Publish msi binary (Windows_intel) + id: publish-ctest-msi-binary + run: | + mkdir "${{ runner.workspace }}/build" + mkdir "${{ runner.workspace }}/build/hdf5" + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel/README.md -Destination ${{ runner.workspace }}/build/hdf5/ + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel/* -Destination ${{ runner.workspace }}/build/hdf5/ -Include *.msi + cd "${{ runner.workspace }}/build" + 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip hdf5 + shell: pwsh + - name: List files in the space (Windows_intel) run: | Get-ChildItem -Path ${{ github.workspace }} @@ -484,6 +517,13 @@ jobs: path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` + - name: Save published msi binary (Windows_intel) + uses: actions/upload-artifact@v4 + with: + name: msi-vs2022_intel-binary + path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip + if-no-files-found: error # 'warn' or 'ignore' are also available, 
defaults to `warn` + build_and_test_linux_intel: # Linux (Ubuntu) w/ OneAPI + CMake # diff --git a/.github/workflows/main-cmake-par.yml b/.github/workflows/main-cmake-par.yml index e517c79f212..9a87dead10d 100644 --- a/.github/workflows/main-cmake-par.yml +++ b/.github/workflows/main-cmake-par.yml @@ -42,6 +42,7 @@ jobs: mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" CC=mpicc cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ + --log-level=VERBOSE \ -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ -DBUILD_SHARED_LIBS=ON \ -DHDF5_ENABLE_ALL_WARNINGS=ON \ diff --git a/.github/workflows/main-cmake-spc.yml b/.github/workflows/main-cmake-spc.yml index fc21da60435..df9ccff495c 100644 --- a/.github/workflows/main-cmake-spc.yml +++ b/.github/workflows/main-cmake-spc.yml @@ -227,6 +227,7 @@ jobs: cd "${{ runner.workspace }}/build" cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ -G Ninja \ + --log-level=VERBOSE \ -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=ON \ -DHDF5_ENABLE_ALL_WARNINGS=ON \ @@ -235,12 +236,12 @@ jobs: -DHDF5_BUILD_FORTRAN=ON \ -DHDF5_BUILD_JAVA=ON \ -DHDF5_BUILD_DOC=OFF \ - -DLIBAEC_USE_LOCALCONTENT=ON \ - -DZLIB_USE_LOCALCONTENT=ON \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ -DHDF5_ENABLE_MIRROR_VFD:BOOL=ON \ -DHDF5_ENABLE_DIRECT_VFD:BOOL=ON \ -DHDF5_ENABLE_ROS3_VFD:BOOL=ON \ - -DHDF5_USE_ZLIBNG:BOOL=ON \ + -DHDF5_USE_ZLIB_NG:BOOL=ON \ $GITHUB_WORKSPACE shell: bash diff --git a/.github/workflows/publish-branch.yml b/.github/workflows/publish-branch.yml new file mode 100644 index 00000000000..1ee8ee22bea --- /dev/null +++ b/.github/workflows/publish-branch.yml @@ -0,0 +1,44 @@ +name: hdf5 publish files in HDF5 folder from branch to S3 + +# Triggers the workflow on demand +on: + workflow_dispatch: + inputs: + local_dir: + description: 'HDF5 local directory' + type: string + required: true + target_dir: + description: 'hdf5 target bucket directory' + type: string + required: true + permissions: + contents: read + +jobs: + publish-tag: + runs-on: ubuntu-latest + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - name: Get Sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + fetch-depth: 0 + ref: '${{ github.head_ref || github.ref_name }}' + + - name: List files for the space + run: | + ls -l ${{ github.workspace }} + ls ${{ github.workspace }}/HDF5 + + - name: Setup AWS CLI + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_REGION }} + + - name: Sync dir to S3 bucket + run: | + aws s3 sync ./HDF5/${{ inputs.local_dir }} s3://${{ secrets.AWS_S3_BUCKET }}/${{ vars.TARGET_PATH }}/${{ inputs.target_dir }} + diff --git a/.github/workflows/publish-release.yml b/.github/workflows/publish-release.yml index 57922e80c5e..d7c7fd091d1 100644 --- a/.github/workflows/publish-release.yml +++ b/.github/workflows/publish-release.yml @@ -47,9 +47,9 @@ jobs: aws s3 sync ./HDF5 s3://${{ secrets.AWS_S3_BUCKET }}/${{ vars.TARGET_PATH }}/${{ inputs.target_dir }}/downloads --delete - name: Uncompress source (Linux) - run: tar -zxvf ${{ github.workspace }}/${{ inputs.use_hdf }}.doxygen.tar.gz + run: tar -zxvf ${{ github.workspace }}/HDF5/${{ inputs.use_hdf }}.doxygen.tar.gz - name: Sync userguide to S3 bucket run: | - aws s3 sync ./doxygen s3://${{ secrets.AWS_S3_BUCKET }}/${{ vars.TARGET_PATH }}/${{ 
inputs.target_dir }}/documentation --delete + aws s3 sync ./HDF5/doxygen s3://${{ secrets.AWS_S3_BUCKET }}/${{ vars.TARGET_PATH }}/${{ inputs.target_dir }}/documentation/doxygen --delete diff --git a/.github/workflows/release-files.yml b/.github/workflows/release-files.yml index 55b76894470..24c40786aec 100644 --- a/.github/workflows/release-files.yml +++ b/.github/workflows/release-files.yml @@ -100,6 +100,12 @@ jobs: name: zip-vs2022_cl-binary path: ${{ github.workspace }} + - name: Get published msi binary (Windows) + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + with: + name: msi-vs2022_cl-binary + path: ${{ github.workspace }} + - name: Get published binary (MacOS) uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: @@ -136,6 +142,12 @@ jobs: name: zip-vs2022_intel-binary path: ${{ github.workspace }} + - name: Get published msi binary (Windows_intel) + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + with: + name: msi-vs2022_intel-binary + path: ${{ github.workspace }} + - name: Get published binary (Linux_intel) uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: @@ -173,8 +185,10 @@ jobs: sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - name: Create sha256 sums for files for nonversioned files @@ -216,8 +230,10 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` @@ -242,8 +258,10 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ 
steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` diff --git a/.github/workflows/remove-files.yml b/.github/workflows/remove-files.yml index 488515f0d0c..334ecb0c45b 100644 --- a/.github/workflows/remove-files.yml +++ b/.github/workflows/remove-files.yml @@ -56,5 +56,7 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip diff --git a/.github/workflows/vol_adios2.yml b/.github/workflows/vol_adios2.yml index c4bd2d009c5..d05d35427d5 100644 --- a/.github/workflows/vol_adios2.yml +++ b/.github/workflows/vol_adios2.yml @@ -28,7 +28,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 - name: Configure HDF5 diff --git a/.github/workflows/vol_async.yml b/.github/workflows/vol_async.yml index 371db471828..1d6861ac503 100644 --- a/.github/workflows/vol_async.yml +++ b/.github/workflows/vol_async.yml @@ -24,7 +24,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 - name: Checkout Argobots diff --git a/.github/workflows/vol_cache.yml b/.github/workflows/vol_cache.yml index 3c55afdde8c..70f325ca2dd 100644 --- a/.github/workflows/vol_cache.yml +++ b/.github/workflows/vol_cache.yml @@ -37,7 +37,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 - name: Checkout Argobots diff --git a/.github/workflows/vol_ext_passthru.yml b/.github/workflows/vol_ext_passthru.yml index a276dc3ae88..b9579063638 100644 --- a/.github/workflows/vol_ext_passthru.yml +++ b/.github/workflows/vol_ext_passthru.yml @@ -24,7 +24,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 - name: Checkout vol-external-passthrough diff --git a/.github/workflows/vol_log.yml b/.github/workflows/vol_log.yml index f8c7ce73ea1..a7becfd4830 100644 --- a/.github/workflows/vol_log.yml +++ b/.github/workflows/vol_log.yml @@ -25,7 +25,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 # Log-based VOL currently doesn't have CMake support diff --git a/.github/workflows/vol_rest.yml b/.github/workflows/vol_rest.yml index 0a54890d605..487f854e9f1 100644 --- a/.github/workflows/vol_rest.yml +++ b/.github/workflows/vol_rest.yml @@ -44,7 +44,6 @@ jobs: - name: Checkout HDF5 uses: actions/checkout@v4.1.7 with: - repository: HDFGroup/hdf5 path: hdf5 - name: Configure HDF5 with REST VOL connector diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake index cccf80b916a..52d65e59e47 100644 --- a/CMakeFilters.cmake +++ b/CMakeFilters.cmake @@ -10,7 +10,8 @@ # help@hdfgroup.org. 
# option (HDF5_USE_ZLIB_NG "Use zlib-ng library as zlib library" OFF) -option (HDF5_USE_LIBAEC_STATIC "Use static AEC library" OFF) +option (HDF5_USE_ZLIB_STATIC "Find static zlib library" OFF) +option (HDF5_USE_LIBAEC_STATIC "Find static AEC library" OFF) option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" OFF) option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" OFF) @@ -86,8 +87,16 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) set (PACKAGE_NAME ${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT}) endif () set(ZLIB_FOUND FALSE) - find_package (ZLIB NAMES ${PACKAGE_NAME} COMPONENTS static shared) + if (HDF5_USE_ZLIB_STATIC) + set(ZLIB_SEACH_TYPE static) + else () + set(ZLIB_SEACH_TYPE shared) + endif () + find_package (ZLIB NAMES ${PACKAGE_NAME} COMPONENTS ${ZLIB_SEACH_TYPE}) if (NOT ZLIB_FOUND) + if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + set(ZLIB_USE_STATIC_LIBS ${HDF5_USE_ZLIB_STATIC}) + endif() find_package (ZLIB) # Legacy find endif () set(H5_ZLIB_FOUND ${ZLIB_FOUND}) @@ -95,7 +104,12 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) set (H5_ZLIB_HEADER "zlib.h") set (H5_ZLIB_INCLUDE_DIR_GEN ${ZLIB_INCLUDE_DIR}) set (H5_ZLIB_INCLUDE_DIRS ${H5_ZLIB_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIR}) - set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_LIBRARIES}) + # The FindZLIB.cmake module does not set an OUTPUT_NAME + # on the target. The target returned is: ZLIB::ZLIB + get_filename_component (libname ${ZLIB_LIBRARIES} NAME_WLE) + string (REGEX REPLACE "^lib" "" libname ${libname}) + set_target_properties (ZLIB::ZLIB PROPERTIES OUTPUT_NAME zlib-static) + set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ZLIB::ZLIB) endif () else () if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") @@ -131,9 +145,14 @@ option (HDF5_ENABLE_SZIP_SUPPORT "Use SZip Filter" ON) if (HDF5_ENABLE_SZIP_SUPPORT) option (HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON) if (NOT SZIP_USE_EXTERNAL) + if (HDF5_USE_LIBAEC_STATIC) + set(LIBAEC_SEACH_TYPE static) + else () + set(LIBAEC_SEACH_TYPE shared) + endif () set(libaec_USE_STATIC_LIBS ${HDF5_USE_LIBAEC_STATIC}) set(SZIP_FOUND FALSE) - find_package (SZIP NAMES ${LIBAEC_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS static shared) + find_package (SZIP NAMES ${LIBAEC_PACKAGE_NAME}${HDF_PACKAGE_EXT} COMPONENTS ${LIBAEC_SEACH_TYPE}) if (NOT SZIP_FOUND) find_package (SZIP) # Legacy find endif () diff --git a/CMakeLists.txt b/CMakeLists.txt index 9bb22ca51dd..615c76047bd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,6 +44,17 @@ if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR) ) endif () +# Whether the most recently called project() command, in the current scope or above, +# was in the top level CMakeLists.txt file. 
+if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.21.0") + if(NOT PROJECT_IS_TOP_LEVEL) + set (HDF5_EXTERNALLY_CONFIGURED 1) + endif() +else() + if (NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) + set (HDF5_EXTERNALLY_CONFIGURED 1) + endif() +endif() #----------------------------------------------------------------------------- # Instructions for use : Sub-Project Build # diff --git a/CMakePresets.json b/CMakePresets.json index fe13960bbf9..ac436e2c9e6 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -36,7 +36,9 @@ "HDF5_PACKAGE_EXTLIBS": "ON", "HDF5_USE_ZLIB_NG": "OFF", "ZLIB_USE_LOCALCONTENT": "OFF", - "LIBAEC_USE_LOCALCONTENT": "OFF" + "LIBAEC_USE_LOCALCONTENT": "OFF", + "HDF5_USE_ZLIB_STATIC": "ON", + "HDF5_USE_LIBAEC_STATIC": "ON" } }, { diff --git a/HDF5Examples/config/cmake/HDFExampleMacros.cmake b/HDF5Examples/config/cmake/HDFExampleMacros.cmake index c540918b9f1..c5e7b70c6bb 100644 --- a/HDF5Examples/config/cmake/HDFExampleMacros.cmake +++ b/HDF5Examples/config/cmake/HDFExampleMacros.cmake @@ -227,7 +227,20 @@ macro (HDF5_SUPPORT) if (HDF_BUILD_JAVA AND HDF5_Java_FOUND) if (${HDF5_BUILD_JAVA}) set (CMAKE_JAVA_INCLUDE_PATH "${CMAKE_JAVA_INCLUDE_PATH};${HDF5_JAVA_INCLUDE_DIRS}") - set (H5EX_JAVA_LIBRARY ${HDF5_JAVA_LIBRARY}) + if (HDF5_BUILD_MODE) + string(TOUPPER "${HDF5_BUILD_MODE}" UPPER_BUILD_TYPE) + get_target_property(libsoname ${HDF5_JAVA_LIBRARY} IMPORTED_SONAME_${UPPER_BUILD_TYPE}) + elseif (HDF_CFG_NAME) + string(TOUPPER "${HDF_CFG_NAME}" UPPER_BUILD_TYPE) + get_target_property(libsoname ${HDF5_JAVA_LIBRARY} IMPORTED_SONAME_${UPPER_BUILD_TYPE}) + else() + get_target_property(libsoname ${HDF5_JAVA_LIBRARY} IMPORTED_SONAME) + endif() + get_filename_component (libname ${libsoname} NAME_WE) + string (REGEX REPLACE "^lib" "" libname ${libname}) + message (STATUS "HDF5 lib:${HDF5_JAVA_LIBRARY} OR ${libsoname} OR ${libname}") + set (H5EX_JAVA_LIBRARY ${libname}) +# set (H5EX_JAVA_LIBRARY $) set (H5EX_JAVA_LIBRARIES ${HDF5_JAVA_LIBRARY}) message (STATUS "HDF5 lib:${H5EX_JAVA_LIBRARY} jars:${HDF5_JAVA_INCLUDE_DIRS}}") else () diff --git a/README.md b/README.md index fcb0882166d..f5d6a54c695 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ are tentative. | 1.14.5 | oss-fuzz fixes, ros3 VFD improvements | | 1.14.6 | Last maintenance release of 1.14 | | 1.16.0 | Complex number support, updated library defaults (cache sizes, etc.) | -| 2.0.0 | Multi-threaded HDF5, crashproofing / metadata journaling, Full (VFD) SWMR, encryption, digital signatures, semantic versioning | +| 2.0.0 | Multi-threaded HDF5, crashproofing / metadata journaling, Full (VFD) SWMR, encryption, digital signatures, sparse datasets, improved storage for variable-length datatypes, better Unicode support (especially on Windows), semantic versioning | Some HDF5 2.0.0 features listed here may be released in a 1.16.x release. 
diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index b3e7f6ed19b..4e56a1d5383 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -52,6 +52,7 @@ set (ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" C set (ZLIB_USE_LOCALCONTENT ON CACHE BOOL "Use local file for ZLIB FetchContent" FORCE) set (ZLIB_GIT_URL "https://github.com/madler/zlib.git" CACHE STRING "Use ZLIB from GitHub repository" FORCE) set (ZLIB_GIT_BRANCH "develop" CACHE STRING "" FORCE) +set (HDF5_USE_ZLIB_STATIC ON CACHE BOOL "Use static zlib library" FORCE) set (ZLIBNG_PACKAGE_NAME "zlib-ng" CACHE STRING "Name of ZLIBNG package" FORCE) set (ZLIBNG_TGZ_NAME "2.1.6.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE) @@ -65,6 +66,7 @@ set (LIBAEC_TGZ_ORIGPATH "https://github.com/MathisRosenhauer/libaec/releases/do set (LIBAEC_USE_LOCALCONTENT ON CACHE BOOL "Use local file for LIBAEC FetchContent" FORCE) set (LIBAEC_GIT_URL "https://github.com/MathisRosenhauer/libaec.git" CACHE STRING "Use LIBAEC from GitHub repository" FORCE) set (LIBAEC_GIT_BRANCH "v1.1.3" CACHE STRING "" FORCE) +set (HDF5_USE_LIBAEC_STATIC ON CACHE BOOL "Use static AEC library" FORCE) ######################## # API test options diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index e4ba904497b..5ee4d85e4d3 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -29,6 +29,8 @@ set (${HDF5_PACKAGE_NAME}_VALID_COMPONENTS Tools ) +set (${HDF5_PACKAGE_NAME}_BUILD_MODE @HDF_CFG_NAME@) + #----------------------------------------------------------------------------- # User Options #----------------------------------------------------------------------------- diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index c350e4f49b9..8a553a1a194 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -943,6 +943,7 @@ else ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON +HDF5_USE_ZLIB_STATIC "Find static zlib library" OFF SZIP_USE_EXTERNAL "Use External Library Building for SZIP else search" OFF if (HDF5_ENABLE_SZIP_SUPPORT) @@ -950,6 +951,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.1.3" LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-1.1.3.tar.gz" LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" ON +HDF5_USE_LIBAEC_STATIC "Find static AEC library" OFF PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS else search" OFF if (WINDOWS) diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index f3f5002c9db..d70881df143 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -1758,6 +1758,19 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Fixed usage issue with FindZLIB.cmake module + + When building HDF5 with CMake and relying on the FindZLIB.cmake module, + the Find module would correctly find the ZLIB library but not set an OUTPUT_NAME + on the target. Also, the target returned, ZLIB::ZLIB, was not in the ZLIB_LIBRARIES + variable. This caused issues when requesting the OUTPUT_NAME of the target in + the pkg-config settings. 
+ + Similar to HDF5_USE_LIBAEC_STATIC, "Find static AEC library", option, we added + a new option, HDF5_USE_ZLIB_STATIC, "Find static zlib library". These options + allow a user to specify whether to use a static or shared version of the compression + library in a find_package call. + - Corrected usage of FetchContent in the HDFLibMacros.cmake file. CMake version 3.30 changed the behavior of the FetchContent module to deprecate diff --git a/src/H5D.c b/src/H5D.c index f76584dfb54..316d45deee9 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -1888,7 +1888,7 @@ H5Dvlen_get_buf_size(hid_t dataset_id, hid_t type_id, hid_t space_id, hsize_t *s FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(dataset_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dataset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (H5I_DATATYPE != H5I_get_type(type_id)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid datatype identifier"); diff --git a/src/H5F.c b/src/H5F.c index 5dcda189241..390f667648b 100644 --- a/src/H5F.c +++ b/src/H5F.c @@ -118,7 +118,7 @@ H5Fget_create_plist(hid_t file_id) FUNC_ENTER_API(H5I_INVALID_HID) /* check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -164,7 +164,7 @@ H5Fget_access_plist(hid_t file_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -439,7 +439,7 @@ H5Fget_vfd_handle(hid_t file_id, hid_t fapl_id, void **file_handle /*out*/) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file handle pointer"); /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1555,7 +1555,7 @@ H5Fget_intent(hid_t file_id, unsigned *intent_flags /*out*/) H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* Get the internal file structure */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1594,7 +1594,7 @@ H5Fget_fileno(hid_t file_id, unsigned long *fnumber /*out*/) H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* Get the internal file structure */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1631,7 +1631,7 @@ H5Fget_freespace(hid_t file_id) FUNC_ENTER_API((-1)) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1789,7 +1789,7 @@ 
H5Fget_mdc_config(hid_t file_id, H5AC_cache_config_t *config /*out*/) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad config ptr"); /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1827,7 +1827,7 @@ H5Fset_mdc_config(hid_t file_id, const H5AC_cache_config_t *config_ptr) FUNC_ENTER_API(FAIL) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1959,7 +1959,7 @@ H5Freset_mdc_hit_rate_stats(hid_t file_id) FUNC_ENTER_API(FAIL) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(file_id))) + if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 49fd484a4eb..642da8d0b66 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -423,7 +423,7 @@ H5FD__family_fapl_get(H5FD_t *_file) FUNC_ENTER_PACKAGE if (NULL == (fa = (H5FD_family_fapl_t *)H5MM_calloc(sizeof(H5FD_family_fapl_t)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "memory allocation failed"); fa->memb_size = file->memb_size; if (NULL == (plist = (H5P_genplist_t *)H5I_object(file->memb_fapl_id))) @@ -463,7 +463,7 @@ H5FD__family_fapl_copy(const void *_old_fa) FUNC_ENTER_PACKAGE if (NULL == (new_fa = (H5FD_family_fapl_t *)H5MM_malloc(sizeof(H5FD_family_fapl_t)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "memory allocation failed"); /* Copy the fields of the structure */ H5MM_memcpy(new_fa, old_fa, sizeof(H5FD_family_fapl_t)); @@ -671,7 +671,7 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad /* Initialize file from file access properties */ if (NULL == (file = (H5FD_family_t *)H5MM_calloc(sizeof(H5FD_family_t)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate file struct"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "unable to allocate file struct"); if (H5P_FILE_ACCESS_DEFAULT == fapl_id) { H5FD_family_fapl_t default_fa; @@ -760,7 +760,7 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad assert(n > 0); if (NULL == (x = (H5FD_t **)H5MM_realloc(file->memb, n * sizeof(H5FD_t *)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to reallocate members"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "unable to reallocate members"); file->amembs = n; file->memb = x; } /* end if */ @@ -770,18 +770,23 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad * otherwise an open failure means that we've reached the last member. * Allow H5F_ACC_CREAT only on the first family member. */ - H5E_BEGIN_TRY - { - file->memb[file->nmembs] = - H5FDopen(memb_name, (0 == file->nmembs ? flags : t_flags), file->memb_fapl_id, HADDR_UNDEF); + if (0 == file->nmembs) { + if (NULL == (file->memb[file->nmembs] = H5FDopen(memb_name, (0 == file->nmembs ? 
flags : t_flags), + file->memb_fapl_id, HADDR_UNDEF))) + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, "unable to open member file"); } - H5E_END_TRY - if (!file->memb[file->nmembs]) { - if (0 == file->nmembs) - HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to open member file"); - H5E_clear_stack(); - break; + else { + H5E_PAUSE_ERRORS + { + file->memb[file->nmembs] = H5FDopen(memb_name, (0 == file->nmembs ? flags : t_flags), + file->memb_fapl_id, HADDR_UNDEF); + } + H5E_RESUME_ERRORS + + if (!file->memb[file->nmembs]) + break; } + file->nmembs++; } @@ -1005,7 +1010,7 @@ H5FD__family_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t abs_eoa) H5FD_t **x = (H5FD_t **)H5MM_realloc(file->memb, n * sizeof(H5FD_t *)); if (!x) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory block"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "unable to allocate memory block"); file->amembs = n; file->memb = x; file->nmembs = u; @@ -1015,14 +1020,9 @@ H5FD__family_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t abs_eoa) if (u >= file->nmembs || !file->memb[u]) { file->nmembs = MAX(file->nmembs, u + 1); snprintf(memb_name, H5FD_FAM_MEMB_NAME_BUF_SIZE, file->name, u); - H5E_BEGIN_TRY - { - H5_CHECK_OVERFLOW(file->memb_size, hsize_t, haddr_t); - file->memb[u] = H5FDopen(memb_name, file->flags | H5F_ACC_CREAT, file->memb_fapl_id, - (haddr_t)file->memb_size); - } - H5E_END_TRY - if (NULL == file->memb[u]) + H5_CHECK_OVERFLOW(file->memb_size, hsize_t, haddr_t); + if (NULL == (file->memb[u] = H5FDopen(memb_name, file->flags | H5F_ACC_CREAT, file->memb_fapl_id, + (haddr_t)file->memb_size))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to open member file"); } /* end if */ @@ -1082,7 +1082,7 @@ H5FD__family_get_eof(const H5FD_t *_file, H5FD_mem_t type) * loop with i==0. */ assert(file->nmembs > 0); - for (i = (int)file->nmembs - 1; i >= 0; --i) { + for (i = (int)(file->nmembs - 1); i >= 0; --i) { if ((eof = H5FD_get_eof(file->memb[i], type)) != 0) break; if (0 == i) @@ -1418,10 +1418,9 @@ H5FD__family_delete(const char *filename, hid_t fapl_id) bool default_config = false; hid_t memb_fapl_id = H5I_INVALID_HID; unsigned current_member; - char *member_name = NULL; - char *temp = NULL; - herr_t delete_error = FAIL; - herr_t ret_value = SUCCEED; + char *member_name = NULL; + char *temp = NULL; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -1488,18 +1487,22 @@ H5FD__family_delete(const char *filename, hid_t fapl_id) * Note that this means that any missing files in the family will leave * undeleted members behind. 
*/ - H5E_BEGIN_TRY - { - delete_error = H5FD_delete(member_name, memb_fapl_id); - } - H5E_END_TRY - if (FAIL == delete_error) { - if (0 == current_member) + if (0 == current_member) { + if (H5FD_delete(member_name, memb_fapl_id) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTDELETEFILE, FAIL, "unable to delete member file"); - else - H5E_clear_stack(); - break; } + else { + herr_t delete_error; + + H5E_PAUSE_ERRORS + { + delete_error = H5FD_delete(member_name, memb_fapl_id); + } + H5E_RESUME_ERRORS + if (delete_error < 0) + break; + } + current_member++; } /* end while */ diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 550560d9387..6c4e7d57ef2 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -117,7 +117,7 @@ H5F__super_ext_create(H5F_t *f, H5O_loc_t *ext_ptr) */ H5O_loc_reset(ext_ptr); if (H5O_create(f, (size_t)0, (size_t)1, H5P_GROUP_CREATE_DEFAULT, ext_ptr) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "unable to create superblock extension"); + HGOTO_ERROR(H5E_FILE, H5E_CANTCREATE, FAIL, "unable to create superblock extension"); /* Record the address of the superblock extension */ f->shared->sblock->ext_addr = ext_ptr->addr; @@ -156,7 +156,7 @@ H5F__super_ext_open(H5F_t *f, haddr_t ext_addr, H5O_loc_t *ext_ptr) /* Open the superblock extension object header */ if (H5O_open(ext_ptr) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTOPENOBJ, FAIL, "unable to open superblock extension"); + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open superblock extension"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -311,7 +311,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) H5P_genplist_t *c_plist; /* File creation property list */ H5FD_t *file; /* File driver pointer */ unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */ - haddr_t super_addr; /* Absolute address of superblock */ + haddr_t super_addr = HADDR_UNDEF; /* Absolute address of superblock */ haddr_t eof; /* End of file address */ unsigned rw_flags; /* Read/write permissions for file */ bool skip_eof_check = false; /* Whether to skip checking the EOF value */ @@ -339,7 +339,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) /* If we are an MPI application with at least two processes, the * following superblock signature location optimization is applicable. * - * Note:: For parallel applications which don't setup for using the + * Note: For parallel applications which don't setup for using the * HDF5 MPIO driver, we will arrive here with mpi_size == 1. 
* This occurs because of the variable initialization (above) and the * fact that we have skipped actually calling MPI functions to determine @@ -361,19 +361,13 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) /* Search for the file's signature only with rank 0 process */ if (0 == mpi_rank) { - herr_t status; - /* Try detecting file's signature */ /* (Don't leave before Bcast, to avoid hang on error) */ - H5E_BEGIN_TRY + H5E_PAUSE_ERRORS { - status = H5FD_locate_signature(file, &super_addr); + H5FD_locate_signature(file, &super_addr); } - H5E_END_TRY - - /* Set superblock address to undefined on error */ - if (status < 0) - super_addr = HADDR_UNDEF; + H5E_RESUME_ERRORS } /* end if */ /* Broadcast superblock address to other processes */ @@ -579,7 +573,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) /* Check if this private property exists in fapl */ if (H5P_exist_plist(fa_plist, H5F_ACS_SKIP_EOF_CHECK_NAME) > 0) if (H5P_get(fa_plist, H5F_ACS_SKIP_EOF_CHECK_NAME, &skip_eof_check) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get skip EOF check value"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get skip EOF check value"); if (H5F_INTENT(f) & H5F_ACC_SWMR_READ) { /* @@ -760,7 +754,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, bool initial_read) */ if (H5P_exist_plist(fa_plist, H5F_ACS_NULL_FSM_ADDR_NAME) > 0) if (H5P_get(fa_plist, H5F_ACS_NULL_FSM_ADDR_NAME, &f->shared->null_fsm_addr) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get clearance for persisting fsm addr"); /* Retrieve the 'file space info' structure */ @@ -1091,7 +1085,7 @@ H5F__super_init(H5F_t *f) /* Allocate space for the superblock */ if (NULL == (sblock = H5FL_CALLOC(H5F_super_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); + HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, FAIL, "memory allocation failed"); /* Initialize various address information */ sblock->base_addr = HADDR_UNDEF; @@ -1101,15 +1095,15 @@ H5F__super_init(H5F_t *f) /* Get the shared file creation property list */ if (NULL == (plist = (H5P_genplist_t *)H5I_object(f->shared->fcpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list"); + HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "not a property list"); /* Initialize sym_leaf_k */ if (H5P_get(plist, H5F_CRT_SYM_LEAF_NAME, &sblock->sym_leaf_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get byte number for object size"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get byte number for object size"); /* Initialize btree_k */ if (H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes"); /* Check for non-default free-space settings */ if (!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF && @@ -1184,9 +1178,9 @@ H5F__super_init(H5F_t *f) H5P_genplist_t *c_plist; /* Property list */ if (NULL == (c_plist = (H5P_genplist_t *)H5I_object(f->shared->fcpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not property list"); + HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "not property list"); if (H5P_set(c_plist, H5F_CRT_SUPER_VERS_NAME, &super_vers) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set superblock version"); + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set superblock version"); } /* end if */ if 
(H5FD_set_paged_aggr(f->shared->lf, (bool)H5F_PAGED_AGGR(f)) < 0) @@ -1271,7 +1265,7 @@ H5F__super_init(H5F_t *f) /* Insert superblock into cache, pinned */ if (H5AC_insert_entry(f, H5AC_SUPERBLOCK, (haddr_t)0, sblock, H5AC__PIN_ENTRY_FLAG | H5AC__FLUSH_LAST_FLAG | H5AC__FLUSH_COLLECTIVELY_FLAG) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "can't add superblock to cache"); + HGOTO_ERROR(H5E_FILE, H5E_CANTINS, FAIL, "can't add superblock to cache"); sblock_in_cache = true; /* Keep a copy of the superblock info */ @@ -1279,7 +1273,7 @@ H5F__super_init(H5F_t *f) /* Allocate space for the superblock */ if (HADDR_UNDEF == (superblock_addr = H5MF_alloc(f, H5FD_MEM_SUPER, superblock_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for superblock"); + HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, FAIL, "file allocation failed for superblock"); /* set the drvinfo filed to NULL -- will overwrite this later if needed */ f->shared->drvinfo = NULL; @@ -1719,7 +1713,7 @@ H5F__super_ext_write_msg(H5F_t *f, unsigned id, void *mesg, bool may_create, uns /* Check for creating vs. writing */ if (may_create) { if (status) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should not exist"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "Message should not exist"); /* Create the message with ID in the superblock extension */ if (H5O_msg_create(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg) < 0) @@ -1727,7 +1721,7 @@ H5F__super_ext_write_msg(H5F_t *f, unsigned id, void *mesg, bool may_create, uns } /* end if */ else { if (!status) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should exist"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "Message should exist"); /* Update the message with ID in the superblock extension */ if (H5O_msg_write(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg) < 0) @@ -1784,27 +1778,27 @@ H5F__super_ext_remove_msg(H5F_t *f, unsigned id) /* Check if message with ID exists in the object header */ if ((status = H5O_msg_exists(&ext_loc, id)) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to check object header for message"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to check object header for message"); else if (status) { /* message exists */ H5O_hdr_info_t hdr_info; /* Object header info for superblock extension */ /* Remove the message */ if (H5O_msg_remove(&ext_loc, id, H5O_ALL, true) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "unable to delete free-space manager info message"); + HGOTO_ERROR(H5E_FILE, H5E_CANTDELETE, FAIL, "unable to delete free-space manager info message"); /* Get info for the superblock extension's object header */ if (H5O_get_hdr_info(&ext_loc, &hdr_info) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to retrieve superblock extension info"); + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to retrieve superblock extension info"); /* If the object header is an empty base chunk, remove superblock extension */ if (hdr_info.nchunks == 1) { if ((null_count = H5O_msg_count(&ext_loc, H5O_NULL_ID)) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTCOUNT, FAIL, "unable to count messages"); + HGOTO_ERROR(H5E_FILE, H5E_CANTCOUNT, FAIL, "unable to count messages"); else if ((unsigned)null_count == hdr_info.nmesgs) { assert(H5_addr_defined(ext_loc.addr)); if (H5O_delete(f, ext_loc.addr) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTCOUNT, FAIL, "unable to count messages"); + HGOTO_ERROR(H5E_FILE, H5E_CANTCOUNT, FAIL, "unable to count messages"); f->shared->sblock->ext_addr = 
HADDR_UNDEF; } /* end else-if */ } /* end if */ diff --git a/src/H5SM.c b/src/H5SM.c index 94f2ea534fc..1c2d4e6caa7 100644 --- a/src/H5SM.c +++ b/src/H5SM.c @@ -169,7 +169,7 @@ H5SM_init(H5F_t *f, H5P_genplist_t *fc_plist, const H5O_loc_t *ext_loc) /* Allocate the SOHM indexes as an array. */ if (NULL == (table->indexes = (H5SM_index_header_t *)H5FL_ARR_MALLOC(H5SM_index_header_t, (size_t)table->num_indexes))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "memory allocation failed for SOHM indexes"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "memory allocation failed for SOHM indexes"); /* Initialize all of the indexes, but don't allocate space for them to * hold messages until we actually need to write to them. @@ -195,7 +195,7 @@ H5SM_init(H5F_t *f, H5P_genplist_t *fc_plist, const H5O_loc_t *ext_loc) /* Allocate space for the table on disk */ if (HADDR_UNDEF == (table_addr = H5MF_alloc(f, H5FD_MEM_SOHM_TABLE, (hsize_t)table->table_size))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "file allocation failed for SOHM table"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "file allocation failed for SOHM table"); /* Cache the new table */ if (H5AC_insert_entry(f, H5AC_SOHM_TABLE, table_addr, table, H5AC__NO_FLAGS_SET) < 0) @@ -288,12 +288,12 @@ H5SM__type_to_flag(unsigned type_id, unsigned *type_flag) * *------------------------------------------------------------------------- */ -ssize_t -H5SM__get_index(const H5SM_master_table_t *table, unsigned type_id) +herr_t +H5SM__get_index(const H5SM_master_table_t *table, unsigned type_id, ssize_t *idx) { - size_t x; unsigned type_flag; - ssize_t ret_value = FAIL; + ssize_t found_index = -1; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -304,13 +304,15 @@ H5SM__get_index(const H5SM_master_table_t *table, unsigned type_id) /* Search the indexes until we find one that matches this flag or we've * searched them all. */ - for (x = 0; x < table->num_indexes; ++x) - if (table->indexes[x].mesg_types & type_flag) - HGOTO_DONE((ssize_t)x); + for (size_t x = 0; x < table->num_indexes; ++x) + if (table->indexes[x].mesg_types & type_flag) { + found_index = (ssize_t)x; + break; + } + + /* Set output parameter */ + *idx = found_index; - /* At this point, ret_value is either the location of the correct - * index or it's still FAIL because we didn't find an index. 
- */ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5SM__get_index() */ @@ -401,8 +403,10 @@ H5SM_get_fheap_addr(H5F_t *f, unsigned type_id, haddr_t *fheap_addr) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table"); /* Look up index for message type */ - if ((index_num = H5SM__get_index(table, type_id)) < 0) - HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to find correct SOHM index"); + if (H5SM__get_index(table, type_id, &index_num) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to check for SOHM index"); + if (index_num < 0) + HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "unable to find correct SOHM index"); /* Retrieve heap address for index */ *fheap_addr = table->indexes[index_num].heap_addr; @@ -611,9 +615,9 @@ H5SM__create_list(H5F_t *f, H5SM_index_header_t *header) /* Allocate list in memory */ if (NULL == (list = H5FL_CALLOC(H5SM_list_t))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, HADDR_UNDEF, "file allocation failed for SOHM list"); if (NULL == (list->messages = (H5SM_sohm_t *)H5FL_ARR_CALLOC(H5SM_sohm_t, num_entries))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, HADDR_UNDEF, "file allocation failed for SOHM list"); /* Initialize messages in list */ for (x = 0; x < num_entries; x++) @@ -624,7 +628,7 @@ H5SM__create_list(H5F_t *f, H5SM_index_header_t *header) /* Allocate space for the list on disk */ if (HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_SOHM_INDEX, (hsize_t)header->list_size))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, HADDR_UNDEF, "file allocation failed for SOHM list"); /* Put the list into the cache */ if (H5AC_insert_entry(f, H5AC_SOHM_LIST, addr, list, H5AC__NO_FLAGS_SET) < 0) @@ -943,10 +947,10 @@ H5SM_can_share(H5F_t *f, H5SM_master_table_t *table, ssize_t *sohm_index_num, un /* Find the right index for this message type. 
If there is no such index * then this type of message isn't shareable */ - if ((index_num = H5SM__get_index(my_table, type_id)) < 0) { - H5E_clear_stack(); /*ignore error*/ + if (H5SM__get_index(my_table, type_id, &index_num) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to check for SOHM index"); + if (index_num < 0) HGOTO_DONE(false); - } /* end if */ /* If the message isn't big enough, don't bother sharing it */ if (0 == (mesg_size = H5O_msg_raw_size(f, type_id, true, mesg))) @@ -1234,7 +1238,7 @@ H5SM__write_mesg(H5F_t *f, H5O_t *open_oh, H5SM_index_header_t *header, bool def if ((buf_size = H5O_msg_raw_size(f, type_id, true, mesg)) == 0) HGOTO_ERROR(H5E_SOHM, H5E_BADSIZE, FAIL, "can't find message size"); if (NULL == (encoding_buf = H5MM_malloc(buf_size))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "can't allocate buffer for encoding"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "can't allocate buffer for encoding"); if (H5O_msg_encode(f, type_id, true, (unsigned char *)encoding_buf, mesg) < 0) HGOTO_ERROR(H5E_SOHM, H5E_CANTENCODE, FAIL, "can't encode message to be shared"); @@ -1536,7 +1540,9 @@ H5SM_delete(H5F_t *f, H5O_t *open_oh, H5O_shared_t *sh_mesg) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table"); /* Find the correct index and try to delete from it */ - if ((index_num = H5SM__get_index(table, type_id)) < 0) + if (H5SM__get_index(table, type_id, &index_num) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to check for SOHM index"); + if (index_num < 0) HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "unable to find correct SOHM index"); /* If mesg_buf is not NULL, the message's reference count has reached @@ -2110,7 +2116,9 @@ H5SM_get_refcount(H5F_t *f, unsigned type_id, const H5O_shared_t *sh_mesg, hsize HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table"); /* Find the correct index and find the message in it */ - if ((index_num = H5SM__get_index(table, type_id)) < 0) + if (H5SM__get_index(table, type_id, &index_num) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to check for SOHM index"); + if (index_num < 0) HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "unable to find correct SOHM index"); header = &(table->indexes[index_num]); @@ -2241,7 +2249,7 @@ H5SM__read_iter_op(H5O_t *oh, H5O_mesg_t *mesg /*in,out*/, unsigned sequence, /* Allocate buffer to return the message in */ if (NULL == (udata->encoding_buf = H5MM_malloc(udata->buf_size))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, H5_ITER_ERROR, "memory allocation failed"); /* Copy the encoded message into the buffer to return */ H5MM_memcpy(udata->encoding_buf, mesg->raw, udata->buf_size); @@ -2275,7 +2283,7 @@ H5SM__read_mesg_fh_cb(const void *obj, size_t obj_len, void *_udata) /* Allocate a buffer to hold the message */ if (NULL == (udata->encoding_buf = H5MM_malloc(obj_len))) - HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "memory allocation failed"); + HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "memory allocation failed"); /* Copy the message from the heap */ H5MM_memcpy(udata->encoding_buf, obj, obj_len); diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index 439954a1db3..4ff4c7da81f 100644 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -252,7 +252,7 @@ H5_DLLVAR const H5B2_class_t H5SM_INDEX[1]; /****************************/ /* General routines */ -H5_DLL ssize_t H5SM__get_index(const H5SM_master_table_t *table, unsigned type_id); +H5_DLL herr_t H5SM__get_index(const 
H5SM_master_table_t *table, unsigned type_id, ssize_t *idx); /* Encode and decode routines, used for B-tree and cache encoding/decoding */ H5_DLL herr_t H5SM__message_compare(const void *rec1, const void *rec2, int *result); diff --git a/src/H5SMtest.c b/src/H5SMtest.c index bde1d1d5e9c..84da3195556 100644 --- a/src/H5SMtest.c +++ b/src/H5SMtest.c @@ -86,7 +86,9 @@ H5SM__get_mesg_count_test(H5F_t *f, unsigned type_id, size_t *mesg_count) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table"); /* Find the correct index for this message type */ - if ((index_num = H5SM__get_index(table, type_id)) < 0) + if (H5SM__get_index(table, type_id, &index_num) < 0) + HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to check for SOHM index"); + if (index_num < 0) HGOTO_ERROR(H5E_SOHM, H5E_NOTFOUND, FAIL, "unable to find correct SOHM index"); header = &(table->indexes[index_num]); diff --git a/src/H5TSbarrier.h b/src/H5TSbarrier.h index 4ffcca85497..e27ec08897f 100644 --- a/src/H5TSbarrier.h +++ b/src/H5TSbarrier.h @@ -86,9 +86,9 @@ H5TS_barrier_wait(H5TS_barrier_t *barrier) H5TS_atomic_fetch_add_uint(&barrier->generation, 1); } else { - /* Not the last thread, when for the generation to change */ + /* Not the last thread, wait for the generation to change */ while (H5TS_atomic_load_uint(&barrier->generation) == my_generation) - ; + H5TS_thread_yield(); } } #endif diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h index 4fe35f6823e..4880962e4f3 100644 --- a/src/H5TSprivate.h +++ b/src/H5TSprivate.h @@ -86,13 +86,14 @@ /* Atomics macros */ #if defined(H5_HAVE_STDATOMIC_H) && !defined(__cplusplus) /* atomic_int */ -#define H5TS_atomic_init_int(obj, desired) atomic_init((obj), (desired)) -#define H5TS_atomic_load_int(obj) atomic_load(obj) -#define H5TS_atomic_store_int(obj, desired) atomic_store((obj), (desired)) -#define H5TS_atomic_compare_exchange_strong_ing(obj, arg) atomic_fetch_add((obj), (arg)) -#define H5TS_atomic_fetch_add_int(obj, arg) atomic_fetch_add((obj), (arg)) -#define H5TS_atomic_fetch_sub_int(obj, arg) atomic_fetch_sub((obj), (arg)) -#define H5TS_atomic_destroy_int(obj) /* void */ +#define H5TS_atomic_init_int(obj, desired) atomic_init((obj), (desired)) +#define H5TS_atomic_load_int(obj) atomic_load(obj) +#define H5TS_atomic_store_int(obj, desired) atomic_store((obj), (desired)) +#define H5TS_atomic_compare_exchange_strong_int(obj, exp, des) \ + atomic_compare_exchange_strong((obj), (exp), (des)) +#define H5TS_atomic_fetch_add_int(obj, arg) atomic_fetch_add((obj), (arg)) +#define H5TS_atomic_fetch_sub_int(obj, arg) atomic_fetch_sub((obj), (arg)) +#define H5TS_atomic_destroy_int(obj) /* void */ /* atomic_uint */ #define H5TS_atomic_init_uint(obj, desired) atomic_init((obj), (desired)) diff --git a/test/cache_api.c b/test/cache_api.c index 5ac6ca0a108..61c8062a0c0 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -1858,6 +1858,15 @@ check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id) /* test H5Fget_mdc_config(). 
*/ + /* Create an ID to use in the H5Fset_mdc_config/H5Fget_mdc_config tests */ + hid_t dtype_id = H5Tcopy(H5T_NATIVE_INT); + + if (dtype_id < 0) { + + pass = false; + failure_mssg = "H5Tcopy() failed.\n"; + } + scratch.version = H5C__CURR_AUTO_SIZE_CTL_VER; if (pass) { @@ -1877,6 +1886,18 @@ check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id) pass = false; failure_mssg = "H5Fget_mdc_config() accepted invalid file_id."; } + + H5E_BEGIN_TRY + { + result = H5Fget_mdc_config(dtype_id, &scratch); /* not a file ID */ + } + H5E_END_TRY + + if (result >= 0) { + + pass = false; + failure_mssg = "H5Fget_mdc_config() accepted an ID that is not a file ID."; + } } if (pass) { @@ -1941,6 +1962,27 @@ check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id) pass = false; failure_mssg = "H5Fset_mdc_config() accepted bad invalid file_id."; } + + H5E_BEGIN_TRY + { + result = H5Fset_mdc_config(dtype_id, &default_config); + } + H5E_END_TRY + + if (result >= 0) { + + pass = false; + failure_mssg = "H5Fset_mdc_config() accepted an ID that is not a file ID."; + } + } + + /* Close the temporary datatype */ + result = H5Tclose(dtype_id); + + if (result < 0) { + + pass = false; + failure_mssg = "H5Tclose() failed.\n"; } if (pass) { @@ -2050,6 +2092,37 @@ check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id) pass = false; failure_mssg = "H5Freset_mdc_hit_rate_stats() accepted bad file_id."; } + + /* Create an ID to use in the next test */ + hid_t scalarsp_id = H5Screate(H5S_SCALAR); + + if (scalarsp_id < 0) { + + pass = false; + failure_mssg = "H5Screate() failed.\n"; + } + + /* Try to call H5Freset_mdc_hit_rate_stats with an inappropriate ID */ + H5E_BEGIN_TRY + { + result = H5Freset_mdc_hit_rate_stats(scalarsp_id); + } + H5E_END_TRY + + if (result >= 0) { + + pass = false; + failure_mssg = "H5Freset_mdc_hit_rate_stats() accepted an ID that is not a file_id."; + } + + /* Close the temporary dataspace */ + result = H5Sclose(scalarsp_id); + + if (result < 0) { + + pass = false; + failure_mssg = "H5Sclose() failed.\n"; + } } /* test H5Fget_mdc_size() */ diff --git a/test/tid.c b/test/tid.c index da8700f4f74..8dac7f19e41 100644 --- a/test/tid.c +++ b/test/tid.c @@ -18,6 +18,10 @@ #define H5I_FRIEND /*suppress error about including H5Ipkg */ #include "H5Ipkg.h" +/* Defines used in test_appropriate_ids */ +#define FILE_NAME "tid.h5" +#define DSET_NAME "Dataset 1" + static herr_t free_wrapper(void *p, void H5_ATTR_UNUSED **_ctx) { @@ -1369,6 +1373,127 @@ test_future_ids(void) return -1; } /* end test_future_ids() */ +/*------------------------------------------------------------------------- + * Function: test_appropriate_ids + * + * Purpose: Tests several API functions on detecting inappropriate ID. 
+ * + * Return: Success: 0 + * Failure: number of errors + * + *------------------------------------------------------------------------- + */ +static int +test_appropriate_ids(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t fcpl_id = H5I_INVALID_HID; + hid_t plist = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + hsize_t dims = 2; + hssize_t free_space; + herr_t ret = SUCCEED; /* Generic return value */ + + /* Create file create property list */ + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate"); + + file_id = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); + CHECK(file_id, H5I_INVALID_HID, "H5Fcreate"); + + /* Create a dataset in the file */ + space_id = H5Screate_simple(1, &dims, NULL); + CHECK(space_id, H5I_INVALID_HID, "H5Screate_simple"); + dset_id = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK(dset_id, H5I_INVALID_HID, "H5Dcreate2"); + + /* Close IDs */ + ret = H5Pclose(fcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Sclose(space_id); + CHECK(ret, FAIL, "H5Sclose"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + file_id = H5Fopen(FILE_NAME, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(file_id, H5I_INVALID_HID, "H5Fopen"); + + /* Get the file create property */ + fcpl_id = H5Fget_create_plist(file_id); + CHECK(fcpl_id, H5I_INVALID_HID, "H5Fget_create_plist"); + + /* Get the file access property */ + fapl_id = H5Fget_access_plist(file_id); + CHECK(fapl_id, H5I_INVALID_HID, "H5Fget_access_plist"); + + dset_id = H5Dopen2(file_id, DSET_NAME, H5P_DEFAULT); + CHECK(dset_id, H5I_INVALID_HID, "H5Dopen2"); + + /*------------------------------------------------------------- + * Try to call functions passing in a wrong ID + *-----------------------------------------------------------*/ + H5E_BEGIN_TRY + { + plist = H5Fget_create_plist(dset_id); /* dset_id is not file ID */ + } + H5E_END_TRY + VERIFY(plist, H5I_INVALID_HID, "H5Fget_create_plist"); + + H5E_BEGIN_TRY + { + plist = H5Fget_access_plist(fapl_id); /* fapl_id is not file ID */ + } + H5E_END_TRY + VERIFY(plist, H5I_INVALID_HID, "H5Fget_access_plist"); + + H5E_BEGIN_TRY + { + unsigned intent; /* File access flags */ + ret = H5Fget_intent(dset_id, &intent); /* dset_id is not file ID */ + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Fget_intent"); + + H5E_BEGIN_TRY + { + unsigned long fileno = 0; + ret = H5Fget_fileno(dset_id, &fileno); /* dset_id is not file ID */ + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Fget_fileno"); + + H5E_BEGIN_TRY + { + free_space = H5Fget_freespace(dset_id); /* dset_id is not file ID */ + } + H5E_END_TRY + VERIFY(free_space, FAIL, "H5Fget_freespace"); + + H5E_BEGIN_TRY + { + void *os_file_handle = NULL; /* OS file handle */ + ret = H5Fget_vfd_handle(fapl_id, H5P_DEFAULT, &os_file_handle); /* fapl_id is not file ID */ + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Fget_vfd_handle"); + + /* Close IDs */ + ret = H5Pclose(fapl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Pclose(fcpl_id); + CHECK(ret, FAIL, "H5Pclose"); + ret = H5Dclose(dset_id); + CHECK(ret, FAIL, "H5Dclose"); + ret = H5Fclose(file_id); + CHECK(ret, FAIL, "H5Fclose"); + + return 0; +} + void test_ids(void) { @@ -1389,4 +1514,6 @@ test_ids(void) TestErrPrintf("ID remove during H5Iclear_type test failed\n"); if (test_future_ids() < 0) TestErrPrintf("Future ID test failed\n"); + if (test_appropriate_ids() < 0) 
+ TestErrPrintf("Detection of inappropriate ID test failed\n"); } diff --git a/test/tvltypes.c b/test/tvltypes.c index 4c8813037d1..1ca7de3bd83 100644 --- a/test/tvltypes.c +++ b/test/tvltypes.c @@ -491,6 +491,22 @@ test_vltypes_vlen_atomic(void) H5E_END_TRY VERIFY(ret, FAIL, "H5Dvlen_get_buf_size"); + /* Try to call H5Dvlen_get_buf_size with a wrong ID */ + H5E_BEGIN_TRY + { + ret = H5Dvlen_get_buf_size(tid1, dataset, sid2, &size); /* IDs in wrong order */ + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dvlen_get_buf_size"); + + /* Try to call H5Dvlen_get_buf_size with a wrong ID */ + H5E_BEGIN_TRY + { + ret = H5Dvlen_get_buf_size(fid1, tid1, sid2, &size); /* not a dataset ID */ + } + H5E_END_TRY + VERIFY(ret, FAIL, "H5Dvlen_get_buf_size"); + /* Read dataset from disk */ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata); CHECK(ret, FAIL, "H5Dread");
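
The H5D.c and H5F.c hunks above replace H5I_object() with H5I_object_verify() so that an ID of the wrong type is rejected during argument checking, which is exactly what the new tid.c and tvltypes.c tests exercise. Below is a minimal caller-side sketch of that behavior; it is an illustration only (the file name "idcheck.h5" is made up here), not part of the patch.

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        hid_t    file_id, dtype_id;
        hssize_t free_space;

        /* Silence the default error printing for the expected failure below */
        H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

        /* Create a file plus a datatype ID (a valid ID, but not a file ID) */
        file_id  = H5Fcreate("idcheck.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        dtype_id = H5Tcopy(H5T_NATIVE_INT);

        /* With the H5I_object_verify() checks, passing a non-file ID fails
         * cleanly instead of being treated as a file handle.
         */
        free_space = H5Fget_freespace(dtype_id);
        if (free_space < 0)
            printf("H5Fget_freespace rejected the non-file ID, as expected\n");

        H5Tclose(dtype_id);
        H5Fclose(file_id);

        return 0;
    }

H5Dvlen_get_buf_size(), H5Fget_create_plist(), and the other functions touched by this patch now report the same kind of error for mismatched IDs.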
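
Separately, the H5TSprivate.h hunk fixes the C11 atomics wrapper: the old H5TS_atomic_compare_exchange_strong_ing macro expanded to atomic_fetch_add(), which is not a compare-and-swap, while the renamed H5TS_atomic_compare_exchange_strong_int now forwards to atomic_compare_exchange_strong(). The following is a standalone C11 sketch of the semantics that macro is expected to provide, written against plain <stdatomic.h> and independent of the HDF5 wrappers.

    #include <stdatomic.h>
    #include <stdio.h>

    int
    main(void)
    {
        atomic_int value;
        int        expected = 5;

        atomic_init(&value, 5);

        /* Succeeds only if 'value' still holds 'expected'; on failure,
         * 'expected' is updated with the value actually observed.
         */
        if (atomic_compare_exchange_strong(&value, &expected, 7))
            printf("swapped, value is now %d\n", atomic_load(&value));
        else
            printf("no swap, observed %d\n", expected);

        return 0;
    }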