diff --git a/ChangeLog b/ChangeLog index ba6f9ea94a5..a95aa51a688 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,1316 @@ ====================================================================== +Originator: Chris Fischer +Date: 10-16-2017 +Tag: cime5.4.0-alpha.04 +Answer Changes: None +Tests: scripts_regression_tests.py code_checker +Dependencies: + +Brief Summary: + - Cleanup of the compvar implementation. + - Refactor CPLHIST mode and add DATM CPLHIST topo capability. + - Fix py3 for erio test. + - Fix some issues found in type conversion. + - Port to python3 while maintaining compatibility with python 2.7 + - Remove developers table on README.md. + - Load balancing tool mods. + - Add auto-normalization to case_diff. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +af429bd Merge pull request #1965 from jedwards4b/iscompvar_cleanup +16adad1 Merge pull request #1950 from ESMCI/mvertens/cplhist_topo +02cc2fd Fix py3 for erio test +116e6e7 Merge pull request #1962 from jedwards4b/convert_to_string_fix +c5cf2af PR #1949 jedwards4b/py3_port_branch +c6cf37e Merge pull request #1926 from ESMCI/rljacob/update-readme +128625f Merge pull request #1942 from jedwards4b/load-balancing-tool-mods +6f43f73 Merge pull request #1958 from ESMCI/jgfouca/case_diff_auto_normalize + + +Modified files: git diff --name-status [previous_tag] +M CMakeLists.txt +M LICENSE.TXT +D README +M README.md +D README.unit_testing +M config/acme/allactive/config_compsets.xml +D config/acme/allactive/config_compsets.xml.cime2 +D config/acme/allactive/config_pes.xml +D config/acme/allactive/config_pes.xml.cime2 +M config/acme/allactive/config_pesall.xml +D config/acme/allactive/testmods_dirs/cam/outfrq9s/shell_commands +D config/acme/allactive/testmods_dirs/force_netcdf_pio/shell_commands +M config/acme/config_archive.xml +M config/acme/config_files.xml +M config/acme/config_grids.xml +M config/acme/machines/Makefile +M 
config/acme/machines/config_batch.xml +M config/acme/machines/config_batch.xsd +M config/acme/machines/config_compilers.xml +D config/acme/machines/config_lt_archive.xml +M config/acme/machines/config_machines.xml +M config/acme/machines/config_pio.xml +A config/acme/machines/syslog.anvil +M config/acme/machines/syslog.cetus +M config/acme/machines/syslog.cori-haswell +M config/acme/machines/syslog.cori-knl +M config/acme/machines/syslog.edison +M config/acme/machines/syslog.mira +M config/acme/machines/syslog.titan +M config/acme/machines/template.case.run +M config/acme/machines/template.case.test +D config/acme/machines/template.lt_archive +M config/acme/machines/template.st_archive +M config/acme/machines/userdefined_laptop_template/config_compilers.xml +M config/acme/machines/userdefined_laptop_template/config_machines.xml +A config/acme/testmods_dirs/allactive/cam/cosplite/user_nl_cam +A config/acme/testmods_dirs/allactive/cam/cosplite/xmlchange_cmnds +A config/acme/testmods_dirs/allactive/cam/cosplite_nhtfrq5/user_nl_cam +A config/acme/testmods_dirs/allactive/cam/cosplite_nhtfrq5/xmlchange_cmnds +A config/acme/testmods_dirs/allactive/cam/outfrq9s/xmlchange_cmnds +A config/acme/testmods_dirs/allactive/cam/rtm_null/xmlchange_cmnds +A config/acme/testmods_dirs/allactive/clm/betr/xmlchange_cmnds +A config/acme/testmods_dirs/allactive/clm/eca/user_nl_clm +A config/acme/testmods_dirs/allactive/clm/fates/user_nl_clm +A config/acme/testmods_dirs/allactive/force_netcdf_pio/shell_commands +A config/acme/testmods_dirs/allactive/force_netcdf_pio/xmlchange_cmnds +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +D config/cesm/machines/Depends.goldbach +M config/cesm/machines/Makefile +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +D config/cesm/machines/config_lt_archive.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M 
config/cesm/machines/template.case.run +M config/cesm/machines/template.case.test +D config/cesm/machines/template.lt_archive +M config/cesm/machines/template.st_archive +M config/cesm/machines/userdefined_laptop_template/README.md +M config/cesm/machines/userdefined_laptop_template/config_compilers.xml +M config/cesm/machines/userdefined_laptop_template/config_machines.xml +M config/config_headers.xml +M config/config_tests.xml +M config/xml_schemas/config_batch.xsd +M config/xml_schemas/config_compsets.xsd +M config/xml_schemas/config_grids_v2.xsd +M config/xml_schemas/config_machines.xsd +M config/xml_schemas/config_machines_template.xml +M config/xml_schemas/config_pes.xsd +M config/xml_schemas/entry_id_base.xsd +A config/xml_schemas/entry_id_base_version3.xsd +A config/xml_schemas/entry_id_version3.xsd +A config/xml_schemas/env_batch.xsd +A config/xml_schemas/env_mach_pes.xsd +A config/xml_schemas/env_mach_specific.xsd +A doc/.gitignore +A doc/Makefile +A doc/README +A doc/source/README +A doc/source/Tools_user/index.rst.template +A doc/source/_static +A doc/source/build_cpl/adding-components.rst +A doc/source/build_cpl/index.rst +A doc/source/build_cpl/introduction.rst +A doc/source/conf.py +A doc/source/data_models/data-atm.rst +A doc/source/data_models/data-lnd.rst +A doc/source/data_models/data-model-science.rst +A doc/source/data_models/data-ocean.rst +A doc/source/data_models/data-river.rst +A doc/source/data_models/data-seaice.rst +A doc/source/data_models/data-wave.rst +A doc/source/data_models/design-details.rst +A doc/source/data_models/index.rst +A doc/source/data_models/input-namelists.rst +A doc/source/data_models/input-streams.rst +A doc/source/data_models/introduction.rst +A doc/source/driver_cpl/bit-for-bit-flag.rst +A doc/source/driver_cpl/budgets.rst +A doc/source/driver_cpl/cplug-02.1-figx1.jpg +A doc/source/driver_cpl/design.rst +A doc/source/driver_cpl/driver_threading_control.rst +A doc/source/driver_cpl/grids.rst +A 
doc/source/driver_cpl/history-and-restarts.rst +A doc/source/driver_cpl/implementation.rst +A doc/source/driver_cpl/index.rst +A doc/source/driver_cpl/initialization-and-restart.rst +A doc/source/driver_cpl/introduction.rst +A doc/source/driver_cpl/multi-instance.rst +A doc/source/driver_cpl/namelist-overview.rst +A doc/source/driver_cpl/time-management.rst +A doc/source/glossary/index.rst +A doc/source/index.rst +A doc/source/misc_tools/index.rst +A doc/source/misc_tools/load-balancing-tool.rst +A doc/source/users_guide/adding-cases.rst +A doc/source/users_guide/building-a-case.rst +A doc/source/users_guide/case-basics.rst +A doc/source/users_guide/cime-internals.rst +A doc/source/users_guide/cloning-a-case.rst +A doc/source/users_guide/create-a-case.rst +A doc/source/users_guide/customizing-a-case.rst +A doc/source/users_guide/index.rst +A doc/source/users_guide/introduction-and-overview.rst +A doc/source/users_guide/multi-instance.rst +A doc/source/users_guide/optimizing-processor-layout.rst +A doc/source/users_guide/porting-cime.rst +A doc/source/users_guide/running-a-case.rst +A doc/source/users_guide/setting-up-a-case.rst +A doc/source/users_guide/testing.rst +A doc/source/users_guide/troubleshooting.rst +A doc/source/users_guide/unit_testing.rst +A doc/source/users_guide/use_cases/basics-a-basic-example.rst +A doc/source/users_guide/use_cases/basics-mpi-example.rst +A doc/source/users_guide/use_cases/basics-setting-up-a-branch-or-hybrid-run.rst +A doc/source/users_guide/use_cases/use-cases.rst +A doc/source/what_cime/index.rst +A doc/source/xml_files/acme.rst +A doc/source/xml_files/atmosphere.rst +A doc/source/xml_files/cesm.rst +A doc/source/xml_files/common.rst +A doc/source/xml_files/components.rst +A doc/source/xml_files/drivers.rst +A doc/source/xml_files/esp.rst +A doc/source/xml_files/index.rst +A doc/source/xml_files/land.rst +A doc/source/xml_files/landice.rst +A doc/source/xml_files/ocean.rst +A doc/source/xml_files/river.rst +A 
doc/source/xml_files/seaice.rst +A doc/source/xml_files/wave.rst +A doc/tools_autodoc.cfg +A doc/tools_autodoc.py +A index.html +D scripts/Tools/README.post_process +M scripts/Tools/acme_check_env +D scripts/Tools/archive_metadata.sh +M scripts/Tools/bless_test_results +M scripts/Tools/case.build +M scripts/Tools/case.cmpgen_namelists +A scripts/Tools/case.qstatus +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/case_diff +M scripts/Tools/check_case +D scripts/Tools/check_exactrestart.pl +M scripts/Tools/check_input_data +M scripts/Tools/check_lockedfiles +M scripts/Tools/cime_bisect +M scripts/Tools/cimeteststatus +M scripts/Tools/code_checker +M scripts/Tools/compare_namelists +M scripts/Tools/compare_test_results +M scripts/Tools/component_compare_baseline +M scripts/Tools/component_compare_copy +M scripts/Tools/component_compare_test +M scripts/Tools/component_generate_baseline +D scripts/Tools/create_train +D scripts/Tools/cron_script +A scripts/Tools/cs.status +D scripts/Tools/cs_status +M scripts/Tools/getTiming +M scripts/Tools/jenkins_generic_job +M scripts/Tools/jenkins_script +M scripts/Tools/list_acme_tests +D scripts/Tools/load.awk +D scripts/Tools/lt_archive.sh +D scripts/Tools/mdiag_reduce.csh +D scripts/Tools/mdiag_reduce.pl +M scripts/Tools/normalize_cases +M scripts/Tools/pelayout +M scripts/Tools/pointclm/makepointdata.py +M scripts/Tools/pointclm/post_process.py +M scripts/Tools/pointclm/site_fullrun.py +M scripts/Tools/preview_namelists +A scripts/Tools/preview_run +M scripts/Tools/save_provenance +M scripts/Tools/simple_compare +A scripts/Tools/testreporter.py +M scripts/Tools/update_acme_tests +M scripts/Tools/wait_for_tests +M scripts/Tools/xmlchange +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlconvertors/grid_xml_converter.py +M scripts/Tools/xmlquery +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +A 
scripts/fortran_unit_testing/Examples/circle_area/src/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/src/circle.F90 +A scripts/fortran_unit_testing/Examples/circle_area/tests/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 +A scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +A scripts/fortran_unit_testing/Examples/interpolate_1d/src/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +A scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +A scripts/fortran_unit_testing/Examples/test_list.xml +A scripts/fortran_unit_testing/README +A scripts/fortran_unit_testing/python/.gitignore +A scripts/fortran_unit_testing/python/printer.py +A scripts/fortran_unit_testing/python/test_xml_test_list.py +A scripts/fortran_unit_testing/python/xml_test_list.py +A scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/cmakemacroswriter.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/macrowriterbase.py +M scripts/lib/CIME/SystemTests/dae.py +M scripts/lib/CIME/SystemTests/eri.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/SystemTests/err.py +A scripts/lib/CIME/SystemTests/erri.py +M scripts/lib/CIME/SystemTests/ers.py +A scripts/lib/CIME/SystemTests/ers2.py +M scripts/lib/CIME/SystemTests/homme.py +A 
scripts/lib/CIME/SystemTests/irt.py +D scripts/lib/CIME/SystemTests/lii.py +A scripts/lib/CIME/SystemTests/mcc.py +M scripts/lib/CIME/SystemTests/nck.py +M scripts/lib/CIME/SystemTests/ncr.py +A scripts/lib/CIME/SystemTests/nodefail.py +M scripts/lib/CIME/SystemTests/pea.py +M scripts/lib/CIME/SystemTests/pem.py +M scripts/lib/CIME/SystemTests/pet.py +M scripts/lib/CIME/SystemTests/pre.py +A scripts/lib/CIME/SystemTests/restart_tests.py +M scripts/lib/CIME/SystemTests/seq.py +D scripts/lib/CIME/SystemTests/ssp.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/batch.py +M scripts/lib/CIME/XML/compilerblock.py +M scripts/lib/CIME/XML/compilers.py +M scripts/lib/CIME/XML/component.py +M scripts/lib/CIME/XML/compsets.py +M scripts/lib/CIME/XML/entry_id.py +M scripts/lib/CIME/XML/env_archive.py +M scripts/lib/CIME/XML/env_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_run.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +D scripts/lib/CIME/XML/lt_archive.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/namelist_definition.py +M scripts/lib/CIME/XML/pes.py +A scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/XML/testlist.py +M scripts/lib/CIME/XML/tests.py +M scripts/lib/CIME/XML/testspec.py +A scripts/lib/CIME/aprun.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case.py +A scripts/lib/CIME/case_clone.py +M scripts/lib/CIME/case_cmpgen_namelists.py +D scripts/lib/CIME/case_lt_archive.py +M scripts/lib/CIME/case_run.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/case_st_archive.py +M 
scripts/lib/CIME/case_submit.py +M scripts/lib/CIME/case_test.py +M scripts/lib/CIME/check_input_data.py +M scripts/lib/CIME/check_lockedfiles.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/compare_namelists.py +M scripts/lib/CIME/compare_test_results.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/preview_namelists.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/simple_compare.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py +M scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +M scripts/lib/CIME/tests/case_fake.py +A scripts/lib/CIME/tests/test_user_mod_support.py +M scripts/lib/CIME/user_mod_support.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/cs.status.template +M scripts/lib/jenkins_generic_job.py +A scripts/lib/six.py +A scripts/lib/testreporter.template +M scripts/lib/update_acme_tests.py +D scripts/manage_case +M scripts/manage_pes +A scripts/query_config +M scripts/query_testlists +M scripts/tests/CMakeLists.txt +M scripts/tests/CTestConfig.cmake +M scripts/tests/list_tests +M scripts/tests/scripts_regression_tests.py +A scripts/tests/user_mods_test3/shell_commands +A src/CMake/.gitignore +A src/CMake/CESM_utils.cmake +A src/CMake/CIME_initial_setup.cmake +A src/CMake/CIME_utils.cmake +A src/CMake/ChangeLog +A src/CMake/Compilers.cmake +A src/CMake/FindpFUnit.cmake +A src/CMake/LICENSE +A src/CMake/README.md +A src/CMake/Sourcelist_utils.cmake +A src/CMake/TryCSizeOf.f90 +A src/CMake/TryMPIIO.f90 +A src/CMake/TryMPIMod.f90 +A src/CMake/TryMPISERIAL.f90 +A src/CMake/TryPnetcdf_inc.f90 +A src/CMake/TryPnetcdf_mod.f90 +A src/CMake/genf90_utils.cmake +A 
src/CMake/mpiexec.cmake +A src/CMake/pFUnit_utils.cmake +M src/build_scripts/buildlib.csm_share +M src/build_scripts/buildlib.gptl +A src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +D src/components/data_comps/datm/atm_comp_mct.F90 +T src/components/data_comps/datm/cime_config/buildlib +M src/components/data_comps/datm/cime_config/buildnml +M src/components/data_comps/datm/cime_config/config_archive.xml +M src/components/data_comps/datm/cime_config/config_component.xml +M src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +M src/components/data_comps/datm/datm_comp_mod.F90 +M src/components/data_comps/datm/datm_shr_mod.F90 +A src/components/data_comps/datm/mct/atm_comp_mct.F90 +T src/components/data_comps/desp/cime_config/buildlib +M src/components/data_comps/desp/cime_config/buildnml +M src/components/data_comps/desp/cime_config/config_component.xml +M src/components/data_comps/desp/cime_config/namelist_definition_desp.xml +M src/components/data_comps/desp/desp_comp_mod.F90 +D src/components/data_comps/desp/esp_comp_mct.F90 +M src/components/data_comps/desp/esp_utils.F90 +A src/components/data_comps/desp/mct/esp_comp_mct.F90 +T src/components/data_comps/dice/cime_config/buildlib +M src/components/data_comps/dice/cime_config/buildnml +M src/components/data_comps/dice/cime_config/config_archive.xml +M src/components/data_comps/dice/cime_config/config_component.xml +M src/components/data_comps/dice/cime_config/namelist_definition_dice.xml +M src/components/data_comps/dice/dice_comp_mod.F90 +A src/components/data_comps/dice/dice_shr_mod.F90 +D src/components/data_comps/dice/ice_comp_mct.F90 +A src/components/data_comps/dice/mct/ice_comp_mct.F90 +T src/components/data_comps/dlnd/cime_config/buildlib +M src/components/data_comps/dlnd/cime_config/buildnml +M src/components/data_comps/dlnd/cime_config/config_archive.xml +M 
src/components/data_comps/dlnd/cime_config/config_component.xml +M src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml +M src/components/data_comps/dlnd/dlnd_comp_mod.F90 +A src/components/data_comps/dlnd/dlnd_shr_mod.F90 +D src/components/data_comps/dlnd/lnd_comp_mct.F90 +A src/components/data_comps/dlnd/mct/lnd_comp_mct.F90 +T src/components/data_comps/docn/cime_config/buildlib +M src/components/data_comps/docn/cime_config/buildnml +M src/components/data_comps/docn/cime_config/config_archive.xml +M src/components/data_comps/docn/cime_config/config_component.xml +M src/components/data_comps/docn/cime_config/namelist_definition_docn.xml +M src/components/data_comps/docn/docn_comp_mod.F90 +A src/components/data_comps/docn/docn_shr_mod.F90 +A src/components/data_comps/docn/mct/ocn_comp_mct.F90 +D src/components/data_comps/docn/ocn_comp_mct.F90 +T src/components/data_comps/drof/cime_config/buildlib +M src/components/data_comps/drof/cime_config/buildnml +M src/components/data_comps/drof/cime_config/config_archive.xml +M src/components/data_comps/drof/cime_config/config_component.xml +M src/components/data_comps/drof/cime_config/namelist_definition_drof.xml +M src/components/data_comps/drof/drof_comp_mod.F90 +A src/components/data_comps/drof/drof_shr_mod.F90 +A src/components/data_comps/drof/mct/rof_comp_mct.F90 +D src/components/data_comps/drof/rof_comp_mct.F90 +A src/components/data_comps/dshare/shr_dmodel_mod.F90 +A src/components/data_comps/dshare/shr_strdata_mod.F90 +A src/components/data_comps/dshare/shr_stream_mod.F90 +A src/components/data_comps/dshare/shr_tInterp_mod.F90 +T src/components/data_comps/dwav/cime_config/buildlib +M src/components/data_comps/dwav/cime_config/buildnml +M src/components/data_comps/dwav/cime_config/config_archive.xml +M src/components/data_comps/dwav/cime_config/config_component.xml +M src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml +M src/components/data_comps/dwav/dwav_comp_mod.F90 +A 
src/components/data_comps/dwav/dwav_shr_mod.F90 +A src/components/data_comps/dwav/mct/wav_comp_mct.F90 +D src/components/data_comps/dwav/wav_comp_mct.F90 +T src/components/stub_comps/satm/cime_config/buildlib +M src/components/stub_comps/satm/cime_config/buildnml +M src/components/stub_comps/satm/cime_config/config_component.xml +T src/components/stub_comps/sesp/cime_config/buildlib +M src/components/stub_comps/sesp/cime_config/buildnml +M src/components/stub_comps/sesp/cime_config/config_component.xml +T src/components/stub_comps/sglc/cime_config/buildlib +M src/components/stub_comps/sglc/cime_config/buildnml +M src/components/stub_comps/sglc/cime_config/config_component.xml +T src/components/stub_comps/sice/cime_config/buildlib +M src/components/stub_comps/sice/cime_config/buildnml +M src/components/stub_comps/sice/cime_config/config_component.xml +T src/components/stub_comps/slnd/cime_config/buildlib +M src/components/stub_comps/slnd/cime_config/buildnml +M src/components/stub_comps/slnd/cime_config/config_component.xml +T src/components/stub_comps/socn/cime_config/buildlib +M src/components/stub_comps/socn/cime_config/buildnml +M src/components/stub_comps/socn/cime_config/config_component.xml +T src/components/stub_comps/srof/cime_config/buildlib +M src/components/stub_comps/srof/cime_config/buildnml +M src/components/stub_comps/srof/cime_config/config_component.xml +T src/components/stub_comps/swav/cime_config/buildlib +M src/components/stub_comps/swav/cime_config/buildnml +M src/components/stub_comps/swav/cime_config/config_component.xml +T src/components/xcpl_comps/xatm/cime_config/buildlib +M src/components/xcpl_comps/xatm/cime_config/config_component.xml +T src/components/xcpl_comps/xglc/cime_config/buildlib +M src/components/xcpl_comps/xglc/cime_config/config_component.xml +T src/components/xcpl_comps/xice/cime_config/buildlib +M src/components/xcpl_comps/xice/cime_config/config_component.xml +T src/components/xcpl_comps/xlnd/cime_config/buildlib +M 
src/components/xcpl_comps/xlnd/cime_config/config_component.xml +T src/components/xcpl_comps/xocn/cime_config/buildlib +M src/components/xcpl_comps/xocn/cime_config/config_component.xml +T src/components/xcpl_comps/xrof/cime_config/buildlib +M src/components/xcpl_comps/xrof/cime_config/config_component.xml +M src/components/xcpl_comps/xshare/dead_mct_mod.F90 +M src/components/xcpl_comps/xshare/dead_mod.F90 +T src/components/xcpl_comps/xwav/cime_config/buildlib +M src/components/xcpl_comps/xwav/cime_config/config_component.xml +D src/drivers/mct/bld/namelist_files/namelist_defaults_drv.xml +D src/drivers/mct/bld/namelist_files/namelist_definition_drv.xml +D src/drivers/mct/bld/namelist_files/namelist_definition_drv_flds.xml +D src/drivers/mct/bld/namelist_files/namelist_definition_modio.xml +D src/drivers/mct/bld/testdir/Buildconf/camconf/drv_flds_in +D src/drivers/mct/bld/testdir/Buildconf/clmconf/drv_flds_in +D src/drivers/mct/bld/testdir/env_build.xml +D src/drivers/mct/bld/testdir/env_case.xml +D src/drivers/mct/bld/testdir/env_mach_pes.xml +D src/drivers/mct/bld/testdir/env_run.xml +D src/drivers/mct/bld/testdir/runtest +D src/drivers/mct/bld/testdir/user_nl_cpl +M src/drivers/mct/cime_config/buildexe +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_archive.xml +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_acme.xml +M src/drivers/mct/cime_config/config_component_cesm.xml +M src/drivers/mct/cime_config/config_compsets.xml +M src/drivers/mct/cime_config/config_pes.xml +M src/drivers/mct/cime_config/namelist_definition_drv.xml +A src/drivers/mct/cime_config/namelist_definition_drv_flds.xml +M src/drivers/mct/cime_config/namelist_definition_modelio.xml +M src/drivers/mct/cime_config/testdefs/testlist_drv.xml +A src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/default/shell_commands +A src/drivers/mct/cime_config/testdefs/testmods_dirs/drv/som/shell_commands +D 
src/drivers/mct/doc/bookinfo.xml +D src/drivers/mct/doc/chap1.xml +D src/drivers/mct/doc/chap2.xml +D src/drivers/mct/doc/figx1.jpg +D src/drivers/mct/doc/namelist.xml +D src/drivers/mct/doc/rundocbook.csh +D src/drivers/mct/doc/stylesheet.dsl +D src/drivers/mct/doc/ug.xml +M src/drivers/mct/main/CMakeLists.txt +D src/drivers/mct/main/cesm_comp_mod.F90 +D src/drivers/mct/main/cesm_driver.F90 +A src/drivers/mct/main/cime_comp_mod.F90 +A src/drivers/mct/main/cime_driver.F90 +M src/drivers/mct/main/component_mod.F90 +M src/drivers/mct/main/component_type_mod.F90 +M src/drivers/mct/main/map_glc2lnd_mod.F90 +M src/drivers/mct/main/map_lnd2glc_mod.F90 +M src/drivers/mct/main/mrg_mod.F90 +M src/drivers/mct/main/prep_aoflux_mod.F90 +M src/drivers/mct/main/prep_atm_mod.F90 +M src/drivers/mct/main/prep_glc_mod.F90 +M src/drivers/mct/main/prep_ice_mod.F90 +M src/drivers/mct/main/prep_lnd_mod.F90 +M src/drivers/mct/main/prep_ocn_mod.F90 +M src/drivers/mct/main/seq_diag_mct.F90 +M src/drivers/mct/main/seq_domain_mct.F90 +M src/drivers/mct/main/seq_flux_mct.F90 +M src/drivers/mct/main/seq_hist_mod.F90 +M src/drivers/mct/main/seq_io_mod.F90 +M src/drivers/mct/main/seq_rest_mod.F90 +D src/drivers/mct/main/vertical_gradient_calculator_2nd_order.F90 +D src/drivers/mct/main/vertical_gradient_calculator_base.F90 +D src/drivers/mct/main/vertical_gradient_calculator_factory.F90 +M src/drivers/mct/shr/CMakeLists.txt +M src/drivers/mct/shr/glc_elevclass_mod.F90 +M src/drivers/mct/shr/seq_comm_mct.F90 +M src/drivers/mct/shr/seq_drydep_mod.F90 +M src/drivers/mct/shr/seq_flds_mod.F90 +M src/drivers/mct/shr/seq_infodata_mod.F90 +M src/drivers/mct/shr/seq_timemgr_mod.F90 +A src/drivers/mct/shr/shr_ndep_mod.F90 +M src/drivers/mct/unit_test/CMakeLists.txt +A src/drivers/mct/unit_test/check_fields_test/CMakeLists.txt +A src/drivers/mct/unit_test/check_fields_test/test_check_fields.pf +M src/drivers/mct/unit_test/glc_elevclass_test/test_glc_elevclass.pf +D 
src/drivers/mct/unit_test/map_lnd2glc_test/CMakeLists.txt +D src/drivers/mct/unit_test/map_lnd2glc_test/test_map_lnd2glc.pf +M src/drivers/mct/unit_test/stubs/CMakeLists.txt +A src/drivers/mct/unit_test/stubs/seq_timemgr_mod.F90 +D src/drivers/mct/unit_test/stubs/vertical_gradient_calculator_constant.F90 +M src/drivers/mct/unit_test/utils/create_mapper_mod.F90 +M src/drivers/mct/unit_test/utils/mct_wrapper_mod.F90 +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/CMakeLists.txt +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/README +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/gradient_example.txt +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/plot_gradient +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/test_vertical_gradient_calculator_2nd_order.pf +D src/drivers/mct/unit_test/vertical_gradient_calculator_test/test_vertical_gradient_calculator_factory.pf +D src/externals/CMake/.gitignore +D src/externals/CMake/CESM_utils.cmake +D src/externals/CMake/CIME_utils.cmake +D src/externals/CMake/ChangeLog +D src/externals/CMake/Compilers.cmake +D src/externals/CMake/FindpFUnit.cmake +D src/externals/CMake/LICENSE +D src/externals/CMake/README.md +D src/externals/CMake/Sourcelist_utils.cmake +D src/externals/CMake/TryCSizeOf.f90 +D src/externals/CMake/TryMPIIO.f90 +D src/externals/CMake/TryMPIMod.f90 +D src/externals/CMake/TryMPISERIAL.f90 +D src/externals/CMake/TryPnetcdf_inc.f90 +D src/externals/CMake/TryPnetcdf_mod.f90 +D src/externals/CMake/genf90_utils.cmake +D src/externals/CMake/mpiexec.cmake +D src/externals/CMake/pFUnit_utils.cmake +M src/externals/mct/mct/Makefile +M src/externals/mct/mct/m_AttrVect.F90 +M src/externals/mct/mct/m_GlobalSegMap.F90 +M src/externals/mct/mct/m_MCTWorld.F90 +M src/externals/mct/mct/m_MatAttrVectMul.F90 +M src/externals/mct/mct/m_Rearranger.F90 +M src/externals/mct/mct/m_Router.F90 +A src/externals/mct/mct/m_SPMDutils.F90 +M 
src/externals/mct/mpeu/m_FcComms.F90 +M src/externals/mct/mpi-serial/Makefile +D src/externals/mct/mpi-serial/NOTES +M src/externals/mct/mpi-serial/README +M src/externals/mct/mpi-serial/collective.c +A src/externals/mct/mpi-serial/error.c +A src/externals/mct/mpi-serial/ic_merge.c +M src/externals/mct/mpi-serial/mpi.c +M src/externals/mct/mpi-serial/mpi.h +M src/externals/mct/mpi-serial/mpif.h +M src/externals/mct/mpi-serial/tests/ftest.F90 +M src/externals/mct/mpi-serial/tests/ftest_old.F90 +M src/externals/mct/mpi-serial/type.c +M src/externals/mct/testsystem/testall/ReadSparseMatrixAsc.F90 +M src/externals/mct/testsystem/testall/cpl.F90 +M src/externals/pio1/pio/CMakeLists.txt +M src/externals/pio1/pio/pio.F90 +M src/externals/pio1/pio/pio_types.F90 +M src/externals/pio1/pio/piolib_mod.F90 +M src/externals/pio1/pio/pionfput_mod.F90.in +M src/externals/pio1/tests/testpio/perl5lib/ChangeLog +M src/externals/pio1/tests/testpio/perl5lib/XML/Changes +M src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm +M src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm +M src/externals/pio1/tests/testpio/perl5lib/XML/README +D src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML::Lite.3 +D src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML::Lite::Element.3 +A src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite.3 +A src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite_Element.3 +M src/externals/pio2/.gitignore +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/CTestScript.cmake +A src/externals/pio2/Makefile.am +A src/externals/pio2/cmake_config.h.in +A src/externals/pio2/configure.ac +M src/externals/pio2/ctest/CTestEnvironment-anlworkstation.cmake +M src/externals/pio2/ctest/CTestEnvironment-cgd.cmake +M src/externals/pio2/ctest/CTestEnvironment-nwsc.cmake +A src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +M src/externals/pio2/ctest/runcdash-cgd-nag.sh +M src/externals/pio2/ctest/runcdash-nwsc-intel-mpiserial.sh +M 
src/externals/pio2/ctest/runcdash-nwscla-intel.sh +M src/externals/pio2/ctest/runctest-cgd.sh +M src/externals/pio2/ctest/runctest-nwscla.sh +M src/externals/pio2/doc/source/Decomp.txt +M src/externals/pio2/doc/source/base.txt +M src/externals/pio2/doc/source/contributing_code.txt +M src/externals/pio2/doc/source/mach_walkthrough.txt +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML::Lite.3 +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML::Lite::Element.3 +A src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite.3 +A src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 +M src/externals/pio2/examples/c/CMakeLists.txt +A src/externals/pio2/examples/c/darray_async.c +A src/externals/pio2/examples/c/darray_no_async.c +M src/externals/pio2/examples/c/example1.c +M src/externals/pio2/scripts/prune_decomps.pl +A src/externals/pio2/src/Makefile.am +M src/externals/pio2/src/clib/CMakeLists.txt +A src/externals/pio2/src/clib/Makefile.am +M src/externals/pio2/src/clib/bget.c +D src/externals/pio2/src/clib/config.h.in +D src/externals/pio2/src/clib/dtypes.h +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_get_nc.c +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_lists.c +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/clib/pio_put_nc.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pio_spmd.c +M src/externals/pio2/src/clib/pio_varm.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_sc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/piolib_mod.F90 +A src/externals/pio2/tests/Makefile.am +M 
src/externals/pio2/tests/cunit/CMakeLists.txt +A src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/pio_tests.h +A src/externals/pio2/tests/cunit/run_tests.sh +D src/externals/pio2/tests/cunit/test_async_2comp.c +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +A src/externals/pio2/tests/cunit/test_async_manyproc.c +A src/externals/pio2/tests/cunit/test_async_mpi.c +A src/externals/pio2/tests/cunit/test_async_multi2.c +A src/externals/pio2/tests/cunit/test_async_multicomp.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_common.c +M src/externals/pio2/tests/cunit/test_darray.c +M src/externals/pio2/tests/cunit/test_darray_1d.c +A src/externals/pio2/tests/cunit/test_darray_2sync.c +M src/externals/pio2/tests/cunit/test_darray_3d.c +A src/externals/pio2/tests/cunit/test_darray_async.c +A src/externals/pio2/tests/cunit/test_darray_async_many.c +A src/externals/pio2/tests/cunit/test_darray_async_simple.c +A src/externals/pio2/tests/cunit/test_darray_frame.c +A src/externals/pio2/tests/cunit/test_darray_multi.c +M src/externals/pio2/tests/cunit/test_darray_multivar.c +A src/externals/pio2/tests/cunit/test_darray_multivar2.c +A src/externals/pio2/tests/cunit/test_darray_multivar3.c +A src/externals/pio2/tests/cunit/test_decomp_uneven.c +M src/externals/pio2/tests/cunit/test_decomps.c +M src/externals/pio2/tests/cunit/test_intercomm2.c +M src/externals/pio2/tests/cunit/test_iosystem2.c +M src/externals/pio2/tests/cunit/test_iosystem2_simple.c +M src/externals/pio2/tests/cunit/test_iosystem2_simple2.c +M src/externals/pio2/tests/cunit/test_iosystem3.c +M src/externals/pio2/tests/cunit/test_iosystem3_simple.c +M src/externals/pio2/tests/cunit/test_iosystem3_simple2.c +M src/externals/pio2/tests/cunit/test_pioc.c +M src/externals/pio2/tests/cunit/test_pioc_fill.c +M src/externals/pio2/tests/cunit/test_pioc_putget.c +M 
src/externals/pio2/tests/cunit/test_pioc_unlim.c +A src/externals/pio2/tests/cunit/test_rearr.c +M src/externals/pio2/tests/cunit/test_shared.c +M src/externals/pio2/tests/cunit/test_spmd.c +M src/externals/pio2/tests/general/CMakeLists.txt +M src/externals/pio2/tests/general/ncdf_get_put.F90.in +M src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +M src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +M src/share/esmf_wrf_timemgr/ESMF_Stubs.F90 +M src/share/test/unit/CMakeLists.txt +A src/share/test/unit/mock/README +M src/share/test/unit/mock/shr_sys_mod.nompi_abortthrows.F90 +A src/share/test/unit/shr_abort_test/CMakeLists.txt +A src/share/test/unit/shr_abort_test/README +A src/share/test/unit/shr_abort_test/test_shr_abort.pf +M src/share/test/unit/shr_assert_test/CMakeLists.txt +M src/share/test/unit/shr_infnan_test/CMakeLists.txt +M src/share/test/unit/shr_infnan_test/test_infnan.F90 +M src/share/test/unit/shr_spfn_test/CMakeLists.txt +M src/share/test/unit/shr_string_test/CMakeLists.txt +M src/share/test/unit/shr_string_test/test_shr_string.pf +M src/share/timing/COPYING +M src/share/unit_test_stubs/README +M src/share/unit_test_stubs/pio/pio.F90.in +A src/share/unit_test_stubs/util/CMakeLists.txt +A src/share/unit_test_stubs/util/README +A src/share/unit_test_stubs/util/shr_abort_mod.abortthrows.F90 +M src/share/util/CMakeLists.txt +M src/share/util/mct_mod.F90 +A src/share/util/shr_abort_mod.F90 +D src/share/util/shr_dmodel_mod.F90 +M src/share/util/shr_mct_mod.F90 +M src/share/util/shr_pcdf_mod.F90 +M src/share/util/shr_pio_mod.F90 +D src/share/util/shr_strdata_mod.F90 +D src/share/util/shr_stream_mod.F90 +M src/share/util/shr_string_mod.F90 +M src/share/util/shr_sys_mod.F90 +D src/share/util/shr_tInterp_mod.F90 +M tools/Readme.configure +M tools/configure +M tools/cprnc/CMakeLists.txt +M tools/cprnc/README +M tools/cprnc/summarize_cprnc_diffs +M 
tools/load_balancing_tool/README +D tools/load_balancing_tool/code/cesm_scaling.gplot +D tools/load_balancing_tool/code/create_dataFile.pl +D tools/load_balancing_tool/code/f02_peList.txt +D tools/load_balancing_tool/code/f05_peList.txt +D tools/load_balancing_tool/code/f09_peList.txt +D tools/load_balancing_tool/code/f19_peList.txt +D tools/load_balancing_tool/code/fv_model.mod +D tools/load_balancing_tool/code/fv_model.run +D tools/load_balancing_tool/code/fv_second_pass.pl +D tools/load_balancing_tool/code/get_cesm_times.pl +D tools/load_balancing_tool/code/load_balance.pl +D tools/load_balancing_tool/code/merge.py +D tools/load_balancing_tool/code/model.data +D tools/load_balancing_tool/code/model.mod +D tools/load_balancing_tool/code/model.run +D tools/load_balancing_tool/code/neos.py +D tools/load_balancing_tool/global_variables.csh +A tools/load_balancing_tool/layouts.py +A tools/load_balancing_tool/load_balancing_solve.py +A tools/load_balancing_tool/load_balancing_submit.py +A tools/load_balancing_tool/optimize_model.py +D tools/load_balancing_tool/run_first.csh +D tools/load_balancing_tool/run_second.csh +A tools/load_balancing_tool/tests/__init__.py +A tools/load_balancing_tool/tests/atm_lnd.py +A tools/load_balancing_tool/tests/example.json +A tools/load_balancing_tool/tests/load_balancing_test.py +A tools/load_balancing_tool/tests/test.xml +A tools/load_balancing_tool/tests/timing/timing_1 +A tools/load_balancing_tool/tests/timing/timing_2 +A tools/load_balancing_tool/tests/timing/timing_3 +M tools/mapping/check_maps/.gitignore +M tools/mapping/check_maps/README +M tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 +M tools/mapping/check_maps/src/Makefile +M tools/mapping/gen_domain_files/INSTALL +M tools/mapping/gen_domain_files/src/Makefile +M tools/mapping/gen_mapping_files/README +A tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/.gitignore +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +M 
tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh +M tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +M tools/mapping/gen_mapping_files/runoff_to_ocn/README +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_r05_to_gx1v6_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_r05_to_gx1v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_r05_to_gx3v7_nnsm_e1000r500.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_rx1_to_gx1v6_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_rx1_to_gx1v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_rx1_to_gx3v7_nnsm_e1000r500.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_wr50a_to_ar9v4_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/map_wr50a_to_gx3v7_nnsm_e1000r300.nml +A tools/mapping/gen_mapping_files/runoff_to_ocn/maps/.gitignore +A tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl +A tools/mapping/gen_mapping_files/runoff_to_ocn/run_merge_mapping_files.sh +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_r05_gx3v7.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_rx1_gx1v6.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_tx01.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_wr50a_ar9v4.nml +D tools/mapping/gen_mapping_files/runoff_to_ocn/runoff_map_wr50a_gx3v7.nml +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/Makefile +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/fixroff_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/kind_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/mapsort_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_kind_mod.F90 +M 
tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_sys_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/shr_timer_mod.F90 +M tools/mapping/gen_mapping_files/runoff_to_ocn/src/smooth_mod.F90 +M tools/mapping/map_field/INSTALL +M tools/mapping/map_field/src/Makefile +M tools/mapping/map_field/src/map_field.F90 +A tools/statistical_ensemble_test/README +M tools/statistical_ensemble_test/ensemble.sh +D tools/statistical_ensemble_test/ncl_library/Zscore.ncl +D tools/statistical_ensemble_test/ncl_library/area_avg.ncl +D tools/statistical_ensemble_test/ncl_library/create_ensemble.ncl +D tools/statistical_ensemble_test/ncl_library/global.ncl +D tools/statistical_ensemble_test/ncl_library/plot_rmsz.ncl +D tools/statistical_ensemble_test/ncl_library/print.ncl +D tools/statistical_ensemble_test/ncl_library/rmsz.ncl +D tools/statistical_ensemble_test/ncl_library/vinterp.ncl +M tools/statistical_ensemble_test/pyCECT/pyCECT.py +M tools/statistical_ensemble_test/pyCECT/pyEnsLib.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSum.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py +M tools/statistical_ensemble_test/single_run.sh +D tools/statistical_ensemble_test/test_run_against_ensemble.ncl +D tools/statistical_ensemble_test/validation_test.sh +D tools/unit_testing/Examples/circle_area/src/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/src/circle.F90 +D tools/unit_testing/Examples/circle_area/tests/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/CTest/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/CTest/test_driver.F90 +D tools/unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +D tools/unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +D tools/unit_testing/Examples/interpolate_1d/src/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/src/interpolate_1d.F90 +D tools/unit_testing/Examples/interpolate_1d/tests/CMakeLists.txt +D 
tools/unit_testing/Examples/interpolate_1d/tests/CTest/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/tests/CTest/test_driver.F90 +D tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +D tools/unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +D tools/unit_testing/Examples/test_list.xml +D tools/unit_testing/README +D tools/unit_testing/python/.gitignore +D tools/unit_testing/python/comparable.py +D tools/unit_testing/python/environment.py +D tools/unit_testing/python/machine_setup.py +D tools/unit_testing/python/printer.py +D tools/unit_testing/python/test_environment.py +D tools/unit_testing/python/test_xml_test_list.py +D tools/unit_testing/python/test_xml_utils.py +D tools/unit_testing/python/xml_test_list.py +D tools/unit_testing/python/xml_utils.py +D tools/unit_testing/run_tests.py +M tools/utils/find_circular_dependency.py +M utils/perl5lib/Build/Namelist.pm + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-9-2017 +Tag: cime5.4.0-alpha.03 +Answer Changes: None +Tests: scripts_regression_tests.py + manual test of create_clone +Dependencies: + +Brief Summary: + - Fix SE domain files. + - Use keyword-value pairs for create_clone call. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +55d2dbe Merge pull request #1957 from billsacks/create_clone_args +443312c Merge pull request #1951 from gold2718/seDomainFiles + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/config_grids.xml +M scripts/create_clone +M src/drivers/mct/cime_config/config_component_cesm.xml + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-6-2017 +Tag: cime5.4.0-alpha.02 +Answer Changes: None +Tests: scripts_regression_tests.py SMS_Ln9.f19_f19_mg17.F2000_DEV.yellowstone_intel.cam-cosp + IRT_Ld7.T31_g37.B1850C5L45BGCR.cheyenne_intel + hand testing tools/load_balancing_tool/test/test_load_balancing_tool.py + load_balancing_test.py +Dependencies: + +Brief Summary: + - Fix cosp build instructions. + - Pio update branch. + - Refactor args to submit scripts. + - Archive pop tavg rpointer file. + - Add option to define test types in query_testlists. + - Fix tests for small systems. + - Only run driver build namelist after first day cycle. + - Load balancing tool mods. + - Use local baseline directory for testing. + - Add ability to cancel batch jobs to the system. + - A port to a generic Linux workstation. + - Fix pylint issue. + - Fix st_archive. + - Document uniqueness requirement for test-id. + - Add Argonne copyright statement. + - Rewrite of the load balancing tool. + - Imported mpi-serial changes from MCSclimate/mpi-serial. + +User interface changes: + - New batch_env optional entry in config_batch. + - Add --baseline-root argument to case.cmpgen_baselines. + - new batch_cancel field in config_batch. 
+ +PR summary: git log --oneline --first-parent [previous_tag]..master +a2f0979 Merge pull request #1947 from jedwards4b/cosp_build_fix +1655800 Merge pull request #1948 from jedwards4b/pio_update_branch +d590c55 Merge pull request #1945 from ESMCI/jgfouca/refactor_args_to_submit_scripts +7f44f80 Merge pull request #1944 from jedwards4b/pop_tavg_archive +c7cf4eb Merge pull request #1930 from billsacks/query_testtypes +0742931 Merge pull request #1943 from jedwards4b/small_systems +34522a3 Merge pull request #1941 from jedwards4b/da_optimization +be3bf2e Merge pull request #1923 from jedwards4b/load-balancing-tool-mods +2d2ab96 Merge pull request #1939 from jedwards4b/srt_baselines +4e0e02e Merge pull request #1932 from ESMCI/jgfouca/add_job_cancel +d3a2da4 Merge pull request #1936 from jedwards4b/centos7_linux +bc35e32 fix pylint issue +874dc10 Merge pull request #1916 from ESMCI/kdraeder/fix_st_archive +7672387 Merge pull request #1934 from billsacks/document_testids +202e9a0 Merge pull request #1925 from ESMCI/rljacob/add-anl-copyright +b8b9f9e Merge pull request #1902 from ESMCI/sarich/tools/load-balancing-tool +4489a90 Merge pull request #1922 from gold2718/mpiserial_update + + + +Modified files: git diff --name-status [previous_tag] +M LICENSE.TXT +M config/acme/machines/config_batch.xml +M config/acme/machines/template.case.run +M config/acme/machines/template.case.test +M config/cesm/config_archive.xml +M config/cesm/machines/Makefile +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/template.case.run +M config/cesm/machines/template.case.test +M config/config_tests.xml +M config/xml_schemas/config_batch.xsd +M doc/source/misc_tools/index.rst +A doc/source/misc_tools/load-balancing-tool.rst +M scripts/Tools/case.cmpgen_namelists +M scripts/create_test +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/tests.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_run.py +M 
scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/case_st_archive.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/lib/update_acme_tests.py +M scripts/query_testlists +M scripts/tests/scripts_regression_tests.py +M src/externals/mct/mpi-serial/Makefile +M src/externals/mct/mpi-serial/mpi.c +M src/externals/mct/mpi-serial/mpif.h +M src/externals/mct/mpi-serial/tests/ftest.F90 +M src/externals/mct/mpi-serial/tests/ftest_old.F90 +M src/externals/mct/mpi-serial/type.c +M src/externals/pio1/pio/CMakeLists.txt +M src/externals/pio1/pio/pionfput_mod.F90.in +M src/externals/pio1/tests/testpio/perl5lib/ChangeLog +M src/externals/pio1/tests/testpio/perl5lib/XML/Changes +M src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm +M src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm +M src/externals/pio1/tests/testpio/perl5lib/XML/README +D src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML::Lite.3 +D src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML::Lite::Element.3 +A src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite.3 +A src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite_Element.3 +M src/externals/pio1/timing/COPYING +M src/externals/pio2/.gitignore +M src/externals/pio2/CMakeLists.txt +M src/externals/pio2/CTestScript.cmake +A src/externals/pio2/Makefile.am +A src/externals/pio2/cmake_config.h.in +A src/externals/pio2/configure.ac +M src/externals/pio2/ctest/CTestEnvironment-cgd.cmake +M src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +M src/externals/pio2/doc/source/Decomp.txt +M src/externals/pio2/doc/source/base.txt +M src/externals/pio2/doc/source/contributing_code.txt +M src/externals/pio2/doc/source/mach_walkthrough.txt +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML::Lite.3 +D src/externals/pio2/examples/basic/perl5lib/XML/man3/XML::Lite::Element.3 +A 
src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite.3 +A src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 +M src/externals/pio2/examples/c/CMakeLists.txt +M src/externals/pio2/scripts/prune_decomps.pl +A src/externals/pio2/src/Makefile.am +M src/externals/pio2/src/clib/CMakeLists.txt +A src/externals/pio2/src/clib/Makefile.am +D src/externals/pio2/src/clib/config.h.in +D src/externals/pio2/src/clib/dtypes.h +M src/externals/pio2/src/clib/pio.h +M src/externals/pio2/src/clib/pio_darray.c +M src/externals/pio2/src/clib/pio_darray_int.c +M src/externals/pio2/src/clib/pio_file.c +M src/externals/pio2/src/clib/pio_get_nc.c +M src/externals/pio2/src/clib/pio_getput_int.c +M src/externals/pio2/src/clib/pio_internal.h +M src/externals/pio2/src/clib/pio_lists.c +M src/externals/pio2/src/clib/pio_msg.c +M src/externals/pio2/src/clib/pio_nc.c +M src/externals/pio2/src/clib/pio_nc4.c +M src/externals/pio2/src/clib/pio_put_nc.c +M src/externals/pio2/src/clib/pio_rearrange.c +M src/externals/pio2/src/clib/pio_spmd.c +M src/externals/pio2/src/clib/pio_varm.c +M src/externals/pio2/src/clib/pioc.c +M src/externals/pio2/src/clib/pioc_sc.c +M src/externals/pio2/src/clib/pioc_support.c +M src/externals/pio2/src/flib/piolib_mod.F90 +M src/externals/pio2/src/gptl/COPYING +A src/externals/pio2/tests/Makefile.am +M src/externals/pio2/tests/cunit/CMakeLists.txt +A src/externals/pio2/tests/cunit/Makefile.am +M src/externals/pio2/tests/cunit/pio_tests.h +A src/externals/pio2/tests/cunit/run_tests.sh +M src/externals/pio2/tests/cunit/test_async_3proc.c +M src/externals/pio2/tests/cunit/test_async_4proc.c +A src/externals/pio2/tests/cunit/test_async_manyproc.c +A src/externals/pio2/tests/cunit/test_async_mpi.c +A src/externals/pio2/tests/cunit/test_async_multi2.c +A src/externals/pio2/tests/cunit/test_async_multicomp.c +M src/externals/pio2/tests/cunit/test_async_simple.c +M src/externals/pio2/tests/cunit/test_common.c +M 
src/externals/pio2/tests/cunit/test_darray.c +M src/externals/pio2/tests/cunit/test_darray_1d.c +A src/externals/pio2/tests/cunit/test_darray_2sync.c +M src/externals/pio2/tests/cunit/test_darray_3d.c +A src/externals/pio2/tests/cunit/test_darray_async.c +A src/externals/pio2/tests/cunit/test_darray_async_many.c +A src/externals/pio2/tests/cunit/test_darray_async_simple.c +A src/externals/pio2/tests/cunit/test_darray_frame.c +M src/externals/pio2/tests/cunit/test_darray_multi.c +M src/externals/pio2/tests/cunit/test_darray_multivar.c +M src/externals/pio2/tests/cunit/test_darray_multivar2.c +A src/externals/pio2/tests/cunit/test_darray_multivar3.c +M src/externals/pio2/tests/cunit/test_decomp_uneven.c +M src/externals/pio2/tests/cunit/test_decomps.c +M src/externals/pio2/tests/cunit/test_intercomm2.c +M src/externals/pio2/tests/cunit/test_iosystem2.c +M src/externals/pio2/tests/cunit/test_iosystem2_simple.c +M src/externals/pio2/tests/cunit/test_iosystem2_simple2.c +M src/externals/pio2/tests/cunit/test_iosystem3.c +M src/externals/pio2/tests/cunit/test_iosystem3_simple.c +M src/externals/pio2/tests/cunit/test_iosystem3_simple2.c +M src/externals/pio2/tests/cunit/test_pioc.c +M src/externals/pio2/tests/cunit/test_pioc_fill.c +M src/externals/pio2/tests/cunit/test_pioc_putget.c +M src/externals/pio2/tests/cunit/test_pioc_unlim.c +M src/externals/pio2/tests/cunit/test_rearr.c +M src/externals/pio2/tests/cunit/test_shared.c +M src/externals/pio2/tests/cunit/test_spmd.c +M src/externals/pio2/tests/general/CMakeLists.txt +M src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +M src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +M src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +M src/externals/pio2/tests/general/util/pio_tutil.F90 +M tools/load_balancing_tool/README +D tools/load_balancing_tool/code/cesm_scaling.gplot +D tools/load_balancing_tool/code/create_dataFile.pl +D tools/load_balancing_tool/code/f02_peList.txt +D 
tools/load_balancing_tool/code/f05_peList.txt +D tools/load_balancing_tool/code/f09_peList.txt +D tools/load_balancing_tool/code/f19_peList.txt +D tools/load_balancing_tool/code/fv_model.mod +D tools/load_balancing_tool/code/fv_model.run +D tools/load_balancing_tool/code/fv_second_pass.pl +D tools/load_balancing_tool/code/get_cesm_times.pl +D tools/load_balancing_tool/code/load_balance.pl +D tools/load_balancing_tool/code/merge.py +D tools/load_balancing_tool/code/model.data +D tools/load_balancing_tool/code/model.mod +D tools/load_balancing_tool/code/model.run +D tools/load_balancing_tool/code/neos.py +D tools/load_balancing_tool/global_variables.csh +A tools/load_balancing_tool/layouts.py +A tools/load_balancing_tool/load_balancing_solve.py +A tools/load_balancing_tool/load_balancing_submit.py +A tools/load_balancing_tool/optimize_model.py +D tools/load_balancing_tool/run_first.csh +D tools/load_balancing_tool/run_second.csh +A tools/load_balancing_tool/tests/__init__.py +A tools/load_balancing_tool/tests/atm_lnd.py +A tools/load_balancing_tool/tests/example.json +A tools/load_balancing_tool/tests/load_balancing_test.py +A tools/load_balancing_tool/tests/test.xml +A tools/load_balancing_tool/tests/timing/timing_1 +A tools/load_balancing_tool/tests/timing/timing_2 +A tools/load_balancing_tool/tests/timing/timing_3 + +====================================================================== + +Originator: Chris Fischer +Date: 9-25-2017 +Tag: cime5.4.0-alpha.01 +Answer Changes: None +Tests: +Dependencies: + +Brief Summary: + - Update version to cime5.3.0-alpha.01 + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master + +Modified files: git diff --name-status [previous_tag] + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 9-25-2017 +Tag: cime5.3.0-alpha.36 +Answer Changes: None +Tests: 
scripts_regression_tests +Dependencies: + +Brief Summary: + - Big refactor to queue selection. + - Shorter paths for case2 using system_tests_compare_two. + - Update atm coupling frequency for high-resolution grids. + - Add '-framework Accelerate' to LDFLAGS on a mac. + - Fix E3SM idmap check. + - Fix pylint issues. + - ERR test rework. + - New error trap if invalid "idmap" file is present in seq_maps.rc. + - Yellowstone tasks per node correction. + - Added new CAM-SE FVM grids and removed incorrect aliases. + - Fix generate and compare baseline functions. + - Add a derived variable NTASKS_PER_INST_COMP where COMP is the component + name (ATM, LND, etc) + - Don't check_if_comp_var to return a new vid if it's not a comp var. + +User interface changes: + - jobmin and jobmax gone from config_batch.xml + - All CAM-SE grid aliases for data ocean models require a mask. + + +PR summary: git log --oneline --first-parent [previous_tag]..master +6049c79 Merge pull request #1918 from ESMCI/jgfouca/queue_selection_refactor_2 +56d71c5 Merge pull request #1919 from billsacks/compare_two_shorter_path +e1b549d Merge pull request #1921 from gold2718/atm_ncpl_mods +f5ac341 Merge pull request #1906 from billsacks/mac_lapack +c15c0ad Merge pull request #1917 from ESMCI/mvertens/fix_acme_idmapcheck +f722595 fix pylint issues +19922e1 Merge pull request #1912 from jedwards4b/err_test_rework +b1d1b9b Merge pull request #1893 from ESMCI/mvertens/trip_invalid_idmap +c330b46 Merge pull request #1911 from jedwards4b/yellowstone_tpn_correction +0136051 Merge pull request #1896 from gold2718/fix_camse_grids +e036093 Merge pull request #1904 from jedwards4b/fix_gen_and_comp +f0522a6 Merge pull request #1900 from jedwards4b/NTASKS_PER_INST_XXX +f5aafff Merge pull request #1901 from ESMCI/jgfouca/xmlquery_minor_fix +2daf8aa Minor pylint fix, make get_queue_specs public + + +Modified files: git diff --name-status [previous_tag] +M config/acme/machines/config_batch.xml +M 
config/cesm/config_grids.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/config_headers.xml +M config/config_tests.xml +M config/xml_schemas/config_batch.xsd +M config/xml_schemas/env_batch.xsd +M scripts/create_test +M scripts/lib/CIME/SystemTests/err.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/XML/env_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_clone.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +M scripts/lib/CIME/tests/case_fake.py +M scripts/lib/CIME/utils.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.csm_share +M src/drivers/mct/cime_config/buildnml +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/cime_config/config_component_cesm.xml + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 9-19-2017 +Tag: cime5.3.0-alpha.35 +Answer Changes: None +Tests: scripts_regression_tests.py, PEM_Ln9.f19_g16_rx1.A.yellowstone_intel +Dependencies: + +Brief Summary: + - Test fixes. + - Can test single-submit on chama in addition to skybridge. + - Fix single submit option to create_test. + - Rename PES_PER_NODE to MAX_MPITASKS_PER_NODE. + - New git pelayout doc. 
+ +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +e1e5e79 Merge pull request #1895 from jedwards4b/test_fixes +a3349ed Merge pull request #1899 from ESMCI/jgfouca/fix_single_submit +40e4b7b Merge pull request #1898 from ESMCI/jgfouca/fix_single_submit +5ba192b Merge pull request #1891 from jedwards4b/rename_pes_per_node +eb59169 Merge pull request #1888 from ESMCI/mvertens/new_pelayout_doc + +Modified files: git diff --name-status [previous_tag] +M config/acme/allactive/config_pesall.xml +M config/acme/machines/config_batch.xml +M config/acme/machines/config_machines.xml +M config/acme/machines/config_pio.xml +M config/acme/machines/userdefined_laptop_template/config_machines.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_machines.xml +M config/cesm/machines/config_pio.xml +M config/cesm/machines/userdefined_laptop_template/config_machines.xml +M config/config_headers.xml +M config/config_tests.xml +M config/xml_schemas/config_machines.xsd +M config/xml_schemas/config_machines_template.xml +M config/xml_schemas/config_pes.xsd +M doc/source/users_guide/customizing-a-case.rst +M doc/source/users_guide/porting-cime.rst +M scripts/create_test +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/aprun.py +M scripts/lib/CIME/case.py +M scripts/lib/CIME/case_setup.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/test_scheduler.py +M scripts/query_config +M scripts/tests/scripts_regression_tests.py +M src/components/data_comps/docn/docn_shr_mod.F90 +M src/drivers/mct/cime_config/config_component.xml +M src/drivers/mct/main/cime_comp_mod.F90 + +====================================================================== + +====================================================================== + Originator: Chris Fischer Date: 9-13-2017 Tag: cime5.3.0-alpha.34 diff --git a/LICENSE.TXT b/LICENSE.TXT 
index 24def5e4fba..63bfa9ae5d3 100644 --- a/LICENSE.TXT +++ b/LICENSE.TXT @@ -4,12 +4,15 @@ All rights reserved. Copyright (c) 2017, Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. + and +Copyright (c) 2017, UChicago Argonne, LLC, All Rights Reserved +under Contract No. DE-AC02-06CH11357 with the Department of Energy Developed by: University Corporation for Atmospheric Research - National Center for Atmospheric Research https://www2.cesm.ucar.edu/working-groups/sewg and - DOE BER ACME project team members, including those at SNL and ANL + DOE BER E3SM project team members, including those at SNL and ANL Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), diff --git a/README.md b/README.md index 4ae5e2d6bdc..07424f5d280 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,16 @@ # cime Common Infrastructure for Modeling the Earth -**cime** (pronounced: seem) represents the infrastructure code for the +CIME, pronounced “SEAM”, contains the support scripts (configure, build, run, test), data models, essential +utility libraries, a “main” and other tools that are needed to build a single-executable coupled Earth System Model. +CIME is available in a stand-alone package that can be compiled and tested without active prognostic components +but is typically included in the source of a climate model. CIME does not contain: any active components, +any intra-component coupling capability (such as atmosphere physics-dynamics coupling). + +*cime* (pronounced: seem) is currently used by the Community Earth System Model - (CESM) and the -Accelerated Climate Model for Energy (ACME). 
-*cime* providess scripts for configuration, build, and testing of -models, as well as code for data and stub models for climate components, -and a driver code for bringing all the climate components together in a single executable. + (CESM) and the +Energy Exascale Earth System Model (E3SM). # Documentation @@ -15,34 +18,23 @@ See esmci.github.io/cime # Developers -*cime* was initially extracted from CESM as a stand-alone capability in 2015 -by members of the CSEG group at NCAR, the software engineering team of -the CESM model. The CSEG group had been developing this infrastrucure within -CESM for a number of years using NSF and DOE funding. After version 4 was released, -a joint development partnership was started with the software engineering group of -ACME, a DOE-funded project, which had branched from CESM in 2014. -Starting with development of version 5, *cime* is cooperative effort with contributions -and ownership from members of both teams. - -The following table documents the developers who have contributed to *cime*, -showing what versions of they contributed to, and with what source(s) of support. - -Name | Institution | Versions | Funding Source (for versions) ----------|------------------|----------|---------------------- -Mariana Vertenstein | NCAR | 1 - D | NSF, DOE -Jim Edwards | NCAR | 1 - D | NSF (1-D), DOE(1-2) -Jim Foucar | SNL | 5 - D | DOE -Rob Jacob | ANL | 5 - D | DOE -Bill Sacks | NCAR | 1 - D | NSF, DOE -Andreas Wilke | ANL | 5 - D | DOE -Jason Sarich | ANL | 5 - D | DOE -Michael Deakin | SNL | 5 - D | DOE -Erich Foster | SNL | 5 - D | DOE -Alice Bertini | NCAR | 1 - D | NSF -Chris Fischer | NCAR | 5 - D | NSF -Steve Goldhaber | NCAR | 1 - D | NSF, DOE -Mike Levy | NCAR | 1 - D | NSF, DOE -Sean Santos | NCAR | 1 - 4 | NSF - -_Key: D = Current development version (i.e. 
still active on project)_ +## Lead Developers +Case Control System: Jim Edwards (NCAR), Jim Foucar (SNL) + +MCT-based Coupler/Driver: Mariana Vertenstein (NCAR), Robert Jacob (ANL) + +Data Models: Mariana Vertenstein (NCAR) + +## Also Developed by +Alice Bertini (NCAR), Tony Craig (NCAR), Michael Deakin (SNL), Chris Fischer (NCAR), Steve Goldhaber (NCAR), +Erich Foster (SNL), Mike Levy (NCAR), Bill Sacks (NCAR), Andrew Salinger (SNL), Sean Santos (NCAR), Jason Sarich (ANL), +Andreas Wilke (ANL). + +# Acknowledgements + +CIME is jointly developed with support from the Earth System Modeling program of DOE's BER office and the CESM program +of NSF's Division of Atmospheric and Geospace Sciences. + +# License +CIME is free software made available under the BSD License. For details see the LICENSE file. diff --git a/config/acme/allactive/config_pesall.xml b/config/acme/allactive/config_pesall.xml index d7b496cd087..5c5ea284c31 100644 --- a/config/acme/allactive/config_pesall.xml +++ b/config/acme/allactive/config_pesall.xml @@ -4535,6 +4535,8 @@ ne30_ne30 grid on 40 nodes 36 ppn pure-MPI + 32 + 32 1350 72 @@ -6741,7 +6743,7 @@ -compset A_WCYCL* -res ne30_oEC* on 32 nodes pure-MPI - 32 + 32 32 675 @@ -6776,7 +6778,7 @@ -compset A_WCYCL* -res ne30_oEC* on 59 nodes pure-MPI - 32 + 32 32 1350 @@ -6811,7 +6813,7 @@ -compset A_WCYCL* -res ne30_oEC* on 115 nodes pure-MPI - 32 + 32 32 2700 diff --git a/config/acme/machines/config_batch.xml b/config/acme/machines/config_batch.xml index 73f56327e42..24017d8a678 100644 --- a/config/acme/machines/config_batch.xml +++ b/config/acme/machines/config_batch.xml @@ -16,6 +16,7 @@ + @@ -26,6 +27,7 @@ + @@ -36,6 +38,8 @@ qstat qsub + qdel + -v (\d+) --dependencies @@ -56,6 +60,8 @@ qstat qsub + qdel + -v #COBALT (\d+) --dependencies @@ -74,6 +80,7 @@ bjobs bsub + bkill < #BSUB <(\d+)> @@ -94,8 +101,8 @@ - -o {{ output_error_path }}.%J - -e {{ output_error_path }}.%J + -o {{ job_id }}.%J + -e {{ job_id }}.%J -J {{ job_id }} @@ -103,6 +110,8 
@@ qstat qsub + qdel + -v #PBS ^(\S+)$ -W depend=afterok:jobid @@ -118,7 +127,7 @@ -N {{ job_id }} -r {{ rerunnable }} - + -j oe -m {{ mail_options }} -V @@ -128,6 +137,7 @@ showq msub + canceljob #MSUB (\d+)$ -W depend=afterok:jobid @@ -151,6 +161,7 @@ sbatch + scancel #SBATCH (\d+)$ -l depend=jobid @@ -171,7 +182,7 @@ -A {{ project }} - pbatch + pbatch @@ -179,6 +190,7 @@ squeue sbatch + scancel #SBATCH (\d+)$ --dependency=afterok:jobid @@ -206,8 +218,8 @@ -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - shared - batch + shared + batch @@ -218,15 +230,15 @@ -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }} - acme + acme - debug - bdw - knl + debug + bdw + knl @@ -237,14 +249,14 @@ -l nodes={{ num_nodes }} - batch + batch - debug - regular + debug + regular @@ -253,8 +265,8 @@ --constraint=haswell - debug - regular + debug + regular @@ -263,27 +275,27 @@ --constraint=knl,quad,cache - debug - regular + debug + regular - default + default - default + default - default - debug-cache-quad + default + debug-cache-quad @@ -293,7 +305,7 @@ --mail-user=email@pnnl.gov - small + small @@ -305,7 +317,7 @@ --error=slurm.err - slurm + slurm @@ -316,10 +328,10 @@ --mail-user=email@pnnl.gov --output=slurm.out --error=slurm.err - - - slurm - + + + slurm + @@ -423,8 +435,8 @@ -env "all" - batch - debug + batch + debug @@ -433,7 +445,7 @@ -P {{ project }} - batch + batch 4 4 - 2 + 2 @@ -540,7 +539,7 @@ 4 4 - 2 + 2 mpirun @@ -571,7 +570,7 @@ 32 64 - 64 + 64 none 0.1 @@ -640,7 +639,7 @@ 32 64 - 64 + 64 none mpirun @@ -707,7 +706,7 @@ make 32 64 - 64 + 64 none mpirun @@ -769,7 +768,7 @@ jgfouca at sandia dot gov 8 16 - 16 + 16 1 TRUE fy150001 @@ -845,7 +844,7 @@ jgfouca at sandia dot gov 8 16 - 16 + 16 1 TRUE fy150001 @@ -920,7 +919,7 @@ jgfouca at sandia dot gov 8 36 - 36 + 36 1 TRUE fy150001 @@ -990,7 +989,7 @@ acme 4 16 - 16 + 16 TRUE ACME 0.1 @@ -1093,7 +1092,7 @@ acme 8 36 - 36 + 36 FALSE ACME @@ -1208,7 +1207,7 @@ acme 8 36 - 36 + 36 acme TRUE -D PIO_BUILD_TIMING:BOOL=ON @@ 
-1290,7 +1289,7 @@ cobalt jayesh -at- mcs.anl.gov 8 - 4 + 4 64 TRUE ClimateEnergy_2 @@ -1300,7 +1299,7 @@ /usr/bin/runjob --label short - --ranks-per-node $PES_PER_NODE + --ranks-per-node $MAX_MPITASKS_PER_NODE --np $TOTALPES --block $COBALT_PARTNAME $LOCARGS $ENV{BGQ_SMP_VARS} @@ -1345,7 +1344,7 @@ bogenschutz1 -at- llnl.gov 8 16 - 16 + 16 lc_slurm @@ -1401,7 +1400,7 @@ bogenschutz1 -at- llnl.gov 8 16 - 16 + 16 lc_slurm @@ -1462,7 +1461,7 @@ donahue5 -at- llnl.gov 8 36 - 36 + 36 lc_slurm @@ -1518,7 +1517,7 @@ cobalt mickelso -at- mcs.anl.gov 8 - 4 + 4 64 TRUE ClimateEnergy_2 @@ -1528,7 +1527,7 @@ /usr/bin/runjob --label short - --ranks-per-node $PES_PER_NODE + --ranks-per-node $MAX_MPITASKS_PER_NODE --np $TOTALPES --block $COBALT_PARTNAME $LOCARGS $ENV{BGQ_SMP_VARS} @@ -1573,6 +1572,7 @@ acme 8 128 + 64 64 OceanClimate TRUE @@ -1672,7 +1672,7 @@ slurm balwinder.singh -at- pnnl.gov 8 - 8 + 8 FALSE @@ -1745,7 +1745,7 @@ balwinder.singh -at- pnnl.gov 8 16 - 16 + 16 TRUE @@ -1812,7 +1812,7 @@ balwinder.singh -at- pnnl.gov 8 24 - 24 + 24 FALSE @@ -1911,7 +1911,7 @@ pbs dmricciuto 8 - 8 + 8 8 /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun @@ -1959,7 +1959,7 @@ pbs dmricciuto 32 - 32 + 32 32 /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun @@ -1995,7 +1995,7 @@ yinj -at- ornl.gov 4 32 - 16 + 16 pbs FALSE @@ -2065,8 +2065,8 @@ $ENV{PROJWORK}/$PROJECT CNL pbs - 16 TRUE + 16 acme 8 @@ -2082,7 +2082,7 @@ @@ -2252,7 +2252,7 @@ pbs acme 8 - 16 + 16 32 TRUE -D PIO_BUILD_TIMING:BOOL=ON @@ -2262,7 +2262,7 @@ -j {{ hyperthreading }} -S {{ tasks_per_numa }} -n $TOTALPES - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} -cc numa_node @@ -2402,7 +2402,7 @@ 4 36 - 32 + 32 climateacme TRUE luke.vanroekel @ gmail.com @@ -2424,7 +2424,7 @@ jonbob -at- lanl.gov 4 16 - 16 + 16 climateacme TRUE /lustre/scratch3/turquoise/$ENV{USER}/ACME/input_data @@ -2512,7 +2512,7 @@ jedwards@ucar.edu 8 30 - 15 + 15 TRUE mpirun.lsf @@ -2623,7 +2623,7 @@ -n $TOTALPES 
-S {{ tasks_per_numa }} - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} @@ -2662,7 +2662,7 @@ -n $TOTALPES -S {{ tasks_per_numa }} - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} @@ -2687,20 +2687,20 @@ gbisht at lbl dot gov 4 16 - 16 + 16 TRUE mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE @@ -2750,20 +2750,20 @@ gbisht at lbl dot gov 4 12 - 12 + 12 TRUE mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE @@ -2811,7 +2811,7 @@ none rgknox at lbl gov 4 - 4 + 4 FALSE @@ -2820,7 +2820,7 @@ mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE @@ -2847,7 +2847,7 @@ lsf acme 32 - 20 + 20 160 TRUE csc249 @@ -2862,7 +2862,7 @@ --report-bindings --display-map diff --git a/config/acme/machines/config_pio.xml b/config/acme/machines/config_pio.xml index f76c751661a..b63fbdc6cd5 100644 --- a/config/acme/machines/config_pio.xml +++ b/config/acme/machines/config_pio.xml @@ -20,7 +20,7 @@ - $PES_PER_NODE + $MAX_MPITASKS_PER_NODE 60 128 64 diff --git a/config/acme/machines/template.case.run b/config/acme/machines/template.case.run index 765da5a367b..0ff2d9a8701 100755 --- a/config/acme/machines/template.case.run +++ b/config/acme/machines/template.case.run @@ -71,6 +71,8 @@ def _main_func(description): test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) + sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) + caseroot, skip_pnl = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: success = case_run(case, skip_pnl=skip_pnl) diff --git a/config/acme/machines/template.case.test b/config/acme/machines/template.case.test index 6bc43206938..340e161b94e 100755 --- 
a/config/acme/machines/template.case.test +++ b/config/acme/machines/template.case.test @@ -63,6 +63,8 @@ def _main_func(description): test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) + sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) + caseroot, testname, reset = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: success = case_test(case, testname, reset) diff --git a/config/acme/machines/userdefined_laptop_template/config_machines.xml b/config/acme/machines/userdefined_laptop_template/config_machines.xml index 5790446f74e..f6cc9587d0b 100644 --- a/config/acme/machines/userdefined_laptop_template/config_machines.xml +++ b/config/acme/machines/userdefined_laptop_template/config_machines.xml @@ -19,7 +19,7 @@ __YOUR_NAME_HERE__ 4 4 - 2 + 2 diff --git a/config/cesm/config_archive.xml b/config/cesm/config_archive.xml index f148d3c4e14..f0f3b27ea8f 100644 --- a/config/cesm/config_archive.xml +++ b/config/cesm/config_archive.xml @@ -85,6 +85,10 @@ rpointer.ocn$NINST_STRING.ovf ./$CASE.pop$NINST_STRING.ro.$DATENAME + + rpointer.ocn$NINST_STRING.tavg + ./$CASE.pop$NINST_STRING.rh.$DATENAME.nc + diff --git a/config/cesm/config_grids.xml b/config/cesm/config_grids.xml index eef06c343cc..c478d59ed0c 100644 --- a/config/cesm/config_grids.xml +++ b/config/cesm/config_grids.xml @@ -613,6 +613,13 @@ + + ne5np4 + ne5np4 + ne5np4 + gx3v7 + + ne16np4 ne16np4 @@ -620,6 +627,13 @@ gx3v7 + + ne16np4 + ne16np4 + ne16np4 + gx3v7 + + ne30np4 ne30np4 @@ -666,6 +680,20 @@ gx1v7 + + ne30np4 + ne30np4 + ne30np4 + gx1v6 + + + + ne30np4 + ne30np4 + ne30np4 + gx1v7 + + ne60np4 ne60np4 @@ -680,6 +708,13 @@ gx1v7 + + ne60np4 + ne60np4 + ne60np4 + gx1v6 + + ne120np4 ne120np4 @@ -701,6 +736,13 @@ tx0.1v2 + + ne120np4 + ne120np4 + ne120np4 + gx1v6 + + ne240np4 0.23x0.31 @@ -724,104 +766,119 @@ tx0.1v2 - - ne16np4 - ne16np4 - ne16np4 - gx3v7 + + ne240np4 + ne240np4 + 
ne240np4 + gx1v6 - - ne16np4 - ne16np4 - ne16np4 - gx3v7 + + + + ne30np4.pg2 + ne30np4.pg2 + ne30np4.pg2 + gx1v7 - - ne30np4 - ne30np4 - ne30np4 - gx1v6 + + ne60np4.pg2 + ne60np4.pg2 + ne60np4.pg2 + gx1v7 - - ne30np4 - ne30np4 - ne30np4 - gx1v6 + + ne120np4.pg2 + ne120np4.pg2 + ne120np4.pg2 + gx1v7 - - ne30np4 - ne30np4 - ne30np4 + + ne240np4.pg2 + ne240np4.pg2 + ne240np4.pg2 gx1v7 - - ne60np4 - ne60np4 - ne60np4 - gx1v6 + + + + ne5np4.pg3 + ne5np4.pg3 + ne5np4.pg3 + gx3v7 - - ne60np4 - ne60np4 - ne60np4 - gx1v6 + + ne16np4.pg3 + ne16np4.pg3 + ne16np4.pg3 + gx3v7 - - ne60np4 - ne60np4 - ne60np4 + + ne30np4.pg3 + ne30np4.pg3 + ne30np4.pg3 gx1v7 - - ne120np4 - ne120np4 - ne120np4 - gx1v6 + + ne60np4.pg3 + ne60np4.pg3 + ne60np4.pg3 + gx1v7 - - ne120np4 - ne120np4 - ne120np4 - gx1v6 + + ne120np4.pg3 + ne120np4.pg3 + ne120np4.pg3 + gx1v7 - - ne120np4 - ne120np4 - ne120np4 + + ne240np4.pg3 + ne240np4.pg3 + ne240np4.pg3 gx1v7 - - ne240np4 - ne240np4 - ne240np4 - gx1v6 + + + + ne30np4.pg4 + ne30np4.pg4 + ne30np4.pg4 + gx1v7 - - ne240np4 - ne240np4 - ne240np4 - gx1v6 + + ne60np4.pg4 + ne60np4.pg4 + ne60np4.pg4 + gx1v7 - - ne240np4 - ne240np4 - ne240np4 + + ne120np4.pg4 + ne120np4.pg4 + ne120np4.pg4 gx1v7 + + + + ne0np4CONUS.ne30x8 + ne0np4CONUS.ne30x8 + ne0np4CONUS.ne30x8 + tx0.1v2 + + @@ -1064,21 +1121,71 @@ T42 is Gaussian grid: + + 1352 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4_gx3v7.140810.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4_gx3v7.140810.nc + ne5np4 is Spectral Elem 6-deg grid: + For ultra-low resolution spectral element grid testing + + + + 1350 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg3_gx3v7.170605.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg3_gx3v7.170605.nc + ne5np4 is Spectral Elem 6-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 13826 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne16np4_gx3v7.120406.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne16np4_gx3v7.121113.nc + 
$DIN_LOC_ROOT/share/domains/domain.lnd.ne16_gx1v7.171003.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne16_gx1v7.171003.nc ne16np4 is Spectral Elem 2-deg grid: For low resolution spectral element grid testing + + 13824 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne16pg3_gx1v7.171003.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne16pg3_gx1v7.171003.nc + ne16np4.pg3 is a Spectral Elem 2-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 48602 1 $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4_gx1v6.110905.nc $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4_gx1v6_110217.nc + $DIN_LOC_ROOT/share/domains/domain.lnd.ne30_gx1v7.171003.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne30_gx1v7.171003.nc ne30np4 is Spectral Elem 1-deg grid: + + 21600 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg2_gx1v7.170628.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg2_gx1v7.170628.nc + ne30np4.pg2 is a Spectral Elem 1-deg grid with a 2x2 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 48600 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg3_gx1v7.170605.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg3_gx1v7_170605.nc + ne30np4.pg3 is a Spectral Elem 1-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 86400 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg4_gx1v7.170628.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg4_gx1v7.170628.nc + ne30np4.pg4 is a Spectral Elem 1-deg grid with a 4x4 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 194402 1 $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4_gx1v6.120406.nc @@ -1086,6 +1193,30 @@ ne60np4 is Spectral Elem 1/2-deg grid: + + 86400 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg2_gx1v7.170628.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg2_gx1v7.170628.nc + ne60np4.pg2 is a Spectral Elem 0.5-deg grid with a 2x2 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 194400 1 + 
$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg3_gx1v7.170628.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg3_gx1v7.170628.nc + ne60np4.pg3 is a Spectral Elem 0.5-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 345600 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg4_gx1v7.170628.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg4_gx1v7.170628.nc + ne60np4.pg4 is a Spectral Elem 0.5-deg grid with a 4x4 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 777602 1 $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v6.110502.nc @@ -1093,6 +1224,30 @@ ne120np4 is Spectral Elem 1/4-deg grid: + + 345600 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_gx1v7.170629.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_gx1v7.170629.nc + ne120np4.pg2 is a Spectral Elem 0.25-deg grid with a 2x2 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 777600 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.170629.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.170629.nc + ne120np4.pg3 is a Spectral Elem 0.25-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 1382400 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg4_gx1v7.170629.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg4_gx1v7.170629.nc + ne120np4.pg4 is a Spectral Elem 0.25-deg grid with a 4x4 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 3110402 1 $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4_gx1v6.111226.nc @@ -1101,6 +1256,22 @@ Experimental for very high resolution experiments + + 1382400 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg2_gx1v7.170629.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg2_gx1v7.170629.nc + ne240np4.pg2 is a Spectral Elem 0.125-deg grid with a 2x2 FVM physics grid: + EXPERIMENTAL FVM physics grid + + + + 3110400 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg3_gx1v7.170629.nc + 
$DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg3_gx1v7.170629.nc + ne240np4.pg3 is a Spectral Elem 0.125-deg grid with a 3x3 FVM physics grid: + EXPERIMENTAL FVM physics grid + + 320 384 $DIN_LOC_ROOT/share/domains/domain.ocn.gx1v6.090206.nc diff --git a/config/cesm/machines/Makefile b/config/cesm/machines/Makefile index 014088b01eb..1b0e4defed1 100644 --- a/config/cesm/machines/Makefile +++ b/config/cesm/machines/Makefile @@ -413,7 +413,7 @@ FFLAGS_NOOPT += $(FPPDEFS) ifeq ($(findstring -cosp,$(CAM_CONFIG_OPTS)),-cosp) # The following is for the COSP simulator code: - COSP_LIBDIR:=$(EXEROOT)/atm/obj/cosp + COSP_LIBDIR:=$(abspath $(EXEROOT)/atm/obj/cosp) endif ifeq ($(MODEL),cam) @@ -427,7 +427,7 @@ rrtmg_sw_k_g.o: rrtmg_sw_k_g.f90 ifdef COSP_LIBDIR -INCLDIR+=-I$(COSP_LIBDIR) -I$(COSP_LIBDIR)/../ +INCLDIR+=-I$(COSP_LIBDIR) -I$(COSP_LIBDIR)/../ -I../$(INSTALL_SHAREDPATH)/include -I../$(CSM_SHR_INCLUDE) $(COSP_LIBDIR)/libcosp.a: cam_abortutils.o $(MAKE) -C $(COSP_LIBDIR) F90='$(FC)' F90FLAGS='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FC_AUTO_R8)' \ F90FLAGS_noauto='$(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS)' \ diff --git a/config/cesm/machines/config_batch.xml b/config/cesm/machines/config_batch.xml index afbfadb93e1..55737b53725 100644 --- a/config/cesm/machines/config_batch.xml +++ b/config/cesm/machines/config_batch.xml @@ -25,8 +25,6 @@ be used. The following variables can be used to choose a queue : walltimemin: Giving the minimum amount of walltime for the queue. walltimemax: The maximum amount of walltime for a queue. - jobmin: The minimum task count required to use this queue. - jobmax: The maximum task count required to use this queue. nodemin: The minimum node count required to use this queue. nodemax: The maximum node count required to use this queue. 
--> @@ -53,6 +51,8 @@ qstat qsub + qdel + -v (\d+) --dependencies @@ -65,7 +65,7 @@ - + @@ -74,6 +74,8 @@ qstat qsub + qdel + -v #COBALT (\d+) --dependencies @@ -83,7 +85,7 @@ - + @@ -92,6 +94,7 @@ bjobs bsub + bkill < #BSUB <(\d+)> @@ -110,8 +113,8 @@ -R "span[ptile={{ tasks_per_node }}]" -N -a {{ poe }} - -o {{ output_error_path }}.%J - -e {{ output_error_path }}.%J + -o {{ job_id }}.%J + -e {{ job_id }}.%J -J {{ job_id }} @@ -119,6 +122,8 @@ qstat qsub + qdel + -v #PBS ^(\S+)$ -W depend=afterok:jobid @@ -134,7 +139,7 @@ -N {{ job_id }} -r {{ rerunnable }} - + -j oe -V @@ -142,6 +147,7 @@ squeue + scancel #SBATCH (\d+)$ --dependency=afterok:jobid @@ -158,7 +164,7 @@ --job-name={{ job_id }} --nodes={{ num_nodes }} --ntasks-per-node={{ tasks_per_node }} - --output={{ output_error_path }} + --output={{ job_id }} --exclusive @@ -183,7 +189,7 @@ -S {{ shell }} - batch + batch @@ -195,7 +201,7 @@ regular - debug + debug @@ -236,8 +242,8 @@ -S /bin/bash - debug - batch + debug + batch @@ -248,11 +254,11 @@ -S {{ shell }} - short - medium - long - overnight - monster + short + medium + long + overnight + monster @@ -272,7 +278,7 @@ -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - regular + regular @@ -282,26 +288,26 @@ -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - regular + regular - default + default - default + default sbatch - queue + queue @@ -314,7 +320,7 @@ -S {{ shell }} - normal + normal @@ -326,7 +332,7 @@ -S {{ shell }} - normal + normal @@ -338,7 +344,7 @@ -S {{ shell }} - normal + normal @@ -350,14 +356,7 @@ -S {{ shell }} - normal - - - - - sbatch - - ec + normal @@ -376,7 +375,7 @@ sbatch - batch + batch @@ -386,8 +385,8 @@ -C haswell - regular - + regular + @@ -398,24 +397,24 @@ -S 2 - regular - + regular + sbatch - regular - debug + regular + debug ssh stampede.tacc.utexas.edu cd $CASEROOT ; sbatch - normal - 
development + normal + development @@ -439,10 +438,10 @@ - caldera - regular - capability - premium + caldera + regular + capability + premium diff --git a/config/cesm/machines/config_compilers.xml b/config/cesm/machines/config_compilers.xml index d77e399b2c9..bebee700cb4 100644 --- a/config/cesm/machines/config_compilers.xml +++ b/config/cesm/machines/config_compilers.xml @@ -722,6 +722,14 @@ using a fortran linker. + + + + -framework Accelerate + + + mpixlc_r mpixlf2003_r diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml index 0960de8e9dd..8c55b92d2a3 100644 --- a/config/cesm/machines/config_machines.xml +++ b/config/cesm/machines/config_machines.xml @@ -66,14 +66,14 @@ pbs cseg 32 - 16 + 16 TRUE aprun -n $TOTALPES - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} @@ -145,12 +145,12 @@ lsf tcraig -at- ucar.edu 16 - 16 + 16 mpirun -hostfile $ENV{PBS_JOBID} - -ppn $PES_PER_NODE + -ppn $MAX_MPITASKS_PER_NODE -n $TOTALPES @@ -191,6 +191,64 @@ + + + Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich + using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules + + regex.expression.matching.your.machine + LINUX + https://howto.get.out + gnu + mpich + none + + $ENV{HOME}/cesm/scratch + $ENV{HOME}/cesm/inputdata + $ENV{HOME}/cesm/inputdata/lmwg + $ENV{HOME}/cesm/archive/$CASE + $ENV{HOME}/cesm/cesm_baselines + $ENV{HOME}/cesm/tools/cime/tools/cprnc/cprnc + gmake + 8 + none + me@my.address + 8 + 8 + FALSE + + mpiexec + + -np {{ total_tasks }} + + + + /usr/share/Modules/init/perl.pm + /usr/share/Modules/init/python.py + /usr/share/Modules/init/csh + /usr/share/Modules/init/sh + /usr/bin/modulecmd perl + /usr/bin/modulecmd python + module + module + + + + + compiler/gnu/7.2.0 + mpi/gcc/mpich-3.2 + tool/netcdf/4.4.1.1/gcc + tool/parallel-netcdf/mpich + + + + 256M + + + -1 + + + NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS .*.cheyenne.ucar.edu 
@@ -208,7 +266,7 @@ cseg 36 - 36 + 36 TRUE mpiexec_mpt @@ -312,7 +370,7 @@ slurm tcraig -at- ucar.edu 24 - 24 + 24 FALSE srun @@ -409,7 +467,7 @@ slurm cseg 64 - 32 + 32 srun @@ -512,7 +570,7 @@ slurm cseg 256 - 64 + 64 srun @@ -612,7 +670,7 @@ slurm tcraig -at- ucar.edu 12 - 12 + 12 srun @@ -669,7 +727,7 @@ slurm cseg 48 - 24 + 24 srun @@ -768,14 +826,14 @@ pbs julio -at- ucar.edu 24 - 24 + 24 aprun -j {{ hyperthreading }} -n $TOTALPES -S {{ tasks_per_numa }} - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} @@ -834,7 +892,7 @@ pbs cseg 48 - 48 + 48 mpiexec @@ -889,7 +947,7 @@ Customize these fields as appropriate for your system, - particularly changing MAX_TASKS_PER_NODE and PES_PER_NODE to the + particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the number of cores on your machine. You may also want to change instances of '$ENV{HOME}/projects' to your desired directory organization. You can use this in either of two ways: (1) @@ -898,9 +956,12 @@ config_machines.xml file in your personal .cime directory and then changing the machine name (MACH="homebrew") to your machine name and the NODENAME_REGEX to something matching - your machine's hostname. In this case, you should not need the + your machine's hostname. With (2), you should not need the `--machine` argument, because the machine should be determined - automatically. + automatically. However, with (2), you will also need to copy the + homebrew-specific settings in config_compilers.xml into a + config_compilers.xml file in your personal .cime directory, again + changing the machine name (MACH="homebrew") to your machine name. 
something.matching.your.machine.hostname @@ -918,7 +979,7 @@ none __YOUR_NAME_HERE__ 4 - 4 + 4 mpirun @@ -950,7 +1011,7 @@ cseg 36 - 36 + 36 TRUE mpiexec_mpt @@ -1017,7 +1078,7 @@ none jgfouca at sandia dot gov 64 - 64 + 64 mpirun @@ -1069,14 +1130,14 @@ cobalt cseg 64 - 8 + 8 TRUE /usr/bin/runjob --label short - --ranks-per-node $PES_PER_NODE + --ranks-per-node $MAX_MPITASKS_PER_NODE --np $TOTALPES --block $COBALT_PARTNAME --envs OMP_WAIT_POLICY=active --envs BG_SMP_FAST_WAKEUP=yes $LOCARGS @@ -1120,7 +1181,7 @@ slurm tcraig -at- ucar.edu 32 - 32 + 32 FALSE mpiexec_mpt @@ -1165,7 +1226,7 @@ pbs fvitt -at- ucar.edu 28 - 28 + 28 TRUE mpiexec_mpt @@ -1219,7 +1280,7 @@ pbs fvitt -at- ucar.edu 24 - 24 + 24 TRUE mpiexec_mpt @@ -1273,7 +1334,7 @@ pbs fvitt -at- ucar.edu 16 - 16 + 16 TRUE mpiexec_mpt @@ -1327,7 +1388,7 @@ pbs fvitt -at- ucar.edu 20 - 20 + 20 TRUE mpiexec_mpt @@ -1381,12 +1442,12 @@ slurm edouard.davin -at- env.ethz.ch 32 - 32 + 32 aprun -n $TOTALPES - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE -d $ENV{OMP_NUM_THREADS} @@ -1417,7 +1478,7 @@ none jgfouca at sandia dot gov 64 - 64 + 64 mpirun @@ -1469,14 +1530,14 @@ slurm jgfouca at sandia dot gov 16 - 16 + 16 TRUE mpirun -np $TOTALPES - -npernode $PES_PER_NODE + -npernode $MAX_MPITASKS_PER_NODE @@ -1531,7 +1592,7 @@ slurm cseg 16 - 16 + 16 ibrun @@ -1594,13 +1655,13 @@ cobalt_theta cseg 64 - 64 + 64 TRUE aprun -n $TOTALPES - -N $PES_PER_NODE + -N $MAX_MPITASKS_PER_NODE --cc depth -d $OMP_NUM_THREADS -e OMP_STACKSIZE=64M -e OMP_NUM_THREADS=$OMP_NUM_THREADS @@ -1682,7 +1743,7 @@ lsf cseg 30 - 15 + 15 TRUE + + @@ -45,10 +47,16 @@ + + + + + + @@ -79,7 +87,7 @@ @@ -142,8 +150,6 @@ - - diff --git a/config/xml_schemas/config_machines.xsd b/config/xml_schemas/config_machines.xsd index fc234bf5bca..2d131d2cd66 100644 --- a/config/xml_schemas/config_machines.xsd +++ b/config/xml_schemas/config_machines.xsd @@ -33,7 +33,7 @@ - + @@ -105,9 +105,9 @@ - - + diff --git a/config/xml_schemas/config_machines_template.xml 
b/config/xml_schemas/config_machines_template.xml index b661316f36e..f07c69928fa 100644 --- a/config/xml_schemas/config_machines_template.xml +++ b/config/xml_schemas/config_machines_template.xml @@ -84,12 +84,12 @@ + should always be >= MAX_MPITASKS_PER_NODE --> 36 - - 36 + 36 diff --git a/config/xml_schemas/config_pes.xsd b/config/xml_schemas/config_pes.xsd index 28000481e0f..f82a55c8f38 100644 --- a/config/xml_schemas/config_pes.xsd +++ b/config/xml_schemas/config_pes.xsd @@ -8,7 +8,7 @@ - + @@ -76,7 +76,7 @@ - + diff --git a/config/xml_schemas/env_batch.xsd b/config/xml_schemas/env_batch.xsd index 5c1f54bf77e..8ad08162403 100644 --- a/config/xml_schemas/env_batch.xsd +++ b/config/xml_schemas/env_batch.xsd @@ -6,8 +6,6 @@ - - diff --git a/config/xml_schemas/env_mach_pes.xsd b/config/xml_schemas/env_mach_pes.xsd index 21ee5351e65..df6dba2cbbe 100644 --- a/config/xml_schemas/env_mach_pes.xsd +++ b/config/xml_schemas/env_mach_pes.xsd @@ -57,7 +57,7 @@ - + diff --git a/doc/source/data_models/data-atm.rst b/doc/source/data_models/data-atm.rst index c82631e8383..eb9478eb181 100644 --- a/doc/source/data_models/data-atm.rst +++ b/doc/source/data_models/data-atm.rst @@ -4,9 +4,9 @@ Data Atmosphere (DATM) ====================== DATM is normally used to provide observational forcing data (or forcing data produced by a previous run using active components) to drive prognostic components. -In the case of CESM, these would be: CLM (I compset), POP2 (C compset), and POP2/CICE (G compset). +In the case of CESM, these would be: CLM (I compset), POP2 (C compset), and POP2/CICE (G compset). As a result, DATM variable settings are specific to the compset that will be targeted. -As examples, CORE2_NYF (CORE2 normal year forcing) is the DATM mode used in C and G compsets. +As examples, CORE2_NYF (CORE2 normal year forcing) is the DATM mode used in C and G compsets. CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are DATM modes using observational data for forcing CLM in I compsets. .. 
_datm-xml-vars: @@ -14,7 +14,7 @@ CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are DATM modes using observational dat ------------------ xml variables ------------------ -The following are ``$CASEROOT`` xml variables that CIME supports for DATM. +The following are ``$CASEROOT`` xml variables that CIME supports for DATM. These variables are defined in ``$CIMEROOT/src/components/data_comps/datm/cime_config/config_component.xml``. These variables will appear in ``env_run.xml`` and the resulting values are compset dependent. @@ -24,20 +24,26 @@ These variables will appear in ``env_run.xml`` and the resulting values are comp :header: "xml variable", "description" :widths: 20, 80 - "DATM_MODE", "Mode for atmospheric component" - "", "Valid values are: CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP," - "", "CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHISTForcing" - "DATM_PRESAERO", "Prescribed aerosol forcing, if any" - "DATM_TOPO", "Surface topography" - "DATM_CO2_TSERIES", "CO2 time series type" - "DATM_CPLHIST_CASE", "Coupler history data mode case name" - "DATM_CPLHIST_DIR", "Coupler history data mode directory containing coupler history data" - "DATM_CPLHIST_YR_ALIGN", "Coupler history data model simulation year corresponding to data starting year" - "DATM_CPLHIST_YR_START", "Coupler history data model starting year to loop data over" - "DATM_CPLHIST_YR_END", "Coupler history data model ending year to loop data over" - "DATM_CLMNCEP_YR_ALIGN", "I compsets only - simulation year corresponding to data starting year" - "DATM_CPLHIST_YR_START", "I compsets only - data model starting year to loop data over" - "DATM_CPLHIST_YR_END", "I compsets only - data model ending year to loop data over" + "DATM_MODE", "Mode for atmospheric component" + "", "Valid values are: CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP," + "", "CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHIST" + + "DATM_PRESAERO", "Optional prescribed aerosol forcing" + "DATM_TOPO", "Optional Surface topography" + 
"DATM_CO2_TSERIES", "Optional CO2 time series type" + + "DATM_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file " + "DATM_CPLHIST_CASE", "Coupler history forcing data mode - case name" + "DATM_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" + "DATM_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DATM_CPLHIST_YR_START" + "DATM_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" + "DATM_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" + + "DATM_CLMNCEP_YR_ALIGN", "I compsets only - simulation year corresponding to data starting year" + "DATM_CLMNCEP_YR_START", "I compsets only - data model starting year to loop data over" + "DATM_CLMNCEP_YR_END", "I compsets only - data model ending year to loop data over" + +.. note:: If ``DATM_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ATM_DOMAIN_PATH`` and ``ATM_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DATM_CPLHIST_DOMAIN_FILE``. If ``DATM_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the datm component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. This is the default that should be used for this mode. Alternatively, ``DATM_CPLHIST_DOMAIN_FILE`` can be set to ``$ATM_DOMAIN_PATH/$ATM_DOMAIN_FILE`` in a non-default configuration. .. 
_datm-datamodes: @@ -45,29 +51,29 @@ These variables will appear in ``env_run.xml`` and the resulting values are comp datamode values -------------------- -The xml variable ``DATM_MODE`` sets the streams that are associated with DATM and also sets the namelist variable ``datamode`` that specifies what additional operations need to be done by DATM on the streams before returning to the driver. -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. Each data model has a unique set of ``datamode`` values that it supports. -The valid values for ``datamode`` are set in the file ``namelist_definition_datm.xml`` using the xml variable ``DATM_MODE`` in the ``config_component.xml`` file for DATM. -CIME will generate a value ``datamode`` that is compset dependent. +The xml variable ``DATM_MODE`` (described in :ref:`datm_mode`) sets the streams that are associated with DATM and also sets the namelist variable ``datamode``. +``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DATM on the streams before returning to the driver. -The following are the supported DATM datamode values and their relationship to the ``$DATM_MODE`` xml variable value. +Each data model has its own set of supported ``datamode`` values. The following are the supported DATM ``datamode`` values, as defined in the file ``namelist_definition_datm.xml``. .. csv-table:: "Valid values for datamode namelist variable" :header: "datamode variable", "description" :widths: 20, 80 "NULL", "This mode turns off the data model as a provider of data to the coupler. The ``atm_present`` flag will be set to ``false`` and the coupler assumes no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. 
This mode will examine the fields found in all input data streams, if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero except for aerosol deposition fields which will be set to a special value. " - "CPLHIST","Uilize user-generated coupler history data to spin up prognostic component. Works exactly like COPYALL." - "CLMNCEP", "In conjunction with NCEP climatological atmosphere data, provides the atmosphere forcing favored by the Land Model Working Group when coupling an active land model with observed atmospheric forcing. This mode replicates code previously found in CLM (circa 2005), before the LMWG started using the CCSM flux coupler and data models to do active-land-only simulations." + "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero except for aerosol deposition fields which will be set to a special value. " + "CLMNCEP", "In conjunction with NCEP climatological atmosphere data, provides the atmosphere forcing favored by the Land Model Working Group when coupling an active land model with observed atmospheric forcing. This mode replicates code previously found in CLM (circa 2005), before the LMWG started using the CIME coupling infrastructure and data models to do active-land-only simulations." "CORE2_NYF", "Coordinated Ocean-ice Reference Experiments (CORE) Version 2 Normal Year Forcing." 
"CORE2_IAF", "In conjunction with CORE Version 2 atmospheric forcing data, provides the atmosphere forcing favored by the Ocean Model Working Group when coupling an active ocean model with observed atmospheric forcing.
+120,7 @@ The following tabe describes the valid values of ``DATM_MODE``, and how it relat Namelists -------------- -The DATM namelist file is ``datm_in`` (or ``datm_in_NNN`` for multiple instances). DATM namelists can be separated into two groups: *stream-independent* namelist variables that are specific to the DATM model and *stream-specific* namelist variables whose names are common to all the data models. +The DATM namelist file is ``datm_in`` (or ``datm_in_NNN`` for multiple instances). DATM namelists can be separated into two groups: *stream-independent* namelist variables that are specific to the DATM model and *stream-specific* namelist variables whose names are common to all the data models. Stream dependent input is in the namelist group ``"shr_strdata_nml`` which is discussed in :ref:`input streams ` and is the same for all data models. @@ -126,17 +132,17 @@ The stream-independent group is ``datm_nml`` and the DATM stream-independent nam datm_nml vars description ===================== ============================================================================================= decomp decomposition strategy (1d, root) - + 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename +restfilm master restart filename +restfils stream restart filename force_prognostic_true TRUE => force prognostic behavior bias_correct if set, include bias correction streams in namelist anomaly_forcing if set, includ anomaly forcing streams in namelist -factorfn filename containing correction factors for use in CORE2 modes (CORE2_IAF and CORE2_NYF) -presaero if true, prescribed aerosols are sent from datm +factorfn filename containing correction factors for use in CORE2 modes (CORE2_IAF and CORE2_NYF) +presaero if true, prescribed aerosols are sent from datm iradsw frequency to update radiation in number of time steps (of hours if negative) -wiso_datm if true, turn on water isotopes +wiso_datm if true, turn on 
water isotopes ===================== ============================================================================================= .. _datm-mode-independent-streams: @@ -145,23 +151,23 @@ wiso_datm if true, turn on water isotopes Streams independent of DATM_MODE value ------------------------------------------ -In general, each ``DATM_MODE`` xml variable is identified with a unique set of streams. +In general, each ``DATM_MODE`` xml variable is identified with a unique set of streams. However, there are several streams in DATM that can accompany any ``DATM_MODE`` setting. Currently, these are streams associated with prescribed aerosols, co2 time series, topography, anomoly forcing and bias correction. These mode-independent streams are activated different, depending on the stream. - ``prescribed aerosol stream:`` - To add this stream, set ``$DATM_PRESAERO`` to a supported value other than ``none``. + To add this stream, set ``$DATM_PRESAERO`` to a supported value other than ``none``. - ``co2 time series stream``: To add this stream, set ``$DATM_CO2_TSERIES`` to a supported value other than ``none``. - + - ``topo stream``: To add this stream, set ``$DATM_TOPO`` to a supported value other than ``none``. - ``anomaly forcing stream:`` To add this stream, you need to add any of the following keywword/value pair to the end of ``user_nl_datm``: - :: + :: Anomaly.Forcing.Precip = Anomaly.Forcing.Temperature = @@ -174,7 +180,7 @@ These mode-independent streams are activated different, depending on the stream. 
- ``bias_correct stream:`` To add this stream, you need to add any of the following keywword/value pair to the end of ``user_nl_datm``: - :: + :: BC.QIAN.CMAP.Precip = BC.QIAN.GPCP.Precip = @@ -270,7 +276,3 @@ In general, the stream input file should translate the stream input variable nam "snowl_HDO", "Faxa_snowl_HDO" "shum_16O", "Sa_shum_16O" "shum_18O", "Sa_shum_18O" - - - - diff --git a/doc/source/data_models/data-lnd.rst b/doc/source/data_models/data-lnd.rst index 2bdee8b6fb1..278a4706ba6 100644 --- a/doc/source/data_models/data-lnd.rst +++ b/doc/source/data_models/data-lnd.rst @@ -3,9 +3,9 @@ Data Land (DLND) ================ -The land model is unique because it supports land data and snow data (*lnd and sno*) almost as if they were two separate components, but they are in fact running in one component model through one interface. -The lnd (land) data consist of fields sent to the atmosphere. -This set of data is used when running DLND with an active atmosphere. +The land model is unique because it supports land data and snow data (*lnd and sno*) almost as if they were two separate components, but they are in fact running in one component model through one interface. +The lnd (land) data consist of fields sent to the atmosphere. +This set of data is used when running DLND with an active atmosphere. In general this is not a mode that is used or supported. The sno (snow) data consist of fields sent to the glacier model. This set of data is used when running dlnd with an active glacier model (TG compsets). Both sets of data are assumed to be on the same grid. @@ -15,7 +15,7 @@ The sno (snow) data consist of fields sent to the glacier model. This set of dat xml variables --------------- -The following are xml variables that CIME supports for DLND. +The following are xml variables that CIME supports for DLND. These variables are defined in ``$CIMEROOT/src/components/data_comps/dlnd/cime_config/config_component.xml``. 
These variables will appear in ``env_run.xml`` and are used by the DLND ``cime_config/buildnml`` script to generate the DLND namelist file ``dlnd_in`` and the required associated stream files for the case. @@ -27,11 +27,15 @@ These variables will appear in ``env_run.xml`` and are used by the DLND ``cime_c "DLND_MODE", "Mode for data land component" "", "Valid values are: NULL, CPLHIST, GLC_CPLHIST" - "DLND_CPLHIST_CASE", "Coupler history data mode case name" - "DLND_CPLHIST_DIR", "Coupler history data mode directory containing coupler history data" - "DLND_CPLHIST_YR_ALIGN", "Coupler history data model simulation year corresponding to data starting year" - "DLND_CPLHIST_YR_START", "Coupler history data model starting year to loop data over" - "DLND_CPLHIST_YR_END", "Coupler history data model ending year to loop data over" + + "DLND_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file" + "DLND_CPLHIST_CASE", "Coupler history forcing data mode - case name" + "DLND_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" + "DLND_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DLND_CPLHIST_YR_START" + "DLND_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" + "DLND_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" + +.. note:: If ``DLND_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``LND_DOMAIN_PATH`` and ``LND_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DLND_CPLHIST_DOMAIN_FILE``. 
If ``DLND_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the dlnd component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. Alternatively, ``DLND_CPLHIST_DOMAIN_FILE`` can be set to ``$LND_DOMAIN_PATH/$LND_DOMAIN_FILE``. .. _dlnd-datamodes: @@ -39,25 +43,26 @@ These variables will appear in ``env_run.xml`` and are used by the DLND ``cime_c datamode values -------------------- -The xml variable ``DLND_MODE`` sets the streams that are associated with DLND and also sets the namelist variable ``datamode`` that specifies what additional operations need to be done by DLND on the streams before returning to the driver. -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. Each data model has a unique set of ``datamode`` values that it supports. -The valid values for ``datamode`` are set in the file ``namelist_definition_dlnd.xml`` using the xml variable ``DLND_MODE`` in the ``config_component.xml`` file for DLND. -CIME will generate a value ``datamode`` that is compset dependent. +The xml variable ``DLND_MODE`` (described in :ref:`dlnd_mode`) sets the streams that are associated with DLND and also sets the namelist variable ``datamode``. +``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DLND on the streams before returning to the driver. -The following are the supported DATM datamode values and their relationship to the ``$DATM_MODE`` xml variable value. +Each data model has its own set of supported ``datamode`` values. The following are the supported DLND ``datamode`` values, as defined in the file ``namelist_definition_dlnd.xml``. .. 
csv-table:: "Valid values for datamode namelist variable" :header: "datamode variable", "description" :widths: 20, 80 - "NULL", "Turns off the data model as a provider of data to the coupler. The ice_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams, if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." + "NULL", "Turns off the data model as a provider of data to the coupler. The ``lnd_present`` flag will be set to false and the coupler will assume no exchange of data to or from the data model." + "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." + +.. _dlnd_mode: ------------------------------- DLND_MODE, datamode and streams ------------------------------- -The following tabe describes the valid values of ``DLND_MODE``, and how it relates to the associated input streams and the ``datamode`` namelist variable. +The following table describes the valid values of ``DLND_MODE`` (defined in the ``config_component.xml`` file for DLND), and how they relate to the associated input streams and the ``datamode`` namelist variable. +CIME will generate a value of ``DLND_MODE`` based on the compset. .. 
csv-table:: "Relationship between DLND_MODE, datamode and streams" :header: "DLND_MODE", "description-streams-datamode" @@ -66,12 +71,12 @@ The following tabe describes the valid values of ``DLND_MODE``, and how it relat "NULL", "null mode" "", "streams: none" "", "datamode: null" - "CPLHIST", "land forcing data (e.g. produced by CESM/CLM) from a previous model run is output in coupler history files and read in by the data land model." + "CPLHIST", "land forcing data (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" "", "streams: lnd.cplhist" - "", "COPYALL" - "GLC_CPLHIST", "glc coupling fields (e.g. produced by CESM/CLM) from a previous model run are read in from a coupler history file." - "", "streams: glc.cplhist" - "", "COPYALL" + "", "datamode: COPYALL" + "GLC_CPLHIST", "glc coupling fields (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" + "", "streams: sno.cplhist" + "", "datamode: COPYALL" --------- Namelists @@ -79,9 +84,9 @@ Namelists The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple instances). -As is the case for all data models, DLND namelists can be separated into two groups, stream-independent and stream-dependent. +As is the case for all data models, DLND namelists can be separated into two groups, stream-independent and stream-dependent. -The stream dependent group is :ref:`shr_strdata_nml`. +The stream dependent group is :ref:`shr_strdata_nml`. .. 
_dlnd-stream-independent-namelists: @@ -89,14 +94,14 @@ The stream-independent group is ``dlnd_nml`` and the DLND stream-independent nam ===================== ====================================================== decomp decomposition strategy (1d, root) - + 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename +restfilm master restart filename +restfils stream restart filename force_prognostic_true TRUE => force prognostic behavior ===================== ====================================================== - -To change the namelist settings in dlnd_in, edit the file user_nl_dlnd. + +To change the namelist settings in ``dlnd_in``, edit the file ``user_nl_dlnd``. .. _dlnd-mode-independent-streams: @@ -119,57 +124,31 @@ In general, the stream input file should translate the stream input variable nam :header: "dlnd_fld (avifld)", "driver_fld (avofld)" :widths: 30, 30 - "t", "Sl_t" - "tref", "Sl_tref" - "qref", "Sl_qref" - "avsdr", "Sl_avsdr" - "anidr", "Sl_anidr" - "avsdf", "Sl_avsdf" - "anidf", "Sl_anidf" - "snowh", "Sl_snowh" - "taux", "Fall_taux" - "tauy", "Fall_tauy" - "lat", "Fall_lat" - "sen", "Fall_sen" - "lwup", "Fall_lwup" - "evap", "Fall_evap" - "swnet", "Fall_swnet" - "lfrac", "Sl_landfrac" - "fv", "Sl_fv" - "ram1", "Sl_ram1" - "flddst1", "Fall_flxdst1" - "flxdst2", "Fall_flxdst2" - "flxdst3", "Fall_flxdst3" - "flxdst4", "Fall_flxdst4" - "tsrfNN", "Sl_tsrf" - "topoNN", "Sl_topo" - "qiceNN", "Flgl_qice" - -where NN = (01,02,...,``nflds_snow * glc_nec)``, and ``nflds_snow`` is the number of snow fields in each elevation class and ``glc_nec`` is the number of elevation classes. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - + "t", "Sl_t" + "tref", "Sl_tref" + "qref", "Sl_qref" + "avsdr", "Sl_avsdr" + "anidr", "Sl_anidr" + "avsdf", "Sl_avsdf" + "anidf", "Sl_anidf" + "snowh", "Sl_snowh" + "taux", "Fall_taux" + "tauy", "Fall_tauy" + "lat", "Fall_lat" + "sen", "Fall_sen" + "lwup", "Fall_lwup" + "evap", "Fall_evap" + "swnet", "Fall_swnet" + "lfrac", "Sl_landfrac" + "fv", "Sl_fv" + "ram1", "Sl_ram1" + "flddst1", "Fall_flxdst1" + "flxdst2", "Fall_flxdst2" + "flxdst3", "Fall_flxdst3" + "flxdst4", "Fall_flxdst4" + "tsrfNN", "Sl_tsrfNN" + "topoNN", "Sl_topoNN" + "qiceNN", "Flgl_qiceNN" + +where NN = (01,02,..., ``glc_nec``), and ``glc_nec`` is the number of glacier elevation classes. +Note that the number of elevation classes on the input files must be the same as in the run. diff --git a/doc/source/data_models/data-model-science.rst b/doc/source/data_models/data-model-science.rst index 1c25cd88afb..b92a7331e51 100644 --- a/doc/source/data_models/data-model-science.rst +++ b/doc/source/data_models/data-model-science.rst @@ -3,19 +3,19 @@ Data Model Science ================== -When a given data models run, the user must specify which *science mode* it will run in. +When a given data model is run, the user must specify which *science mode* it will run in. Each data model has a fixed set of fields that it must send to the coupler, but it is the choice of mode that specifies how that set of fields is to be computed. Each mode activates various assumptions about what input fields are available from the input data streams, what input fields are available from the the coupler, and how to use this input data to compute the output fields sent to the coupler. In general, a mode might specify... 
- that fields be set to a time invariant constant (so that no input data is needed) -- that fields be taken directly from a input data files (the input streams) +- that fields be taken directly from input data files (the input streams) - that fields be computed using data read in from input files -- that fields be computed using from data received from the coupler +- that fields be computed using data received from the coupler - some combination of the above. -If a science mode is chosen that is not consistent with the input data provided, the model may abort (perhaps with a "missing data" error message), or the model may send erroneous data to the coupler (for example, if a mode assumes an input stream has temperature in Kelvin on it, but it really has temperature in Celsius). +If a science mode is chosen that is not consistent with the input data provided, the model may abort (perhaps with a "missing data" error message), or the model may send erroneous data to the coupler (for example, if a mode assumes an input stream has temperature in Kelvin, but it really has temperature in Celsius). Such an error is unlikely unless a user has edited the run scripts to specify either non-standard input data or a non-standard science mode. When editing the run scripts to use non-standard stream data or modes, users must be careful that the input data is consistent with the science mode and should verify that the data model is providing data to the coupler as expected. @@ -23,7 +23,7 @@ The data model mode is a character string that is set in the namelist variable ` ``datm``, ``dlnd``, ``drof``, ``docn``, ``dice`` and ``dwav`` has its own set of valid datamode values, two modes are common to all data models: ``COPYALL`` and ``NULL``. 
``dataMode = "COPYALL"`` - The default mode is COPYALL -- the model will assume *all* the data that must be sent to the coupler will be found in the input data streams, and that this data can be sent to the coupler, unaltered, except for spatial and temporal interpolation. + The default mode is ``COPYALL`` -- the model will assume *all* the data that must be sent to the coupler will be found in the input data streams, and that this data can be sent to the coupler, unaltered, except for spatial and temporal interpolation. ``dataMode = "NULL"`` - NULL mode turns off the data model as a provider of data to the coupler. The model_present flag (eg. atm_present) will be set to false and the coupler will assume no exchange of data to or from the data model. + ``NULL`` mode turns off the data model as a provider of data to the coupler. The ``model_present`` flag (eg. ``atm_present``) will be set to false and the coupler will assume no exchange of data to or from the data model. diff --git a/doc/source/data_models/data-ocean.rst b/doc/source/data_models/data-ocean.rst index 0709ac83934..02b3632044f 100644 --- a/doc/source/data_models/data-ocean.rst +++ b/doc/source/data_models/data-ocean.rst @@ -78,28 +78,29 @@ These variables will appear in ``env_run.xml`` and are used by the DOCN ``cime_c datamode values --------------- -The xml variable ``DOCN_MODE`` sets the streams that are associated with DOCN and also sets the namelist variable ``datamode`` that specifies what additional operations need to be done by DOCN on the streams before returning to the driver. -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. Each data model has a unique set of ``datamode`` values that it supports. -The valid values for ``datamode`` are set in the file ``namelist_definition_docn.xml`` using the xml variable ``DOCN_MODE`` in the ``config_component.xml`` file for DOCN. -CIME will generate a value ``datamode`` that is compset dependent. 
+The xml variable ``DOCN_MODE`` (described in :ref:`docn_mode`) sets the streams that are associated with DOCN and also sets the namelist variable ``datamode``. +``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DOCN on the streams before returning to the driver. -The following are the supported DOCN datamode values and their relationship to the ``DOCN_MODE`` xml variable value. +Each data model has its own set of supported ``datamode`` values. The following are the supported DOCN ``datamode`` values, as defined in the file ``namelist_definition_docn.xml``. .. csv-table:: "Valid values for datamode namelist variable" :header: "datamode variable", "description" :widths: 20, 80 "NULL", "Turns off the data model as a provider of data to the coupler. The ocn_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams, if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." + "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." "SSTDATA", "assumes the only field in the input stream is SST. It also assumes the SST is in Celsius and must be converted to Kelvin. All other fields are set to zero except for ocean salinity, which is set to a constant reference salinity value. 
Normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other." "IAF", "is the interannually varying version of SSTDATA" "SOM", "(slab ocean model) mode is a prognostic mode. This mode computes a prognostic sea surface temperature and a freeze/melt potential (surface Q-flux) used by the sea ice model. This calculation requires an external SOM forcing data file that includes ocean mixed layer depths and bottom-of-the-slab Q-fluxes. Scientifically appropriate bottom-of-the-slab Q-fluxes are normally ocean resolution dependent and are derived from the ocean model output of a fully coupled CCSM run. Note that while this mode runs out of the box, the default SOM forcing file is not scientifically appropriate and is provided for testing and development purposes only. Users must create scientifically appropriate data for their particular application. A tool is available to derive valid SOM forcing." +.. _docn_mode: + ------------------------------- DOCN_MODE, datamode and streams ------------------------------- -The following tabe describes the valid values of ``DOCN_MODE``, and how it relates to the associated input streams and the ``datamode`` namelist variable. +The following table describes the valid values of ``DOCN_MODE`` (defined in the ``config_component.xml`` file for DOCN), and how they relate to the associated input streams and the ``datamode`` namelist variable. +CIME will generate a value of ``DOCN_MODE`` based on the compset. .. 
csv-table:: "Relationship between DOCN_MODE, datamode and streams" :header: "DOCN_MODE, "description-streams-datamode" diff --git a/doc/source/data_models/data-river.rst b/doc/source/data_models/data-river.rst index b6e9c4ca474..2911149bc67 100644 --- a/doc/source/data_models/data-river.rst +++ b/doc/source/data_models/data-river.rst @@ -13,7 +13,7 @@ This data can either be observational (climatological or interannual river data) xml variables ------------- -The following are xml variables that CIME supports for DROF. +The following are xml variables that CIME supports for DROF. These variables are defined in ``$CIMEROOT/src/components/data_comps/drof/cime_config/config_component.xml``. These variables will appear in ``env_run.xml`` and are used by the DROF ``cime_config/buildnml`` script to generate the DROF namelist file ``drof_in`` and the required associated stream files for the case. @@ -23,13 +23,17 @@ These variables will appear in ``env_run.xml`` and are used by the DROF ``cime_c :header: "xml variable", "description" :widths: 15, 85 - "DROF_MODE", "Data mode" - "", "Valid values are: NULL,CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1" - "DROF_CPLHIST_CASE", "Coupler history data mode case name" - "DROF_CPLHIST_DIR", "Coupler history data mode directory containing coupler history data" - "DROF_CPLHIST_YR_ALIGN", "Coupler history data model simulation year corresponding to data starting year" - "DROF_CPLHIST_YR_START", "Coupler history data model starting year to loop data over" - "DROF_CPLHIST_YR_END", "Coupler history data model ending year to loop data over" + "DROF_MODE", "Data mode" + "", "Valid values are: NULL,CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1" + + "DROF_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file " + "DROF_CPLHIST_CASE", "Coupler history forcing data mode - case name" + "DROF_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history forcing data" + 
"DROF_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DROF_CPLHIST_YR_START" + "DROF_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop forcing data over" + "DROF_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop forcing data over" + +.. note:: If ``DROF_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ROF_DOMAIN_PATH`` and ``ROF_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DROF_CPLHIST_DOMAIN_FILE``. If ``DROF_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the drof component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. This is the default mode that should be used for this mode. Alternatively, ``DROF_CPLHIST_DOMAIN_FILE`` can be set to ``$ROF_DOMAIN_PATH/$ROF_DOMAIN_FILE`` in a non-default configuration. .. _drof-datamodes: @@ -37,12 +41,10 @@ These variables will appear in ``env_run.xml`` and are used by the DROF ``cime_c datamode values -------------------- -The xml variable ``DROF_MODE`` sets the streams that are associated with DROF and also sets the namelist variable ``datamode`` that specifies what additional operations need to be done by DROF on the streams before returning to the driver. -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. Each data model has a unique set of ``datamode`` values that it supports. -The valid values for ``datamode`` are set in the file ``namelist_definition_drof.xml`` using the xml variable ``DROF_MODE`` in the ``config_component.xml`` file for DROF. -CIME will generate a value ``datamode`` that is compset dependent. 
+The xml variable ``DROF_MODE`` (described in :ref:`drof_mode`) sets the streams that are associated with DROF and also sets the namelist variable ``datamode``. +``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DROF on the streams before returning to the driver. -The following are the supported DROF datamode values and their relationship to the ``DROF_MODE`` xml variable value. +Each data model has its own set of supported ``datamode`` values. The following are the supported DROF ``datamode`` values, as defined in the file ``namelist_definition_drof.xml``. .. csv-table:: "Valid values for datamode namelist variable" :header: "datamode variable", "description" @@ -76,11 +78,14 @@ force_prognostic_true TRUE => force prognostic behavior To change the namelist settings in ``drof_in``, edit the file ``user_nl_drof`` in your case directory. +.. _drof_mode: + ------------------------------- DROF_MODE, datamode and streams ------------------------------- -The following tabe describes the valid values of ``DROF_MODE``, and how it relates to the associated input streams and the ``datamode`` namelist variable. +The following table describes the valid values of ``DROF_MODE`` (defined in the ``config_component.xml`` file for DROF), and how they relate to the associated input streams and the ``datamode`` namelist variable. +CIME will generate a value of ``DROF_MODE`` based on the compset. .. 
csv-table:: "Relationship between DROF_MODE, datamode and streams" :header: "DROF_MODE", "description-streams-datamode" diff --git a/doc/source/data_models/data-seaice.rst b/doc/source/data_models/data-seaice.rst index 05aab50b38a..39449a02971 100644 --- a/doc/source/data_models/data-seaice.rst +++ b/doc/source/data_models/data-seaice.rst @@ -34,26 +34,27 @@ These variables will appear in ``env_run.xml`` and are used by the DICE ``cime_c datamode values -------------------- -The xml variable ``DICE_MODE`` sets the streams that are associated with DICE and also sets the namelist variable ``datamode`` that specifies what additional operations need to be done by DICE on the streams before returning to the driver. -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. Each data model has a unique set of ``datamode`` values that it supports. -The valid values for ``datamode`` are set in the file ``namelist_definition_dice.xml`` using the xml variable ``DICE_MODE`` in the ``config_component.xml`` file for DICE. -CIME will generate a value ``datamode`` that is compset dependent. +The xml variable ``DICE_MODE`` (described in :ref:`dice_mode`) sets the streams that are associated with DICE and also sets the namelist variable ``datamode``. +``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DICE on the streams before returning to the driver. -The following are the supported DICE datamode values and their relationship to the ``DICE_MODE`` xml variable value. +Each data model has its own set of supported ``datamode`` values. The following are the supported DICE ``datamode`` values, as defined in the file ``namelist_definition_dice.xml``. .. csv-table:: "Valid values for datamode namelist variable" :header: "datamode variable", "description" :widths: 20, 80 "NULL", "Turns off the data model as a provider of data to the coupler. 
The ice_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams, if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." + "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." "SSTDATA","Is a prognostic mode. It requires data be sent to the ice model. Ice fraction (extent) data is read from an input stream, atmosphere state variables are received from the coupler, and then an atmosphere-ice surface flux is computed and sent to the coupler. It is called ``SSTDATA`` mode because normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other. " +.. _dice_mode: + ------------------------------- DICE_MODE, datamode and streams ------------------------------- -The following tabe describes the valid values of ``DICE_MODE``, and how it relates to the associated input streams and the ``datamode`` namelist variable. +The following table describes the valid values of ``DICE_MODE`` (defined in the ``config_component.xml`` file for DICE), and how they relate to the associated input streams and the ``datamode`` namelist variable. 
+CIME will generate a value of ``DICE_MODE`` based on the compset. .. csv-table:: "Relationship between DICE_MODE, datamode and streams" :header: "DICE_MODE, "description-streams-datamode" diff --git a/doc/source/data_models/data-wave.rst b/doc/source/data_models/data-wave.rst index 2624c7eacc1..53ea0f9ae46 100644 --- a/doc/source/data_models/data-wave.rst +++ b/doc/source/data_models/data-wave.rst @@ -78,11 +78,14 @@ force_prognostic_true TRUE => force prognostic behavior To change the namelist settings in ``dwav_in``, edit the file ``user_nl_dwav`` in your case directory. +.. _dwav_mode: + ------------------------------- -DROF_MODE, datamode and streams +DWAV_MODE, datamode and streams ------------------------------- -The following tabe describes the valid values of ``DWAV_MODE``, and how it relates to the associated input streams and the ``datamode`` namelist variable. +The following table describes the valid values of ``DWAV_MODE`` (defined in the ``config_component.xml`` file for DWAV), and how they relate to the associated input streams and the ``datamode`` namelist variable. +CIME will generate a value of ``DWAV_MODE`` based on the compset. .. csv-table:: "Relationship between DWAV_MODE, datamode and streams" :header: "DWAV_MODE", "description-streams-datamode" diff --git a/doc/source/data_models/design-details.rst b/doc/source/data_models/design-details.rst index de0a37063eb..ad24102b214 100644 --- a/doc/source/data_models/design-details.rst +++ b/doc/source/data_models/design-details.rst @@ -4,35 +4,11 @@ Design Details ================ -The data model functionality is executed via set of specific operations associated with reading and interpolating data in space and time. -The strdata implementation does the following: - -1. determines nearest lower and upper bound data from the input dataset -2. if that is new data then read lower and upper bound data -3. fill lower and upper bound data -4. spatially map lower and upper bound data to model grid -5. 
time interpolate lower and upper bound data to model time -6. return fields to data model - - -.. _io-details: - ---------------------- -IO Through Data Models +Data Model Performance ---------------------- -Namlist variables referenced below are discussed in detail in :ref:`stream data namelist section `. - -The two timestamps of input data that bracket the present model time are read first. -These are called the lower and upper bounds of data and will change as the model advances. -Those two sets of inputdata are first filled based on the user setting of the namelist variables ``str_fillalgo`` and ``str_fillmask``. -That operation occurs on the input data grid. -The lower and upper bound data are then spatially mapped to the model grid based upon the user setting of the namelist variables ``str_mapalgo`` and ``str_mapmask``. -Spatial interpolation only occurs if the input data grid and model grid are not the identical, and this is determined in the strdata module automatically. -Time interpolation is the final step and is done using a time interpolation method specified by the user in namelist (via the shr_strdata_nml namelist variable "tintalgo"). -A final set of fields is then available to the data model on the model grid and for the current model time. - -There are two primary costs associated with strdata, reading data and spatially mapping data. +There are two primary costs associated with strdata: reading data and spatially mapping data. Time interpolation is relatively cheap in the current implementation. As much as possible, redundant operations are minimized. Fill and mapping weights are generated at initialization and saved. @@ -43,43 +19,48 @@ The present implementation doesn't support changing the order of operations, for Because the present computations are always linear, changing the order of operations will not fundamentally change the results. The present order of operations generally minimizes the mapping cost for typical data model use cases. 
+---------------------- +Data Model Limitations +---------------------- + There are several limitations in both options and usage within the data models at the present time. Spatial interpolation can only be performed from a two-dimensional latitude-longitude input grid. -The target grid can be arbitrary but the source grid must be able to be described by simple one-dimensional lists of longitudes and latitudes, although they don't have to have equally spaced. +The target grid can be arbitrary but the source grid must be able to be described by simple one-dimensional lists of longitudes and latitudes, although they don't have to be equally spaced. + +---------------------- +IO Through Data Models +---------------------- At the present time, data models can only read netcdf data, and IO is handled through either standard netcdf interfaces or through the PIO library using either netcdf or pnetcdf. If standard netcdf is used, global fields are read and then scattered one field at a time. -If PIO is used, then data will be read either serially or in parallel in chunks that are approximately the global field size divided by the number of io tasks. +If PIO is used, then data will be read either serially or in parallel in chunks that are approximately the global field size divided by the number of IO tasks. If pnetcdf is used through PIO, then the pnetcdf library must be included during the build of the model. The pnetcdf path and option is hardwired into the ``Macros.make`` file for the specific machine. -To turn on ``pnetcdf`` in the build, make sure the ``Macros.make`` variables PNETCDF_PATH, INC_PNETCDF, and LIB_PNETCDF are set and that the PIO CONFIG_ARGS sets the PNETCDF_PATH argument. +To turn on ``pnetcdf`` in the build, make sure the ``Macros.make`` variables ``PNETCDF_PATH``, ``INC_PNETCDF``, and ``LIB_PNETCDF`` are set and that the PIO ``CONFIG_ARGS`` sets the ``PNETCDF_PATH`` argument. 
-Beyond just the option of selecting IO with PIO, several namelist are available to help optimize PIO IO performance. +Beyond just the option of selecting IO with PIO, several namelist variables are available to help optimize PIO IO performance. Those are **TODO** - list these. The total mpi tasks that can be used for IO is limited to the total number of tasks used by the data model. -Often though, fewer io tasks result in improved performance. +Often though, using fewer IO tasks results in improved performance. In general, [io_root + (num_iotasks-1)*io_stride + 1] has to be less than the total number of data model tasks. In practice, PIO seems to perform optimally somewhere between the extremes of 1 task and all tasks, and is highly machine and problem dependent. -.. _restart-files: - -.. _restart-files: ------------- Restart Files ------------- -Restart files are generated automatically by the data models based upon a flag sent from the driver. +Restart files are generated automatically by the data models based on a flag sent from the driver. The restart files must meet the CIME naming convention and an ``rpointer`` file is generated at the same time. An ``rpointer`` file is a *restart pointer* file which contains the name of the most recently created restart file. -Normally, if restart files are read, the restart filenames are specified in the rpointer file. -Optionally though, there are namelist variables such as `restfilm`` to specify the restart filenames via namelist. If those namelist are set, the ``rpointer`` file will be ignored. -The default method is to use the ``rpointer`` files to specify the restart filenames. -In most cases, no model restart is required for the data models to restart exactly. +Normally, if restart files are read, the restart filenames are specified in the ``rpointer`` file. +Optionally though, there are namelist variables such as ``restfilm`` to specify the restart filenames via namelist. 
If those namelist variables are set, the ``rpointer`` file will be ignored. + +In most cases, no restart file is required for the data models to restart exactly. This is because there is no memory between timesteps in many of the data model science modes. -If a model restart is required, it will be written automatically and then must be used to continue the previous run. +If a restart file is required, it will be written automatically and then must be used to continue the previous run. There are separate stream restart files that only exist for performance reasons. A stream restart file contains information about the time axis of the input streams. -This information helps reduce the start costs associated with reading the input dataset time axis information. +This information helps reduce the startup costs associated with reading the input dataset time axis information. If a stream restart file is missing, the code will restart without it but may need to reread data from the input data files that would have been stored in the stream restart file. This will take extra time but will not impact the results. @@ -101,7 +82,7 @@ These routines contain three data structures that are leveraged by all the data The most basic type, ``shr_stream_fileType`` is contained in ``shr_stream_mod.F90`` and specifies basic information related to a given stream file. -:: +.. code-block:: Fortran type shr_stream_fileType character(SHR_KIND_CL) :: name = shr_stream_file_null ! the file name @@ -116,7 +97,7 @@ that encapsulates the information related to all files specific to a target stream. These are the list of files found in the ``domainInfo`` and ``fieldInfo`` blocks of the target stream description file (see the overview of the :ref:`stream_description_file`). -:: +.. code-block:: Fortran type shr_stream_streamType !private ! 
no public access to internal components @@ -162,13 +143,13 @@ and ``fieldInfo`` blocks of the target stream description file (see the overview character(SHR_KIND_CL) :: calendar ! stream calendar end type shr_stream_streamType -and finally, the ``shr_strdata_type`` is the heart of the CIME data +Finally, the ``shr_strdata_type`` is the heart of the CIME data model implemenentation and contains information for all the streams that are active for the target data model. The first part of the -shr_strdata_type is filled in by the namelist values read in from the +``shr_strdata_type`` is filled in by the namelist values read in from the namelist group (see the :ref:`stream data namelist section `). -:: +.. code-block:: Fortran type shr_strdata_type ! --- set by input namelist --- diff --git a/doc/source/data_models/index.rst b/doc/source/data_models/index.rst index 2a305c1afe1..a84dd0748e7 100644 --- a/doc/source/data_models/index.rst +++ b/doc/source/data_models/index.rst @@ -12,8 +12,9 @@ .. toctree:: :maxdepth: 3 :numbered: - + introduction.rst + input-namelists.rst input-streams.rst design-details.rst data-model-science.rst @@ -30,4 +31,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/doc/source/data_models/input-namelists.rst b/doc/source/data_models/input-namelists.rst new file mode 100644 index 00000000000..533f1c1b2f9 --- /dev/null +++ b/doc/source/data_models/input-namelists.rst @@ -0,0 +1,87 @@ +.. _input-namelists: + +Input Namelists +=============== + +Each data model has two namelist groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. + +The stream-dependent namelist group (``shr_strdata_nml``) specifies the data model mode, stream description text files, and interpolation options. +The stream description files will be provided as separate input files and contain the files and fields that need to be read. 
+The stream-independent namelist group (one of ``[datm_nml, dice_nml, dlnd_nml, docn_nml, drof_nml, dwav_nml]``) contains namelist input such as the data model decomposition, etc. + +For users wanting to introduce new data sources for any data model, it is important to know what modes are supported and the internal field names in the data model. +That information will be used in the ``shr_strdata_nml`` namelist and stream input files. + +Users will primarily set up different data model configurations through namelist settings. +**The stream input options and format are identical for all data models**. +The data model-specific namelist has significant overlap between data models, but each data model has a slightly different set of input namelist variables and each model reads that namelist from a unique filename. +The detailed namelist options for each data model will be described later, but each model will specify a filename or filenames for stream namelist input and each ``shr_strdata_nml`` namelist will specify a set of stream input files. + +The following example illustrates the basic set of namelist inputs:: + + &dlnd_nml + decomp = '1d' + / + &shr_strdata_nml + dataMode = 'CPLHIST' + domainFile = 'grid.nc' + streams = 'streama', 'streamb', 'streamc' + mapalgo = 'interpa', 'interpb', 'interpc' + / + +As mentioned above, the ``dataMode`` namelist variable that is associated with each data model specifies if there are any additional operations that need to be performed on that data model's input streams before returning to the driver. +At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. + +- ``NULL`` - turns off the data model as a provider of data to the coupler. + +- ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. 
+ +Three stream description files (see :ref:`input streams`) are then expected to be available, ``streama``, ``streamb`` and ``streamc``. +Those files specify the input data filenames, input data grids, and input fields that are expected, among other things. +The stream files are **not** Fortran namelist format. +Their format and options will be described later. +As an example, one of the stream description files might look like +:: + + + + GENERIC + + + + dn10 dens + slp_ pslv + q_10 shum + t_10 tbot + u_10 u + v_10 v + + + /glade/proj3/cseg/inputdata/atm/datm7/NYF + + + 0 + + + nyf.ncep.T62.050923.nc + + + + + time time + lon lon + lat lat + area area + mask mask + + + /glade/proj3/cseg/inputdata/atm/datm7/NYF + + + nyf.ncep.T62.050923.nc + + + + + +In general, these examples of input files are not complete, but they do show the general hierarchy and feel of the data model input. diff --git a/doc/source/data_models/input-streams.rst b/doc/source/data_models/input-streams.rst index edb102ff4fb..3c4c0080924 100644 --- a/doc/source/data_models/input-streams.rst +++ b/doc/source/data_models/input-streams.rst @@ -12,7 +12,7 @@ The data models can have multiple input streams. The data for one stream may be all in one file or may be spread over several files. For example, 50 years of monthly average data might be contained all in one data file or it might be spread over 50 files, each containing one year of data. -The data models can *loop* over stream data -- repeatedly cycle over some subset of an input stream's time axis. When looping, the models can only loop over whole years. For example, an input stream might have SST data for years 1950 through 2000, but a model could loop over the data for years 1960 through 1980. A model *cannot* loop over partial years, for example, from 1950-Feb-10 through 1980-Mar-15. +The data models can *loop* over stream data -- i.e., repeatedly cycle over some subset of an input stream's time axis. 
When looping, the models can only loop over whole years. For example, an input stream might have SST data for years 1950 through 2000, but a model could loop over the data for years 1960 through 1980. A model *cannot* loop over partial years, for example, from 1950-Feb-10 through 1980-Mar-15. The input data must be in a netcdf file and the time axis in that file must be CF-1.0 compliant. @@ -26,16 +26,16 @@ Generally, information about what streams a user wants to use and how to use the -------------------------------------------------- Stream Data and shr_strdata_nml namelists -------------------------------------------------- -The stream data (referred to as *strdata*) input is set via a fortran namelist called ``shr_strdata_nml``. -That namelist, the associated strdata datatype, and the methods are contained in the share source code file, ``shr_strdata_mod.F90``. -In general, strdata input defines an array of input streams and operations to perform on those streams. -Therefore, many namelist inputs are arrays of character strings. +The stream data (referred to as *strdata*) input is set via a Fortran namelist called ``shr_strdata_nml``. +That namelist, the associated strdata datatype, and the methods are contained in the share source code file, ``shr_strdata_mod.F90``. +In general, strdata input defines an array of input streams and operations to perform on those streams. +Therefore, many namelist inputs are arrays of character strings. Different variables of the same index are associated. For instance, mapalgo(1) spatial interpolation will be performed between streams(1) and the target domain. -Each data model as an associated input namelist file, ``xxx_in``, where ``xxx=[datm,dlnd,dice,docn,drof,dwav]``. +Each data model has an associated input namelist file, ``xxx_in``, where ``xxx=[datm,dlnd,dice,docn,drof,dwav]``. -The input namelist file for each data model has a stream dependent namelist group, ``shr_strdata_nml``, and a stream independent namelist group. 
-The ``shr_strdata_nml`` namelist variables **are the same for all data models**. +The input namelist file for each data model has a stream dependent namelist group, ``shr_strdata_nml``, and a stream independent namelist group. +The ``shr_strdata_nml`` namelist variables **are the same for all data models**. =========== ========================================================================================================================== File Namelist Groups @@ -55,7 +55,7 @@ The following table summaries the ``shr_strdata_nml`` entries. =========== ========================================================================================================================== Namelist Description =========== ========================================================================================================================== -dataMode component specific mode. +dataMode component specific mode. Each CIME data model has its own datamode values as described below: @@ -75,27 +75,27 @@ domainFile component domain (all streams will be mapped to this domain). Spatial gridfile associated with the strdata. grid information will be read from this file and that grid will serve as the target grid - for all input data for this strdata input. - If the value is **null** then the domain of the first stream - will be used as the component domain + for all input data for this strdata input. + If the value is **null** then the domain of the first stream + will be used as the component domain - default="null" + default="null" -streams character array (up to 30 elemnets) of input stream filenames and associated years of data. +streams character array (up to 30 elements) of input stream filenames and associated years of data. - Each array entry consists of a stream_input_filename year_align year_first year_last. - The stream_input_filename is a stream text input file and the format and options are described elsewhere. 
- The year_align, year_first, and year_last provide information about the time axis of the file and how to relate - the input time axis to the model time axis. + Each array entry consists of a stream_input_filename year_align year_first year_last. + The stream_input_filename is a stream text input file and the format and options are described elsewhere. + The year_align, year_first, and year_last provide information about the time axis of the file and how to relate + the input time axis to the model time axis. default="null". -fillalgo array (up to 30 elements) of fill algorithms associated with the array of streams. +fillalgo array (up to 30 elements) of fill algorithms associated with the array of streams. - Valid options are just copy (ie. no fill), special value, nearest neighbor, nearest neighbor in "i" direction, - or nearest neighbor in "j" direction. + Valid options are just copy (ie. no fill), special value, nearest neighbor, nearest neighbor in "i" direction, + or nearest neighbor in "j" direction. - valid values: 'copy','spval','nn','nnoni','nnonj' + valid values: 'copy','spval','nn','nnoni','nnonj' default value='nn' @@ -105,7 +105,7 @@ fillmask array (up to 30 elements) of fill masks. default="nomask" -fillread array (up to 30 elements) fill mapping files to read. Secifies the weights file to read in instead of +fillread array (up to 30 elements) fill mapping files to read. Secifies the weights file to read in instead of computing the weights on the fly for the fill operation. If this is set, fillalgo and fillmask are ignored. default='NOT_SET' @@ -126,13 +126,12 @@ mapread array of spatial interpolation mapping files to read (optional) default='NOT_SET' -mapwrite array (up to 30 elements) of spatial interpolation mapping files to write (optional). 
Specifies the weights file - - to generate after weights are computed on the fly for the mapping (interpolation) operation, thereby allowing +mapwrite array (up to 30 elements) of spatial interpolation mapping files to write (optional). Specifies the weights file + to generate after weights are computed on the fly for the mapping (interpolation) operation, thereby allowing users to save and reuse a set of weights later. default='NOT_SET' -tintalgo array (up to 30 elements) of time interpolation algorithm options associated with the array of streams. +tintalgo array (up to 30 elements) of time interpolation algorithm options associated with the array of streams. valid values: lower,upper,nearest,linear,coszen lower = Use lower time-value @@ -149,16 +148,12 @@ tintalgo array (up to 30 elements) of time interpolation algorithm options as taxMode array (up to 30 elements) of time interpolation modes. - Time axis interpolation modes are associated with the array of streams for - - handling data outside the specified stream time axis. - - Valid options are to cycle the data based on the first, last, and align - - settings associated with the stream dataset, to extend the first and last - - valid value indefinitely, or to limit the interpolated data to fall only between + Time axis interpolation modes are associated with the array of streams for + handling data outside the specified stream time axis. + Valid options are to cycle the data based on the first, last, and align + settings associated with the stream dataset, to extend the first and last + valid value indefinitely, or to limit the interpolated data to fall only between the least and greatest valid value of the time array. valid values: cycle,extend,limit @@ -173,35 +168,25 @@ taxMode array (up to 30 elements) of time interpolation modes. dtlimit array (up to 30 elements) of setting delta time axis limit. 
- Specifies delta time ratio limits placed on the time interpolation - - associated with the array of streams. Causes the model to stop if - - the ratio of the running maximum delta time divided by the minimum delta time - - is greater than the dtlimit for that stream. For instance, with daily data, - - the delta time should be exactly one day throughout the dataset and - - the computed maximum divided by minimum delta time should always be 1.0. - + Specifies delta time ratio limits placed on the time interpolation + associated with the array of streams. Causes the model to stop if + the ratio of the running maximum delta time divided by the minimum delta time + is greater than the dtlimit for that stream. For instance, with daily data, + the delta time should be exactly one day throughout the dataset and + the computed maximum divided by minimum delta time should always be 1.0. For monthly data, the delta time should be between 28 and 31 days and the - maximum ratio should be about 1.1. The running value of the delta - time is computed as data is read and any wraparound or cycling is also - included. this input helps trap missing data or errors in cycling. - to turn off trapping, set the value to 1.0e30 or something similar. - default=1.5 + default=1.5 vectors paired vector field names =========== ========================================================================================================================== -``shr_strdata_nml`` contains a namelist variable, ``streams``, that specifies a list of input stream description files and for each file what years of data to use, and how to align the input stream time axis with the model run time axis. +``shr_strdata_nml`` contains a namelist variable, ``streams``, that specifies a list of input stream description files and for each file what years of data to use, and how to align the input stream time axis with the model run time axis. 
The general input format for the ``streams`` namelist variable is: :: @@ -211,10 +196,11 @@ The general input format for the ``streams`` namelist variable is: 'stream2.txt year_align year_first year_last ', ... 'streamN.txt year_align year_first year_last ' - / + / where: -:: + +.. code-block:: none streamN.txt the stream description file, a plain text file containing details about the input stream (see below) @@ -225,12 +211,59 @@ where: year_align a model year that will be aligned with data for year_first +--------------------- +Details on year_align +--------------------- + +The ``year_align`` value gives the simulation year corresponding to +``year_first``. A common usage is to set this to the year of +``RUN_STARTDATE``. With this setting, the forcing in the first year of +the run will be the forcing of year ``year_first``. Another use case is +to align the calendar of transient forcing with the model calendar. For +example, setting ``year_align`` = ``year_first`` will lead to the +forcing calendar being the same as the model calendar. The forcing for a +given model year would be the forcing of the same year. This would be +appropriate in transient runs where the model calendar is setup to span +the same year range as the forcing data. + +For some data model modes, ``year_align`` can be set via an xml variable +whose name ends with ``YR_ALIGN`` (there are a few such xml variables, +each pertaining to a particular data model mode). + +An example of this is land-only historical simulations in which we run +the model for 1850 to 2010 using atmospheric forcing data that is only +available for 1901 to 2010. In this case, we want to run the model for +years 1850 (so ``RUN_STARTDATE`` has year 1850) through 1900 by looping +over the forcing data for 1901-1920, and then run the model for years +1901-2010 using the forcing data from 1901-2010. 
To do this, we +initially set:: + + ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901 + ./xmlchange DATM_CLMNCEP_YR_START=1901 + ./xmlchange DATM_CLMNCEP_YR_END=1920 + +When the model has completed year 1900, then we set:: + + ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901 + ./xmlchange DATM_CLMNCEP_YR_START=1901 + ./xmlchange DATM_CLMNCEP_YR_END=2010 + +With this setup, the correlation between model run year and forcing year +looks like this:: + + RUN Year : 1850 ... 1860 1861 ... 1870 ... 1880 1881 ... 1890 ... 1900 1901 ... 2010 + FORCE Year : 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 2010 + +Setting ``DATM_CLMNCEP_YR_ALIGN`` to 1901 tells the code that you want +to align model year 1901 with forcing data year 1901, and then it +calculates what the forcing year should be if the model starts in +year 1850. -------------------------------------------------- Customizing shr_strdata_nml values -------------------------------------------------- -The contents of ``shr_strdata_nml are automatically generated by that data model's **cime_config/buildnml** script. +The contents of ``shr_strdata_nml`` are automatically generated by that data model's **cime_config/buildnml** script. These contents are easily customizable for your target experiment. 
As an example we refer to the following ``datm_in`` contents (that would appear in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``): :: @@ -243,48 +276,49 @@ As an example we refer to the following ``datm_in`` contents (that would appear fillmask = 'nomask','nomask','nomask','nomask' mapalgo = 'bilinear','bilinear','bilinear','bilinear' mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1972 ", + streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1972 ", "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1972 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1972 ", + "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1972 ", "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" taxmode = 'cycle','cycle','cycle','cycle' tintalgo = 'coszen','nearest','linear','linear' vectors = 'null' / - -As is discussed in the :ref:`CIME User's Guide`, to change the contents of ``datm_in``, you must edit ``$CASEROOT/user_nl_datm``. -In the above example, you can to this to change any of the above settings **except for the names** -:: +As is discussed in the :ref:`CIME User's Guide`, to change the contents of ``datm_in``, you must edit ``$CASEROOT/user_nl_datm``. +In the above example, you can do this to change any of the above settings **except for the names** + +.. code-block:: none + datm.streams.txt.CLM_QIAN.Solar datm.streams.txt.CLM_QIAN.Precip - datm.streams.txt.CLM_QIAN.TPQW - datm.streams.txt.presaero.trans_1850-2000 + datm.streams.txt.CLM_QIAN.TPQW + datm.streams.txt.presaero.trans_1850-2000 -Other than these names, any namelist variable from ``shr_strdata_nml`` can be modified by adding the appropriate keyword/value pairs to ``user_nl_datm``. +Other than these names, any namelist variable from ``shr_strdata_nml`` can be modified by adding the appropriate keyword/value pairs to ``user_nl_datm``. 
As an example, the following could be the contents of ``$CASEROOT/user_nl_datm``: :: !------------------------------------------------------------------------ ! Users should ONLY USE user_nl_datm to change namelists variables - ! Users should add all user specific namelist changes below in the form of - ! namelist_var = new_namelist_value - ! Note that any namelist variable from shr_strdata_nml and datm_nml can - ! be modified below using the above syntax + ! Users should add all user specific namelist changes below in the form of + ! namelist_var = new_namelist_value + ! Note that any namelist variable from shr_strdata_nml and datm_nml can + ! be modified below using the above syntax ! User preview_namelists to view (not modify) the output namelist in the ! directory $CASEROOT/CaseDocs ! To modify the contents of a stream txt file, first use preview_namelists ! to obtain the contents of the stream txt files in CaseDocs, and then - ! place a copy of the modified stream txt file in $CASEROOT with the string - ! user_ prepended. + ! place a copy of the modified stream txt file in $CASEROOT with the string + ! user_ prepended. 
!------------------------------------------------------------------------ - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", + streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", - "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" - + "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", + "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" + and the contents of ``shr_strdata_nml`` (in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``) would be :: @@ -295,14 +329,14 @@ and the contents of ``shr_strdata_nml`` (in both ``$CASEROOT/CaseDocs`` and ``$R fillmask = 'nomask','nomask','nomask','nomask' mapalgo = 'bilinear','bilinear','bilinear','bilinear' mapmask = 'nomask','nomask','nomask','nomask' - streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", + streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ", "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ", - "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", + "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ", "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006" taxmode = 'cycle','cycle','cycle','cycle' tintalgo = 'coszen','nearest','linear','linear' vectors = 'null' - + As is discussed in the :ref:`CIME User's Guide`, you should use **preview_namelists** to view (not modify) the output namelist in ``CaseDocs``. @@ -312,7 +346,7 @@ As is discussed in the :ref:`CIME User's Guide`, you should use Stream Description File ----------------------- The *stream description file* is not a Fortran namelist, but a locally built xml-like parsing implementation. -Sometimes it is called a "stream dot-text file" because it has a ".txt." in the filename. +Sometimes it is called a "stream dot-text file" because it has a ".txt." in the filename. 
Stream description files contain data that specifies the names of the fields in the stream, the names of the input data files, and the file system directory where the data files are located. The data elements found in the stream description file are: @@ -324,20 +358,20 @@ The data elements found in the stream description file are: Information about the domain data for this stream specified by the following 3 sub elements. ``variableNames`` - A list of the domain variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This data models require five variables in this list. The names of model's variables (names on the right) must be: "time," "lon," "lat," "area," and "mask." + A list of the domain variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This data models require five variables in this list. The names of model's variables (names on the right) must be: "time," "lon," "lat," "area," and "mask." ``filePath`` - The file system directory where the domain data file is located. + The file system directory where the domain data file is located. - ``fileNames`` - The name of the domain data file. Often the domain data is located in the same file as the field data (above), in which case the name of the domain file could simply be the name of the first field data file. Sometimes the field data files don't contain the domain data required by the data models, in this case, one new file can be created that contains the required data. + ``fileNames`` + The name of the domain data file. Often the domain data is located in the same file as the field data (above), in which case the name of the domain file could simply be the name of the first field data file. 
Sometimes the field data files don't contain the domain data required by the data models, in this case, one new file can be created that contains the required data. ``fieldInfo`` Information about the stream data for this stream specified by the following 3 required sub elements and optional offset element. ``variableNames`` - A list of the field variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This is the list of fields to read in from the data file, there may be other fields in the file which are not read in (ie. they won't be used). + A list of the field variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This is the list of fields to read in from the data file; there may be other fields in the file which are not read in (i.e., they won't be used). ``filePath`` The file system directory where the data files are located. @@ -348,17 +382,17 @@ The data elements found in the stream description file are: ``offset`` The offset allows a user to shift the time axis of a data stream by a fixed and constant number of seconds. For instance, if a data set contains daily average data with timestamps for the data at the end of the day, it might be appropriate to shift the time axis by 12 hours so the data is taken to be at the middle of the day instead of the end of the day. This feature supports only simple shifts in seconds as a way of correcting input data time axes without having to modify the input data time axis manually. This feature does not support more complex shifts such as end of month to mid-month. But in conjunction with the time interpolation methods in the strdata input, hopefully most user needs can be accommodated with the two settings. Note that a positive offset advances the input data time axis forward by that number of seconds. 
-The data models advance in time discretely.
-At a given time, they read/derive fields from input files.
-Those input files have data on a discrete time axis as well.
-Each data point in the input files are associated with a discrete time (as opposed to a time interval).
-Depending whether you pick lower, upper, nearest, linear, or coszen; the data in the input file will be "interpolated" to the time in the model.
-
-The offset shifts the time axis of the input data the given number of seconds.
-So if the input data is at 0, 3600, 7200, 10800 seconds (hourly) and you set an offset of 1800, then the input data will be set at times 1800, 5400, 9000, and 12600.
-So a model at time 3600 using linear interpolation would have data at "n=2" with offset of 0 will have data at "n=(2+3)/2" with an offset of 1800.
-n=2 is the 2nd data in the time list 0, 3600, 7200, 10800 in this example.
-n=(2+3)/2 is the average of the 2nd and 3rd data in the time list 0, 3600, 7200, 10800.
+The data models advance in time discretely.
+At a given time, they read/derive fields from input files.
+Those input files have data on a discrete time axis as well.
+Each data point in the input files is associated with a discrete time (as opposed to a time interval).
+Depending on whether you pick lower, upper, nearest, linear, or coszen, the data in the input file will be "interpolated" to the time in the model.
+
+The offset shifts the time axis of the input data the given number of seconds.
+So if the input data is at 0, 3600, 7200, 10800 seconds (hourly) and you set an offset of 1800, then the input data will be set at times 1800, 5400, 9000, and 12600.
+So a model at time 3600 using linear interpolation would have data at "n=2" with an offset of 0, and would have data at "n=(2+3)/2" with an offset of 1800.
+n=2 is the 2nd data in the time list 0, 3600, 7200, 10800 in this example.
+n=(2+3)/2 is the average of the 2nd and 3rd data in the time list 0, 3600, 7200, 10800.
 offset can be positive or negative.
Actual example: @@ -405,7 +439,7 @@ Actual example: Customizing stream description files -------------------------------------------------- -Each data model's **cime-config/buildnml** utility automatically generates the required stream description files for the case. +Each data model's **cime-config/buildnml** utility automatically generates the required stream description files for the case. The directory contents of each data model will look like the following (using DATM as an example) :: @@ -413,7 +447,7 @@ The directory contents of each data model will look like the following (using DA $CIMEROOT/components/data_comps/datm/cime_config/namelist_definition_datm.xml The ``namelist_definition_datm.xml`` file defines and sets default values for all the namelist variables and associated groups and also provides out-of-the box settings for the target data model and target stream. -**buildnml** utilizes this two files to construct the stream files for the given compset settings. You can modify the generated stream files for your particular needs by doing the following: +**buildnml** utilizes these two files to construct the stream files for the given compset settings. You can modify the generated stream files for your particular needs by doing the following: 1. Copy the relevant description file from ``$CASEROOT/CaseDocs`` to ``$CASEROOT`` and pre-pend a "\user_"\ string to the filename. Change the permission of the file to write. For example, assuming you are in **$CASEROOT** @@ -422,8 +456,7 @@ The ``namelist_definition_datm.xml`` file defines and sets default values for al cp $CASEROOT/CaseDocs/datm.streams.txt.CLM_QIAN.Solar $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar chmod u+w $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar -2. - - Edit ``$CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar`` with your desired changes. +2. Edit ``$CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar`` with your desired changes. 
- *Be sure not to put any tab characters in the file: use spaces instead*. diff --git a/doc/source/data_models/introduction.rst b/doc/source/data_models/introduction.rst index 3ec1846238a..c2b668ff380 100644 --- a/doc/source/data_models/introduction.rst +++ b/doc/source/data_models/introduction.rst @@ -15,149 +15,84 @@ However, rather than determining these fields prognostically, most data models s The data models typically read gridded data from observations or reanalysis products. Out of the box, they often provide a few possible data sources and/or time periods that you can choose from when setting up a case. However, data models can also be configured to read output from a previous coupled run. -For example, you can perform a fully-coupled run in which you ask for particular extra output streams; you can then use these saved "driver history" files as inputs to datm to run a later land-only spinup. +For example, you can perform a fully-coupled run in which you ask for particular extra output streams; you can then use these saved "coupler history" files as inputs to datm to run a later land-only spinup. In some cases, data models have prognostic functionality, that is, they also receive and use data sent by the driver. However, in most cases, the data models are not running prognostically and have no need to receive any data from the driver. -The CIME data models have parallel capability and share significant amounts of source code. +The CIME data models have parallel capability and share significant amounts of source code. Methods for reading and interpolating data have been established and can easily be reused: -The data model calls strdata ("stream data") methods which then call stream methods. -The stream methods are responsible for managing lists of input data files and their time axis. -The information is then passed up to the strdata methods where the data is read and interpolated in space and time. 
+The data model calls strdata ("stream data") methods which then call stream methods. +The stream methods are responsible for managing lists of input data files and their time axes. +The information is then passed up to the strdata methods where the data is read and interpolated in space and time. The interpolated data is passed up to the data model where final fields are derived, packed, and returned to the driver. ------ Design ------ -Data models function by reading in different streams of input data and interpolating those data both spatially and temporally to the appropriate final model grid and model time. +Data models function by reading in different streams of input data and interpolating those data both spatially and temporally to the appropriate final model grid and model time. +The strdata implementation does the following: -- **Each data model** +1. determines nearest lower and upper bound data from the input dataset +2. if that is new data then read lower and upper bound data +3. fill lower and upper bound data +4. spatially map lower and upper bound data to model grid +5. time interpolate lower and upper bound data to model time +6. return fields to data model - - communicates with the driver with fields on only the data model model grid. +The two timestamps of input data that bracket the present model time are read first. +These are called the lower and upper bounds of data and will change as the model advances. +Those two sets of inputdata are first filled based on the user setting of the namelist variables ``str_fillalgo`` and ``str_fillmask``. +That operation occurs on the input data grid. +The lower and upper bound data are then spatially mapped to the model grid based upon the user setting of the namelist variables ``str_mapalgo`` and ``str_mapmask``. +Spatial interpolation only occurs if the input data grid and model grid are not identical, and this is determined in the strdata module automatically. 
+Time interpolation is the final step and is done using a time interpolation method specified by the user in namelist (via the ``shr_strdata_nml`` namelist variable ``tintalgo``). +A final set of fields is then available to the data model on the model grid and for the current model time. +(See the :ref:`stream data namelist section ` for details on these and other namelist variables.) - - can be associated with multiple streams +**Each data model** - - is associated with only one datamode value (specified in the ``shr_strdata_nml`` namelist group) +- communicates with the driver with fields on only the data model grid - - has an xml variable in ``env_run.xml`` that specifies its mode. These are: - ``DATM_MODE``, ``DICE_MODE``, ``DLND_MODE``, ``DOCN_MODE``, ``DROF_MODE``, ``DWAV_MODE``. +- can be associated with multiple :ref:`streams` -- **Each ``DXXX_MODE`` xml variable variable specfies 2 things:** +- has an xml variable in ``env_run.xml`` that specifies its mode. + These are: ``DATM_MODE``, ``DICE_MODE``, ``DLND_MODE``, ``DOCN_MODE``, ``DROF_MODE``, ``DWAV_MODE``. + Each data model mode specifies the streams that are associated with that data model. - - the list of streams that are associated with the data model. - - - a ``datamode`` namelist variable that is associated with each data model and that determines if additional operations need to be performed on on the input streams before returning to the driver. - - at a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. - - - ``NULL`` - turns off the data model as a provider of data to the coupler. +- has two :ref:`namelist` groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. - - ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. 
- -- **Each data model stream** +- is associated with only one stream-independent namelist variable ``datamode`` (specified in the ``shr_strdata_nml`` namelist group) that determines if additional operations need to be performed on the input streams before returning to the driver. - - can be associated with multiple stream input files (specified in the ``shr_strdata_nml`` namelist group). -- **Each stream input file** +**Each** ``DXXX_MODE`` **xml variable variable specfies 2 things:** - - can contain data on a unique grid and unique temporal time stamps. +- the list of streams that are associated with the data model. - - is interpolated to a single model grid and the present model time. +- a ``datamode`` namelist variable that is associated with each data model and that determines if additional operations need to be performed on the input streams before returning to the driver. -More details of the data model design are covered in :ref:`design details`. + At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. --------------- -Namelist Input --------------- + - ``NULL`` - turns off the data model as a provider of data to the coupler. -Each data model has two namelist groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. + - ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. -The stream-dependent namelist group (``shr_strdata_nml``) specifies the data model mode, stream description text files, and interpolation options. -The stream description files will be provided as separate input files and contain the files and fields that need to be read. -The stream-independent namelist group (one of ``[datm_nml, dice_nml, dlnd_nml, docn_nml, drof_nml, dwav_nml]``) contains namelist input such as the data model decomposition, etc. 
-From a user perspective, for any data model, it is important to know what modes are supported and the internal field names in the data model. -That information will be used in the strdata namelist and stream input files. +**Each data model stream** -Users will primarily setup different data model configurations through namelist settings. -**The strdata and stream input options and format are identical for all data models**. -The data model specific namelist has significant overlap between data models, but each data model has a slightly different set of input namelist variables and each model reads that namelist from a unique filename. -The detailed namelist options for each data model will be described later, but each model will specify a filename or filenames for strdata namelist input and each strdata namelist will specify a set of stream input files. +- can be associated with multiple stream input files (specified in the ``shr_strdata_nml`` namelist group). -The following example illustrates the basic set of namelist inputs:: - &dlnd_nml - decomp = '1d' - / - &shr_strdata_nml - dataMode = 'CPLHIST' - domainFile = 'grid.nc' - streams = 'streama', 'streamb', 'streamc' - mapalgo = 'interpa', 'interpb', 'interpc' - / +**Each stream input file** -As mentioned above, the ``dataMode`` namelist variable that is associated with each data model specifies if there is any additional operations that need to be performed on that data model's input streams before return to the driver. -At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. +- can contain data on a unique grid and unique temporal time stamps. -- ``NULL`` - turns off the data model as a provider of data to the coupler. +- is interpolated to a single model grid and the present model time. -- ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero. 
- -Three stream description files are then expected to be available, ``streama``, ``streamb`` and ``streamc``. -Those files specify the input data filenames, input data grids, and input fields that are expected, among other things. -The stream files are **not** Fortran namelist format. -Their format and options will be described later. -As an example, one of the stream description files might look like -:: - - - - GENERIC - - - - dn10 dens - slp_ pslv - q_10 shum - t_10 tbot - u_10 u - v_10 v - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - 0 - - - nyf.ncep.T62.050923.nc - - - - - time time - lon lon - lat lat - area area - mask mask - - - /glade/proj3/cseg/inputdata/atm/datm7/NYF - - - nyf.ncep.T62.050923.nc - - - - - -In general, these examples of input files are not complete, but they do show the general hierarchy and feel of the data model input. +More details of the data model design are covered in :ref:`design details`. ------------- Next Sections ------------- -In the next sections, more details will be presented including a full description of the science modes and namelist settings for the data atmosphere, data land, data runoff, data ocean, and data ice models; namelist settings for the strdata namelist input; a description of the format and options for the stream description input files; and a list of internal field names for each of the data components. +In the next sections, more details will be presented, including a full description of the science modes and namelist settings for the data atmosphere, data land, data runoff, data ocean, and data ice models; namelist settings for the strdata namelist input; a description of the format and options for the stream description input files; and a list of internal field names for each of the data components. The internal data model field names are important because they are used to setup the stream description files and to map the input data fields to the internal data model field names. 
- - diff --git a/doc/source/misc_tools/index.rst b/doc/source/misc_tools/index.rst index 1edc0e99356..13fe0b4ec40 100644 --- a/doc/source/misc_tools/index.rst +++ b/doc/source/misc_tools/index.rst @@ -23,7 +23,7 @@ Mapping Tools cprnc -load-balancing-tool +load-balancing-tool.rst Indices and tables ================== diff --git a/doc/source/misc_tools/load-balancing-tool.rst b/doc/source/misc_tools/load-balancing-tool.rst new file mode 100644 index 00000000000..3217579e0f4 --- /dev/null +++ b/doc/source/misc_tools/load-balancing-tool.rst @@ -0,0 +1,368 @@ +.. _load_balancing_tool: + + +************************* + CIME Load Balancing Tool +************************* + Originally Developed by Sheri Mickelson mickelso@ucar.edu + Yuri Alekseev (ALCF/Argonne National Laboratory + + Updated 2017 Jason Sarich sarich@mcs.anl.gov (Argonne National Laboratory) + + +This Load Balancing tool performs several operations intended to find +a reasonable PE layout for CIME simulations. These operations involve two +steps:: + + 1. load_balancing_submit.py + Run a series of simulations in order to obtain timing data + + 2. load_balancing_solve.py + Using the data provided in the previous program, solve a mixed integer + linear program to optimize the model throughput. Requires installation + of PuLP and uses the included COIN-CBC solver. (https://pythonhosted.org/PuLP) + +Also in this documentation is:: + + 3. More about the algorithm used + + 4. Extending the solver for other models + + + +***************** +For the impatient +***************** + +set PYTHONPATH to include + $CIME_DIR/scripts:$CIME_DIR/tools/load_balancing_tool + +create PE XML file to describe the PE layouts for the timing runs +.. code-block:: sh + +$ ./load_balancing_submit.py --res --compset --pesfile + +# wait for jobs to run + +$ ./load_balancing_solve.py --total-tasks --blocksize 8 + + + +******* +Testing +******* +.. 
code-block:: bash
+
+set PYTHONPATH to include
+    $CIME_DIR/scripts:$CIME_DIR/tools/load_balancing_tool
+
+# Run an example:
+$ ./load_balancing_solve.py --json-input tests/example.json --blocksize 8
+Solving Mixed Integer Linear Program using PuLP interface to COIN-CBC
+PuLP solver status: Solved
+COST_ATM = 22.567587
+COST_ICE = 1.375768
+COST_LND = 1.316000
+COST_OCN = 15.745000
+COST_TOTAL = 23.943355
+NBLOCKS_ATM = 124
+NBLOCKS_ICE = 109
+NBLOCKS_LND = 15
+NBLOCKS_OCN = 4
+NTASKS_ATM = 992
+NTASKS_ICE = 872
+NTASKS_LND = 120
+NTASKS_OCN = 32
+NTASKS_TOTAL = 1024
+
+# Run the test suite:
+$ ./tests/load_balancing_test.py
+
+
+******************************************************************
+1. Running simulations using load_balancing_submit.py
+******************************************************************
+
+Simulations can be run on a given system by executing the load_balancing_submit.py
+script, located in cime/tools/load_balancing_tool/load_balancing_submit.py.
+This creates timing files in the case directory which will be used to solve
+a mixed integer linear program optimizing the layout.
+
+As with the create_newcase and create_test scripts, command line options
+are used to tailor the simulations for a given model. These values will be
+directly forwarded to the passed::
+
+    --compiler
+    --project
+    --compset (required)
+    --res (required)
+    --machine
+
+Other options include::
+
+    --pesfile (required)
+        This file is used to designate the pes layouts that
+        are used to create the timing data. The format is the same used
+        by CIME pes_files, but note that the 'pesize' tag will be used
+        to generate the casename. Also, this file will not be directly
+        passed through to CIME, but rather it will trigger xmlchange
+        commands to execute based on the values in the file.
+
+    --test-id
+        By default, the load balancing tool will use casenames:
+            PFS_I0.res.compset.lbt
+            PFS_I1.res.compset.lbt
+            ...
+            PFS_IN.res.compset.lbt
+        for each simulation requested.
These casenames will be forwarded to + the create_test script. + + Using this option will instead direct the tool to use: + PFS_I0.res.compset.test-id + PFS_I1.res.compset.test-id + ... + PFS_IN.res.compset.test-id + + --force-purge + Force the tool to remove any existing case directories if they + exist. Removes PFS_I*.res.compset.test-id + + --extra-options-file + Add extra xml options to the timing runs from a user file, + these options will be set after create_newcase and before + case.setup. + This text file should have one variable per line in + the format =. Example: + + STOP_OPTION=ndays + STOP_N=7 + DOUT_S=FALSE + + +****************************************************************** +2. Optimizing the layout using load_balacing_solve.py +****************************************************************** + +Reads timing data created with load_balancing_submit.py (or otherwise, +see --timing-files option) and solves an mixed integer optimization problem +using these timings. The default layout (IceLndAtmOcn) minimizes the cost per +model day assuming the layout:: + + ____________________ + | ICE | LND | | + |______|_______| | + | | OCN | + | ATM | | + |______________|_____| + + +An IceLndWavAtmOcn layout is also available. It is possible to extend +this tool to solve for other layouts (See Section 4 Extending the Load +Balancing Tool) + +Note -- threading is not considered part of this optimization, it is assumed that +all timing data have the same threading structure (i.e. all ATM runs use two threads per PE):: + + --layout + Name of the class used to solve the layout problem. The only built-in + class at this time is the default IceLndAtmOcn, but this can be extended. + See section 4 Extending the Load Balancing Tool + + --total-tasks N (required) + The total number of PEs that can be assigned + + --timing-dir + Optional, read in all files from this directory as timing data + + --test-id + The test-id used when submitting the timing jobs. 
This option can also
+        be used to set a single directory where ALL of the timing data is.
+        The solver will extract data from timing files that match either pattern:
+            <casename>.test-id/timing/timing.<casename>.test-id
+            <casename>.test-id/timing/timing.<casename>.test-id
+
+    --blocksize N
+        The blocksize is the granularity of processors that will be grouped
+        together, useful for when PEs need to be multiples of 8, 16, etc.
+
+    --blocksize-XXX N
+        Components don't all have to have the same blocksize. The default
+        blocksize given by --blocksize can be overridden for a given component
+        using this option, where XXX can be ATM, ICE, GLC, etc.
+        Example:
+            --blocksize 8 --blocksize-GLC 1
+        will set the GLC blocksize to 1 and all other blocksizes to 8
+
+    --milp-output
+        After extracting data from timing files and before solving, write the
+        data to a .json file where it can be analyzed or manually edited.
+
+    --milp-input
+        Read in the problem from the given .json file instead of extracting from
+        timing files.
+
+    --pe-output
+        Write the solution PE layout to a potential pe xml file.
+
+
+***************************
+3. More about the algorithm
+***************************
+
+Before solving the mixed-integer linear program, a model of the cost vs ntasks
+function is constructed for each component.
+
+Given a component data set of costs (C1,C2,..,Cn) and nblocks (N1,N2,..,Nn),
+then a piecewise set of n+1 linear constraints is created using the idea:
+
+If N < N1 (which means that N1 cannot be 1), then assume that there is
+perfect scalability from N to N1. Thus the cost is on the line
+defined by the points (1, C1*N1) - (N1, C1).
+
+If N is between N_i and N_{i+1}, then the cost is on the line defined by the
+points (N_i, C_i) and (N_{i+1}, C_{i+1}).
+ +If N > Nn, then we want to extrapolate the cost at N=total_tasks + (we define N{n+1} = total_tasks, C{n+1} = estimated cost using all nodes) + Assuming perfect scalability is problematic at this level, so we instead + assume that the parallel efficiency drops at the same factor as it does + from N=N{n-1} to N = Nn + + First solve for efficiency E: + C{n-1} - Cn = E * (C{n-1} * N{n-1} / Nn) + + Then E to find C{n+1} (cost at ntasks N{n+1}): + Cn - Ct = E * (Cn * Nn / Nt) + + Now cost is on the line defined by (Nn,Cn) - (Nt,Ct) + +Assuming that this piecewise linear function describes a convex function, we do +not have to explicitly construct this piecewise function and can instead use +each of the cost functions on the entire domain. + +These piecewise linear models give us the following linear constraints, where +the model time cost C as a function of N (ntasks) for each component +is constrained by:: + + C >= Ci - Ni * (C{i+1}-Ci) / (N{i+1}-Ni) + + N * (C{i+1}-Ci) / (N{i+1}-Ni) for i=0..n + + +These constraints should be in effect for any extensions of the solver (the +components involved may be different). + +There are options available in load_balancing_submit.py to inspect these +piecewise linear models:: + + --graph-models (requires matplotlib) + --print-models (debugging modes writes the models to the log) + + +Now that these constraints are defined, the mixed integer linear program (MILP) +follows from the layout:: + + NOTES: variable N[c] is number of tasks assigned for component c + variable NB[c] is the number of blocks assigned to component c + constant C[c]_i is the cost contributed by component c from + timing data set i + constant N[c]_i is the ntasks assigned to component c from + timing data set i + + ____________________ + | ICE | LND | | + T1 |______|_______| | + | | OCN | + | ATM | | + T |______________|_____| + + Min T + s.t. 
Tice <= T1 + Tlnd <= T1 + T1 + Tatm <= T + Tocn <= T + + NB[c] >= 1 for c in [ice,lnd,ocn,atm] + N[ice] + N[lnd] <= N[atm] + N[atm] + N[ocn] <= TotalTasks + N[c] = blocksize * NB[c], for c in [ice,lnd,ocn,atm] + + + T[c] >= C[c]_{i} - N[c]_{i} * + (C[c]_{i+1} - C[c]_{i}) / (N[c]_{i+1} - N[c]_{i}) + + N[c] * (C[c]_{i+1} - C[c]_{i}) + / (N[c]_{i+1} - N[c]_{i}), + for i=0..#data points (original + extrapolated, + c in [ice,lnd,ocn,atm] + all T vars >=0 + all N,NB vars integer + +This MILP is solved using the PuLP python interface to the COIN-CBC solver +https://pythonhosted.org/PuLP/ +https://www.coin-or.org/Cbc/ + + +************************************ +4. Extending the Load Balancing Tool +************************************ +The file $CIME_DIR/tools/load_balancing_tool/optimize_model.py +contains a base class OptimizeModel as well as an implementation class +IceLndAtmOcn. Any layout solver will look similar to IceLndAtmOcn +except for the components involved and the layout-specific constraints. + +Example class and inherited methods that should be overridden: + +file my_new_layout.py: +..code-block:: python + +import optimize_model + +class MyNewLayout(optimize_model.OptimizeModel) + def get_required_components(self): + """ + Should be overridden by derived class. Return a list of required + components (capitalized) used in the layout. + Example: return ['ATM', 'LND', 'ICE'] + """ + + def optimize(self): + """ + Run the optimization. + Must set self.state using LpStatus object + LpStatusOptimal -> STATE_SOLVED_OK + LpStatusNotSolved -> STATE_UNSOLVED + LpStatusInfeasible -> STATE_SOLVED_BAD + LpStatusUnbounded -> STATE_SOLVED_BAD + LpStatusUndefined -> STATE_UNDEFINED + -- use self.set_state(lpstatus) -- + Returns state + + If solved, then solution will be stored in self.X dictionary, indexed + by variable name. Suggested convention: + 'Tice', 'Tlnd', ... for cost per component + 'Nice', 'Nlnd', ... for ntasks per component + 'NBice', 'NBlnd', ... 
for number of blocks per component + + The default implementation of get_solution() returns a dictionary + of these variable keys and their values. + """ + + def get_solution(self): + """ + Return a dictionary of the solution variables, can be overridden. + Default implementation returns values in self.X + """ + + +To use this new layout: + 1. save the class MyNewLayout in file my_new_layout.py + 2. make sure that my_new_layout.py is in PYTHONPATH + 3. Use those names in your execution command line argument to --layout + $ ./load_balancing_solve.py ... --layout my_new_layout.MyNewLayout + + -- to permanently add to CIME -- + + 1. add MyNewLayout class to layouts.py + 2. run using '--layout MyNewLayout' + 3. add test in tests/load_balance_test.py that uses that name in command + line argument (see test for atm_lnd) + 4. make pull request diff --git a/doc/source/users_guide/customizing-a-case.rst b/doc/source/users_guide/customizing-a-case.rst index 0ec88eceed2..a36c80e4c04 100644 --- a/doc/source/users_guide/customizing-a-case.rst +++ b/doc/source/users_guide/customizing-a-case.rst @@ -63,21 +63,17 @@ The component processor layout is determined by the following settings: The entries in **env_mach_pes.xml** have the following meanings: - ================== ================================================================================================ - MAX_TASKS_PER_MODE The total number of (MPI tasks) * (OpenMP threads) allowed on a node. - This is defined in **config_machines.xml** and therefore given a default setting, but - can be user modified. - PES_PER_NODE The maximum number of MPI tasks per node. - This is defined in **config_machines.xml** and therefore given a default setting, but - can be user modified. - NTASKS Total number of MPI tasks. - A negative value indicates nodes rather than tasks, where - PES_PER_NODE * -NTASKS equals the number of MPI tasks. - NTHRDS Number of OpenMP threads per MPI task. 
- ROOTPE The global MPI task of the component root task; if negative, indicates nodes rather than tasks. - PSTRID The stride of MPI tasks across the global set of pes (for now set to 1). - NINST The number of component instances, which are spread evenly across NTASKS. - ================== ================================================================================================ +.. csv-table:: "Entries in env_mach_pes.xml" + :header: "xml variable", "description" + :widths: 25, 75 + + "MAX_TASKS_PER_MODE", "The total number of (MPI tasks) * (OpenMP threads) allowed on a node. This is defined in **config_machines.xml** and therefore given a default setting, but can be user modified." + "MAX_MPITASKS_PER_NODE", "The maximum number of MPI tasks per node. This is defined in **config_machines.xml** and therefore given a default setting, but can be user modified." + "NTASKS", "Total number of MPI tasks. A negative value indicates nodes rather than tasks, where MAX_MPITASKS_PER_NODE * -NTASKS equals the number of MPI tasks." + "NTHRDS", "Number of OpenMP threads per MPI task." + "ROOTPE", "The global MPI task of the component root task; if negative, indicates nodes rather than tasks." + "PSTRID", "The stride of MPI tasks across the global set of pes (for now set to 1)." + "NINST", "The number of component instances, which are spread evenly across NTASKS." ---------------- **Example 1** @@ -117,9 +113,9 @@ If you had set ``ROOTPE_OCN`` to 64 in this example, a total of 176 processors w **Example 2** ---------------- -If a component has **NTASKS=-2**, **NTHRDS=4** and **ROOTPE=0**, **PES_PER_NODE=4**, **MAX_TASKS_PER_NODE=4**, it will run on (8 MPI tasks * 4 threads) = 32 hardware processors on 8 nodes. +If a component has **NTASKS=-2**, **NTHRDS=4** and **ROOTPE=0**, **MAX_MPITASKS_PER_NODE=4**, **MAX_TASKS_PER_NODE=4**, it will run on (8 MPI tasks * 4 threads) = 32 hardware processors on 8 nodes. 
-If you intended 2 nodes INSTEAD of 8 nodes, then you would change **PES_PER_NODE=1** (using **xmlchange**). +If you intended 2 nodes INSTEAD of 8 nodes, then you would change **MAX_MPITASKS_PER_NODE=1** (using **xmlchange**). **Note**: **env_mach_pes.xml** *cannot* be modified after **case.setup** has been invoked without first running the following: diff --git a/doc/source/users_guide/porting-cime.rst b/doc/source/users_guide/porting-cime.rst index beb6598e9b5..052915c7e2d 100644 --- a/doc/source/users_guide/porting-cime.rst +++ b/doc/source/users_guide/porting-cime.rst @@ -117,7 +117,7 @@ Each ```` tag requires the following input: - ``BATCH_SYSTEM``: batch system used on this machine (none is okay) - ``SUPPORTED_BY``: contact information for support for this system - ``MAX_TASKS_PER_NODE``: maximum number of threads/tasks per shared memory node on the machine -- ``PES_PER_NODE``: number of physical PES per shared node on the machine. In practice the MPI tasks per node will not exceed this value. +- ``MAX_MPITASKS_PER_NODE``: number of physical PES per shared node on the machine. In practice the MPI tasks per node will not exceed this value. - ``PROJECT_REQUIRED``: Does this machine require a project to be specified to the batch system? - ``mpirun``: The mpi exec to start a job on this machine. 
This is itself an element that has sub-elements that must be filled: diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build index c8ec78096c0..4ac984e3745 100755 --- a/scripts/Tools/case.build +++ b/scripts/Tools/case.build @@ -84,7 +84,7 @@ def _main_func(description): if cleanlist is not None or clean_all: build.clean(case, cleanlist, clean_all) elif(testname is not None): - logging.warn("Building test for {} in directory {}".format(testname, + logging.warning("Building test for {} in directory {}".format(testname, caseroot)) try: # The following line can throw exceptions if the testname is diff --git a/scripts/Tools/case.cmpgen_namelists b/scripts/Tools/case.cmpgen_namelists index 00cddeac4c6..450f5ec5a62 100755 --- a/scripts/Tools/case.cmpgen_namelists +++ b/scripts/Tools/case.cmpgen_namelists @@ -49,9 +49,12 @@ OR help="Force generation to use baselines with this name. " "Default will be to follow the case specification") + parser.add_argument("--baseline-root", + help="Root of baselines.") + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name + return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name, args.baseline_root ############################################################################### def _main_func(description): @@ -60,9 +63,10 @@ def _main_func(description): test_results = doctest.testmod(verbose=True) sys.exit(1 if test_results.failed > 0 else 0) - caseroot, compare, generate, compare_name, generate_name = parse_command_line(sys.argv, description) + caseroot, compare, generate, compare_name, generate_name, baseline_root \ + = parse_command_line(sys.argv, description) with Case(caseroot, read_only=True) as case: - success = case_cmpgen_namelists(case, compare, generate, compare_name, generate_name) + success = case_cmpgen_namelists(case, compare, generate, compare_name, generate_name, 
baseline_root) sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff index bad8bd81531..6568632afb1 100755 --- a/scripts/Tools/case_diff +++ b/scripts/Tools/case_diff @@ -6,12 +6,10 @@ directory trees. """ from standard_script_setup import * -from CIME.utils import run_cmd +from CIME.utils import run_cmd, run_cmd_no_fail import argparse, sys, os, doctest -IGNORE = [".git", "bin", "bakefiles", "SNTools.project"] - ############################################################################### def parse_command_line(args, description): ############################################################################### @@ -35,7 +33,8 @@ OR parser.add_argument("case2", help="Second case.") - parser.add_argument("skip_list", nargs="*", help="skip these files") + parser.add_argument("skip_list", nargs="*", + help="skip these files. You'll probably want to skip the bld directory if it's inside the case") parser.add_argument("-b", "--show-binary", action="store_true", help="Show binary diffs") @@ -45,7 +44,7 @@ OR return args.case1, args.case2, args.show_binary, args.skip_list ############################################################################### -def recursive_diff(dir1, dir2, show_binary=False, skip_list=()): +def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): ############################################################################### """ Starting at dir1, dir2 respectively, compare their contents @@ -68,14 +67,14 @@ def recursive_diff(dir1, dir2, show_binary=False, skip_list=()): # Print the unique items for dirname, set_obj in [(dir1, dir1_only), (dir2, dir2_only)]: for item in sorted(set_obj): - if (item not in IGNORE and item not in skip_list): - print "===============================================================================" - print os.path.join(dirname, item), "is unique" + if (item not in skip_list): + print 
("===============================================================================") + print (os.path.join(dirname, item), "is unique") num_differing_files += 1 # Handling of the common items is trickier for item in sorted(both): - if (item in IGNORE or item in skip_list): + if (item in skip_list): continue path1 = os.path.join(dir1, item) path2 = os.path.join(dir2, item) @@ -83,8 +82,8 @@ def recursive_diff(dir1, dir2, show_binary=False, skip_list=()): # If the directory status of the files differs, report diff if (path1isdir != os.path.isdir(path2)): - print "===============================================================================" - print path1 + " DIFFERS (directory status)" + print ("===============================================================================") + print (path1 + " DIFFERS (directory status)") num_differing_files += 1 continue @@ -92,7 +91,7 @@ def recursive_diff(dir1, dir2, show_binary=False, skip_list=()): # files are directories, recursively check them, otherwise check # that the file contents match if (path1isdir): - num_differing_files += recursive_diff(path1, path2, show_binary) + num_differing_files += recursive_diff(path1, path2, repls, show_binary, skip_list) else: # # As a (huge) performance enhancement, if the files have the same # # size, we assume the contents match @@ -101,17 +100,21 @@ def recursive_diff(dir1, dir2, show_binary=False, skip_list=()): stat, out, err = run_cmd("file {}".format(path1)) if (stat != 0): - logging.warn("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) + logging.warning("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) continue is_text_file = "text" in out if (not (not show_binary and not is_text_file)): - stat, out, _ = run_cmd("diff -w {} {}".format(path1, path2)) + the_text = open(path2, "r").read() + for replace_item, replace_with in repls.iteritems(): + the_text = the_text.replace(replace_item, replace_with) + + stat, out, _ = run_cmd("diff 
-w {} -".format(path1), input_str=the_text) if (stat != 0): - print "===============================================================================" - print path1 + " DIFFERS (contents)" + print ("===============================================================================") + print (path1 + " DIFFERS (contents)") num_differing_files += 1 - print " ", out + print (" "+ out) return num_differing_files @@ -124,8 +127,15 @@ def _main_func(description): case1, case2, show_binary, skip_list = parse_command_line(sys.argv, description) - num_differing_files = recursive_diff(case1, case2, show_binary, skip_list) - print num_differing_files, "files are different" + xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] + repls = {} + for xml_normalize_field in xml_normalize_fields: + val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1) + val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2) + repls[val2] = val1 + + num_differing_files = recursive_diff(case1, case2, repls, show_binary, skip_list) + print (num_differing_files, "files are different") sys.exit(0 if num_differing_files == 0 else 1) ############################################################################### diff --git a/scripts/Tools/cime_bisect b/scripts/Tools/cime_bisect index da144df52f1..23d3dde1ecc 100755 --- a/scripts/Tools/cime_bisect +++ b/scripts/Tools/cime_bisect @@ -95,9 +95,9 @@ def cime_bisect(testargs, good, bad, testroot, compiler, project, baseline_name, commits_we_want_to_test = run_cmd_no_fail("git rev-list {}..{} --merges --first-parent".format(good, bad)).splitlines() all_commits_ = run_cmd_no_fail("git rev-list {}..{}".format(good, bad)).splitlines() commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) - print "Skipping these non-merge commits" + print("Skipping these non-merge commits") for item in commits_to_skip: - print item + print(item) else: commits_to_skip = set() @@ -145,8 +145,8 @@ def 
cime_bisect(testargs, good, bad, testroot, compiler, project, baseline_name, bad_commits_filtered = bad_commits - commits_to_skip expect(len(bad_commits_filtered) == 1, bad_commits_filtered) - print "Bad merge is:" - print run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop())) + print("Bad merge is:") + print(run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop()))) finally: run_cmd_no_fail("git bisect reset") diff --git a/scripts/Tools/cimeteststatus b/scripts/Tools/cimeteststatus index 7c8380ffc6a..4978d6e8ff1 100755 --- a/scripts/Tools/cimeteststatus +++ b/scripts/Tools/cimeteststatus @@ -5,9 +5,9 @@ Purpose: Give basic and detailed summaries of CIME(CESM) tests, and send the test results back to the test database. Authors: Jay Shollenberger and Ben Andre """ - -from __future__ import print_function -import sys +from standard_script_setup import * +#pylint: disable=import-error +from six.moves import urllib if sys.hexversion < 0x02070000: print(70 * "*") print("ERROR: {0} requires python >= 2.7.x. ".format(sys.argv[0])) @@ -17,12 +17,11 @@ if sys.hexversion < 0x02070000: sys.exit(1) import xml.etree.ElementTree as etree import argparse -import os, glob, re -import urllib -import urllib2 +import glob, re import pprint import getpass + testdburl = "https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" class CimeTestStatus(): @@ -354,10 +353,10 @@ def getSuiteInfo(specfile): for t in root.findall('test'): testlist.append(t.get('case')) - if 'machine' not in suiteinfo.keys(): + if 'machine' not in suiteinfo: machnodelist = t.findall('mach') suiteinfo['machine'] = machnodelist[0].text - if 'compiler' not in suiteinfo.keys(): + if 'compiler' not in suiteinfo: compnodelist = t.findall('compiler') suiteinfo['compiler'] = compnodelist[0].text @@ -527,13 +526,13 @@ def sendTestReport(args, suiteinfo, cimetests, auth): # Get the testdb username/password, and POST # the data. 
print("sending test report for " + suiteinfo['machine'] + " " + suiteinfo['compiler']) - data = urllib.urlencode({'username':auth['username'], + data = urllib.parse.urlencode({'username':auth['username'], 'password':auth['password'], 'testXML':testrecord}) - req = urllib2.Request(testdburl, data) + req = urllib.request.Request(testdburl, data) try: - urllib2.urlopen(req) - except urllib2.URLError as e: + urllib.request.urlopen(req) + except urllib.error.URLError as e: print("Error when posting data: " + e.reason) if(args.debug): @@ -552,7 +551,7 @@ def authenticate(): auth['password'] = getpass.getpass("enter TestDB password:") return auth -def main(): +def _main_func(): """ Parse the arguments, get the suite information from the test spec, get the test statuses, then print a raw status, test summary, or send the test report. @@ -596,5 +595,4 @@ def main(): if __name__ == "__main__": - main() - + _main_func() diff --git a/scripts/Tools/code_checker b/scripts/Tools/code_checker index 63c04e88139..f5d63db1247 100755 --- a/scripts/Tools/code_checker +++ b/scripts/Tools/code_checker @@ -63,7 +63,7 @@ def _main_func(description): num_procs, files = parse_command_line(sys.argv, description) results = check_code(files, num_procs=num_procs, interactive=True) - for result in results.itervalues(): + for result in results.values(): if result != "": sys.exit(1) diff --git a/scripts/Tools/compare_namelists b/scripts/Tools/compare_namelists index 56d6eb52eea..03ca3c808d4 100755 --- a/scripts/Tools/compare_namelists +++ b/scripts/Tools/compare_namelists @@ -67,7 +67,7 @@ def _main_func(description): expect(success, "Namelist diff between files {} and {}\n{}".format(gold_file, compare_file, comments)) - print "Files {} and {} MATCH".format(gold_file, compare_file) + print("Files {} and {} MATCH".format(gold_file, compare_file)) ############################################################################### diff --git a/scripts/Tools/component_compare_baseline 
b/scripts/Tools/component_compare_baseline index b43b08c8caf..970d12600d5 100755 --- a/scripts/Tools/component_compare_baseline +++ b/scripts/Tools/component_compare_baseline @@ -45,7 +45,7 @@ def _main_func(description): caseroot, baseline_dir = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_baseline(case, baseline_dir) - print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/component_compare_test b/scripts/Tools/component_compare_test index 8deace7958f..0af20a07b15 100755 --- a/scripts/Tools/component_compare_test +++ b/scripts/Tools/component_compare_test @@ -48,7 +48,7 @@ def _main_func(description): suffix1, suffix2, caseroot = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_test(case, suffix1, suffix2) - print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline index 9a549c3ee1d..9268ab90acf 100755 --- a/scripts/Tools/component_generate_baseline +++ b/scripts/Tools/component_generate_baseline @@ -52,7 +52,7 @@ def _main_func(description): with Case(caseroot) as case: success, comments = generate_baseline(case, baseline_dir, allow_baseline_overwrite) - print comments + print(comments) sys.exit(0 if success else 1) diff --git a/scripts/Tools/cs.status b/scripts/Tools/cs.status index e75234b4fc7..2c57dc7652e 100755 --- a/scripts/Tools/cs.status +++ b/scripts/Tools/cs.status @@ -65,8 +65,9 @@ def cs_status(test_paths, summary=False): test_id_output[test_id] = output for test_id in sorted(test_id_output): - print test_id - print test_id_output[test_id], + print(test_id) + print(test_id_output[test_id]) + print(' ') ############################################################################### def _main_func(description): diff --git a/scripts/Tools/pelayout b/scripts/Tools/pelayout index 7effc06288c..6422e450287 100755 --- 
a/scripts/Tools/pelayout +++ b/scripts/Tools/pelayout @@ -113,10 +113,10 @@ def print_pelayout(case, ntasks, nthreads, rootpes, arg_format, header): comp_classes = case.get_values("COMP_CLASSES") if (header is not None): - print header + print(header) # End if for comp in comp_classes: - print format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], arg_format) + print(format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], arg_format)) # End for # End def print_pelayout diff --git a/scripts/Tools/preview_run b/scripts/Tools/preview_run index 003e036303e..5bfede3d4ab 100755 --- a/scripts/Tools/preview_run +++ b/scripts/Tools/preview_run @@ -57,14 +57,14 @@ def _main_func(description): logging.disable(logging.CRITICAL) with Case(caseroot, read_only=False) as case: - print "BATCH SUBMIT:" + print("BATCH SUBMIT:") case.load_env() job = "case.test" if case.get_value("TEST") else "case.run" job_id_to_cmd = case.submit_jobs(dry_run=True, job=job) for job_id, cmd in job_id_to_cmd: - print " ", job_id, "->", case.get_resolved_value(cmd) - print - print "MPIRUN:", case.get_resolved_value(case.get_mpirun_cmd()) + print(" ", job_id, "->", case.get_resolved_value(cmd)) + print() + print("MPIRUN:", case.get_resolved_value(case.get_mpirun_cmd())) if __name__ == "__main__": _main_func(__doc__) diff --git a/scripts/Tools/xmlquery b/scripts/Tools/xmlquery index 93239b4076f..adb85bcf2ce 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -244,9 +244,9 @@ def xmlquery_sub(case, variables, subgroup=None, fileonly=False, value = [] for comp in comp_classes: try: - nextval = get_value_as_string(case,var, attribute={"component" : comp}, resolved=resolved, subgroup=group) + nextval = get_value_as_string(case,var, attribute={"compclass" : comp}, resolved=resolved, subgroup=group) except: - nextval = get_value_as_string(case,var, attribute={"component" : comp}, resolved=False, subgroup=group) + nextval = get_value_as_string(case,var, 
attribute={"compclass" : comp}, resolved=False, subgroup=group) if nextval is not None: value.append(comp + ":" + "%s"%nextval) @@ -327,38 +327,38 @@ def _main_func(): wrapper.fix_sentence_endings = True for group in sorted(iter(results)): - if (len(variables) > 1 or len(results.keys()) > 1 or full) and not get_group: - print "\nResults in group %s"%group + if (len(variables) > 1 or len(results) > 1 or full) and not get_group: + print("\nResults in group %s"%group) for var in variables: if var in results[group]: if raw: - print results[group][var]['raw'] + print(results[group][var]['raw']) elif get_group: - print "\t%s: %s"%(var, results[group][var]['get_group']) + print("\t%s: %s"%(var, results[group][var]['get_group'])) elif value: sys.stdout.write("%s"%results[group][var]['value']) elif description: if results[group][var]['desc'][0] is not None: desc_text = ' '.join(results[group][var]['desc'][0].split()) - print "\t%s: %s"%(var, wrapper.fill(desc_text)) + print("\t%s: %s"%(var, wrapper.fill(desc_text))) elif fileonly: - print "\t%s: %s"%(var, results[group][var]['file']) + print("\t%s: %s"%(var, results[group][var]['file'])) elif dtype: - print "\t%s: %s"%(var, results[group][var]['type']) + print("\t%s: %s"%(var, results[group][var]['type'])) elif valid_values: if 'valid_values' in results[group][var]: - print "\t%s: %s"%(var, results[group][var]["valid_values"]) + print("\t%s: %s"%(var, results[group][var]["valid_values"])) elif full: if results[group][var]['desc'][0] is not None: desc_text = ' '.join(results[group][var]['desc'][0].split()) - print "\t%s: value=%s"%(var, results[group][var]['value']) - print "\t\ttype: %s"%(results[group][var]['type'][0]) + print("\t%s: value=%s"%(var, results[group][var]['value'])) + print("\t\ttype: %s"%(results[group][var]['type'][0])) if 'valid_values' in results[group][var]: - print "\t\tvalid_values: %s"%(results[group][var]["valid_values"]) - print "\t\tdescription: %s"%(wrapper.fill(desc_text)) - print "\t\tfile: 
%s"%(results[group][var]['file'][0]) + print("\t\tvalid_values: %s"%(results[group][var]["valid_values"])) + print("\t\tdescription: %s"%(wrapper.fill(desc_text))) + print("\t\tfile: %s"%(results[group][var]['file'][0])) else: - print "\t%s: %s"%(var, results[group][var]['value']) + print("\t%s: %s"%(var, results[group][var]['value'])) if (__name__ == "__main__"): diff --git a/scripts/create_clone b/scripts/create_clone index bb6d86ab665..e20f508d54e 100755 --- a/scripts/create_clone +++ b/scripts/create_clone @@ -83,7 +83,10 @@ def _main_func(): user_mods_dir = os.path.abspath(user_mods_dir) with Case(cloneroot, read_only=False) as clone: - clone.create_clone(case, keepexe, mach_dir, project, cime_output_root, user_mods_dir) + clone.create_clone(case, keepexe=keepexe, mach_dir=mach_dir, + project=project, + cime_output_root=cime_output_root, + user_mods_dir=user_mods_dir) ############################################################################### diff --git a/scripts/create_test b/scripts/create_test index 25100c903c0..5a2c2a47857 100755 --- a/scripts/create_test +++ b/scripts/create_test @@ -184,10 +184,14 @@ OR "The default is user-specified environment variable PROJECT") parser.add_argument("-t", "--test-id", - help="Specify an 'id' for the test. This is simply a" - "string that is appended to the end of a test name." - "If no testid is specified, then a time stamp plus random number" - "will be used.") + help="Specify an 'id' for the test. This is simply a " + "string that is appended to the end of a test name. " + "If no test-id is specified, then a time stamp plus random " + "string will be used (ensuring a high probability of uniqueness). " + "If test-id is specified, it is the user's responsibility to ensure " + "that each run of create_test uses a unique test-id. 
" + "(All sorts of problems can occur if you use the same test-id twice " + "on the same file system, even if the test lists are completely different.)") parser.add_argument("-j", "--parallel-jobs", type=int, default=None, help="Number of tasks create_test should perform simultaneously. Default " @@ -195,7 +199,7 @@ OR parser.add_argument("--proc-pool", type=int, default=None, help="The size of the processor pool that create_test can use. Default " - "is PES_PER_NODE + 25 percent.") + "is MAX_MPITASKS_PER_NODE + 25 percent.") parser.add_argument("--walltime", default=os.getenv("CIME_GLOBAL_WALLTIME"), help="Set the wallclock limit for all tests in the suite. " @@ -428,11 +432,10 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost with Case(first_case, read_only=False) as case: env_batch = case.get_env("batch") - directives = env_batch.get_batch_directives(case, "case.run", raw=True) submit_cmd = env_batch.get_value("batch_submit", subgroup=None) submit_args = env_batch.get_submit_args(case, "case.run") - tasks_per_node = mach.get_value("PES_PER_NODE") + tasks_per_node = mach.get_value("MAX_MPITASKS_PER_NODE") num_nodes = int(math.ceil(float(proc_pool) / tasks_per_node)) if wall_time is None: wall_time = compute_total_time(job_cost_map, proc_pool) @@ -440,7 +443,7 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost else: wall_time_bab = wall_time - queue = env_batch.select_best_queue(proc_pool, wall_time_bab) + queue = env_batch.select_best_queue(proc_pool, num_nodes, wall_time_bab) wall_time_max_bab = env_batch.get_queue_specs(queue)[3] if wall_time_max_bab is not None: wall_time_max = convert_to_seconds(wall_time_max_bab) @@ -448,21 +451,16 @@ def single_submit_impl(machine_name, test_id, proc_pool, project, args, job_cost wall_time = wall_time_max wall_time_bab = convert_to_babylonian_time(wall_time) - job_id = "create_test_single_submit_%s" % test_id - directives = directives.replace("{{ job_id }}", 
job_id) - directives = directives.replace("{{ num_nodes }}", str(num_nodes)) - directives = directives.replace("{{ tasks_per_node }}", str(tasks_per_node)) - directives = directives.replace("{{ ptile }}", str(tasks_per_node)) - directives = directives.replace("{{ totaltasks }}", str(tasks_per_node * num_nodes)) - - directives = directives.replace("{{ output_error_path }}", "create_test_single_submit_%s.err" % test_id) - directives = directives.replace("{{ job_wallclock_time }}", wall_time_bab) - directives = directives.replace("{{ job_queue }}", queue) - if project is not None: - directives = directives.replace("{{ project }}", project) - directives = directives.replace("{{ charge_account }}", charge_account) - - expect("{{" not in directives, "Could not resolve all items in directives:\n%s" % directives) + overrides = { + "job_id" : "create_test_single_submit_%s" % test_id, + "num_nodes" : num_nodes, + "tasks_per_node": tasks_per_node, + "totaltasks" : tasks_per_node * num_nodes, + "job_wallclock_time": wall_time_bab, + "job_queue": queue + } + + directives = env_batch.get_batch_directives(case, "case.run", overrides=overrides) # # Make simple submit script and submit diff --git a/scripts/lib/CIME/BuildTools/configure.py b/scripts/lib/CIME/BuildTools/configure.py index 1dd94c74667..cc3359655b0 100644 --- a/scripts/lib/CIME/BuildTools/configure.py +++ b/scripts/lib/CIME/BuildTools/configure.py @@ -82,7 +82,7 @@ def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, """ ems_path = os.path.join(output_dir, "env_mach_specific.xml") if os.path.exists(ems_path): - logger.warn("{} already exists, delete to replace".format(ems_path)) + logger.warning("{} already exists, delete to replace".format(ems_path)) return ems_file = EnvMachSpecific(output_dir, unit_testing=unit_testing) ems_file.populate(machobj) diff --git a/scripts/lib/CIME/BuildTools/macrowriterbase.py b/scripts/lib/CIME/BuildTools/macrowriterbase.py index db00e8383c4..7ccf9975336 100644 
--- a/scripts/lib/CIME/BuildTools/macrowriterbase.py +++ b/scripts/lib/CIME/BuildTools/macrowriterbase.py @@ -14,6 +14,8 @@ from abc import ABCMeta, abstractmethod from CIME.XML.standard_module_setup import * from CIME.utils import get_cime_root +from six import add_metaclass + logger = logging.getLogger(__name__) def _get_components(value): @@ -69,6 +71,7 @@ def _get_components(value): return components +@add_metaclass(ABCMeta) class MacroWriterBase(object): """Abstract base class for macro file writers. @@ -101,8 +104,6 @@ class MacroWriterBase(object): end_ifeq """ - __metaclass__ = ABCMeta - indent_increment = 2 def __init__(self, output): @@ -132,7 +133,7 @@ def write_line(self, line): A trailing newline is added, whether or not the input has one. """ - self.output.write(unicode(self.indent_string() + line + "\n")) + self.output.write(str(self.indent_string() + line + "\n")) @abstractmethod def environment_variable_string(self, name): @@ -207,7 +208,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o # A few things can be used from environ if not in XML for item in ["MPI_PATH", "NETCDF_PATH"]: if not item in macros and item in os.environ: - logger.warn("Setting {} from Environment".format(item)) + logger.warning("Setting {} from Environment".format(item)) macros[item] = os.environ[item] with open(macros_file, "w") as fd: @@ -222,7 +223,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o fd.write("#\n# Makefile Macros \n") # print the settings out to the Macros file - for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass elif key.startswith("ADD_"): @@ -248,7 +249,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o # print the settings out to the Macros file, do it in # two passes so that path values appear first in the # file. 
- for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass else: @@ -262,7 +263,7 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o fd.write("set({} {})\n".format(cmake_var, value)) fd.write("list(APPEND CMAKE_PREFIX_PATH {})\n\n".format(value)) - for key, value in sorted(macros.iteritems()): + for key, value in sorted(macros.items()): if key == "_COND_": pass else: @@ -301,10 +302,10 @@ def write_macros_file_v1(macros, compiler, os_, machine, macros_file="Macros", o def _parse_hash(macros, fd, depth, output_format, cmakedebug=""): width = 2 * depth - for key, value in macros.iteritems(): + for key, value in macros.items(): if type(value) is dict: if output_format == "make" or "DEBUG" in key: - for key2, value2 in value.iteritems(): + for key2, value2 in value.items(): if output_format == "make": fd.write("{}ifeq ($({}), {}) \n".format(" " * width, key, key2)) diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py index 5e70a6b2c5b..cdd062c9c04 100644 --- a/scripts/lib/CIME/SystemTests/dae.py +++ b/scripts/lib/CIME/SystemTests/dae.py @@ -54,7 +54,7 @@ def _case_two_setup(self): self._case.set_value("DATA_ASSIMILATION_CYCLES", 2) stopn = self._case.get_value("STOP_N") expect((stopn % 2) == 0, "ERROR: DAE test requires that STOP_N be even") - stopn = stopn / 2 + stopn = int(stopn / 2) self._case.set_value("STOP_N", stopn) self._case.flush() diff --git a/scripts/lib/CIME/SystemTests/eri.py b/scripts/lib/CIME/SystemTests/eri.py index 955b1918019..55446269aa2 100644 --- a/scripts/lib/CIME/SystemTests/eri.py +++ b/scripts/lib/CIME/SystemTests/eri.py @@ -52,12 +52,12 @@ def run_phase(self): stop_option = self._case.get_value("STOP_OPTION") run_startdate = self._case.get_value("RUN_STARTDATE") - stop_n1 = stop_n / 6 + stop_n1 = int(stop_n / 6) rest_n1 = stop_n1 start_1 = run_startdate stop_n2 = stop_n - stop_n1 - rest_n2 = stop_n2 / 2 + 1 + rest_n2 = 
int(stop_n2 / 2 + 1) hist_n = stop_n2 start_1_year, start_1_month, start_1_day = [int(item) for item in start_1.split("-")] @@ -65,7 +65,7 @@ def run_phase(self): start_2 = "{:04d}-{:02d}-{:02d}".format(start_2_year, start_1_month, start_1_day) stop_n3 = stop_n2 - rest_n2 - rest_n3 = stop_n3 / 2 + 1 + rest_n3 = int(stop_n3 / 2 + 1) stop_n4 = stop_n3 - rest_n3 diff --git a/scripts/lib/CIME/SystemTests/erio.py b/scripts/lib/CIME/SystemTests/erio.py index 90e36595eac..5859975f9c4 100644 --- a/scripts/lib/CIME/SystemTests/erio.py +++ b/scripts/lib/CIME/SystemTests/erio.py @@ -24,7 +24,7 @@ def _full_run(self, pio_type): expect(self._stop_n > 0, "Bad STOP_N: {:d}".format(self._stop_n)) # Move to config_tests.xml once that's ready - rest_n = self._stop_n/2 + 1 + rest_n = int(self._stop_n/2) + 1 self._case.set_value("REST_N", rest_n) self._case.set_value("REST_OPTION", stop_option) self._case.set_value("HIST_N", self._stop_n) @@ -39,7 +39,7 @@ def _full_run(self, pio_type): def _restart_run(self, pio_type, other_pio_type): stop_option = self._case.get_value("STOP_OPTION") - rest_n = self._stop_n/2 + 1 + rest_n = int(self._stop_n/2) + 1 stop_new = self._stop_n - rest_n expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,self._stop_n,rest_n)) diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py index 7a8b9d844f4..afe62227551 100644 --- a/scripts/lib/CIME/SystemTests/erp.py +++ b/scripts/lib/CIME/SystemTests/erp.py @@ -34,10 +34,10 @@ def _case_two_setup(self): nthreads = self._case1.get_value("NTHRDS_{}".format(comp)) rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) if ( nthreads > 1 ): - self._case.set_value("NTHRDS_{}".format(comp), nthreads/2) + self._case.set_value("NTHRDS_{}".format(comp), int(nthreads/2)) if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), ntasks/2) - self._case.set_value("ROOTPE_{}".format(comp), rootpe/2) + self._case.set_value("NTASKS_{}".format(comp), 
int(ntasks/2)) + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe/2)) RestartTest._case_two_setup(self) # Note, some components, like CESM-CICE, have diff --git a/scripts/lib/CIME/SystemTests/err.py b/scripts/lib/CIME/SystemTests/err.py index 0da24c00310..8b19c0202aa 100644 --- a/scripts/lib/CIME/SystemTests/err.py +++ b/scripts/lib/CIME/SystemTests/err.py @@ -2,6 +2,7 @@ CIME ERR test This class inherits from ERS ERR tests short term archiving and restart capabilities """ +import glob, os, shutil from CIME.XML.standard_module_setup import * from CIME.SystemTests.restart_tests import RestartTest from CIME.case_st_archive import restore_from_archive @@ -10,18 +11,26 @@ logger = logging.getLogger(__name__) class ERR(RestartTest): - def __init__(self, case): # pylint: disable=super-init-not-called """ initialize an object interface to the ERR system test """ - RestartTest.__init__(self, case, # pylint: disable=non-parent-init-called + super(ERR, self).__init__(case, separate_builds = False, run_two_suffix = 'rest', run_one_description = 'initial', run_two_description = 'restart', multisubmit = True) + def _case_one_setup(self): + super(ERR, self)._case_one_setup() + self._case.set_value("DOUT_S", True) + + def _case_two_setup(self): + super(ERR, self)._case_two_setup() + self._case.set_value("DOUT_S", False) + + def _case_two_custom_prerun_action(self): dout_s_root = self._case1.get_value("DOUT_S_ROOT") rest_root = os.path.abspath(os.path.join(dout_s_root,"rest")) @@ -29,3 +38,15 @@ def _case_two_custom_prerun_action(self): expect(len(restart_list) >= 1, "No restart files found in {}".format(rest_root)) restore_from_archive(self._case, rest_dir= os.path.join(rest_root, restart_list[0])) + + def _case_two_custom_postrun_action(self): + # Link back to original case1 name + # This is needed so that the necessary files are present for + # baseline comparison and generation, + # since some of them may have been moved to the archive directory + for case_file in 
glob.iglob(os.path.join(self._case1.get_value("RUNDIR"), + "*.nc.{}".format(self._run_one_suffix))): + orig_file = case_file[:-(1+len(self._run_one_suffix))] + if not os.path.isfile(orig_file): + shutil.copyfile(case_file, orig_file) + diff --git a/scripts/lib/CIME/SystemTests/ers.py b/scripts/lib/CIME/SystemTests/ers.py index 28698084c1d..2de668606a0 100644 --- a/scripts/lib/CIME/SystemTests/ers.py +++ b/scripts/lib/CIME/SystemTests/ers.py @@ -28,7 +28,7 @@ def _ers_second_phase(self): stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = stop_n/2 + 1 + rest_n = int(stop_n/2 + 1) stop_new = stop_n - rest_n expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/scripts/lib/CIME/SystemTests/nodefail.py index 4a1204f8876..daf5a0f8d5a 100644 --- a/scripts/lib/CIME/SystemTests/nodefail.py +++ b/scripts/lib/CIME/SystemTests/nodefail.py @@ -49,7 +49,7 @@ def _restart_fake_phase(self): with open(fake_exe_file, "w") as fd: fd.write(fake_exe) - os.chmod(fake_exe_file, 0755) + os.chmod(fake_exe_file, 0o755) prev_run_exe = self._case.get_value("run_exe") env_mach_specific = self._case.get_env("mach_specific") diff --git a/scripts/lib/CIME/SystemTests/pet.py b/scripts/lib/CIME/SystemTests/pet.py index e267789b292..3e41cc9b0cc 100644 --- a/scripts/lib/CIME/SystemTests/pet.py +++ b/scripts/lib/CIME/SystemTests/pet.py @@ -49,7 +49,7 @@ def _case_two_setup(self): # machines, if the mpiexec tries to exceed the procs-per-node that were given # to the batch submission, things break. Setting MAX_TASKS_PER_NODE to half of # it original value prevents this. 
- self._case.set_value("MAX_TASKS_PER_NODE", self._case.get_value("MAX_TASKS_PER_NODE") / 2) + self._case.set_value("MAX_TASKS_PER_NODE", int(self._case.get_value("MAX_TASKS_PER_NODE") / 2)) # Need to redo case_setup because we may have changed the number of threads case_setup(self._case, reset=True) diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py index 617c6bb4a89..06991d1fca2 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_common.py +++ b/scripts/lib/CIME/SystemTests/system_tests_common.py @@ -13,7 +13,7 @@ import CIME.build as build -import shutil, glob, gzip, time, traceback +import shutil, glob, gzip, time, traceback, six logger = logging.getLogger(__name__) @@ -88,16 +88,15 @@ def build(self, sharedlib_only=False, model_only=False): try: self.build_phase(sharedlib_only=(phase_name==SHAREDLIB_BUILD_PHASE), model_only=(phase_name==MODEL_BUILD_PHASE)) - except: + except BaseException as e: success = False - msg = sys.exc_info()[1].message - + msg = e.__str__() if "BUILD FAIL" in msg: # Don't want to print stacktrace for a model failure since that # is not a CIME/infrastructure problem. excmsg = msg else: - excmsg = "Exception during build:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) + excmsg = "Exception during build:\n{}\n{}".format(msg, traceback.format_exc()) logger.warning(excmsg) append_testlog(excmsg) @@ -155,15 +154,15 @@ def run(self): self._check_for_memleak() - except: + except BaseException as e: success = False - msg = sys.exc_info()[1].message + msg = e.__str__() if "RUN FAIL" in msg: # Don't want to print stacktrace for a model failure since that # is not a CIME/infrastructure problem. 
excmsg = msg else: - excmsg = "Exception during run:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) + excmsg = "Exception during run:\n{}\n{}".format(msg, traceback.format_exc()) logger.warning(excmsg) append_testlog(excmsg) @@ -241,10 +240,12 @@ def _coupler_log_indicates_run_complete(self): allgood = len(newestcpllogfiles) for cpllog in newestcpllogfiles: try: - if "SUCCESSFUL TERMINATION" in gzip.open(cpllog, 'rb').read(): + if six.b("SUCCESSFUL TERMINATION") in gzip.open(cpllog, 'rb').read(): allgood = allgood - 1 - except: - logger.info("{} is not compressed, assuming run failed".format(cpllog)) + except BaseException as e: + msg = e.__str__() + + logger.info("{} is not compressed, assuming run failed {}".format(cpllog, msg)) return allgood==0 @@ -282,7 +283,7 @@ def _get_mem_usage(self, cpllog): fopen = open with fopen(cpllog, "rb") as f: for line in f: - m = meminfo.match(line) + m = meminfo.match(line.decode('utf-8')) if m: memlist.append((float(m.group(1)), float(m.group(2)))) # Remove the last mem record, it's sometimes artificially high @@ -297,7 +298,7 @@ def _get_throughput(self, cpllog): """ if cpllog is not None and os.path.isfile(cpllog): with gzip.open(cpllog, "rb") as f: - cpltext = f.read() + cpltext = f.read().decode('utf-8') m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s",cpltext) if m: return float(m.group(1)) @@ -346,7 +347,7 @@ def compare_env_run(self, expected=None): diffs = f1obj.compare_xml(f2obj) for key in diffs.keys(): if expected is not None and key in expected: - logging.warn(" Resetting {} for test".format(key)) + logging.warning(" Resetting {} for test".format(key)) f1obj.set_value(key, f2obj.get_value(key, resolved=False)) else: print("WARNING: Found difference in test {}: case: {} original value {}".format(key, diffs[key][0], diffs[key][1])) @@ -389,7 +390,8 @@ def _compare_baseline(self): # compare memory usage to baseline newestcpllogfiles = self._get_latest_cpl_logs() - memlist = 
self._get_mem_usage(newestcpllogfiles[0]) + if len(newestcpllogfiles) > 0: + memlist = self._get_mem_usage(newestcpllogfiles[0]) for cpllog in newestcpllogfiles: m = re.search(r"/(cpl.*.log).*.gz",cpllog) if m is not None: @@ -468,7 +470,7 @@ def build_phase(self, sharedlib_only=False, model_only=False): f.write("#!/bin/bash\n") f.write(self._script) - os.chmod(modelexe, 0755) + os.chmod(modelexe, 0o755) build.post_build(self._case, []) diff --git a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py b/scripts/lib/CIME/SystemTests/system_tests_compare_two.py index eac7c98d61e..14ae580b163 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py +++ b/scripts/lib/CIME/SystemTests/system_tests_compare_two.py @@ -181,6 +181,11 @@ def build_phase(self, sharedlib_only=False, model_only=False): self._activate_case1() self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) self._activate_case2() + # Although we're doing separate builds, it still makes sense + # to share the sharedlibroot area with case1 so we can reuse + # pieces of the build from there. + self._case2.set_value("SHAREDLIBROOT", + self._case1.get_value("SHAREDLIBROOT")) self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) else: self._activate_case1() @@ -232,7 +237,6 @@ def run_phase(self, success_change=False): # pylint: disable=arguments-differ # Case1 is the "main" case, and we need to do the comparisons from there self._activate_case1() self._link_to_case2_output() - self._component_compare_test(self._run_one_suffix, self._run_two_suffix, success_change=success_change) def copy_case1_restarts_to_case2(self): @@ -257,8 +261,7 @@ def _get_caseroot2(self): """ Determines and returns caseroot for case2 - Assumes that self._case1 is already set to point to the case1 object, - and that self._run_two_suffix is already set. 
+ Assumes that self._case1 is already set to point to the case1 object """ casename2 = self._case1.get_value("CASE") caseroot1 = self._case1.get_value("CASEROOT") @@ -268,6 +271,49 @@ def _get_caseroot2(self): return caseroot2 + def _get_output_root2(self): + """ + Determines and returns cime_output_root for case2 + + Assumes that self._case1 is already set to point to the case1 object + """ + # Since case 2 has the same name as case1 its CIME_OUTPUT_ROOT must also be different + output_root2 = os.path.join(self._case1.get_value("CIME_OUTPUT_ROOT"), + self._case1.get_value("CASE"), "case2") + return output_root2 + + def _get_case2_exeroot(self): + """ + Gets exeroot for case2. + + Returns None if we should use the default value of exeroot. + """ + if self._separate_builds: + # Put the case2 bld directory directly under the case2 + # CIME_OUTPUT_ROOT, rather than following the typical + # practice of putting it under CIME_OUTPUT_ROOT/CASENAME, + # because the latter leads to too-long paths that make some + # compilers fail. + # + # This only works because case2's CIME_OUTPUT_ROOT is unique + # to this case. (If case2's CIME_OUTPUT_ROOT were in some + # more generic location, then this would result in its bld + # directory being inadvertently shared with other tests.) + case2_exeroot = os.path.join(self._get_output_root2(), "bld") + else: + # Use default exeroot + case2_exeroot = None + return case2_exeroot + + def _get_case2_rundir(self): + """ + Gets rundir for case2. + """ + # Put the case2 run directory alongside its bld directory for + # consistency. (See notes about EXEROOT in _get_case2_exeroot.) + case2_rundir = os.path.join(self._get_output_root2(), "run") + return case2_rundir + def _setup_cases_if_not_yet_done(self): """ Determines if case2 already exists on disk. 
If it does, this method @@ -297,13 +343,12 @@ def _setup_cases_if_not_yet_done(self): self._case2 = self._case_from_existing_caseroot(self._caseroot2) else: try: - # Since case 2 has the same name as case1 its CIME_OUTPUT_ROOT must also be different - case2_output_root = os.path.join(self._case1.get_value("CIME_OUTPUT_ROOT"), - self._case1.get_value("CASE"), "case2") self._case2 = self._case1.create_clone( self._caseroot2, keepexe = not self._separate_builds, - cime_output_root = case2_output_root) + cime_output_root = self._get_output_root2(), + exeroot = self._get_case2_exeroot(), + rundir = self._get_case2_rundir()) self._setup_cases() except: # If a problem occurred in setting up the test cases, it's diff --git a/scripts/lib/CIME/XML/compilerblock.py b/scripts/lib/CIME/XML/compilerblock.py index c3fee91e4ec..a975c7efb27 100644 --- a/scripts/lib/CIME/XML/compilerblock.py +++ b/scripts/lib/CIME/XML/compilerblock.py @@ -201,7 +201,7 @@ def _elem_to_setting(self, elem): variables that this setting depends on. """ # Attributes on an element are the conditions on that element. - conditions = dict(elem.items()) + conditions = dict(list(elem.items())) if self._compiler is not None: conditions["COMPILER"] = self._compiler # Deal with internal markup. 
diff --git a/scripts/lib/CIME/XML/compilers.py b/scripts/lib/CIME/XML/compilers.py index 41d4bdfeaf3..1bbcf93d341 100644 --- a/scripts/lib/CIME/XML/compilers.py +++ b/scripts/lib/CIME/XML/compilers.py @@ -10,6 +10,7 @@ from CIME.BuildTools.makemacroswriter import MakeMacroWriter from CIME.BuildTools.cmakemacroswriter import CMakeMacroWriter from CIME.BuildTools.macroconditiontree import merge_optional_trees +import six logger = logging.getLogger(__name__) @@ -160,7 +161,7 @@ def write_macros_file(self, macros_file="Macros.make", output_format="make", xml else: format_ = output_format - if isinstance(macros_file, basestring): + if isinstance(macros_file, six.string_types): with open(macros_file, "w") as macros: self._write_macros_file_v2(format_, macros) else: @@ -205,7 +206,7 @@ def _write_macros_file_v2(self, build_system, output, xml=None): while value_lists: # Variables that are ready to be written. ready_variables = [ - var_name for var_name in value_lists.keys() + var_name for var_name in value_lists if value_lists[var_name].depends <= vars_written ] expect(len(ready_variables) > 0, @@ -254,7 +255,7 @@ def _add_to_macros(node, macros): else: cond_macros = macros["_COND_"] - for key, value2 in attrib.iteritems(): + for key, value2 in attrib.items(): if key not in cond_macros: cond_macros[key] = {} if value2 not in cond_macros[key]: diff --git a/scripts/lib/CIME/XML/component.py b/scripts/lib/CIME/XML/component.py index e0eed6c417b..1f4ae93162c 100644 --- a/scripts/lib/CIME/XML/component.py +++ b/scripts/lib/CIME/XML/component.py @@ -75,7 +75,7 @@ def _get_value_match(self, node, attributes=None, exact_match=False): for valnode in self.get_nodes("value", root=node): # loop through all the keys in valnode (value nodes) attributes - for key,value in valnode.attrib.iteritems(): + for key,value in valnode.attrib.items(): # determine if key is in attributes dictionary match_count = 0 if attributes is not None and key in attributes: @@ -281,6 +281,6 @@ def 
print_values(self): compsets[attrib] = text logger.info(" {}".format(helptext)) - for v in sorted(compsets.iteritems()): + for v in sorted(compsets.items()): label, definition = v logger.info(" {:20s} : {}".format(label, definition)) diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py index f883511689b..d120bb30602 100644 --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -84,7 +84,7 @@ def print_values(self, arg_help=True): logger.info(" --------------------------------------") logger.info(" Compset Alias: Compset Long Name ") logger.info(" --------------------------------------") - for key in sorted(compsets_text.iterkeys()): + for key in sorted(compsets_text.keys()): logger.info(" {:20} : {}".format(key, compsets_text[key])) def return_all_values(self): @@ -92,11 +92,11 @@ def return_all_values(self): science_compsets = dict() help_text = self.get_value(name="help") compsets_text = self.get_value("names") - for key in sorted(compsets_text.iterkeys()): + for key in sorted(compsets_text.keys()): all_compsets[key] = compsets_text[key] # get the matching science support grids - for alias in all_compsets.iterkeys(): + for alias in all_compsets.keys(): science_compsets[alias] = self.get_compset_match(alias) return help_text, all_compsets diff --git a/scripts/lib/CIME/XML/entry_id.py b/scripts/lib/CIME/XML/entry_id.py index 94b2aca8423..8928a52add5 100644 --- a/scripts/lib/CIME/XML/entry_id.py +++ b/scripts/lib/CIME/XML/entry_id.py @@ -39,7 +39,7 @@ def set_default_value(self, vid, val): if node is not None: val = self.set_element_text("default_value", val, root=node) if val is None: - logger.warn("Called set_default_value on a node without default_value field") + logger.warning("Called set_default_value on a node without default_value field") return val @@ -211,7 +211,7 @@ def _set_valid_values(self, node, new_valid_values): current_value = node.get("value") valid_values_list = self._get_valid_values(node) if 
current_value is not None and current_value not in valid_values_list: - logger.warn("WARNING: Current setting for {} not in new valid values. Updating setting to \"{}\"".format(node.get("id"), valid_values_list[0])) + logger.warning("WARNING: Current setting for {} not in new valid values. Updating setting to \"{}\"".format(node.get("id"), valid_values_list[0])) self._set_value(node, valid_values_list[0]) return new_valid_values diff --git a/scripts/lib/CIME/XML/env_base.py b/scripts/lib/CIME/XML/env_base.py index 1540139c104..9f78d9a6d2c 100644 --- a/scripts/lib/CIME/XML/env_base.py +++ b/scripts/lib/CIME/XML/env_base.py @@ -1,7 +1,6 @@ """ Base class for env files. This class inherits from EntryID.py """ -import string from CIME.XML.standard_module_setup import * from CIME.XML.entry_id import EntryID from CIME.XML.headers import Headers @@ -31,28 +30,36 @@ def set_components(self, components): self._components = components def check_if_comp_var(self, vid, attribute=None): - # pylint: disable=no-member - if not hasattr(self, "_component_value_list") or\ - (self.get_nodes("entry", {"id" : vid}) and \ - not vid in self._component_value_list): - return vid, None, False - + nodes = self.get_nodes("entry", {"id" : vid}) + node = None comp = None - if vid in self._component_value_list: - if attribute is not None: - if "component" in attribute: - comp = attribute["component"] - return vid, comp, True - - for comp in self._components: - if "_"+comp in vid: - vid = string.replace(vid, '_'+comp, '', 1) - break - elif comp+"_" in vid: - vid = string.replace(vid, comp+'_', '', 1) - break - if vid in self._component_value_list: - return vid, comp, True + if len(nodes): + node = nodes[0] + if node: + valnodes = node.findall(".//value[@compclass]") + if len(valnodes) == 0: + logger.debug("vid {} is not a compvar".format(vid)) + return vid, None, False + else: + logger.debug("vid {} is a compvar".format(vid)) + if attribute is not None: + comp = attribute["compclass"] + return 
vid, comp, True + else: + if hasattr(self, "_components"): + new_vid = None + for comp in self._components: + if "_"+comp in vid: + new_vid = vid.replace('_'+comp, '', 1) + elif comp+"_" in vid: + new_vid = vid.replace(comp+'_', '', 1) + + if new_vid is not None: + break + if new_vid is not None: + logger.debug("vid {} is a compvar with comp {}".format(vid, comp)) + return new_vid, comp, True + return vid, None, False def get_value(self, vid, attribute=None, resolved=True, subgroup=None): @@ -72,9 +79,9 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None): logger.debug("Not enough info to get value for {}".format(vid)) return value if attribute is None: - attribute = {"component" : comp} + attribute = {"compclass" : comp} else: - attribute["component"] = comp + attribute["compclass"] = comp node = self.get_optional_node("entry", {"id":vid}) if node is not None: type_str = self._get_type_info(node) @@ -101,19 +108,19 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False): if iscompvar and comp is None: # pylint: disable=no-member for comp in self._components: - val = self._set_value(node, value, vid, subgroup, ignore_type, component=comp) + val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) else: - val = self._set_value(node, value, vid, subgroup, ignore_type, component=comp) + val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) return val # pylint: disable=arguments-differ - def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, component=None): + def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None): if vid is None: vid = node.get("id") vid, _, iscompvar = self.check_if_comp_var(vid, None) if iscompvar: - attribute = {"component":component} + attribute = {"compclass":compclass} str_value = self.get_valid_value_string(node, value, vid, ignore_type) val = self.set_element_text("value", str_value, attribute, 
root=node) else: @@ -136,8 +143,18 @@ def cleanupnode(self, node): if dnode is not None: node.remove(dnode) vnode = node.find(".//values") - vid = node.get("id") - _, _, iscompvar = self.check_if_comp_var(vid) - if vnode is not None and not iscompvar: - node.remove(vnode) + if vnode is not None: + componentatt = vnode.findall(".//value[@component=\"ATM\"]") + # backward compatibility (compclasses and component were mixed + # now we seperated into component and compclass) + if len(componentatt) > 0: + for ccnode in vnode.findall(".//value[@component]"): + val = ccnode.attrib.get("component") + ccnode.attrib.pop("component") + ccnode.set("compclass",val) + compclassatt = vnode.findall(".//value[@compclass]") + + if len(compclassatt) == 0: + node.remove(vnode) + return node diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py index dd378eec313..86e496ddc1b 100644 --- a/scripts/lib/CIME/XML/env_batch.py +++ b/scripts/lib/CIME/XML/env_batch.py @@ -21,8 +21,8 @@ def __init__(self, case_root=None, infile="env_batch.xml"): """ initialize an object interface to file env_batch.xml in the case directory """ - self.prereq_jobid = None - self.batchtype = None + self._prereq_jobid = None + self._batchtype = None # This arbitrary setting should always be overwritten self._default_walltime = "00:20:00" schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_batch.xsd") @@ -155,68 +155,60 @@ def set_batch_system(self, batchobj, batch_system_type=None): if batchobj.machine_node is not None: self.root.append(deepcopy(batchobj.machine_node)) - def make_batch_script(self, input_template, job, case, total_tasks, tasks_per_node, num_nodes, thread_count): + def make_batch_script(self, input_template, job, case): expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template)) - self.tasks_per_node = tasks_per_node - self.num_tasks = total_tasks - self.tasks_per_numa = tasks_per_node / 2 - self.thread_count = 
thread_count task_count = self.get_value("task_count", subgroup=job) + overrides = {} + if task_count is not None: + overrides["total_tasks"] = int(task_count) + overrides["num_nodes"] = int(math.ceil(float(task_count)/float(case.tasks_per_node))) - if task_count is None: - self.total_tasks = total_tasks - self.num_nodes = num_nodes - else: - self.total_tasks = int(task_count) - self.num_nodes = int(math.ceil(float(task_count)/float(tasks_per_node))) - - self.pedocumentation = "" - self.job_id = case.get_value("CASE") + os.path.splitext(job)[1] + overrides["pedocumentation"] = "" # TODO? + overrides["job_id"] = case.get_value("CASE") + os.path.splitext(job)[1] if "pleiades" in case.get_value("MACH"): # pleiades jobname needs to be limited to 15 chars - self.job_id = self.job_id[:15] - self.output_error_path = self.job_id + overrides["job_id"] = overrides["job_id"][:15] - self.batchdirectives = self.get_batch_directives(case, job) + overrides["batchdirectives"] = self.get_batch_directives(case, job, overrides=overrides) - output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, check_members=self) + output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, overrides=overrides) with open(job, "w") as fd: fd.write(output_text) os.chmod(job, os.stat(job).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - def set_job_defaults(self, batch_jobs, pesize=None, num_nodes=None, tasks_per_node=None, walltime=None, force_queue=None, allow_walltime_override=False): - if self.batchtype is None: - self.batchtype = self.get_batch_system_type() + def set_job_defaults(self, batch_jobs, case): + walltime = case.get_value("USER_REQUESTED_WALLTIME") if case.get_value("USER_REQUESTED_WALLTIME") else None + force_queue = case.get_value("USER_REQUESTED_QUEUE") if case.get_value("USER_REQUESTED_QUEUE") else None - if self.batchtype == 'none': + if self._batchtype is None: + self._batchtype = self.get_batch_system_type() + + if 
self._batchtype == 'none': return for job, jsect in batch_jobs: task_count = jsect["task_count"] if "task_count" in jsect else None if task_count is None: - task_count = pesize - node_count = num_nodes + node_count = case.num_nodes else: - expect(tasks_per_node is not None, "Must provide tasks_per_node for custom task_count job '{}'".format(job)) - task_count = task_count - node_count = int(math.ceil(float(task_count)/float(tasks_per_node))) + node_count = int(math.ceil(float(task_count)/float(case.tasks_per_node))) if force_queue: - if not self.queue_meets_spec(force_queue, task_count, node_count, walltime=walltime, job=job): + if not self.queue_meets_spec(force_queue, node_count, walltime=walltime, job=job): logger.warning("WARNING: User-requested queue '{}' does not meet requirements for job '{}'".format(force_queue, job)) queue = force_queue else: - queue = self.select_best_queue(task_count, node_count, walltime=walltime, job=job) + queue = self.select_best_queue(node_count, walltime=walltime, job=job) if queue is None and walltime is not None: # Try to see if walltime was the holdup - queue = self.select_best_queue(task_count, node_count, walltime=None, job=job) + queue = self.select_best_queue(node_count, walltime=None, job=job) if queue is not None: # It was, override the walltime if a test, otherwise just warn the user - new_walltime = self._get_queue_specs(queue)[5] + new_walltime = self.get_queue_specs(queue)[3] expect(new_walltime is not None, "Should never make it here") logger.warning("WARNING: Requested walltime '{}' could not be matched by any queue".format(walltime)) - if allow_walltime_override: + if case.get_value("TEST"): logger.warning(" Using walltime '{}' instead".format(new_walltime)) walltime = new_walltime else: @@ -226,16 +218,16 @@ def set_job_defaults(self, batch_jobs, pesize=None, num_nodes=None, tasks_per_no logger.warning("WARNING: No queue on this system met the requirements for this job. 
Falling back to defaults") default_queue_node = self.get_default_queue() queue = default_queue_node.text - walltime = self._get_queue_specs(queue)[5] + walltime = self.get_queue_specs(queue)[3] if walltime is None: # Figure out walltime - specs = self._get_queue_specs(queue) + specs = self.get_queue_specs(queue) if specs is None: # Queue is unknown, use specs from default queue walltime = self.get_default_queue().get("walltimemax") else: - walltime = specs[5] + walltime = specs[3] walltime = self._default_walltime if walltime is None else walltime # last-chance fallback @@ -243,7 +235,7 @@ def set_job_defaults(self, batch_jobs, pesize=None, num_nodes=None, tasks_per_no self.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job) logger.debug("Job {} queue {} walltime {}".format(job, queue, walltime)) - def get_batch_directives(self, case, job, raw=False): + def get_batch_directives(self, case, job, overrides=None): """ """ result = [] @@ -257,10 +249,11 @@ def get_batch_directives(self, case, job, raw=False): for node in nodes: directive = self.get_resolved_value("" if node.text is None else node.text) default = node.get("default") - if not raw: - directive = transform_vars(directive, case=case, subgroup=job, default=default, check_members=self) - elif default is not None: + if default is None: + directive = transform_vars(directive, case=case, subgroup=job, default=default, overrides=overrides) + else: directive = transform_vars(directive, default=default) + result.append("{} {}".format(directive_prefix, directive)) return "\n".join(result) @@ -279,7 +272,7 @@ def get_submit_args(self, case, job): for arg in submit_arg_nodes: flag = arg.get("flag") name = arg.get("name") - if self.batchtype == "cobalt" and job == "case.st_archive": + if self._batchtype == "cobalt" and job == "case.st_archive": if flag == "-n": name = 'task_count' if flag == "--mode": @@ -351,7 +344,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, if prereq: 
jobs.append((job, self.get_value('dependency', subgroup=job))) - if self.batchtype == "cobalt": + if self._batchtype == "cobalt": break depid = OrderedDict() @@ -363,10 +356,10 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, else: deps = [] jobid = "" - if self.prereq_jobid is not None: - jobid = self.prereq_jobid + if self._prereq_jobid is not None: + jobid = self._prereq_jobid for dep in deps: - if dep in depid.keys() and depid[dep] is not None: + if dep in depid and depid[dep] is not None: jobid += " " + str(depid[dep]) #TODO: doubt these will be used # elif dep == "and": @@ -379,7 +372,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, if slen == 0: jobid = None - logger.warn("job is {}".format(job)) + logger.warning("job is {}".format(job)) result = self._submit_single_job(case, job, jobid, no_batch=no_batch, skip_pnl=skip_pnl, @@ -390,7 +383,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, batch_job_id = str(alljobs.index(job)) if dry_run else result depid[job] = batch_job_id jobcmds.append( (job, result) ) - if self.batchtype == "cobalt": + if self._batchtype == "cobalt": break if dry_run: @@ -401,7 +394,7 @@ def submit_jobs(self, case, no_batch=False, job=None, skip_pnl=False, def _submit_single_job(self, case, job, depid=None, no_batch=False, skip_pnl=False, mail_user=None, mail_type='never', batch_args=None, dry_run=False): - logger.warn("Submit job {}".format(job)) + logger.warning("Submit job {}".format(job)) batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) if batch_system is None or batch_system == "none" or no_batch: # Import here to avoid circular include @@ -449,7 +442,11 @@ def _submit_single_job(self, case, job, depid=None, no_batch=False, submitcmd += string + " " if job == 'case.run' and skip_pnl: - submitcmd += " --skip-preview-namelist" + batch_env_flag = self.get_value("batch_env", subgroup=None) + if not batch_env_flag: + submitcmd += " 
--skip-preview-namelist" + else: + submitcmd += " {} ARGS_FOR_SCRIPT='--skip-preview-namelist'".format(batch_env_flag) if dry_run: return submitcmd @@ -473,11 +470,11 @@ def get_batch_system_type(self): for node in nodes: type_ = node.get("type") if type_ is not None: - self.batchtype = type_ - return self.batchtype + self._batchtype = type_ + return self._batchtype def set_batch_system_type(self, batchtype): - self.batchtype = batchtype + self._batchtype = batchtype def get_job_id(self, output): jobid_pattern = self.get_value("jobid_pattern", subgroup=None) @@ -488,22 +485,21 @@ def get_job_id(self, output): jobid = search_match.group(1) return jobid - def queue_meets_spec(self, queue, num_pes, num_nodes, walltime=None, job=None): - specs = self._get_queue_specs(queue) + def queue_meets_spec(self, queue, num_nodes, walltime=None, job=None): + specs = self.get_queue_specs(queue) if specs is None: logger.warning("WARNING: queue '{}' is unknown to this system".format(queue)) return True - jobmin, jobmax, nodemin, nodemax, jobname, walltimemax, strict = specs + nodemin, nodemax, jobname, walltimemax, strict = specs # A job name match automatically meets spec if job is not None and jobname is not None: return jobname == job - for minval, maxval, val in [(jobmin, jobmax, num_pes), (nodemin, nodemax, num_nodes)]: - if (minval is not None and val < int(minval)) or \ - (maxval is not None and val > int(maxval)): - return False + if nodemin is not None and num_nodes < int(nodemin) or \ + nodemax is not None and num_nodes > int(nodemax): + return False if walltime is not None and walltimemax is not None and strict: walltime_s = convert_to_seconds(walltime) @@ -513,7 +509,7 @@ def queue_meets_spec(self, queue, num_pes, num_nodes, walltime=None, job=None): return True - def select_best_queue(self, num_pes, num_nodes, walltime=None, job=None): + def select_best_queue(self, num_nodes, walltime=None, job=None): # Make sure to check default queue first. 
all_queues = [] all_queues.append( self.get_default_queue()) @@ -521,28 +517,26 @@ def select_best_queue(self, num_pes, num_nodes, walltime=None, job=None): for queue in all_queues: if queue is not None: qname = queue.text - if self.queue_meets_spec(qname, num_pes, num_nodes, walltime=walltime, job=job): + if self.queue_meets_spec(qname, num_nodes, walltime=walltime, job=job): return qname return None - def _get_queue_specs(self, queue): + def get_queue_specs(self, queue): """ Get queue specifications by name. - Returns (jobmin, jobmax, jobname, walltimemax, is_strict) + Returns (nodemin, nodemax, jobname, walltimemax, is_strict) """ for queue_node in self.get_all_queues(): if queue_node.text == queue: - jobmin = queue_node.get("jobmin") - jobmax = queue_node.get("jobmax") nodemin = queue_node.get("nodemin") nodemax = queue_node.get("nodemax") jobname = queue_node.get("jobname") walltimemax = queue_node.get("walltimemax") strict = queue_node.get("strict") == "true" - return jobmin, jobmax, nodemin, nodemax, jobname, walltimemax, strict + return nodemin, nodemax, jobname, walltimemax, strict return None @@ -581,3 +575,17 @@ def get_status(self, jobid): logger.warning("Batch query command '{}' failed with error '{}'".format(cmd, err)) else: return out.strip() + + def cancel_job(self, jobid): + batch_cancel = self.get_optional_node("batch_cancel") + if batch_cancel is None: + logger.warning("Batch cancellation not supported on this platform") + return False + else: + cmd = batch_cancel.text + " " + str(jobid) + + status, out, err = run_cmd(cmd) + if status != 0: + logger.warning("Batch cancel command '{}' failed with error '{}'".format(cmd, out + "\n" + err)) + else: + return True diff --git a/scripts/lib/CIME/XML/env_mach_pes.py b/scripts/lib/CIME/XML/env_mach_pes.py index 2995aeefc7b..52c5828ce48 100644 --- a/scripts/lib/CIME/XML/env_mach_pes.py +++ b/scripts/lib/CIME/XML/env_mach_pes.py @@ -14,27 +14,26 @@ def __init__(self, case_root=None, 
infile="env_mach_pes.xml", components=None): initialize an object interface to file env_mach_pes.xml in the case directory """ self._components = components - self._component_value_list = ["NTASKS", "NTHRDS", "NINST", - "ROOTPE", "PSTRID", "NINST_LAYOUT"] schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_mach_pes.xsd") EnvBase.__init__(self, case_root, infile, schema=schema) - def get_value(self, vid, attribute=None, resolved=True, subgroup=None, pes_per_node=None): # pylint: disable=arguments-differ + def get_value(self, vid, attribute=None, resolved=True, subgroup=None, MAX_MPITASKS_PER_NODE=None): # pylint: disable=arguments-differ # Special variable NINST_MAX is used to determine the number of # drivers in multi-driver mode. if vid == "NINST_MAX": value = 1 for comp in self._components: - value = max(value, self.get_value("NINST_{}".format(comp))) + if comp != "CPL": + value = max(value, self.get_value("NINST_{}".format(comp))) return value value = EnvBase.get_value(self, vid, attribute, resolved, subgroup) if "NTASKS" in vid or "ROOTPE" in vid: - if pes_per_node is None: - pes_per_node = self.get_value("PES_PER_NODE") + if MAX_MPITASKS_PER_NODE is None: + MAX_MPITASKS_PER_NODE = self.get_value("MAX_MPITASKS_PER_NODE") if value is not None and value < 0: - value = -1*value*pes_per_node + value = -1*value*MAX_MPITASKS_PER_NODE return value @@ -60,7 +59,7 @@ def get_max_thread_count(self, comp_classes): ''' Find the maximum number of openmp threads for any component in the case ''' max_threads = 1 for comp in comp_classes: - threads = self.get_value("NTHRDS",attribute={"component":comp}) + threads = self.get_value("NTHRDS",attribute={"compclass":comp}) expect(threads is not None, "Error no thread count found for component class {}".format(comp)) if threads > max_threads: max_threads = threads @@ -71,7 +70,7 @@ def get_cost_pes(self, totaltasks, max_thread_count, machine=None): figure out the value of COST_PES which is the pe value used to 
estimate model cost """ expect(totaltasks > 0,"totaltasks > 0 expected totaltasks = {}".format(totaltasks)) - pespn = self.get_value("PES_PER_NODE") + pespn = self.get_value("MAX_MPITASKS_PER_NODE") num_nodes, spare_nodes = self.get_total_nodes(totaltasks, max_thread_count) num_nodes += spare_nodes # This is hardcoded because on yellowstone by default we @@ -85,10 +84,12 @@ def get_total_tasks(self, comp_classes): total_tasks = 0 maxinst = 1 for comp in comp_classes: - ntasks = self.get_value("NTASKS", attribute={"component":comp}) - rootpe = self.get_value("ROOTPE", attribute={"component":comp}) - pstrid = self.get_value("PSTRID", attribute={"component":comp}) - maxinst = max(maxinst, self.get_value("NINST", attribute={"component":comp})) + ntasks = self.get_value("NTASKS", attribute={"compclass":comp}) + rootpe = self.get_value("ROOTPE", attribute={"compclass":comp}) + pstrid = self.get_value("PSTRID", attribute={"compclass":comp}) + if comp != "CPL": + ninst = self.get_value("NINST", attribute={"compclass":comp}) + maxinst = max(maxinst, ninst) tt = rootpe + (ntasks - 1) * pstrid + 1 total_tasks = max(tt, total_tasks) if self.get_value("MULTI_DRIVER"): @@ -96,9 +97,9 @@ def get_total_tasks(self, comp_classes): return total_tasks def get_tasks_per_node(self, total_tasks, max_thread_count): - expect(total_tasks > 0,"totaltasks > 0 expected totaltasks = {}".format(total_tasks)) + expect(total_tasks > 0,"totaltasks > 0 expected, totaltasks = {}".format(total_tasks)) tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")/ max_thread_count, - self.get_value("PES_PER_NODE"), total_tasks) + self.get_value("MAX_MPITASKS_PER_NODE"), total_tasks) return tasks_per_node if tasks_per_node > 0 else 1 def get_total_nodes(self, total_tasks, max_thread_count): diff --git a/scripts/lib/CIME/XML/env_mach_specific.py b/scripts/lib/CIME/XML/env_mach_specific.py index 50c830ce7d6..c30e66be08d 100644 --- a/scripts/lib/CIME/XML/env_mach_specific.py +++ 
b/scripts/lib/CIME/XML/env_mach_specific.py @@ -261,9 +261,9 @@ def _load_modules_generic(self, modules_to_load): cmd = "source {}".format(sh_init_cmd) - if os.environ.has_key("SOFTENV_ALIASES"): + if "SOFTENV_ALIASES" in os.environ: cmd += " && source $SOFTENV_ALIASES" - if os.environ.has_key("SOFTENV_LOAD"): + if "SOFTENV_LOAD" in os.environ: cmd += " && source $SOFTENV_LOAD" for action,argument in modules_to_load: @@ -343,7 +343,7 @@ def get_module_system_cmd_path(self, lang): cmd_nodes = self.get_optional_node("cmd_path", attributes={"lang":lang}) return cmd_nodes.text if cmd_nodes is not None else None - def get_mpirun(self, case, attribs, check_members=None, job="case.run", exe_only=False): + def get_mpirun(self, case, attribs, job="case.run", exe_only=False): """ Find best match, return (executable, {arg_name : text}) """ @@ -359,7 +359,7 @@ def get_mpirun(self, case, attribs, check_members=None, job="case.run", exe_only matches = 0 is_default = False - for key, value in attribs.iteritems(): + for key, value in attribs.items(): expect(key in self._allowed_mpi_attributes, "Unexpected key {} in mpirun attributes".format(key)) if key in xml_attribs: if xml_attribs[key].lower() == "false": @@ -405,7 +405,6 @@ def get_mpirun(self, case, attribs, check_members=None, job="case.run", exe_only arg_value = transform_vars(arg_node.text, case=case, subgroup=job, - check_members=check_members, default=arg_node.get("default")) args.append(arg_value) diff --git a/scripts/lib/CIME/XML/env_run.py b/scripts/lib/CIME/XML/env_run.py index 5bc59cb38e2..444af78a210 100644 --- a/scripts/lib/CIME/XML/env_run.py +++ b/scripts/lib/CIME/XML/env_run.py @@ -14,8 +14,6 @@ def __init__(self, case_root=None, infile="env_run.xml", components=None): initialize an object interface to file env_run.xml in the case directory """ self._components = components - self._component_value_list = ["PIO_TYPENAME", "PIO_STRIDE", "PIO_REARRANGER", - "PIO_NUMTASKS", "PIO_ROOT", "PIO_NETCDF_FORMAT"] schema = 
os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd") EnvBase.__init__(self, case_root, infile, schema=schema) diff --git a/scripts/lib/CIME/XML/generic_xml.py b/scripts/lib/CIME/XML/generic_xml.py index 1321e587368..770d4f006a6 100644 --- a/scripts/lib/CIME/XML/generic_xml.py +++ b/scripts/lib/CIME/XML/generic_xml.py @@ -5,6 +5,7 @@ from CIME.XML.standard_module_setup import * from distutils.spawn import find_executable import getpass +import six logger = logging.getLogger(__name__) @@ -122,9 +123,9 @@ def get_nodes(self, nodename, attributes=None, root=None, xpath=None): # one attribute in an xpath query so we query seperately for each attribute # and create a result with the intersection of those lists - for key, value in attributes.iteritems(): + for key, value in attributes.items(): if value is not None: - expect(isinstance(value, basestring), + expect(isinstance(value, six.string_types), " Bad value passed for key {}".format(key)) xpath = ".//{}[@{}=\'{}\']".format(nodename, key, value) logger.debug("xpath is {}".format(xpath)) @@ -195,8 +196,8 @@ def get_resolved_value(self, raw_value): '4' >>> obj.get_resolved_value("0001-01-01") '0001-01-01' - >>> obj.get_resolved_value("$SHELL{echo hi}") - 'hi' + >>> obj.get_resolved_value("$SHELL{echo hi}") == 'hi' + True """ logger.debug("raw_value {}".format(raw_value)) reference_re = re.compile(r'\${?(\w+)}?') @@ -266,7 +267,7 @@ def validate_xml_file(self, filename, schema): logger.debug("Checking file {} against schema {}".format(filename, schema)) run_cmd_no_fail("{} --noout --schema {} {}".format(xmllint, schema, filename)) else: - logger.warn("xmllint not found, could not validate file {}".format(filename)) + logger.warning("xmllint not found, could not validate file {}".format(filename)) def get_element_text(self, element_name, attributes=None, root=None, xpath=None): element_node = self.get_optional_node(element_name, attributes, root, xpath) diff --git a/scripts/lib/CIME/XML/grids.py 
b/scripts/lib/CIME/XML/grids.py index 634a4be28eb..6d0a208ac3a 100644 --- a/scripts/lib/CIME/XML/grids.py +++ b/scripts/lib/CIME/XML/grids.py @@ -601,7 +601,7 @@ def _get_all_values_v1(self): domain_list.append({'domain':child.tag, 'text':child.text}) grid_info.update({'domains': domain_list}) - + # add mapping files grids = [ ("atm_grid", component_grids[0]), ("lnd_grid", component_grids[1]), ("ocn_grid", component_grids[2]), \ ("rof_grid", component_grids[3]), ("glc_grid", component_grids[5]), ("wav_grid", component_grids[6]) ] @@ -682,16 +682,16 @@ def _get_all_values_v2(self): gridnames = [] for grid_node in grid_nodes: gridnames.append(grid_node.text) - grids += grid_node.get("name") + ":" + grid_node.text + " " + grids += grid_node.get("name") + ":" + grid_node.text + " " grids = " non-default grids are: %s" %grids mask = "" mask_nodes = self.get_nodes("mask", root=model_grid_node) for mask_node in mask_nodes: mask += "\n mask is: %s" %(mask_node.text) - - grids_dict[alias] = {'aliases':aliases, - 'grids':grids, + + grids_dict[alias] = {'aliases':aliases, + 'grids':grids, 'mask':mask } gridnames = set(gridnames) @@ -712,4 +712,3 @@ def return_all_values(self): (default_comp_grids, all_grids) = self._get_all_values_v2() return help_text, default_comp_grids, all_grids - diff --git a/scripts/lib/CIME/XML/machines.py b/scripts/lib/CIME/XML/machines.py index e28a2e84d9f..4108c84e0fa 100644 --- a/scripts/lib/CIME/XML/machines.py +++ b/scripts/lib/CIME/XML/machines.py @@ -298,15 +298,15 @@ def print_values(self): os_ = machine.find("OS") compilers = machine.find("COMPILERS") max_tasks_per_node = machine.find("MAX_TASKS_PER_NODE") - pes_per_node = machine.find("PES_PER_NODE") + MAX_MPITASKS_PER_NODE = machine.find("MAX_MPITASKS_PER_NODE") print( " {} : {} ".format(name , desc.text)) print( " os ", os_.text) print( " compilers ",compilers.text) - if pes_per_node is not None: - print( " pes/node ",pes_per_node.text) + if MAX_MPITASKS_PER_NODE is not None: + print(" 
pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print( " max_tasks/node ",max_tasks_per_node.text) + print(" max_tasks/node ",max_tasks_per_node.text) def return_all_values(self): # return a dictionary of machines @@ -318,20 +318,19 @@ def return_all_values(self): os_ = machine.find("OS") compilers = machine.find("COMPILERS") max_tasks_per_node = machine.find("MAX_TASKS_PER_NODE") - pes_per_node = machine.find("PES_PER_NODE") + MAX_MPITASKS_PER_NODE = machine.find("MAX_MPITASKS_PER_NODE") ppn = '' - if pes_per_node is not None: - ppn = pes_per_node.text + if MAX_MPITASKS_PER_NODE is not None: + ppn = MAX_MPITASKS_PER_NODE.text max_tasks_pn = '' if max_tasks_per_node is not None: max_tasks_pn = max_tasks_per_node.text - mach_dict[name] = { 'description' : desc.text, + mach_dict[name] = { 'description' : desc.text, 'os' : os_.text, 'compilers' : compilers.text, 'pes/node' : ppn, 'max_tasks/node' : max_tasks_pn } return mach_dict - diff --git a/scripts/lib/CIME/XML/namelist_definition.py b/scripts/lib/CIME/XML/namelist_definition.py index 6a8a631015c..a8054d6b6e5 100644 --- a/scripts/lib/CIME/XML/namelist_definition.py +++ b/scripts/lib/CIME/XML/namelist_definition.py @@ -270,7 +270,7 @@ def is_valid_value(self, name, value): if not is_valid_fortran_namelist_literal(type_, scalar): invalid.append(scalar) if len(invalid) > 0: - logger.warn("Invalid values {}".format(invalid)) + logger.warning("Invalid values {}".format(invalid)) return False # Now that we know that the strings as input are valid Fortran, do some @@ -296,7 +296,7 @@ def is_valid_value(self, name, value): if scalar not in compare_list: invalid.append(scalar) if len(invalid) > 0: - logger.warn("Invalid values {}".format(invalid)) + logger.warning("Invalid values {}".format(invalid)) return False # Check size of input array. 
diff --git a/scripts/lib/CIME/XML/pes.py b/scripts/lib/CIME/XML/pes.py index 3eefe78a23e..8502f25836e 100644 --- a/scripts/lib/CIME/XML/pes.py +++ b/scripts/lib/CIME/XML/pes.py @@ -114,11 +114,11 @@ def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, overrid compset_choice = compset_match pesize_choice = pesize_match elif points == max_points: - logger.warn("mach_choice {} mach_match {}".format(mach_choice, mach_match)) - logger.warn("grid_choice {} grid_match {}".format(grid_choice, grid_match)) - logger.warn("compset_choice {} compset_match {}".format(compset_choice, compset_match)) - logger.warn("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) - logger.warn("points = {:d}".format(points)) + logger.warning("mach_choice {} mach_match {}".format(mach_choice, mach_match)) + logger.warning("grid_choice {} grid_match {}".format(grid_choice, grid_match)) + logger.warning("compset_choice {} compset_match {}".format(compset_choice, compset_match)) + logger.warning("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) + logger.warning("points = {:d}".format(points)) expect(False, "More than one PE layout matches given PE specs") if not override: for node in pe_select: diff --git a/scripts/lib/CIME/XML/test_reporter.py b/scripts/lib/CIME/XML/test_reporter.py index 1dd841bc6a2..01fda6313c3 100644 --- a/scripts/lib/CIME/XML/test_reporter.py +++ b/scripts/lib/CIME/XML/test_reporter.py @@ -2,15 +2,13 @@ Interface to the testreporter xml. 
This class inherits from GenericXML.py """ - +#pylint: disable=import-error +from six.moves import urllib +import six from CIME.XML.standard_module_setup import * from CIME.XML.generic_xml import GenericXML from CIME.utils import expect,get_model -import urllib - - - class TestReporter(GenericXML): def __init__(self): @@ -97,17 +95,16 @@ def push2testdb(self): # Post test result XML to CESM test database # xmlstr = ET.tostring(self.root,method="xml",encoding="UTF-8") - username=raw_input("Username:") + username=six.moves.input("Username:") os.system("stty -echo") - password=raw_input("Password:") + password=six.moves.input("Password:") os.system("stty echo") params={'username':username,'password':password,'testXML':xmlstr} url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" - params = urllib.urlencode(params) - f = urllib.urlopen(url, params) + params = urllib.parse.urlencode(params) + f = urllib.request.urlopen(url, params) # # Print any messages from the post command # print(f.read()) print(f.code) - diff --git a/scripts/lib/CIME/XML/testlist.py b/scripts/lib/CIME/XML/testlist.py index c1fd4c8d48f..38a49650fa2 100644 --- a/scripts/lib/CIME/XML/testlist.py +++ b/scripts/lib/CIME/XML/testlist.py @@ -157,4 +157,3 @@ def get_tests(self, machine=None, category=None, compiler=None, compset=None, gr else: logger.critical("Did not recognize testlist file version {} for file {}" .format(self.get_version(), self.filename)) - diff --git a/scripts/lib/CIME/XML/tests.py b/scripts/lib/CIME/XML/tests.py index 9411e01e609..944f6934687 100644 --- a/scripts/lib/CIME/XML/tests.py +++ b/scripts/lib/CIME/XML/tests.py @@ -32,3 +32,22 @@ def get_test_node(self, testname): node = self.get_node("test",{"NAME":testname}) logger.debug("Found {}".format(node.text)) return node + + def print_values(self, skip_infrastructure_tests=True): + """ + Print each test type and its description. 
+ + If skip_infrastructure_tests is True, then this does not write + information for tests with the attribute + INFRASTRUCTURE_TEST="TRUE". + """ + all_tests = self.get_nodes(nodename="test") + for one_test in all_tests: + if skip_infrastructure_tests: + infrastructure_test = one_test.get("INFRASTRUCTURE_TEST") + if (infrastructure_test is not None and + infrastructure_test.upper() == "TRUE"): + continue + name = one_test.get("NAME") + desc = one_test.find("DESC").text + print("{}: {}".format(name, desc)) diff --git a/scripts/lib/CIME/aprun.py b/scripts/lib/CIME/aprun.py index 272344325ed..97f572cd205 100755 --- a/scripts/lib/CIME/aprun.py +++ b/scripts/lib/CIME/aprun.py @@ -11,7 +11,7 @@ ############################################################################### def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, - max_tasks_per_node, pes_per_node, + max_tasks_per_node, MAX_MPITASKS_PER_NODE, pio_numtasks, pio_async_interface, compiler, machine, run_exe): ############################################################################### @@ -23,23 +23,23 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, >>> rootpes = [0, 0, 512, 0, 680, 512, 512, 0, 0] >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] >>> max_tasks_per_node = 16 - >>> pes_per_node = 16 + >>> MAX_MPITASKS_PER_NODE = 16 >>> pio_numtasks = -1 >>> pio_async_interface = False >>> compiler = "pgi" >>> machine = "titan" >>> run_exe = "acme.exe" - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, MAX_MPITASKS_PER_NODE, pio_numtasks, pio_async_interface, compiler, machine, run_exe) (' -S 4 -n 680 -N 8 -d 2 acme.exe : -S 2 -n 128 -N 4 -d 4 acme.exe ', 117) >>> compiler = "intel" - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, 
pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, MAX_MPITASKS_PER_NODE, pio_numtasks, pio_async_interface, compiler, machine, run_exe) (' -S 4 -cc numa_node -n 680 -N 8 -d 2 acme.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 acme.exe ', 117) >>> ntasks = [64, 64, 64, 64, 64, 64, 64, 64, 1] >>> nthreads = [1, 1, 1, 1, 1, 1, 1, 1, 1] >>> rootpes = [0, 0, 0, 0, 0, 0, 0, 0, 0] >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1] - >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe) + >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, MAX_MPITASKS_PER_NODE, pio_numtasks, pio_async_interface, compiler, machine, run_exe) (' -S 8 -cc numa_node -n 64 -N 16 -d 1 acme.exe ', 4) """ max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node @@ -51,7 +51,7 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, # Check if we need to add pio's tasks to the total task count if pio_async_interface: - total_tasks += pio_numtasks if pio_numtasks > 0 else pes_per_node + total_tasks += pio_numtasks if pio_numtasks > 0 else MAX_MPITASKS_PER_NODE # Compute max threads for each mpi task maxt = [0] * total_tasks @@ -65,16 +65,16 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, c2 += 1 # make sure all maxt values at least 1 - for c1 in xrange(0, total_tasks): + for c1 in range(0, total_tasks): if maxt[c1] < 1: maxt[c1] = 1 # Compute task and thread settings for batch commands tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, aprun_args = \ 0, 1, maxt[0], maxt[0], 0, "" - for c1 in xrange(1, total_tasks): + for c1 in range(1, total_tasks): if maxt[c1] != thread_count: - tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count) + tasks_per_node = 
min(MAX_MPITASKS_PER_NODE, int(max_tasks_per_node / thread_count)) tasks_per_node = min(task_count, tasks_per_node) @@ -98,8 +98,8 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, else: task_count += 1 - if pes_per_node > 0: - tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count) + if MAX_MPITASKS_PER_NODE > 0: + tasks_per_node = min(MAX_MPITASKS_PER_NODE, int(max_tasks_per_node / thread_count)) else: tasks_per_node = max_tasks_per_node / thread_count @@ -135,7 +135,7 @@ def get_aprun_cmd_for_case(case, run_exe): return _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, case.get_value("MAX_TASKS_PER_NODE"), - case.get_value("PES_PER_NODE"), + case.get_value("MAX_MPITASKS_PER_NODE"), case.get_value("PIO_NUMTASKS"), case.get_value("PIO_ASYNC_INTERFACE"), case.get_value("COMPILER"), diff --git a/scripts/lib/CIME/bless_test_results.py b/scripts/lib/CIME/bless_test_results.py index ac8ef41db8d..24623cea371 100644 --- a/scripts/lib/CIME/bless_test_results.py +++ b/scripts/lib/CIME/bless_test_results.py @@ -5,7 +5,7 @@ from CIME.hist_utils import generate_baseline, compare_baseline from CIME.case import Case -import os, glob, time +import os, glob, time, six ############################################################################### def bless_namelists(test_name, report_only, force, baseline_name, baseline_root): @@ -17,7 +17,7 @@ def bless_namelists(test_name, report_only, force, baseline_name, baseline_root) # Update namelist files print("Test '{}' had namelist diff".format(test_name)) if (not report_only and - (force or raw_input("Update namelists (y/n)? ").upper() in ["Y", "YES"])): + (force or six.moves.input("Update namelists (y/n)? 
").upper() in ["Y", "YES"])): create_test_gen_args = " -g {} ".format(baseline_name if get_model() == "cesm" else " -g -b {} ".format(baseline_name)) stat, _, err = run_cmd("{}/create_test {} -n {} --baseline-root {} -o".format(get_scripts_root(), test_name, create_test_gen_args, baseline_root)) if stat != 0: @@ -43,7 +43,7 @@ def bless_history(test_name, testcase_dir_for_test, baseline_name, baseline_root else: print(comments) if (not report_only and - (force or raw_input("Update this diff (y/n)? ").upper() in ["Y", "YES"])): + (force or six.moves.input("Update this diff (y/n)? ").upper() in ["Y", "YES"])): result, comments = generate_baseline(case, baseline_dir=baseline_full_dir) if not result: logging.warning("Hist file bless FAILED for test {}".format(test_name)) diff --git a/scripts/lib/CIME/build.py b/scripts/lib/CIME/build.py index f0eb00eb39a..cf670697486 100644 --- a/scripts/lib/CIME/build.py +++ b/scripts/lib/CIME/build.py @@ -252,7 +252,7 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid bldlog = open(file_build, "r") for line in bldlog: if re.search("Current setting for", line): - logger.warn(line) + logger.warning(line) # clm not a shared lib for ACME if get_model() != "acme" and (buildlist is None or "lnd" in buildlist): @@ -290,7 +290,7 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr t1 = time.time() cmd = os.path.join(caseroot, "SourceMods", "src." 
+ compname, "buildlib") if os.path.isfile(cmd): - logger.warn("WARNING: using local buildlib script for {}".format(compname)) + logger.warning("WARNING: using local buildlib script for {}".format(compname)) else: cmd = os.path.join(config_dir, "buildlib") expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname)) @@ -324,7 +324,7 @@ def _clean_impl(case, cleanlist, clean_all): sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) expect(sharedlibroot is not None,"No SHAREDLIBROOT defined in case") if sharedlibroot != exeroot and os.path.isdir(sharedlibroot): - logging.warn("cleaning directory {}".format(sharedlibroot)) + logging.warning("cleaning directory {}".format(sharedlibroot)) shutil.rmtree(sharedlibroot) else: expect(cleanlist is not None and len(cleanlist) > 0,"Empty cleanlist not expected") @@ -387,7 +387,7 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist): # Retrieve relevant case data # This environment variable gets set for cesm Make and # needs to be unset before building again. 
- if "MODEL" in os.environ.keys(): + if "MODEL" in os.environ: del os.environ["MODEL"] build_threaded = case.get_build_threaded() casetools = case.get_value("CASETOOLS") diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py index 56218106dbe..551f6325d75 100644 --- a/scripts/lib/CIME/buildlib.py +++ b/scripts/lib/CIME/buildlib.py @@ -3,9 +3,9 @@ """ from CIME.XML.standard_module_setup import * +from CIME.case import Case from CIME.utils import parse_args_and_handle_standard_logging_options, setup_standard_logging_options import sys, os, argparse, doctest - logger = logging.getLogger(__name__) ############################################################################### @@ -31,6 +31,14 @@ def parse_input(argv): args = parse_args_and_handle_standard_logging_options(argv, parser) + # Some compilers have trouble with long include paths, setting + # EXEROOT to the relative path from bldroot solves the problem + # doing it in the environment means we don't need to change all of + # the component buildlib scripts + with Case(args.caseroot) as case: + os.environ["EXEROOT"] = os.path.relpath(case.get_value("EXEROOT"), args.bldroot) + + return args.caseroot, args.libroot, args.bldroot def build_cime_component_lib(case, compname, libroot, bldroot): @@ -77,7 +85,4 @@ def run_gmake(case, compclass, libroot, bldroot, libname="", user_cppdefs=""): if user_cppdefs: cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs ) - rc, out, err = run_cmd(cmd) - expect(rc == 0, "Command {} failed rc={:d}\nout={}\nerr={}".format(cmd, rc, out, err)) - - print "Command {} completed with output {}\nerr {}".format(cmd, out, err) + run_cmd_no_fail(cmd, combine_output=True) diff --git a/scripts/lib/CIME/buildnml.py b/scripts/lib/CIME/buildnml.py index a600f681432..85c4f5b96ce 100644 --- a/scripts/lib/CIME/buildnml.py +++ b/scripts/lib/CIME/buildnml.py @@ -98,7 +98,7 @@ def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""): with open(user_nl_file, "r") as 
file_usernl: lines_input = file_usernl.readlines() else: - logger.warn("WARNING: No file {} found in case directory".format(user_nl_file)) + logger.warning("WARNING: No file {} found in case directory".format(user_nl_file)) lines_output = [] lines_output.append("&comp_inparm \n") diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py index 59428baa3e4..b8f87f97272 100644 --- a/scripts/lib/CIME/case.py +++ b/scripts/lib/CIME/case.py @@ -7,7 +7,8 @@ from copy import deepcopy import glob, os, shutil, math from CIME.XML.standard_module_setup import * - +#pylint: disable=import-error,redefined-builtin +from six.moves import input from CIME.utils import expect, get_cime_root, append_status from CIME.utils import convert_to_type, get_model from CIME.utils import get_project, get_charge_account, check_name @@ -113,14 +114,12 @@ def __init__(self, case_root=None, read_only=True): self.initialize_derived_attributes() def check_if_comp_var(self, vid): - vid = vid - comp = None - iscompvar = False for env_file in self._env_entryid_files: - vid, comp, iscompvar = env_file.check_if_comp_var(vid) + new_vid, new_comp, iscompvar = env_file.check_if_comp_var(vid) if iscompvar: - return vid, comp, iscompvar - return vid, comp, iscompvar + return new_vid, new_comp, iscompvar + + return vid, None, False def initialize_derived_attributes(self): """ @@ -130,18 +129,17 @@ def initialize_derived_attributes(self): env_mach_pes = self.get_env("mach_pes") env_mach_spec = self.get_env('mach_specific') comp_classes = self.get_values("COMP_CLASSES") - pes_per_node = self.get_value("PES_PER_NODE") - + MAX_MPITASKS_PER_NODE = self.get_value("MAX_MPITASKS_PER_NODE") self.total_tasks = env_mach_pes.get_total_tasks(comp_classes) self.thread_count = env_mach_pes.get_max_thread_count(comp_classes) self.tasks_per_node = env_mach_pes.get_tasks_per_node(self.total_tasks, self.thread_count) logger.debug("total_tasks {} thread_count {}".format(self.total_tasks, self.thread_count)) 
self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0)) - smt_factor = max(1,int(self.get_value("MAX_TASKS_PER_NODE") / pes_per_node)) + smt_factor = max(1,int(self.get_value("MAX_TASKS_PER_NODE") / MAX_MPITASKS_PER_NODE)) threads_per_node = self.tasks_per_node * self.thread_count - threads_per_core = 1 if (threads_per_node <= pes_per_node) else smt_factor + threads_per_core = 1 if (threads_per_node <= MAX_MPITASKS_PER_NODE) else smt_factor self.cores_per_task = self.thread_count / threads_per_core mpi_attribs = { @@ -404,15 +402,15 @@ def set_valid_values(self, item, valid_values): return result def set_lookup_value(self, item, value): - if item in self.lookups.keys() and self.lookups[item] is not None: - logger.warn("Item {} already in lookups with value {}".format(item,self.lookups[item])) + if item in self.lookups and self.lookups[item] is not None: + logger.warning("Item {} already in lookups with value {}".format(item,self.lookups[item])) else: logger.debug("Setting in lookups: item {}, value {}".format(item,value)) self.lookups[item] = value def clean_up_lookups(self, allow_undefined=False): # put anything in the lookups table into existing env objects - for key,value in self.lookups.items(): + for key,value in list(self.lookups.items()): logger.debug("lookup key {} value {}".format(key, value)) result = self.set_value(key,value, allow_undefined=allow_undefined) if result is not None: @@ -464,6 +462,11 @@ def _set_compset(self, compset_name, files): return None, science_support + def get_primary_component(self): + if self._primary_component is None: + self._primary_component = self._find_primary_component() + return self._primary_component + def _find_primary_component(self): """ try to glean the primary component based on compset name @@ -524,7 +527,7 @@ def _set_info_from_primary_component(self, files, pesfile=None): Assumes that self._primary_component has already been set. 
""" - component = self._primary_component + component = self.get_primary_component() if pesfile is None: self._pesfile = files.get_value("PES_SPEC_FILE", {"component":component}) @@ -616,7 +619,7 @@ def _get_component_config_data(self, files): if len(self._component_classes) > len(self._components): self._components.append('sesp') - for i in xrange(1,len(self._component_classes)): + for i in range(1,len(self._component_classes)): comp_class = self._component_classes[i] comp_name = self._components[i-1] node_name = 'CONFIG_' + comp_class + '_FILE' @@ -635,11 +638,8 @@ def _get_component_config_data(self, files): for env_file in self._env_entryid_files: env_file.add_elements_by_group(compobj, attributes=attlist) - - self.clean_up_lookups() - def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): #-------------------------------------------- # pe layout @@ -719,10 +719,10 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): mach_pes_obj.set_value(rootpe_str, rootpe) pesize = 1 - pes_per_node = self.get_value("PES_PER_NODE") + MAX_MPITASKS_PER_NODE = self.get_value("MAX_MPITASKS_PER_NODE") for val in totaltasks: if val < 0: - val = -1*val*pes_per_node + val = -1*val*MAX_MPITASKS_PER_NODE if val > pesize: pesize = val if multi_driver: @@ -738,15 +738,14 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): mach_pes_obj.set_value(key, ninst) key = "NTASKS_{}".format(compclass) - if key not in pes_ntasks.keys(): + if key not in pes_ntasks: mach_pes_obj.set_value(key,1) key = "NTHRDS_{}".format(compclass) - if compclass not in pes_nthrds.keys(): + if compclass not in pes_nthrds: mach_pes_obj.set_value(compclass,1) return pesize - def configure(self, compset_name, grid_name, machine_name=None, project=None, pecount=None, compiler=None, mpilib=None, pesfile=None,user_grid=False, gridfile=None, @@ -785,7 +784,7 @@ def configure(self, compset_name, grid_name, machine_name=None, # This needs to be 
called after self.set_comp_classes, which is called # from self._get_component_config_data - self._primary_component = self._find_primary_component() + self._primary_component = self.get_primary_component() self._set_info_from_primary_component(files, pesfile=pesfile) @@ -841,7 +840,7 @@ def configure(self, compset_name, grid_name, machine_name=None, env_mach_specific_obj.populate(machobj) self.schedule_rewrite(env_mach_specific_obj) - pesize = self._setup_mach_pes(pecount, multi_driver, ninst, machine_name, mpilib) + self._setup_mach_pes(pecount, multi_driver, ninst, machine_name, mpilib) if multi_driver and ninst>1: logger.info(" Driver/Coupler has %s instances" % ninst) @@ -898,7 +897,7 @@ def configure(self, compset_name, grid_name, machine_name=None, if os.path.exists(wdir): expect(not test, "Directory {} already exists, aborting test".format(wdir)) if answer is None: - response = raw_input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) + response = input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) else: response = answer @@ -926,6 +925,11 @@ def configure(self, compset_name, grid_name, machine_name=None, #-------------------------------------------- # batch system (must come after initialize_derived_attributes) #-------------------------------------------- + if walltime: + self.set_value("USER_REQUESTED_WALLTIME", walltime) + if queue: + self.set_value("USER_REQUESTED_QUEUE", queue) + env_batch = self.get_env("batch") batch_system_type = machobj.get_value("BATCH_SYSTEM") @@ -934,7 +938,7 @@ def configure(self, compset_name, grid_name, machine_name=None, env_batch.set_batch_system(batch, batch_system_type=batch_system_type) env_batch.create_job_groups(bjobs) - env_batch.set_job_defaults(bjobs, pesize=pesize, num_nodes=self.num_nodes, tasks_per_node=self.tasks_per_node, walltime=walltime, force_queue=queue, allow_walltime_override=test) + env_batch.set_job_defaults(bjobs, self) 
self.schedule_rewrite(env_batch) # Make sure that parallel IO is not specified if total_tasks==1 @@ -991,7 +995,6 @@ def _create_caseroot_tools(self): os.path.join(toolsdir, "preview_run"), os.path.join(toolsdir, "check_input_data"), os.path.join(toolsdir, "check_case"), - os.path.join(toolsdir, "archive_metadata.sh"), os.path.join(toolsdir, "xmlchange"), os.path.join(toolsdir, "xmlquery"), os.path.join(toolsdir, "pelayout")) @@ -1162,20 +1165,41 @@ def submit_jobs(self, no_batch=False, job=None, skip_pnl=False, mail_type=mail_type, batch_args=batch_args, dry_run=dry_run) - def report_job_status(self): + def get_job_info(self): + """ + Get information on batch jobs associated with this case + """ xml_job_ids = self.get_value("JOB_IDS") if not xml_job_ids: - logger.info("No job ids associated with this case. Either case.submit was not run or was run with no-batch") + return {} else: + result = {} job_infos = xml_job_ids.split(", ") # pylint: disable=no-member for job_info in job_infos: jobname, jobid = job_info.split(":") + result[jobname] = jobid + + return result + + def report_job_status(self): + jobmap = self.get_job_info() + if not jobmap: + logger.info("No job ids associated with this case. Either case.submit was not run or was run with no-batch") + else: + for jobname, jobid in jobmap.items(): status = self.get_env("batch").get_status(jobid) if status: logger.info("{}: {}".format(jobname, status)) else: logger.info("{}: Unable to get status. 
Job may be complete already.".format(jobname)) + def cancel_batch_jobs(self, jobids): + env_batch = self.get_env('batch') + for jobid in jobids: + success = env_batch.cancel_job(jobid) + if not success: + logger.warning("Failed to kill {}".format(jobid)) + def get_mpirun_cmd(self, job="case.run"): env_mach_specific = self.get_env('mach_specific') run_exe = env_mach_specific.get_value("run_exe") @@ -1229,7 +1253,7 @@ def set_model_version(self, model): if version != "unknown": logger.info("{} model version found: {}".format(model, version)) else: - logger.warn("WARNING: No {} Model version found.".format(model)) + logger.warning("WARNING: No {} Model version found.".format(model)) def load_env(self, reset=False): if not self._is_env_loaded or reset: @@ -1269,9 +1293,9 @@ def _check_testlists(self, compset_alias, grid_name, files): if test["category"] == "prealpha" or test["category"] == "prebeta" or "aux_" in test["category"]: testcnt += 1 if testcnt > 0: - logger.warn("\n*********************************************************************************************************************************") - logger.warn("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) - logger.warn("*********************************************************************************************************************************\n") + logger.warning("\n*********************************************************************************************************************************") + logger.warning("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) + logger.warning("*********************************************************************************************************************************\n") else: expect(False, "\nThis compset and grid combination is untested in CESM. 
" "Override this warning with the --run-unsupported option to create_newcase.", @@ -1291,7 +1315,7 @@ def set_file(self, xmlfile): self.flush(flushall=True) - logger.warn("setting case file to {}".format(xmlfile)) + logger.warning("setting case file to {}".format(xmlfile)) components = self.get_value("COMP_CLASSES") new_env_file = None for env_file in self._env_entryid_files: @@ -1399,15 +1423,18 @@ def create(self, casename, srcroot, compset_name, grid_name, except: if os.path.exists(self._caseroot): if not logger.isEnabledFor(logging.DEBUG) and not test: - logger.warn("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) + logger.warning("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) shutil.rmtree(self._caseroot) else: - logger.warn("Leaving broken case dir {}".format(self._caseroot)) + logger.warning("Leaving broken case dir {}".format(self._caseroot)) raise - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None, - user_mods_dir=None): + + def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, + cime_output_root=None, exeroot=None, rundir=None, + user_mods_dir=None): """ moved to case_clone """ return create_case_clone(self, newcase, keepexe=keepexe, mach_dir=mach_dir, - project=project, cime_output_root=cime_output_root, - user_mods_dir=user_mods_dir) + project=project, cime_output_root=cime_output_root, + exeroot=exeroot, rundir=rundir, + user_mods_dir=user_mods_dir) diff --git a/scripts/lib/CIME/case_clone.py b/scripts/lib/CIME/case_clone.py index 777ee05406f..00980feb854 100644 --- a/scripts/lib/CIME/case_clone.py +++ b/scripts/lib/CIME/case_clone.py @@ -1,4 +1,4 @@ -import os, glob, shutil, string +import os, glob, shutil from CIME.XML.standard_module_setup import * from CIME.utils import expect from CIME.user_mod_support import apply_user_mods @@ -9,10 +9,15 @@ logger = 
logging.getLogger(__name__) -def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None, - user_mods_dir=None): +def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, + cime_output_root=None, exeroot=None, rundir=None, + user_mods_dir=None): """ Create a case clone + + If exeroot or rundir are provided (not None), sets these directories + to the given paths; if not provided, uses default values for these + directories. It is an error to provide exeroot if keepexe is True. """ if cime_output_root is None: cime_output_root = case.get_value("CIME_OUTPUT_ROOT") @@ -39,14 +44,14 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, olduser = case.get_value("USER") newuser = os.environ.get("USER") if olduser != newuser: - cime_output_root = string.replace(cime_output_root, olduser, newuser) + cime_output_root = cime_output_root.replace(olduser, newuser) newcase.set_value("USER", newuser) newcase.set_value("CIME_OUTPUT_ROOT", cime_output_root) # try to make the new output directory and raise an exception # on any error other than directory already exists. if os.path.isdir(cime_output_root): - expect(os.access(cime_output_root, os.W_OK), "Directory {} is not writable" + expect(os.access(cime_output_root, os.W_OK), "Directory {} is not writable " "by this user. 
Use the --cime-output-root flag to provide a writable " "scratch directory".format(cime_output_root)) else: @@ -63,8 +68,8 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, newcase.set_value("BUILD_COMPLETE","TRUE") orig_bld_complete = case.get_value("BUILD_COMPLETE") if not orig_bld_complete: - logger.warn("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") - logger.warn("Avoid this message by building case one before you clone.\n") + logger.warning("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") + logger.warning("Avoid this message by building case one before you clone.\n") else: newcase.set_value("BUILD_COMPLETE","FALSE") @@ -72,6 +77,14 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, if mach_dir is not None: newcase.set_value("MACHDIR", mach_dir) + # set exeroot and rundir if requested + if exeroot is not None: + expect(not keepexe, "create_case_clone: if keepexe is True, " + "then exeroot cannot be set") + newcase.set_value("EXEROOT", exeroot) + if rundir is not None: + newcase.set_value("RUNDIR", rundir) + # Set project id # Note: we do not just copy this from the clone because it seems likely that # users will want to change this sometimes, especially when cloning another @@ -120,7 +133,7 @@ def create_case_clone(case, newcase, keepexe=False, mach_dir=None, project=None, success, comment = compare_files(os.path.join(newcaseroot, "env_build.xml"), os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) if not success: - logger.warn(comment) + logger.warning(comment) shutil.rmtree(newcase_root) expect(False, "env_build.xml cannot be changed via usermods if keepexe is an option: \n " "Failed to clone case, removed {}\n".format(newcase_root)) diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py index 
e2d09ef7258..0b5f756ff8f 100644 --- a/scripts/lib/CIME/case_run.py +++ b/scripts/lib/CIME/case_run.py @@ -12,10 +12,14 @@ logger = logging.getLogger(__name__) ############################################################################### -def pre_run_check(case, lid, skip_pnl=False): +def pre_run_check(case, lid, skip_pnl=False, da_cycle=0): ############################################################################### # Pre run initialization code.. + if da_cycle > 0: + create_namelists(case, component='cpl') + return + caseroot = case.get_value("CASEROOT") din_loc_root = case.get_value("DIN_LOC_ROOT") batchsubmit = case.get_value("BATCHSUBMIT") @@ -77,10 +81,10 @@ def pre_run_check(case, lid, skip_pnl=False): logger.info("-------------------------------------------------------------------------") ############################################################################### -def _run_model_impl(case, lid, skip_pnl=False): +def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): ############################################################################### - pre_run_check(case, lid, skip_pnl=skip_pnl) + pre_run_check(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) model = case.get_value("MODEL") @@ -141,9 +145,9 @@ def _run_model_impl(case, lid, skip_pnl=False): return lid ############################################################################### -def run_model(case, lid, skip_pnl=False): +def run_model(case, lid, skip_pnl=False, da_cycle=0): ############################################################################### - functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl) + functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) return run_and_log_case_status(functor, "case.run", caseroot=case.get_value("CASEROOT")) ############################################################################### @@ -206,12 +210,12 @@ def resubmit_check(case): # Note that Mira requires special logic dout_s = 
case.get_value("DOUT_S") - logger.warn("dout_s {} ".format(dout_s)) + logger.warning("dout_s {} ".format(dout_s)) mach = case.get_value("MACH") - logger.warn("mach {} ".format(mach)) + logger.warning("mach {} ".format(mach)) testcase = case.get_value("TESTCASE") resubmit_num = case.get_value("RESUBMIT") - logger.warn("resubmit_num {}".format(resubmit_num)) + logger.warning("resubmit_num {}".format(resubmit_num)) # If dout_s is True than short-term archiving handles the resubmit # If dout_s is True and machine is mira submit the st_archive script resubmit = False @@ -287,7 +291,7 @@ def case_run(case, skip_pnl=False): lid, prefix="prerun") case.read_xml() - lid = run_model(case, lid, skip_pnl) + lid = run_model(case, lid, skip_pnl, da_cycle=cycle) save_logs(case, lid) # Copy log files back to caseroot if case.get_value("CHECK_TIMING") or case.get_value("SAVE_TIMING"): get_timing(case, lid) # Run the getTiming script @@ -306,7 +310,7 @@ def case_run(case, skip_pnl=False): save_postrun_provenance(case) - logger.warn("check for resubmit") + logger.warning("check for resubmit") resubmit_check(case) return True diff --git a/scripts/lib/CIME/case_setup.py b/scripts/lib/CIME/case_setup.py index f13068eef3d..ddefb205639 100644 --- a/scripts/lib/CIME/case_setup.py +++ b/scripts/lib/CIME/case_setup.py @@ -49,7 +49,7 @@ def _build_usernl_files(case, model, comp): nlfile = "user_nl_{}".format(comp) model_nl = os.path.join(model_dir, nlfile) if ninst > 1: - for inst_counter in xrange(1, ninst+1): + for inst_counter in range(1, ninst+1): inst_nlfile = "{}_{:04d}".format(nlfile, inst_counter) if not os.path.exists(inst_nlfile): # If there is a user_nl_foo in the case directory, copy it @@ -129,19 +129,22 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component. 
multi_driver = case.get_value("MULTI_DRIVER") for comp in models: + ntasks = case.get_value("NTASKS_{}".format(comp)) if comp == "CPL": continue ninst = case.get_value("NINST_{}".format(comp)) - ntasks = case.get_value("NTASKS_{}".format(comp)) - if ninst > ntasks: - if ntasks == 1: - case.set_value("NTASKS_{}".format(comp), ninst) - else: - expect(False, "NINST_{} value {:d} greater than NTASKS_{} {:d}".format(comp, ninst, comp, ntasks)) - # But the NINST_LAYOUT may only be concurrent in multi_driver mode if multi_driver: expect(case.get_value("NINST_LAYOUT_{}".format(comp)) == "concurrent", "If multi_driver is TRUE, NINST_LAYOUT_{} must be concurrent".format(comp)) + case.set_value("NTASKS_PER_INST_{}".format(comp), ntasks) + else: + if ninst > ntasks: + if ntasks == 1: + case.set_value("NTASKS_{}".format(comp), ninst) + ntasks = ninst + else: + expect(False, "NINST_{} value {:d} greater than NTASKS_{} {:d}".format(comp, ninst, comp, ntasks)) + case.set_value("NTASKS_PER_INST_{}".format(comp), int(ntasks / ninst)) if os.path.exists("case.run"): logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") @@ -165,9 +168,6 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH")) case.set_value("COST_PES", cost_pes) - # Make sure pio settings are consistent - tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count) - case.initialize_derived_attributes() case.set_value("SMP_PRESENT", case.get_build_threaded()) @@ -175,15 +175,18 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False): # create batch files logger.info("Creating batch script case.run") env_batch = case.get_env("batch") - num_nodes = case.num_nodes for job in env_batch.get_jobs(): input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job)) if job == "case.test" and testcase is 
not None and not test_mode: logger.info("Writing {} script".format(job)) - env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) + env_batch.make_batch_script(input_batch_script, job, case) elif job != "case.test": logger.info("Writing {} script from input template {}".format(job, input_batch_script)) - env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count) + env_batch.make_batch_script(input_batch_script, job, case) + + # May need to select new batch settings if pelayout changed (e.g. problem is now too big for prev-selected queue) + env_batch.set_job_defaults([(("case.test" if case.get_value("TEST") else "case.run"), {})], case) + case.schedule_rewrite(env_batch) # Make a copy of env_mach_pes.xml in order to be able # to check that it does not change once case.setup is invoked diff --git a/scripts/lib/CIME/case_st_archive.py b/scripts/lib/CIME/case_st_archive.py index 82aeb40eb80..3b5a08c7ccc 100644 --- a/scripts/lib/CIME/case_st_archive.py +++ b/scripts/lib/CIME/case_st_archive.py @@ -165,8 +165,6 @@ def _get_ninst_info(case, compclass): for i in range(1,ninst+1): if ninst > 1: ninst_strings.append('_' + '{:04d}'.format(i)) - else: - ninst_strings.append('') logger.debug("ninst and ninst_strings are: {} and {} for {}".format(ninst, ninst_strings, compclass)) return ninst, ninst_strings @@ -175,7 +173,7 @@ def _get_ninst_info(case, compclass): def _get_component_archive_entries(case, archive): ############################################################################### """ - Each time this is generator function is called, it yields a tuple + Each time this generator function is called, it yields a tuple (archive_entry, compname, compclass) for one component in this case's compset components. 
""" @@ -214,9 +212,10 @@ def _archive_rpointer_files(casename, ninst_strings, rundir, save_interim_restar # put in a temporary setting for ninst_strings if they are empty # in order to have just one loop over ninst_strings below - if rpointer_content is not 'unset': + if rpointer_content != 'unset': if not ninst_strings: ninst_strings = ["empty"] + for ninst_string in ninst_strings: rpointer_file = temp_rpointer_file rpointer_content = temp_rpointer_content @@ -269,7 +268,7 @@ def _archive_history_files(case, archive, archive_entry, ############################################################################### """ perform short term archiving on history files in rundir - + Not doc-testable due to case and file system dependence """ @@ -289,15 +288,20 @@ def _archive_history_files(case, archive, archive_entry, rundir = case.get_value("RUNDIR") for suffix in archive.get_hist_file_extensions(archive_entry): for i in range(ninst): - if compname == 'dart': - newsuffix = casename + suffix - elif compname.find('mpas') == 0: - newsuffix = compname + '.*' + suffix - else: - if ninst_string: + if ninst_string: + if compname.find('mpas') == 0: + # Not correct, but MPAS' multi-instance name format is unknown. + newsuffix = compname + '.*' + suffix + else: newsuffix = casename + '.' + compname + ".*" + ninst_string[i] + suffix + else: + if compname.find('mpas') == 0: + newsuffix = compname + '.*' + suffix else: newsuffix = casename + '.' 
+ compname + ".*" + suffix + + logger.debug("short term archiving suffix is {} ".format(newsuffix)) + pfile = re.compile(newsuffix) histfiles = [f for f in os.listdir(rundir) if pfile.search(f)] if histfiles: @@ -447,10 +451,6 @@ def _archive_restarts_date_comp(case, archive, archive_entry, pattern = suffix + datename_str pfile = re.compile(pattern) restfiles = [f for f in files if pfile.search(f)] - else: - pattern = suffix - pfile = re.compile(pattern) - restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] for restfile in restfiles: restfile = os.path.basename(restfile) @@ -480,21 +480,20 @@ def _archive_restarts_date_comp(case, archive, archive_entry, srcfile = os.path.join(rundir, restfile) destfile = os.path.join(archive_restdir, restfile) last_restart_file_fn(srcfile, destfile) - logger.info("{} \n{} to \n{}".format( - last_restart_file_fn_msg, srcfile, destfile)) + logger.info("{} {} \n{} to \n{}".format( + "datename_is_last", last_restart_file_fn_msg, srcfile, destfile)) for histfile in histfiles_for_restart: srcfile = os.path.join(rundir, histfile) destfile = os.path.join(archive_restdir, histfile) expect(os.path.isfile(srcfile), "history restart file {} for last date does not exist ".format(srcfile)) shutil.copy(srcfile, destfile) - logger.info("copying \n{} to \n{}".format(srcfile, destfile)) + logger.info("datename_is_last + histfiles_for_restart copying \n{} to \n{}".format(srcfile, destfile)) else: # Only archive intermediate restarts if requested - otherwise remove them if case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'): srcfile = os.path.join(rundir, restfile) destfile = os.path.join(archive_restdir, restfile) - logger.info("moving \n{} to \n{}".format(srcfile, destfile)) expect(os.path.isfile(srcfile), "restart file {} does not exist ".format(srcfile)) archive_file_fn(srcfile, destfile) @@ -516,9 +515,9 @@ def _archive_restarts_date_comp(case, archive, archive_entry, try: os.remove(srcfile) except OSError: - logger.warn("unable to 
remove interim restart file {}".format(srcfile)) + logger.warning("unable to remove interim restart file {}".format(srcfile)) else: - logger.warn("interim restart file {} does not exist".format(srcfile)) + logger.warning("interim restart file {} does not exist".format(srcfile)) return histfiles_savein_rundir @@ -648,7 +647,7 @@ def case_st_archive(case, last_date_str=None, archive_incomplete_logs=True, copy rest_n = case.get_value('REST_N') stop_n = case.get_value('STOP_N') if rest_n < stop_n: - logger.warn('Restart files from end of run will be saved' + logger.warning('Restart files from end of run will be saved' 'interim restart files will be deleted') logger.info("st_archive starting") diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py index 90176e85571..20243b6368e 100644 --- a/scripts/lib/CIME/case_submit.py +++ b/scripts/lib/CIME/case_submit.py @@ -62,13 +62,13 @@ def _submit(case, job=None, resubmit=False, no_batch=False, skip_pnl=False, case.set_value("RUN_WITH_SUBMIT", True) case.flush() - logger.warn("submit_jobs {}".format(job)) + logger.warning("submit_jobs {}".format(job)) job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl, mail_user=mail_user, mail_type=mail_type, batch_args=batch_args) xml_jobids = [] - for jobname, jobid in job_ids.iteritems(): + for jobname, jobid in job_ids.items(): logger.info("Submitted job {} with id {}".format(jobname, jobid)) if jobid: xml_jobids.append("{}:{}".format(jobname, jobid)) diff --git a/scripts/lib/CIME/case_test.py b/scripts/lib/CIME/case_test.py index 1136a133186..0f752b5558f 100644 --- a/scripts/lib/CIME/case_test.py +++ b/scripts/lib/CIME/case_test.py @@ -47,7 +47,7 @@ def case_test(case, testname=None, reset=False): testname = case.get_value('TESTCASE') expect(testname is not None, "testname argument not resolved") - logging.warn("Running test for {}".format(testname)) + logging.warning("Running test for {}".format(testname)) _set_up_signal_handlers() diff 
--git a/scripts/lib/CIME/check_lockedfiles.py b/scripts/lib/CIME/check_lockedfiles.py index f2bc049385d..12959afb6e8 100644 --- a/scripts/lib/CIME/check_lockedfiles.py +++ b/scripts/lib/CIME/check_lockedfiles.py @@ -71,7 +71,7 @@ def check_pelayouts_require_rebuild(case, models): new_inst = case.get_value("NINST_{}".format(comp)) if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst: - logging.warn("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) + logging.warning("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) cleanflag = comp.lower() run_cmd_no_fail("./case.build --clean {}".format(cleanflag)) @@ -107,11 +107,11 @@ def check_lockedfiles(case): f1obj = case.get_env('batch') f2obj = EnvBatch(caseroot, lfile) else: - logging.warn("Locked XML file '{}' is not current being handled".format(fpart)) + logging.warning("Locked XML file '{}' is not current being handled".format(fpart)) continue diffs = f1obj.compare_xml(f2obj) if diffs: - logging.warn("File {} has been modified".format(lfile)) + logging.warning("File {} has been modified".format(lfile)) for key in diffs.keys(): print(" found difference in {} : case {} locked {}" .format(key, repr(diffs[key][0]), repr(diffs[key][1]))) @@ -122,9 +122,9 @@ def check_lockedfiles(case): expect(False, "Cannot change file env_case.xml, please" " recover the original copy from LockedFiles") elif objname == "env_build": - logging.warn("Setting build complete to False") + logging.warning("Setting build complete to False") case.set_value("BUILD_COMPLETE", False) - if "PIO_VERSION" in diffs.keys(): + if "PIO_VERSION" in diffs: case.set_value("BUILD_STATUS", 2) logging.critical("Changing PIO_VERSION requires running " "case.build --clean-all and rebuilding") diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py index 6aae473c5d4..03eac9f1347 100644 --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py 
@@ -16,7 +16,9 @@ def _run_pylint(on_file, interactive): ############################################################################### pylint = find_executable("pylint") - cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation" + cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" + cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + cmd_options += ",logging-format-interpolation,no-name-in-module" cimeroot = get_cime_root() if "scripts/Tools" in on_file: @@ -58,6 +60,8 @@ def _should_pylint_skip(filepath): for dir_to_skip in list_of_directories_to_ignore: if dir_to_skip + "/" in filepath: return True + if filepath == "scripts/lib/six.py": + return True return False @@ -100,6 +104,10 @@ def check_code(files, num_procs=10, interactive=False): # Check every python file files_to_check = get_all_checkable_files() + if "scripts/lib/six.py" in files_to_check: + files_to_check.remove("scripts/lib/six.py") + logger.info("Not checking contributed file six.py") + expect(len(files_to_check) > 0, "No matching files found") # No point in using more threads than files diff --git a/scripts/lib/CIME/compare_namelists.py b/scripts/lib/CIME/compare_namelists.py index b8d75648027..d4b0fca41b6 100644 --- a/scripts/lib/CIME/compare_namelists.py +++ b/scripts/lib/CIME/compare_namelists.py @@ -353,7 +353,7 @@ def _compare_values(name, gold_value, comp_value, case): comments += " list variable '{}' has extra value {}\n".format(name, comp_value_list_item) elif (type(gold_value) is OrderedDict): - for key, gold_value_dict_item in gold_value.iteritems(): + for key, gold_value_dict_item in gold_value.items(): if (key in comp_value): comments += _compare_values("{} dict item {}".format(name, key), gold_value_dict_item, comp_value[key], case) @@ -425,7 +425,7 @@ def 
_compare_namelists(gold_namelists, comp_namelists, case): ... val3 = .false. ... /''' >>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None) - >>> print comments + >>> print(comments) Missing namelist: nml1 Differences in namelist 'nml2': BASE: val21 = 'foo' @@ -493,12 +493,12 @@ def _compare_namelists(gold_namelists, comp_namelists, case): '' """ different_namelists = OrderedDict() - for namelist, gold_names in gold_namelists.iteritems(): + for namelist, gold_names in gold_namelists.items(): if (namelist not in comp_namelists): different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)] else: comp_names = comp_namelists[namelist] - for name, gold_value in gold_names.iteritems(): + for name, gold_value in gold_names.items(): if (name not in comp_names): different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name)) else: @@ -516,7 +516,7 @@ def _compare_namelists(gold_namelists, comp_namelists, case): different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)] comments = "" - for namelist, nlcomment in different_namelists.iteritems(): + for namelist, nlcomment in different_namelists.items(): if len(nlcomment) == 1: comments += nlcomment[0] else: diff --git a/scripts/lib/CIME/compare_test_results.py b/scripts/lib/CIME/compare_test_results.py index ef476df35e0..4d486807060 100644 --- a/scripts/lib/CIME/compare_test_results.py +++ b/scripts/lib/CIME/compare_test_results.py @@ -152,7 +152,7 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test brief_result += " {}".format(compare_comment) brief_result += "\n" - print(brief_result,) + print(brief_result) append_status_cprnc_log(brief_result, logfile_name, test_dir) diff --git a/scripts/lib/CIME/get_timing.py b/scripts/lib/CIME/get_timing.py index 330e365215e..4ae32edbce4 100644 --- a/scripts/lib/CIME/get_timing.py +++ 
b/scripts/lib/CIME/get_timing.py @@ -125,8 +125,11 @@ def _getTiming(self, inst=0): ncpl_base_period = self.case.get_value("NCPL_BASE_PERIOD") ncpl = 0 for compclass in self.case.get_values("COMP_CLASSES"): - ncpl = max(ncpl, self.case.get_value("{}_NCPL".format(compclass))) - ocn_ncpl = self.case.get_value("OCN_NCPL") + comp_ncpl = self.case.get_value("{}_NCPL".format(compclass)) + if compclass == "OCN": + ocn_ncpl = comp_ncpl + if comp_ncpl is not None: + ncpl = max(ncpl, comp_ncpl) compset = self.case.get_value("COMPSET") if compset is None: @@ -137,8 +140,8 @@ def _getTiming(self, inst=0): stop_n = self.case.get_value("STOP_N") cost_pes = self.case.get_value("COST_PES") totalpes = self.case.get_value("TOTALPES") - pes_per_node = self.case.get_value("PES_PER_NODE") - smt_factor = max(1,int(self.case.get_value("MAX_TASKS_PER_NODE") / pes_per_node)) + MAX_MPITASKS_PER_NODE = self.case.get_value("MAX_MPITASKS_PER_NODE") + smt_factor = max(1,int(self.case.get_value("MAX_TASKS_PER_NODE") / MAX_MPITASKS_PER_NODE)) if cost_pes > 0: pecost = cost_pes @@ -179,7 +182,7 @@ def _getTiming(self, inst=0): try: shutil.copyfile(binfilename, finfilename) - except Exception, e: + except Exception as e: if not os.path.isfile(binfilename): logger.critical("File {} not found".format(binfilename)) else: @@ -191,7 +194,7 @@ def _getTiming(self, inst=0): fin = open(finfilename, "r") self.finlines = fin.readlines() fin.close() - except Exception, e: + except Exception as e: logger.critical("Unable to open file {}".format(finfilename)) raise e @@ -228,7 +231,7 @@ def _getTiming(self, inst=0): cpl.offset = 0 try: self.fout = open(foutfilename, "w") - except Exception, e: + except Exception as e: logger.critical("Could not open file for writing: {}".format(foutfilename)) raise e @@ -306,7 +309,7 @@ def _getTiming(self, inst=0): self.write("\n") self.write(" total pes active : {} \n".format(totalpes*maxthrds*smt_factor )) - self.write(" pes per node : {} \n".format(pes_per_node)) + 
self.write(" pes per node : {} \n".format(MAX_MPITASKS_PER_NODE)) self.write(" pe count for cost estimate : {} \n".format(pecost)) self.write("\n") diff --git a/scripts/lib/CIME/hist_utils.py b/scripts/lib/CIME/hist_utils.py index f329d80a26f..6ad007694e3 100644 --- a/scripts/lib/CIME/hist_utils.py +++ b/scripts/lib/CIME/hist_utils.py @@ -409,7 +409,7 @@ def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False): # drop the date so that the name is generic newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("LOGDIR")) if newestcpllogfile is None: - logger.warn("No cpl.log file found in log directory {}".format(case.get_value("LOGDIR"))) + logger.warning("No cpl.log file found in log directory {}".format(case.get_value("LOGDIR"))) else: shutil.copyfile(newestcpllogfile, os.path.join(basegen_dir, "cpl.log.gz")) diff --git a/scripts/lib/CIME/namelist.py b/scripts/lib/CIME/namelist.py index 70b73f9f245..b4613d63ba1 100644 --- a/scripts/lib/CIME/namelist.py +++ b/scripts/lib/CIME/namelist.py @@ -108,6 +108,7 @@ from CIME.XML.standard_module_setup import * from CIME.utils import expect +import six logger = logging.getLogger(__name__) @@ -640,10 +641,7 @@ def literal_to_python_value(literal, type_=None): >>> literal_to_python_value("") >>> literal_to_python_value("-1.D+10") -10000000000.0 - >>> literal_to_python_value("nan(1234)") - Traceback (most recent call last): - ... 
- ValueError: invalid literal for float(): nan(1234) + >>> shouldRaise(ValueError, literal_to_python_value, "nan(1234)") """ expect(FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, "Cannot use repetition syntax in literal_to_python_value") @@ -688,8 +686,8 @@ def expand_literal_list(literals): ['true'] >>> expand_literal_list(['1', '2', 'f*', '3*3', '5']) ['1', '2', 'f*', '3', '3', '3', '5'] - >>> expand_literal_list([u'2*f*']) - [u'f*', u'f*'] + >>> expand_literal_list(['2*f*']) + ['f*', 'f*'] """ expanded = [] for literal in literals: @@ -711,8 +709,8 @@ def compress_literal_list(literals): ['true'] >>> compress_literal_list(['1', '2', 'f*', '3', '3', '3', '5']) ['1', '2', 'f*', '3', '3', '3', '5'] - >>> compress_literal_list([u'f*', u'f*']) - [u'f*', u'f*'] + >>> compress_literal_list(['f*', 'f*']) + ['f*', 'f*'] """ compressed = [] if len(literals) == 0: @@ -732,21 +730,21 @@ def compress_literal_list(literals): # Otherwise, write out the previous literal and start tracking the # new one. 
rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, basestring): + if isinstance(old_literal, six.string_types): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) old_literal = literal num_reps = 1 rep_str = str(num_reps) + '*' if num_reps > 1 else '' - if isinstance(old_literal, basestring): + if isinstance(old_literal, six.string_types): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) return compressed else: for literal in literals: - if isinstance(literal, basestring): + if isinstance(literal, six.string_types): compressed.append(literal) else: compressed.append(str(literal)) @@ -833,7 +831,7 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): "Must specify an input file or text to the namelist parser.") expect(in_file is None or text is None, "Cannot specify both input file and text to the namelist parser.") - if isinstance(in_file, str) or isinstance(in_file, unicode): + if isinstance(in_file, six.string_types): logger.debug("Reading namelist at: {}".format(in_file)) with open(in_file) as in_file_obj: text = in_file_obj.read() @@ -853,6 +851,23 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): return Namelist(namelist_dict) +def shouldRaise(eclass, method, *args, **kw): + """ + A helper function to make doctests py3 compatible + http://python3porting.com/problems.html#running-doctests + """ + try: + method(*args, **kw) + except: + e = sys.exc_info()[1] + if not isinstance(e, eclass): + raise + return + raise Exception("Expected exception %s not raised" % + str(eclass)) + + + class Namelist(object): """Class representing a Fortran namelist. 
@@ -899,9 +914,9 @@ def get_group_names(self): >>> Namelist().get_group_names() [] >>> sorted(parse(text='&foo / &bar /').get_group_names()) - [u'bar', u'foo'] + ['bar', 'foo'] """ - return self._groups.keys() + return list(self._groups.keys()) def get_variable_names(self, group_name): """Return a list of all variables in the given namelist group. @@ -912,38 +927,38 @@ def get_variable_names(self, group_name): [] >>> x = parse(text='&foo bar=,bazz=true,bazz(2)=fred,bang=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bang', u'bar', u'bazz', u'bazz(2)'] + ['bang', 'bar', 'bazz', 'bazz(2)'] >>> x = parse(text='&foo bar=,bazz=true,bang=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bang', u'bar', u'bazz'] + ['bang', 'bar', 'bazz'] >>> x = parse(text='&foo bar(::)=,bazz=false,bazz(2)=true,bazz(:2:)=6*""/') >>> sorted(x.get_variable_names('fOo')) - [u'bar(::)', u'bazz', u'bazz(2)', u'bazz(:2:)'] + ['bar(::)', 'bazz', 'bazz(2)', 'bazz(:2:)'] """ group_name = group_name.lower() if group_name not in self._groups: return [] - return self._groups[group_name].keys() + return list(self._groups[group_name].keys()) def get_variable_value(self, group_name, variable_name): """Return the value of the specified variable. This function always returns a non-empty list containing strings. If the - specified `group_name` or `variable_name` is not present, `[u'']` is + specified `group_name` or `variable_name` is not present, `['']` is returned. 
>>> Namelist().get_variable_value('foo', 'bar') - [u''] + [''] >>> parse(text='&foo bar=1,2 /').get_variable_value('foo', 'bazz') - [u''] + [''] >>> parse(text='&foo bar=1,2 /').get_variable_value('foO', 'Bar') - [u'1', u'2'] + ['1', '2'] """ group_name = group_name.lower() variable_name = variable_name.lower() if group_name not in self._groups or \ variable_name not in self._groups[group_name]: - return [u''] + return [''] return self._groups[group_name][variable_name] @@ -953,17 +968,17 @@ def get_value(self, variable_name): This function is similar to `get_variable_value`, except that it does not require a `group_name`, and it requires that the `variable_name` be unique across all groups. - - >>> parse(text='&foo bar=1 / &bazz bar=1 /').get_value('bar') + + >>> parse(text='&foo bar=1 / &bazz bar=1 /').get_value('bar') # doctest: +ELLIPSIS Traceback (most recent call last): ... - SystemExit: ERROR: Namelist.get_value: Variable {} is present in multiple groups: [u'bazz', u'foo'] + SystemExit: ERROR: Namelist.get_value: Variable {} is present in multiple groups: ... >>> parse(text='&foo bar=1 / &bazz /').get_value('Bar') - [u'1'] + ['1'] >>> parse(text='&foo bar(2)=1 / &bazz /').get_value('Bar(2)') - [u'1'] + ['1'] >>> parse(text='&foo / &bazz /').get_value('bar') - [u''] + [''] """ variable_name = variable_name.lower() possible_groups = [group_name for group_name in self._groups @@ -974,32 +989,32 @@ def get_value(self, variable_name): if possible_groups: return self._groups[possible_groups[0]][variable_name] else: - return [u''] + return [''] def set_variable_value(self, group_name, variable_name, value, var_size=1): """Set the value of the specified variable. 
>>> x = parse(text='&foo bar=1 /') >>> x.get_variable_value('foo', 'bar') - [u'1'] - >>> x.set_variable_value('foo', 'bar(2)', [u'3'], var_size=4) + ['1'] + >>> x.set_variable_value('foo', 'bar(2)', ['3'], var_size=4) >>> x.get_variable_value('foo', 'bar') - [u'1', u'3'] - >>> x.set_variable_value('foo', 'bar(1)', [u'2']) + ['1', '3'] + >>> x.set_variable_value('foo', 'bar(1)', ['2']) >>> x.get_variable_value('foo', 'bar') - [u'2', u'3'] - >>> x.set_variable_value('foo', 'bar', [u'1']) + ['2', '3'] + >>> x.set_variable_value('foo', 'bar', ['1']) >>> x.get_variable_value('foo', 'bar') - [u'1', u'3'] - >>> x.set_variable_value('foo', 'bazz', [u'3']) - >>> x.set_variable_value('Brack', 'baR', [u'4']) + ['1', '3'] + >>> x.set_variable_value('foo', 'bazz', ['3']) + >>> x.set_variable_value('Brack', 'baR', ['4']) >>> x.get_variable_value('foo', 'bazz') - [u'3'] + ['3'] >>> x.get_variable_value('brack', 'bar') - [u'4'] - >>> x.set_variable_value('foo', 'red(2:6:2)', [u'2', u'4', u'6'], var_size=12) + ['4'] + >>> x.set_variable_value('foo', 'red(2:6:2)', ['2', '4', '6'], var_size=12) >>> x.get_variable_value('foo', 'red') - ['', u'2', '', u'4', '', u'6'] + ['', '2', '', '4', '', '6'] """ group_name = group_name.lower() @@ -1058,43 +1073,43 @@ def merge_nl(self, other, overwrite=False): >>> x = parse(text='&foo bar=1 bazz=,2 brat=3/') >>> y = parse(text='&foo bar=2 bazz=3*1 baker=4 / &foo2 barter=5 /') >>> y.get_value('bazz') - [u'1', u'1', u'1'] + ['1', '1', '1'] >>> x.merge_nl(y) >>> sorted(x.get_group_names()) - [u'foo', u'foo2'] + ['foo', 'foo2'] >>> sorted(x.get_variable_names('foo')) - [u'baker', u'bar', u'bazz', u'brat'] + ['baker', 'bar', 'bazz', 'brat'] >>> sorted(x.get_variable_names('foo2')) - [u'barter'] + ['barter'] >>> x.get_value('bar') - [u'1'] + ['1'] >>> x.get_value('bazz') - [u'1', u'2', u'1'] + ['1', '2', '1'] >>> x.get_value('brat') - [u'3'] + ['3'] >>> x.get_value('baker') - [u'4'] + ['4'] >>> x.get_value('barter') - [u'5'] + ['5'] >>> x = 
parse(text='&foo bar=1 bazz=,2 brat=3/') >>> y = parse(text='&foo bar=2 bazz=3*1 baker=4 / &foo2 barter=5 /') >>> x.merge_nl(y, overwrite=True) >>> sorted(x.get_group_names()) - [u'foo', u'foo2'] + ['foo', 'foo2'] >>> sorted(x.get_variable_names('foo')) - [u'baker', u'bar', u'bazz', u'brat'] + ['baker', 'bar', 'bazz', 'brat'] >>> sorted(x.get_variable_names('foo2')) - [u'barter'] + ['barter'] >>> x.get_value('bar') - [u'2'] + ['2'] >>> x.get_value('bazz') - [u'1', u'1', u'1'] + ['1', '1', '1'] >>> x.get_value('brat') - [u'3'] + ['3'] >>> x.get_value('baker') - [u'4'] + ['4'] >>> x.get_value('barter') - [u'5'] + ['5'] """ # Pretty simple strategy: go through the entire other namelist, and # merge all values with this one's. @@ -1109,6 +1124,14 @@ def merge_nl(self, other, overwrite=False): self.set_variable_value(group_name, variable_name, merged_val, var_size=len(merged_val)) + def get_group_variables(self, group_name): + group_variables = {} + group = self._groups[group_name] + for name in sorted(group.keys()): + value = group[name][0] + group_variables[name] = value + return group_variables + def write(self, out_file, groups=None, append=False, format_='nml', sorted_groups=True): """Write a Fortran namelist to a file. 
@@ -1126,7 +1149,7 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group """ expect(format_ in ('nml', 'rc', 'nmlcontents'), "Namelist.write: unexpected output format {!r}".format(str(format_))) - if isinstance(out_file, str) or isinstance(out_file, unicode): + if isinstance(out_file, six.string_types): logger.debug("Writing namelist to: {}".format(out_file)) flag = 'a' if append else 'w' with open(out_file, flag) as file_obj: @@ -1138,7 +1161,7 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group def _write(self, out_file, groups, format_, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: - groups = self._groups.keys() + groups = list(self._groups.keys()) if format_ == 'nml' or format_ == 'nmlcontents': equals = ' =' elif format_ == 'rc': @@ -1241,7 +1264,7 @@ def __init__(self, text, groupless=False): self._line = 1 self._col = 0 # Text and its size. - self._text = unicode(text) + self._text = str(text) self._len = len(self._text) # Dictionary with group names as keys, and dictionaries of variable # name-value pairs as values. (Or a single flat dictionary if @@ -1269,10 +1292,8 @@ def _curr(self): def _next(self): """Return the character at the next position. - >>> _NamelistParser(' ')._next() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser(' ')._next) + """ # If at the end of the file, we should raise _NamelistEOF. The easiest # way to do this is to just advance. @@ -1311,19 +1332,15 @@ def _advance(self, nchars=1, check_eof=False): >>> x._advance(3) >>> (x._pos, x._line, x._col) (7, 3, 1) - >>> x._advance(1) - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. - >>> _NamelistParser('abc\n')._advance(4) - Traceback (most recent call last): - ... 
- _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._advance, 1) + + >>> shouldRaise(_NamelistEOF, _NamelistParser('abc\n')._advance, 4) + >>> x = _NamelistParser('ab') >>> x._advance(check_eof=True) False >>> x._curr() - u'b' + 'b' >>> x._advance(check_eof=True) True """ @@ -1357,34 +1374,32 @@ def _eat_whitespace(self, allow_initial_comment=False): >>> x._eat_whitespace() True >>> x._curr() - u'a' + 'a' >>> x._eat_whitespace() False >>> x._advance() - >>> x._eat_whitespace() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._eat_whitespace) + >>> x = _NamelistParser(' \n! blah\n ! blah\n a') >>> x._eat_whitespace() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser('! blah\n a') >>> x._eat_whitespace() False >>> x._curr() - u'!' + '!' >>> x = _NamelistParser(' ! blah\n a') >>> x._eat_whitespace() True >>> x._curr() - u'!' + '!' >>> x = _NamelistParser(' ! blah\n a') >>> x._eat_whitespace(allow_initial_comment=True) True >>> x._curr() - u'a' + 'a' """ eaten = False comment_allowed = allow_initial_comment @@ -1408,7 +1423,7 @@ def _eat_comment(self): >>> x._eat_comment() True >>> x._curr() - u' ' + ' ' >>> x._eat_comment() False >>> x._eat_whitespace() @@ -1416,17 +1431,13 @@ def _eat_comment(self): >>> x._eat_comment() True >>> x._curr() - u'a' + 'a' >>> x._advance(2) - >>> x._eat_comment() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, x._eat_comment) + >>> x = _NamelistParser('! foo\n') - >>> x._eat_comment() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. 
+ >>> shouldRaise(_NamelistEOF, x._eat_comment) + """ if self._curr() != '!': return False @@ -1450,10 +1461,8 @@ def _expect_char(self, chars): >>> x = _NamelistParser('ab') >>> x._expect_char('a') >>> x._advance() - >>> x._expect_char('a') - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected 'a' but found 'b' + >>> shouldRaise(_NamelistParseError, x._expect_char, 'a') + >>> x._expect_char('ab') """ if self._curr() not in chars: @@ -1466,30 +1475,20 @@ def _expect_char(self, chars): def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. - >>> _NamelistParser('abc')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected '&' but found 'a' - >>> _NamelistParser('&abc')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistParseError, _NamelistParser('abc')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistEOF, _NamelistParser('&abc')._parse_namelist_group_name) + >>> _NamelistParser('&abc ')._parse_namelist_group_name() - u'abc' + 'abc' >>> _NamelistParser('&abc\n')._parse_namelist_group_name() - u'abc' - >>> _NamelistParser('&abc/ ')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc/' is not a valid variable name - >>> _NamelistParser('&abc= ')._parse_namelist_group_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc=' is not a valid variable name - >>> _NamelistParser('& ')._parse_namelist_group_name() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: '' is not a valid variable name + 'abc' + >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc/ ')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('&abc= ')._parse_namelist_group_name) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('& ')._parse_namelist_group_name) + """ self._expect_char("&") self._advance() @@ -1502,40 +1501,41 @@ def _parse_variable_name(self, allow_equals=True): variable name; if it is `False`, only white space can be used for this purpose. - >>> _NamelistParser('abc')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser('abc')._parse_variable_name) + >>> _NamelistParser('foo(2)= ')._parse_variable_name() - u'foo(2)' + 'foo(2)' >>> _NamelistParser('abc ')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('ABC ')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('abc\n')._parse_variable_name() - u'abc' + 'abc' >>> _NamelistParser('abc%fred\n')._parse_variable_name() - u'abc%fred' + 'abc%fred' >>> _NamelistParser('abc(2)@fred\n')._parse_variable_name() - u'abc(2)@fred' + 'abc(2)@fred' >>> _NamelistParser('abc(1:2:3)\n')._parse_variable_name() - u'abc(1:2:3)' + 'abc(1:2:3)' >>> _NamelistParser('abc=')._parse_variable_name() - u'abc' - >>> _NamelistParser('abc(1,2) ')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: Multiple dimensions not supported in CIME namelist variables 'abc(1,2)' - >>> _NamelistParser('abc, ')._parse_variable_name() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: 'abc,' is not a valid variable name - >>> _NamelistParser(' ')._parse_variable_name() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: '' is not a valid variable name + 'abc' + >>> try: + ... _NamelistParser('abc(1,2) ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... pass + >>> try: + ... _NamelistParser('abc, ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... pass + >>> try: + ... _NamelistParser(' ')._parse_variable_name() + ... raise AssertionError("_NamelistParseError not raised") + ... except _NamelistParseError: + ... pass >>> _NamelistParser('foo+= ')._parse_variable_name() - u'foo' + 'foo' """ old_pos = self._pos separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') @@ -1573,24 +1573,20 @@ def _parse_character_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> _NamelistParser('"abc')._parse_character_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc')._parse_character_literal) + >>> _NamelistParser('"abc" ')._parse_character_literal() - u'"abc"' + '"abc"' >>> _NamelistParser("'abc' ")._parse_character_literal() - u"'abc'" - >>> _NamelistParser("*abc* ")._parse_character_literal() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: *abc* is not a valid character literal + "'abc'" + >>> shouldRaise(_NamelistParseError, _NamelistParser("*abc* ")._parse_character_literal) + >>> _NamelistParser("'abc''def' ")._parse_character_literal() - u"'abc''def'" + "'abc''def'" >>> _NamelistParser("'abc''' ")._parse_character_literal() - u"'abc'''" + "'abc'''" >>> _NamelistParser("'''abc' ")._parse_character_literal() - u"'''abc'" + "'''abc'" """ delimiter = self._curr() old_pos = self._pos @@ -1617,16 +1613,12 @@ def _parse_complex_literal(self): Position on return is the last character of the string; we avoid advancing past that in order to avoid potential EOF errors. - >>> _NamelistParser('(1.,2.')._parse_complex_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.')._parse_complex_literal) + >>> _NamelistParser('(1.,2.) ')._parse_complex_literal() - u'(1.,2.)' - >>> _NamelistParser("(A,B) ")._parse_complex_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: '(A,B)' is not a valid complex literal + '(1.,2.)' + >>> shouldRaise(_NamelistParseError, _NamelistParser("(A,B) ")._parse_complex_literal) + """ old_pos = self._pos while self._curr() != ')': @@ -1702,97 +1694,81 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): to mark the end of a literal. >>> _NamelistParser('"abc" ')._parse_literal() - u'"abc"' + '"abc"' >>> _NamelistParser("'abc' ")._parse_literal() - u"'abc'" - >>> _NamelistParser('"abc"')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + "'abc'" + >>> shouldRaise(_NamelistEOF, _NamelistParser('"abc"')._parse_literal) + >>> _NamelistParser('"abc"')._parse_literal(allow_eof_end=True) - u'"abc"' + '"abc"' >>> _NamelistParser('(1.,2.) 
')._parse_literal() - u'(1.,2.)' - >>> _NamelistParser('(1.,2.)')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + '(1.,2.)' + >>> shouldRaise(_NamelistEOF, _NamelistParser('(1.,2.)')._parse_literal) + >>> _NamelistParser('(1.,2.)')._parse_literal(allow_eof_end=True) - u'(1.,2.)' + '(1.,2.)' >>> _NamelistParser('5 ')._parse_literal() - u'5' + '5' >>> _NamelistParser('6.9 ')._parse_literal() - u'6.9' + '6.9' >>> _NamelistParser('inf ')._parse_literal() - u'inf' + 'inf' >>> _NamelistParser('nan(booga) ')._parse_literal() - u'nan(booga)' + 'nan(booga)' >>> _NamelistParser('.FLORIDA$ ')._parse_literal() - u'.FLORIDA$' - >>> _NamelistParser('hamburger ')._parse_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got 'hamburger' + '.FLORIDA$' + >>> shouldRaise(_NamelistParseError, _NamelistParser('hamburger ')._parse_literal) + >>> _NamelistParser('5,')._parse_literal() - u'5' + '5' >>> _NamelistParser('5\n')._parse_literal() - u'5' + '5' >>> _NamelistParser('5/')._parse_literal() - u'5' + '5' >>> _NamelistParser(',')._parse_literal() - u'' + '' >>> _NamelistParser('6*5 ')._parse_literal() - u'6*5' + '6*5' >>> _NamelistParser('6*(1., 2.) ')._parse_literal() - u'6*(1., 2.)' + '6*(1., 2.)' >>> _NamelistParser('6*"a" ')._parse_literal() - u'6*"a"' - >>> _NamelistParser('6*')._parse_literal() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + '6*"a"' + >>> shouldRaise(_NamelistEOF, _NamelistParser('6*')._parse_literal) + >>> _NamelistParser('6*')._parse_literal(allow_eof_end=True) - u'6*' - >>> _NamelistParser('foo= ')._parse_literal() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo=' - >>> _NamelistParser('foo+= ')._parse_literal() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: expected literal value, but got 'foo+=' + '6*' + >>> shouldRaise(_NamelistParseError, _NamelistParser('foo= ')._parse_literal) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('foo+= ')._parse_literal) + >>> _NamelistParser('5,')._parse_literal(allow_name=True) - u'5' + '5' >>> x = _NamelistParser('foo= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> x = _NamelistParser('foo+= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' - >>> _NamelistParser('6*foo= ')._parse_literal(allow_name=True) - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo=' - >>> _NamelistParser('6*foo+= ')._parse_literal(allow_name=True) - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected literal value, but got '6*foo+=' + 'f' + >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo= ')._parse_literal, allow_name=True) + + >>> shouldRaise(_NamelistParseError, _NamelistParser('6*foo+= ')._parse_literal, allow_name=True) + >>> x = _NamelistParser('foo = ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> x = _NamelistParser('foo\n= ') >>> x._parse_literal(allow_name=True) >>> x._curr() - u'f' + 'f' >>> _NamelistParser('')._parse_literal(allow_eof_end=True) - u'' + '' """ # Deal with empty input string. if allow_eof_end and self._pos == self._len: - return u'' + return '' # Deal with a repeated value prefix. 
old_pos = self._pos if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos:]): @@ -1863,47 +1839,45 @@ def _expect_separator(self, allow_eof=False): >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(",a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser("/a") >>> x._expect_separator() False >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("a") - >>> x._expect_separator() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected one of the characters in ' \n,/' but found 'a' + >>> shouldRaise(_NamelistParseError, x._expect_separator) + >>> x = _NamelistParser(" , a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" / a") >>> x._expect_separator() False >>> x._curr() - u'/' + '/' >>> x = _NamelistParser(" , ! Some stuff\n a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> x = _NamelistParser(" , ! Some stuff\n ! Other stuff\n a") >>> x._expect_separator() True >>> x._curr() - u'a' + 'a' >>> _NamelistParser("")._expect_separator(allow_eof=True) False >>> x = _NamelistParser(" ") @@ -1913,10 +1887,8 @@ def _expect_separator(self, allow_eof=False): >>> x._expect_separator(allow_eof=True) True >>> x = _NamelistParser(" / ") - >>> x._expect_separator(allow_eof=True) - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: found group-terminating '/' in file without group names + >>> shouldRaise(_NamelistParseError, x._expect_separator, allow_eof=True) + """ errstring = "found group-terminating '/' in file without group names" # Deal with the possibility that we are already at EOF. @@ -1958,53 +1930,47 @@ def _parse_name_and_values(self, allow_eof_end=False): alternate file format in "groupless" mode.) 
>>> _NamelistParser("foo='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) + ('foo', ["'bar'"], False) >>> _NamelistParser("foo(3)='bar' /")._parse_name_and_values() - (u'foo(3)', [u"'bar'"], False) + ('foo(3)', ["'bar'"], False) >>> _NamelistParser("foo ='bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) + ('foo', ["'bar'"], False) >>> _NamelistParser("foo=\n'bar' /")._parse_name_and_values() - (u'foo', [u"'bar'"], False) - >>> _NamelistParser("foo 'bar' /")._parse_name_and_values() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: expected '=' but found "'" + ('foo', ["'bar'"], False) + >>> shouldRaise(_NamelistParseError, _NamelistParser("foo 'bar' /")._parse_name_and_values) + >>> _NamelistParser("foo='bar','bazz' /")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) + ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo=,,'bazz',6*/")._parse_name_and_values() - (u'foo', [u'', u'', u"'bazz'", u'6*'], False) + ('foo', ['', '', "'bazz'", '6*'], False) >>> _NamelistParser("foo='bar' 'bazz' foo2='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) + ('foo', ["'bar'", "'bazz'"], False) >>> _NamelistParser("foo='bar' 'bazz' foo2(2)='ban'")._parse_name_and_values() - (u'foo', [u"'bar'", u"'bazz'"], False) - >>> _NamelistParser("foo= foo2='ban' ")._parse_name_and_values() - Traceback (most recent call last): - ... 
- _NamelistParseError: Error in parsing namelist: expected literal value, but got "foo2='ban'" + ('foo', ["'bar'", "'bazz'"], False) + >>> shouldRaise(_NamelistParseError, _NamelistParser("foo= foo2='ban' ")._parse_name_and_values) + >>> _NamelistParser("foo=,,'bazz',6* ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'', u'', u"'bazz'", u'6*'], False) + ('foo', ['', '', "'bazz'", '6*'], False) >>> _NamelistParser("foo(3)='bazz'")._parse_name_and_values(allow_eof_end=True) - (u'foo(3)', [u"'bazz'"], False) - >>> _NamelistParser("foo=")._parse_name_and_values() - Traceback (most recent call last): - ... - _NamelistEOF: Unexpected end of file encountered in namelist. + ('foo(3)', ["'bazz'"], False) + >>> shouldRaise(_NamelistEOF, _NamelistParser("foo=")._parse_name_and_values) + >>> _NamelistParser("foo=")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u''], False) + ('foo', [''], False) >>> _NamelistParser("foo= ")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u''], False) + ('foo', [''], False) >>> _NamelistParser("foo=2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'2'], False) + ('foo', ['2'], False) >>> _NamelistParser("foo=1,2")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u'2'], False) + ('foo', ['1', '2'], False) >>> _NamelistParser("foo(1:2)=1,2,3 ")._parse_name_and_values(allow_eof_end=True) Traceback (most recent call last): ... SystemExit: ERROR: Too many values for array foo(1:2) >>> _NamelistParser("foo=1,")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1', u''], False) + ('foo', ['1', ''], False) >>> _NamelistParser("foo+=1")._parse_name_and_values(allow_eof_end=True) - (u'foo', [u'1'], True) + ('foo', ['1'], True) """ name = self._parse_variable_name() addto = False # This keeps track of whether += existed @@ -2021,7 +1987,7 @@ def _parse_name_and_values(self, allow_eof_end=False): except _NamelistEOF: # If we hit the end of file, return a name assigned to a null value. 
if allow_eof_end: - return name, [u''], addto + return name, [''], addto else: raise # Expect at least one literal, even if it's a null value. @@ -2051,54 +2017,52 @@ def _parse_namelist_group(self): >>> x = _NamelistParser("&group /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'", u"'bazz'", u''], u'foo2': [u'5', u'5']})]) + OrderedDict([('group', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']})]) >>> x = _NamelistParser("&group\n foo='bar','bazz'\n,, foo2=2*5\n /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'5', u'5'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['5', '5'])]) >>> x._curr() - u'/' + '/' >>> x = _NamelistParser("&group /&group /") >>> x._parse_namelist_group() >>> x._advance() - >>> x._parse_namelist_group() - Traceback (most recent call last): - ... - _NamelistParseError: Error in parsing namelist: Namelist group 'group' encountered twice. 
+ >>> shouldRaise(_NamelistParseError, x._parse_namelist_group) + >>> x = _NamelistParser("&group foo='bar', foo='bazz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bazz'"]})]) + OrderedDict([('group', {'foo': ["'bazz'"]})]) >>> x = _NamelistParser("&group foo='bar', foo= /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'"]})]) + OrderedDict([('group', {'foo': ["'bar'"]})]) >>> x = _NamelistParser("&group foo='bar', foo= /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> x = _NamelistParser("&group foo='bar', foo+='baz' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'", u"'baz'"])]) + OrderedDict([('foo', ["'bar'", "'baz'"])]) >>> x = _NamelistParser("&group foo+='bar' /", groupless=True) >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> x = _NamelistParser("&group foo='bar', foo+='baz' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'", u"'baz'"]})]) + OrderedDict([('group', {'foo': ["'bar'", "'baz'"]})]) >>> x = _NamelistParser("&group foo+='bar' /") >>> x._parse_namelist_group() >>> x._settings - OrderedDict([(u'group', {u'foo': [u"'bar'"]})]) + OrderedDict([('group', {'foo': ["'bar'"]})]) """ group_name = self._parse_namelist_group_name() if not self._groupless: @@ -2139,37 +2103,37 @@ def parse_namelist(self): >>> _NamelistParser(" \n!Comment").parse_namelist() OrderedDict() >>> _NamelistParser(" &group /").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("! Comment \n &group /! Comment\n ").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("! Comment \n &group /! 
Comment ").parse_namelist() - OrderedDict([(u'group', {})]) + OrderedDict([('group', {})]) >>> _NamelistParser("&group1\n foo='bar','bazz'\n,, foo2=2*5\n / &group2 /").parse_namelist() - OrderedDict([(u'group1', {u'foo': [u"'bar'", u"'bazz'", u''], u'foo2': [u'5', u'5']}), (u'group2', {})]) + OrderedDict([('group1', {'foo': ["'bar'", "'bazz'", ''], 'foo2': ['5', '5']}), ('group2', {})]) >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5\n ", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'2*5'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5'])]) >>> _NamelistParser("!blah \n foo='bar','bazz'\n,, foo2=2*5,6\n ", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u'']), (u'foo2', [u'2*5', u'6'])]) + OrderedDict([('foo', ["'bar'", "'bazz'", '']), ('foo2', ['2*5', '6'])]) >>> _NamelistParser("!blah \n foo='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> _NamelistParser("foo='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) + OrderedDict([('foo', ["'bar'"]), ('foo(3)', ["'bazz'"])]) >>> _NamelistParser("foo(2)='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo(2)', [u"'bar'"])]) + OrderedDict([('foo(2)', ["'bar'"])]) >>> _NamelistParser("foo(2)='bar', foo(3)='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo(2)', [u"'bar'"]), (u'foo(3)', [u"'bazz'"])]) + OrderedDict([('foo(2)', ["'bar'"]), ('foo(3)', ["'bazz'"])]) >>> _NamelistParser("foo='bar', foo='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bazz'"])]) + OrderedDict([('foo', ["'bazz'"])]) >>> _NamelistParser("foo='bar'\n foo+='bazz'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'"])]) + OrderedDict([('foo', ["'bar'", "'bazz'"])]) >>> _NamelistParser("foo='bar', foo='bazz'", 
groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bazz'"])]) + OrderedDict([('foo', ["'bazz'"])]) >>> _NamelistParser("foo='bar', foo=", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) >>> _NamelistParser("foo='bar', 'bazz'\n foo+='ban'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'", u"'bazz'", u"'ban'"])]) + OrderedDict([('foo', ["'bar'", "'bazz'", "'ban'"])]) >>> _NamelistParser("foo+='bar'", groupless=True).parse_namelist() - OrderedDict([(u'foo', [u"'bar'"])]) + OrderedDict([('foo', ["'bar'"])]) """ # Return empty dictionary for empty files. if self._len == 0: diff --git a/scripts/lib/CIME/nmlgen.py b/scripts/lib/CIME/nmlgen.py index 1d0786c9b1f..fe2bc304bff 100644 --- a/scripts/lib/CIME/nmlgen.py +++ b/scripts/lib/CIME/nmlgen.py @@ -175,6 +175,7 @@ def _to_namelist_literals(self, name, value): value[i] = self.quote_string(scalar) return compress_literal_list(value) + def get_value(self, name): """Get the current value of a given namelist variable. 
@@ -328,7 +329,7 @@ def _sub_fields(self, varnames): if self._case.get_value('GLC_NEC') == 0: glc_nec_indices = [0] else: - glc_nec_indices = range(self._case.get_value('GLC_NEC')) + glc_nec_indices = list(range(self._case.get_value('GLC_NEC'))) glc_nec_indices.append(glc_nec_indices[-1] + 1) glc_nec_indices.pop(0) for i in glc_nec_indices: @@ -569,7 +570,7 @@ def add_default(self, name, value=None, ignore_abs_path=None): continue file_path = self.set_abs_file_path(file_path) if not os.path.exists(file_path): - logger.warn("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal)) + logger.warning("File not found: {} = {}, will attempt to download in check_input_data phase".format(name, literal)) current_literals[i] = string_to_character_literal(file_path) current_literals = compress_literal_list(current_literals) @@ -586,6 +587,10 @@ def create_shr_strdata_nml(self): self.add_default(variable, value=self._streams_namelists[variable]) + def get_group_variables(self, group_name): + return self._namelist.get_group_variables(group_name) + + def _write_input_files(self, input_data_list): """Write input data files to list.""" for group_name in self._namelist.get_group_names(): diff --git a/scripts/lib/CIME/preview_namelists.py b/scripts/lib/CIME/preview_namelists.py index fff00b22145..8706594d2ec 100644 --- a/scripts/lib/CIME/preview_namelists.py +++ b/scripts/lib/CIME/preview_namelists.py @@ -80,7 +80,7 @@ def create_namelists(case, component=None): # first look in the case SourceMods directory cmd = os.path.join(caseroot, "SourceMods", "src."+compname, "buildnml") if os.path.isfile(cmd): - logger.warn("\nWARNING: Using local buildnml file {}\n".format(cmd)) + logger.warning("\nWARNING: Using local buildnml file {}\n".format(cmd)) else: # otherwise look in the component config_dir cmd = os.path.join(config_dir, "buildnml") diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py index 
086297b4d1b..5d86b175764 100644 --- a/scripts/lib/CIME/simple_compare.py +++ b/scripts/lib/CIME/simple_compare.py @@ -65,7 +65,7 @@ def _compare_data(gold_lines, comp_lines, case): ... data00 ... ''' >>> results = _compare_data(teststr.splitlines(), teststr2.splitlines(), None) - >>> print results + >>> print(results) Inequivalent lines data2 data3 != data2 data30 NORMALIZED: data2 data3 != data2 data30 Found extra lines diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py index db3173e489c..34ec7edf6b4 100644 --- a/scripts/lib/CIME/test_scheduler.py +++ b/scripts/lib/CIME/test_scheduler.py @@ -199,7 +199,7 @@ def __init__(self, test_names, test_data=None, # Oversubscribe by 1/4 if proc_pool is None: - pes = int(self._machobj.get_value("PES_PER_NODE")) + pes = int(self._machobj.get_value("MAX_MPITASKS_PER_NODE")) self._proc_pool = int(pes * 1.25) else: self._proc_pool = int(proc_pool) @@ -247,6 +247,10 @@ def __init__(self, test_names, test_data=None, # instead, errors will be placed in the TestStatus files for the various # tests cases + ########################################################################### + def get_testnames(self): + return list(self._tests.keys()) + ########################################################################### def _log_output(self, test, output): ########################################################################### @@ -364,9 +368,13 @@ def _create_newcase_phase(self, test): _, case_opts, grid, compset,\ machine, compiler, test_mods = CIME.utils.parse_test_name(test) - create_newcase_cmd = "{} --case {} --res {} --mach {} --compiler {} --compset {}"\ - " --test".format(os.path.join(self._cime_root, "scripts", "create_newcase"), - test_dir, grid, machine, compiler, compset) + create_newcase_cmd = "{} --case {} --res {} --compset {}"\ + " --test".format(os.path.join(self._cime_root, "scripts", "create_newcase"), + test_dir, grid, compset) + if machine is not None: + create_newcase_cmd += " 
--machine {}".format(machine) + if compiler is not None: + create_newcase_cmd += " --compiler {}".format(compiler) if self._project is not None: create_newcase_cmd += " --project {} ".format(self._project) if self._output_root is not None: @@ -520,6 +528,7 @@ def _xml_phase(self, test): envtest.set_test_parameter("STOP_OPTION",stop_option[opt]) opti = match.group(2) envtest.set_test_parameter("STOP_N", opti) + logger.debug (" STOP_OPTION set to %s" %stop_option[opt]) logger.debug (" STOP_N set to %s" %opti) @@ -540,6 +549,10 @@ def _xml_phase(self, test): envtest.set_test_parameter("PTS_LAT", "36.6") envtest.set_test_parameter("PTS_LON", "262.5") + elif opt.startswith('I'): + # Marker to distinguish tests with same name - ignored + continue + elif opt.startswith('M'): # M option handled by create newcase continue @@ -555,7 +568,7 @@ def _xml_phase(self, test): # handled in create_newcase continue elif opt.startswith('IOP'): - logger.warn("IOP test option not yet implemented") + logger.warning("IOP test option not yet implemented") else: expect(False, "Could not parse option '{}' ".format(opt)) @@ -668,7 +681,7 @@ def _wait_for_something_to_finish(self, threads_in_flight): expect(len(threads_in_flight) <= self._parallel_jobs, "Oversubscribed?") finished_tests = [] while not finished_tests: - for test, thread_info in threads_in_flight.iteritems(): + for test, thread_info in threads_in_flight.items(): if not thread_info[0].is_alive(): finished_tests.append((test, thread_info[1])) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index d560804dc11..9bbb4a9c397 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -132,7 +132,7 @@ def __exit__(self, *_): self.flush() def __iter__(self): - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): yield phase, data[0] def get_name(self): @@ -215,7 +215,7 @@ def phase_statuses_dump(self, prefix=''): """ result = "" if 
self._phase_statuses: - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): status, comments = data if not comments: result += "{}{} {} {}\n".format(prefix, status, self._test_name, phase) @@ -314,7 +314,7 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch """ rv = TEST_PASS_STATUS run_phase_found = False - for phase, data in self._phase_statuses.iteritems(): + for phase, data in self._phase_statuses.items(): status = data[0] if phase == RUN_PHASE: run_phase_found = True diff --git a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py b/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py index f352d548aea..342fe4ecd54 100644 --- a/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py +++ b/scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py @@ -73,6 +73,7 @@ def __init__(self, case1, run_one_suffix = 'base', run_two_suffix = 'test', + separate_builds = False, multisubmit = False, case2setup_raises_exception = False, run_one_should_pass = True, @@ -88,6 +89,7 @@ def __init__(self, run_one_suffix (str, optional): Suffix used for first run. Defaults to 'base'. Currently MUST be 'base'. run_two_suffix (str, optional): Suffix used for the second run. Defaults to 'test'. + separate_builds (bool, optional): Passed to SystemTestsCompareTwo.__init__ multisubmit (bool, optional): Passed to SystemTestsCompareTwo.__init__ case2setup_raises_exception (bool, optional): If True, then the call to _case_two_setup will raise an exception. Default is False. 
@@ -110,7 +112,7 @@ def __init__(self, SystemTestsCompareTwo.__init__( self, case1, - separate_builds = False, + separate_builds = separate_builds, run_two_suffix = run_two_suffix, multisubmit = multisubmit) @@ -300,6 +302,23 @@ def test_setup(self): self.assertEqual('case2val', mytest._case2.get_value('var_set_in_setup')) + def test_setup_separate_builds_sharedlibroot(self): + # If we're using separate_builds, the two cases should still use + # the same sharedlibroot + + # Setup + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + case1.set_value("SHAREDLIBROOT", os.path.join(case1root, "sharedlibroot")) + + # Exercise + mytest = SystemTestsCompareTwoFake(case1, + separate_builds = True) + + # Verify + self.assertEqual(case1.get_value("SHAREDLIBROOT"), + mytest._case2.get_value("SHAREDLIBROOT")) + def test_setup_case2_exists(self): # If case2 already exists, then setup code should not be called diff --git a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py b/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py index 67646109673..9164530e3f0 100644 --- a/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +++ b/scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py @@ -5,6 +5,7 @@ import shutil import tempfile from CIME.SystemTests.test_utils import user_nl_utils +import six class TestUserNLCopier(unittest.TestCase): @@ -108,7 +109,7 @@ def test_append_without_files_raises_exception(self): self.write_user_nl_file(component_exists, 'irrelevant contents') # Exercise & verify - self.assertRaisesRegexp(RuntimeError, "No user_nl files found", + six.assertRaisesRegex(self, RuntimeError, "No user_nl files found", user_nl_utils.append_to_user_nl_files, caseroot = self._caseroot, component = component_for_append, diff --git a/scripts/lib/CIME/tests/case_fake.py b/scripts/lib/CIME/tests/case_fake.py index da9effa7110..5c6745fc355 100644 --- a/scripts/lib/CIME/tests/case_fake.py +++ 
b/scripts/lib/CIME/tests/case_fake.py @@ -20,7 +20,12 @@ def __init__(self, case_root, create_case_root=True): os.makedirs(case_root) self.set_value('CASEROOT', case_root) casename = os.path.basename(case_root) - self.set_value('CIME_OUTPUT_ROOT','/tmp') + # Typically, CIME_OUTPUT_ROOT is independent of the case. Here, + # we nest it under CASEROOT so that (1) tests don't interfere + # with each other; (2) a cleanup that removes CASEROOT will also + # remove CIME_OUTPUT_ROOT. + self.set_value('CIME_OUTPUT_ROOT', + os.path.join(case_root, 'CIME_OUTPUT_ROOT')) self.set_value('CASE', casename) self.set_value('CASEBASEID', casename) self.set_value('RUN_TYPE', 'startup') @@ -64,7 +69,8 @@ def copy(self, newcasename, newcaseroot): return newcase - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime_output_root=None): + def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, + cime_output_root=None, exeroot=None, rundir=None): # Need to disable unused-argument checking: keepexe is needed to match # the interface of Case, but is not used in this fake implementation # @@ -77,6 +83,12 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime newcase (str): full path to the new case. 
This directory should not already exist; it will be created keepexe (bool, optional): Ignored + mach_dir (str, optional): Ignored + project (str, optional): Ignored + cime_output_root (str, optional): New CIME_OUTPUT_ROOT for the clone + exeroot (str, optional): Ignored (because exeroot isn't used + in this fake case implementation) + rundir (str, optional): New RUNDIR for the clone Returns the clone case object """ @@ -84,6 +96,10 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, cime newcasename = os.path.basename(newcase) os.makedirs(newcaseroot) clone = self.copy(newcasename = newcasename, newcaseroot = newcaseroot) + if cime_output_root is not None: + self.set_value('CIME_OUTPUT_ROOT', cime_output_root) + if rundir is not None: + self.set_value('RUNDIR', rundir) return clone diff --git a/scripts/lib/CIME/tests/test_user_mod_support.py b/scripts/lib/CIME/tests/test_user_mod_support.py index a1bd9b3048a..75bcf33f03f 100644 --- a/scripts/lib/CIME/tests/test_user_mod_support.py +++ b/scripts/lib/CIME/tests/test_user_mod_support.py @@ -5,7 +5,7 @@ import tempfile import os from CIME.user_mod_support import apply_user_mods - +import six # ======================================================================== # Define some parameters # ======================================================================== @@ -114,7 +114,7 @@ def test_basic(self): def test_keepexe(self): self.createUserMod("foo") - with self.assertRaisesRegexp(SystemExit, "cannot have any source mods"): + with six.assertRaisesRegex(self, SystemExit, "cannot have any source mods"): apply_user_mods(self._caseroot, os.path.join(self._user_mods_parent_dir, "foo"), keepexe=True) diff --git a/scripts/lib/CIME/user_mod_support.py b/scripts/lib/CIME/user_mod_support.py index 6c3ef34071b..91bd3d65536 100644 --- a/scripts/lib/CIME/user_mod_support.py +++ b/scripts/lib/CIME/user_mod_support.py @@ -62,7 +62,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): # We 
overwrite any existing SourceMods file so that later # include_dirs take precedence over earlier ones if os.path.isfile(case_source_mods): - logger.warn("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) + logger.warning("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) else: logger.info("Adding SourceMod to case {}".format(case_source_mods)) try: @@ -81,7 +81,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): # Note that use of xmlchange_cmnds has been deprecated and will soon # be removed altogether, so new tests should rely on shell_commands if shell_commands_file.endswith("xmlchange_cmnds"): - logger.warn("xmlchange_cmnds is deprecated and will be removed " +\ + logger.warning("xmlchange_cmnds is deprecated and will be removed " +\ "in a future release; please rename {} shell_commands".format(shell_commands_file)) with open(shell_commands_file,"r") as fd: new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") @@ -92,7 +92,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): for shell_command_file in case_shell_command_files: if os.path.isfile(shell_command_file): - os.chmod(shell_command_file, 0777) + os.chmod(shell_command_file, 0o777) run_cmd_no_fail(shell_command_file) @@ -130,6 +130,6 @@ def build_include_dirs_list(user_mods_path, include_dirs=None): if os.path.isabs(newpath): build_include_dirs_list(newpath, include_dirs) else: - logger.warn("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) + logger.warning("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) return include_dirs diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 13c8bf8076f..fdd13e2c1ff 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -2,10 +2,13 @@ Common functions used by cime python scripts Warning: you cannot use CIME Classes in this module as it causes circular dependencies """ -import logging, gzip, 
sys, os, time, re, shutil, glob, string, random, imp, errno, signal +import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, imp, errno, signal import stat as statlib import warnings +import six from contextlib import contextmanager +#pylint: disable=import-error +from six.moves import configparser # Return this error code if the scripts worked but tests failed TESTS_FAILED_ERR_CODE = 100 @@ -74,7 +77,7 @@ def check_name(fullname, additional_chars=None, fullpath=False): name = fullname match = re.search(r"["+re.escape(chars)+"]", name) if match is not None: - logger.warn("Illegal character {} found in name {}".format(match.group(0), name)) + logger.warning("Illegal character {} found in name {}".format(match.group(0), name)) return False return True @@ -86,11 +89,10 @@ def _read_cime_config_file(): CIME_MODEL=acme,cesm PROJECT=someprojectnumber """ - from ConfigParser import SafeConfigParser as config_parser cime_config_file = os.path.abspath(os.path.join(os.path.expanduser("~"), ".cime","config")) - cime_config = config_parser() + cime_config = configparser.SafeConfigParser() if(os.path.isfile(cime_config_file)): cime_config.read(cime_config_file) else: @@ -247,12 +249,12 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, # Real defaults for these value should be subprocess.PIPE if arg_stdout is _hack: arg_stdout = subprocess.PIPE - elif isinstance(arg_stdout, str): + elif isinstance(arg_stdout, six.string_types): arg_stdout = _convert_to_fd(arg_stdout, from_dir) if arg_stderr is _hack: arg_stderr = subprocess.STDOUT if combine_output else subprocess.PIPE - elif isinstance(arg_stderr, str): + elif isinstance(arg_stderr, six.string_types): arg_stderr = _convert_to_fd(arg_stdout, from_dir) if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): @@ -272,15 +274,29 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, env=env) output, errput = proc.communicate(input_str) - output = output.strip() if output 
is not None else output - errput = errput.strip() if errput is not None else errput - stat = proc.wait() + if output is not None: + try: + output = output.decode('utf-8').strip() + except AttributeError: + pass + if errput is not None: + try: + errput = errput.decode('utf-8').strip() + except AttributeError: + pass - if isinstance(arg_stdout, file): - arg_stdout.close() # pylint: disable=no-member + stat = proc.wait() + if six.PY2: + if isinstance(arg_stdout, file): # pylint: disable=undefined-variable + arg_stdout.close() # pylint: disable=no-member + if isinstance(arg_stderr, file) and arg_stderr is not arg_stdout: # pylint: disable=undefined-variable + arg_stderr.close() # pylint: disable=no-member + else: + if isinstance(arg_stdout, io.IOBase): + arg_stdout.close() # pylint: disable=no-member + if isinstance(arg_stderr, io.IOBase) and arg_stderr is not arg_stdout: + arg_stderr.close() # pylint: disable=no-member - if isinstance(arg_stderr, file) and arg_stderr is not arg_stdout: - arg_stderr.close() # pylint: disable=no-member if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): if stat != 0: @@ -298,19 +314,17 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, Wrapper around subprocess to make it much more convenient to run shell commands. Expects command to work. Just returns output string. - >>> run_cmd_no_fail('echo foo') - 'foo' - + >>> run_cmd_no_fail('echo foo') == 'foo' + True >>> run_cmd_no_fail('echo THE ERROR >&2; false') # doctest:+ELLIPSIS Traceback (most recent call last): ... SystemExit: ERROR: Command: 'echo THE ERROR >&2; false' failed with error 'THE ERROR' from dir ... 
- >>> run_cmd_no_fail('grep foo', input_str='foo') - 'foo' - - >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) - 'THE ERROR' + >>> run_cmd_no_fail('grep foo', input_str=b'foo') == 'foo' + True + >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR' + True """ stat, output, errput = run_cmd(cmd, input_str, from_dir, verbose, arg_stdout, arg_stderr, env, combine_output) if stat != 0: @@ -328,7 +342,8 @@ def check_minimum_python_version(major, minor): >>> check_minimum_python_version(sys.version_info[0], sys.version_info[1]) >>> """ - expect(sys.version_info[0] == major and sys.version_info[1] >= minor, + expect(sys.version_info[0] > major or + (sys.version_info[0] == major and sys.version_info[1] >= minor), "Python {:d}, minor version {:d}+ is required, you have {:d}.{:d}".format(major, minor, sys.version_info[0], sys.version_info[1])) def normalize_case_id(case_id): @@ -895,12 +910,20 @@ def convert_to_string(value, type_str=None, vid=""): """ Convert value back to string. vid is only for generating better error messages. 
+ >>> convert_to_string(6, type_str="integer") == '6' + True + >>> convert_to_string('6', type_str="integer") == '6' + True + >>> convert_to_string('6.0', type_str="real") == '6.0' + True + >>> convert_to_string(6.01, type_str="real") == '6.01' + True """ - if value is not None and type(value) is not str: + if value is not None and not isinstance(value, six.string_types): if type_str == "char": - expect(type(value) is str, "Wrong type for entry id '{}'".format(vid)) + expect(isinstance(value, six.string_types), "Wrong type for entry id '{}'".format(vid)) elif type_str == "integer": - expect(type(value) is int, "Wrong type for entry id '{}'".format(vid)) + expect(isinstance(value, six.integer_types), "Wrong type for entry id '{}'".format(vid)) value = str(value) elif type_str == "logical": expect(type(value) is bool, "Wrong type for entry id '{}'".format(vid)) @@ -942,9 +965,9 @@ def convert_to_babylonian_time(seconds): >>> convert_to_babylonian_time(3661) '01:01:01' """ - hours = seconds / 3600 + hours = int(seconds / 3600) seconds %= 3600 - minutes = seconds / 60 + minutes = int(seconds / 60) seconds %= 60 return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) @@ -986,7 +1009,7 @@ def compute_total_time(job_cost_map, proc_pool): running_jobs = {} # name -> (procs, est-time, start-time) while len(waiting_jobs) > 0 or len(running_jobs) > 0: launched_jobs = [] - for jobname, data in waiting_jobs.iteritems(): + for jobname, data in waiting_jobs.items(): procs_for_job, time_for_job = data if procs_for_job <= proc_pool: proc_pool -= procs_for_job @@ -997,7 +1020,7 @@ def compute_total_time(job_cost_map, proc_pool): del waiting_jobs[launched_job] completed_jobs = [] - for jobname, data in running_jobs.iteritems(): + for jobname, data in running_jobs.items(): procs_for_job, time_for_job, time_started = data if (current_time - time_started) >= time_for_job: proc_pool += procs_for_job @@ -1112,7 +1135,7 @@ def does_file_have_string(filepath, text): """ return 
os.path.isfile(filepath) and text in open(filepath).read() -def transform_vars(text, case=None, subgroup=None, check_members=None, default=None): +def transform_vars(text, case=None, subgroup=None, overrides=None, default=None): """ Do the variable substitution for any variables that need transforms recursively. @@ -1121,47 +1144,45 @@ def transform_vars(text, case=None, subgroup=None, check_members=None, default=N 'cesm.stdout' >>> member_store = lambda : None >>> member_store.foo = "hi" - >>> transform_vars("I say {{ foo }}", check_members=member_store) + >>> transform_vars("I say {{ foo }}", overrides={"foo":"hi"}) 'I say hi' """ directive_re = re.compile(r"{{ (\w+) }}", flags=re.M) # loop through directive text, replacing each string enclosed with # template characters with the necessary values. - if check_members is None and case is not None: - check_members = case while directive_re.search(text): m = directive_re.search(text) variable = m.groups()[0] whole_match = m.group() - if check_members is not None and hasattr(check_members, variable.lower()) and getattr(check_members, variable.lower()) is not None: - repl = getattr(check_members, variable.lower()) - logger.debug("from check_members: in {}, replacing {} with {}".format(text, whole_match, str(repl))) + if overrides is not None and variable.lower() in overrides and overrides[variable.lower()] is not None: + repl = overrides[variable.lower()] + logger.debug("from overrides: in {}, replacing {} with {}".format(text, whole_match, str(repl))) + text = text.replace(whole_match, str(repl)) + + elif case is not None and hasattr(case, variable.lower()) and getattr(case, variable.lower()) is not None: + repl = getattr(case, variable.lower()) + logger.debug("from case members: in {}, replacing {} with {}".format(text, whole_match, str(repl))) text = text.replace(whole_match, str(repl)) + elif case is not None and case.get_value(variable.upper(), subgroup=subgroup) is not None: repl = 
case.get_value(variable.upper(), subgroup=subgroup) logger.debug("from case: in {}, replacing {} with {}".format(text, whole_match, str(repl))) text = text.replace(whole_match, str(repl)) + elif default is not None: logger.debug("from default: in {}, replacing {} with {}".format(text, whole_match, str(default))) text = text.replace(whole_match, default) + else: # If no queue exists, then the directive '-q' by itself will cause an error if "-q {{ queue }}" in text: text = "" else: - logger.warn("Could not replace variable '{}'".format(variable)) + logger.warning("Could not replace variable '{}'".format(variable)) text = text.replace(whole_match, "") return text -def get_my_queued_jobs(): - # TODO - return [] - -def delete_jobs(_): - # TODO - return True - def wait_for_unlocked(filepath): locked = True file_object = None @@ -1191,11 +1212,11 @@ def gzip_existing_file(filepath): >>> import tempfile >>> fd, filename = tempfile.mkstemp(text=True) - >>> _ = os.write(fd, "Hello World") + >>> _ = os.write(fd, b"Hello World") >>> os.close(fd) >>> gzfile = gzip_existing_file(filename) - >>> gunzip_existing_file(gzfile) - 'Hello World' + >>> gunzip_existing_file(gzfile) == b'Hello World' + True >>> os.remove(gzfile) """ expect(os.path.exists(filepath), "{} does not exists".format(filepath)) @@ -1254,6 +1275,7 @@ def find_system_test(testname, case): if system_test_dir not in sys.path: sys.path.append(system_test_dir) system_test_path = "{}.{}".format(testname.lower(), testname) + expect(len(fdir) > 0, "Test {} not found, aborting".format(testname)) expect(len(fdir) == 1, "Test {} found in multiple locations {}, aborting".format(testname, fdir)) expect(system_test_path is not None, "No test {} found".format(testname)) @@ -1316,19 +1338,23 @@ def analyze_build_log(comp, log, compiler): if re.search(warn_re, line): warncnt += 1 if re.search(error_re, line): - logger.warn(line) + logger.warning(line) if re.search(undefined_re, line): - logger.warn(line) + logger.warning(line) if 
warncnt > 0: logger.info("Component {} build complete with {} warnings".format(comp, warncnt)) def is_python_executable(filepath): + first_line = None if os.path.isfile(filepath): - with open(filepath, "r") as f: - first_line = f.readline() + with open(filepath, "rt") as f: + try: + first_line = f.readline() + except: + pass - return first_line.startswith("#!") and "python" in first_line + return first_line is not None and first_line.startswith("#!") and "python" in first_line return False def get_umask(): diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index a9b1d5ca6ef..5000bff60df 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -1,4 +1,6 @@ -import os, time, threading, Queue, socket, signal, distutils.spawn, shutil, glob +#pylint: disable=import-error +from six.moves import queue +import os, time, threading, socket, signal, distutils.spawn, shutil, glob import logging import xml.etree.ElementTree as xmlet @@ -7,6 +9,7 @@ from CIME.XML.machines import Machines from CIME.test_status import * + SIGNAL_RECEIVED = False ACME_MAIN_CDASH = "ACME_Climate" CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest" @@ -146,7 +149,7 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti need_to_upload = False - for test_name, test_data in results.iteritems(): + for test_name, test_data in results.items(): test_path, test_status = test_data if (test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]): @@ -299,7 +302,7 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno ############################################################################### def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False): ############################################################################### - results = Queue.Queue() + results = queue.Queue() for test_path in test_paths: t 
= threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak)) @@ -348,7 +351,7 @@ def wait_for_tests(test_paths, test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak) all_pass = True - for test_name, test_data in sorted(test_results.iteritems()): + for test_name, test_data in sorted(test_results.items()): test_path, test_status = test_data logging.info("Test '{}' finished with status '{}'".format(test_name, test_status)) logging.info(" Path: {}".format(test_path)) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py index 8dd27bf7b63..e4f094cfb95 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -1,8 +1,26 @@ import CIME.wait_for_tests from CIME.utils import expect +from CIME.case import Case import os, shutil, glob, signal, logging +############################################################################### +def cleanup_queue(test_root, test_id): +############################################################################### + """ + Delete all jobs left in the queue + """ + for teststatus_file in glob.iglob("{}/*{}*/TestStatus".format(test_root, test_id)): + case_dir = os.path.dirname(teststatus_file) + with Case(case_dir, read_only=True) as case: + jobmap = case.get_job_info() + jobkills = [] + for jobname, jobid in jobmap.items(): + logging.warning("Found leftover batch job {} ({}) that need to be deleted".format(jobid, jobname)) + jobkills.append(jobid) + + case.cancel_batch_jobs(jobkills) + ############################################################################### def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, @@ -119,4 +137,8 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, cdash_project=cdash_project, cdash_build_group=cdash_build_group) + if use_batch and 
CIME.wait_for_tests.SIGNAL_RECEIVED: + # Cleanup + cleanup_queue(test_root, test_id) + return tests_passed diff --git a/scripts/lib/six.py b/scripts/lib/six.py new file mode 100644 index 00000000000..a0297d7113d --- /dev/null +++ b/scripts/lib/six.py @@ -0,0 +1,890 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. 
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + 
MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", 
"tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = 
_urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + 
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", 
"urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + 
except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = 
operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, 
_assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) 
+if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/scripts/lib/update_acme_tests.py b/scripts/lib/update_acme_tests.py index c25f1ff5d45..3764460f38b 100644 --- a/scripts/lib/update_acme_tests.py +++ b/scripts/lib/update_acme_tests.py @@ -1,6 +1,7 @@ import CIME.utils from CIME.utils import expect, convert_to_seconds, parse_test_name from CIME.XML.machines import Machines +import six # Here are the tests belonging to acme suites. Format is # ... @@ -48,11 +49,11 @@ "ERP.f45_g37_rx1.A", "SMS_D_Ln9.f19_g16_rx1.A", "DAE.f19_f19.A", - "PET_P32.f19_f19.A", + "PET_P4.f19_f19.A", "SMS.T42_T42.S", "PRE.f19_f19.ADESP", "PRE.f19_f19.ADESP_TEST", - "MCC_P12.f19_g16_rx1.A") + "MCC_P1.f19_g16_rx1.A") ), # @@ -188,27 +189,27 @@ def get_test_suite(suite, machine=None, compiler=None): tests = [] for item in tests_raw: test_mod = None - if (isinstance(item, str)): + if (isinstance(item, six.string_types)): test_name = item else: expect(isinstance(item, tuple), "Bad item type for item '{}'".format(str(item))) expect(len(item) in [2, 3], "Expected two or three items in item '{}'".format(str(item))) - expect(isinstance(item[0], str), "Expected string in first field of item '{}'".format(str(item))) - expect(isinstance(item[1], str), "Expected string in second field of item '{}'".format(str(item))) + expect(isinstance(item[0], six.string_types), "Expected string in first field of item '{}'".format(str(item))) + expect(isinstance(item[1], six.string_types), "Expected 
string in second field of item '{}'".format(str(item))) test_name = item[0] if (len(item) == 2): test_mod = item[1] else: - expect(type(item[2]) in [str, tuple], "Expected string or tuple for third field of item '{}'".format(str(item))) - test_mod_machines = [item[2]] if isinstance(item[2], str) else item[2] + expect(type(item[2]) in [six.string_types, tuple], "Expected string or tuple for third field of item '{}'".format(str(item))) + test_mod_machines = [item[2]] if isinstance(item[2], six.string_types) else item[2] if (machine in test_mod_machines): test_mod = item[1] tests.append(CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler, testmod=test_mod)) if (inherits_from is not None): - inherits_from = [inherits_from] if isinstance(inherits_from, str) else inherits_from + inherits_from = [inherits_from] if isinstance(inherits_from, six.string_types) else inherits_from for inherits in inherits_from: inherited_tests = get_test_suite(inherits, machine, compiler) @@ -221,7 +222,7 @@ def get_test_suite(suite, machine=None, compiler=None): ############################################################################### def get_test_suites(): ############################################################################### - return _TEST_SUITES.keys() + return list(_TEST_SUITES.keys()) ############################################################################### def infer_machine_name_from_tests(testargs): @@ -327,14 +328,14 @@ def get_recommended_test_time(test_full_name): _, rec_time, tests_raw = _TEST_SUITES[suite] for item in tests_raw: test_mod = None - if (isinstance(item, str)): + if (isinstance(item, six.string_types)): test_name = item else: test_name = item[0] if (len(item) == 2): test_mod = item[1] else: - test_mod_machines = [item[2]] if isinstance(item[2], str) else item[2] + test_mod_machines = [item[2]] if isinstance(item[2], six.string_types) else item[2] if (machine in test_mod_machines): test_mod = item[1] @@ -360,11 +361,13 @@ 
def sort_by_time(test_one, test_two): """ rec1, rec2 = get_recommended_test_time(test_one), get_recommended_test_time(test_two) if rec1 == rec2: - return cmp(test_one, test_two) + return (test_one > test_two) - (test_two < test_one) else: if rec2 is None: return -1 elif rec1 is None: return 1 else: - return cmp(convert_to_seconds(rec2), convert_to_seconds(rec1)) + a = convert_to_seconds(rec2) + b = convert_to_seconds(rec1) + return (a < b) - (b < a) diff --git a/scripts/manage_pes b/scripts/manage_pes index da00c465c56..0b689f00633 100755 --- a/scripts/manage_pes +++ b/scripts/manage_pes @@ -9,7 +9,7 @@ from CIME.XML.files import Files from CIME.XML.pes import Pes from CIME.XML.component import Component import argparse, sys, os, logging -import datetime, re +import datetime, re, six queryhelp = """ Query the peslist by grid and machine @@ -250,7 +250,7 @@ class ManagePes(object): self.print_gridnodes([match]) logger.info(" The new values would be") self.print_gridnodes([newmatch]) - override = raw_input(" Do you want to override the match with" + override = six.moves.input(" Do you want to override the match with" " your pe-layout [yes/no] (default is no)\n") if override.lower() != "y" and override.lower() != "yes": logger.info("Nothing done.") diff --git a/scripts/query_config b/scripts/query_config index f734d69ed3c..889c06e6d9c 100755 --- a/scripts/query_config +++ b/scripts/query_config @@ -92,7 +92,7 @@ def print_compset(name, files, all_components=False): elif config_file is None or not os.path.isfile(config_file): return - print "\nActive component: {}".format(name) + print("\nActive component: {}".format(name)) # Now parse the compsets file and write out the compset alias and longname as well as the help text # determine component xml content compsets = Compsets(config_file) @@ -144,7 +144,7 @@ def query_component(name, all_components=False): expect(config_exists, "Cannot find config_file {} on disk".format(config_file)) elif all_components and not 
config_exists: - print "WARNING: Couldn't find config_file {} on disk".format(config_file) + print("WARNING: Couldn't find config_file {} on disk".format(config_file)) return # If name is not a valid argument - exit with error expect(match_found, @@ -285,36 +285,36 @@ class Machines(CIME.XML.machines.Machines): if single_machine and machine_name is None: files = Files() config_file = files.get_value("MACHINES_SPEC_FILE") - print "Machine is not listed in config file: {}".format(config_file) + print("Machine is not listed in config file: {}".format(config_file)) else: # write out machines machines = self.get_nodes(nodename="machine") - print "Machine(s)" + print("Machine(s)") for machine in machines: name = machine.get("MACH") desc = machine.find("DESC") os_ = machine.find("OS") compilers = machine.find("COMPILERS") max_tasks_per_node = machine.find("MAX_TASKS_PER_NODE") - pes_per_node = machine.find("PES_PER_NODE") + MAX_MPITASKS_PER_NODE = machine.find("MAX_MPITASKS_PER_NODE") current_machine = self.probe_machine_name(warn=False) if not single_machine: name += " (current)" if current_machine and current_machine in name else "" - print " {} : {} ".format(name, desc.text) - print " os ", os_.text - print " compilers ",compilers.text - if pes_per_node is not None: - print " pes/node ",pes_per_node.text + print(" {} : {} ".format(name, desc.text)) + print(" os ", os_.text) + print(" compilers ",compilers.text) + if MAX_MPITASKS_PER_NODE is not None: + print(" pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print " max_tasks/node ",max_tasks_per_node.text + print(" max_tasks/node ",max_tasks_per_node.text) elif single_machine and machine_name in name: - print " {} : {} ".format(name, desc.text) - print " os ", os_.text - print " compilers ",compilers.text - if pes_per_node is not None: - print " pes/node ",pes_per_node.text + print(" {} : {} ".format(name, desc.text)) + print(" os ", os_.text) + print(" compilers ",compilers.text) + if 
MAX_MPITASKS_PER_NODE is not None: + print(" pes/node ",MAX_MPITASKS_PER_NODE.text) if max_tasks_per_node is not None: - print " max_tasks/node ",max_tasks_per_node.text + print(" max_tasks/node ",max_tasks_per_node.text) def _main_func(description): """ diff --git a/scripts/query_testlists b/scripts/query_testlists index 8a962c9573a..3f314d868fb 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -4,7 +4,7 @@ Script to query xml test lists, displaying all tests in human-readable form. Usage: - ./query_testlists [--show-options] + ./query_testlists [--show-options] [--define-testtypes] Display a list of tests ./query_testlists --count Count tests by category/machine/compiler @@ -13,9 +13,10 @@ Usage: All of the above support the various --xml-* arguments for subsetting which tests are included. """ -from __future__ import print_function + from Tools.standard_script_setup import * from CIME.test_utils import get_tests_from_xml, test_to_string +from CIME.XML.tests import Tests from CIME.utils import expect logger = logging.getLogger(__name__) @@ -46,6 +47,11 @@ def parse_command_line(args, description): "(wallclock time, memory leak tolerance, etc.). " "(Has no effect with --list or --count options.)") + parser.add_argument("--define-testtypes", action="store_true", + help="At the top of the list of tests, define " + "all of the possible test types. 
" + "(Has no effect with --list or --count options.)") + parser.add_argument("--xml-category", help="Only include tests in this category; default is all categories") @@ -61,14 +67,37 @@ def parse_command_line(args, description): args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - expect(not(args.count and args.list_type), - "Cannot specify both --count and --list arguments.") + _check_argument_compatibility(args) if args.list_type: _process_list_type(args) return args +############################################################################### +def _check_argument_compatibility(args): +############################################################################### + """Ensures there are no incompatible arguments + + If incompatible arguments are found, aborts with a helpful error + message. + """ + + expect(not(args.count and args.list_type), + "Cannot specify both --count and --list arguments.") + + if args.count: + expect(not args.show_options, + "--show-options is incompatible with --count") + expect(not args.define_testtypes, + "--define-testtypes is incompatible with --count") + + if args.list_type: + expect(not args.show_options, + "--show-options is incompatible with --list") + expect(not args.define_testtypes, + "--define-testtypes is incompatible with --list") + ############################################################################### def _process_list_type(args): ############################################################################### @@ -87,7 +116,7 @@ def _process_list_type(args): args.list_type = 'compiler' ############################################################################### -def print_test_data(test_data, show_options): +def print_test_data(test_data, show_options, define_testtypes): ############################################################################### """ Args: @@ -96,6 +125,14 @@ def print_test_data(test_data, show_options): - category: test category """ + if define_testtypes: + 
print("#"*72) + print("Test types") + print("----------") + test_definitions = Tests() + test_definitions.print_values(skip_infrastructure_tests=True) + print("#"*72) + categories = sorted(set([item['category'] for item in test_data])) max_category_len = max([len(category) for category in categories]) max_test_len = max([len(item['name']) for item in test_data]) @@ -178,7 +215,7 @@ def _main_func(description): elif args.list_type: list_test_data(test_data, args.list_type) else: - print_test_data(test_data, args.show_options) + print_test_data(test_data, args.show_options, args.define_testtypes) if __name__ == "__main__": _main_func(__doc__) diff --git a/scripts/tests/list_tests b/scripts/tests/list_tests index d9de526aa85..01b08116753 100755 --- a/scripts/tests/list_tests +++ b/scripts/tests/list_tests @@ -1,5 +1,7 @@ #!/usr/bin/env python - +# This script will print the list of test classes in +# scripts_regression_tests.py +# import unittest DEBUG = False @@ -13,14 +15,15 @@ def list_tests_from(): if len(tests): for atest in tests: if DEBUG: - print atest + print(atest) for btest in atest._tests: btestname = btest.__str__().split() test_classes.append(btestname[1][1:-1].split('.')[1]) - # add this explicitly, not captured by the above - test_classes.append("B_CheckCode") - for ctest in sorted(list(set(test_classes))): - print ctest + # add this explicitly, not captured by the above + test_classes.append("B_CheckCode") + for ctest in sorted(list(set(test_classes))): + print(ctest) + if __name__ == "__main__": # Include the directories diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 34a45b6dff0..4742e1c2b91 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -import io, glob, os, re, shutil, signal, sys, tempfile, \ - threading, time, logging, unittest, getpass, string +import glob, os, re, shutil, signal, sys, tempfile, 
\ + threading, time, logging, unittest, getpass from xml.etree.ElementTree import ParseError @@ -10,6 +10,8 @@ # Remove all pyc files to ensure we're testing the right things import subprocess subprocess.call('/bin/rm $(find . -name "*.pyc")', shell=True, cwd=LIB_DIR) +from six import assertRaisesRegex +import six from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit import update_acme_tests @@ -80,7 +82,7 @@ def test_unittests(self): # # This is analogous to running: # python -m unittest discover -s CIME/tests -t . - # from cime/utils/python + # from cime/scripts/lib # # Yes, that means we have a bunch of unit tests run from this one unit # test. @@ -239,8 +241,8 @@ def tearDownClass(cls): teardown_root = True for tfile in cls._testdirs: if tfile not in cls._do_teardown: - print "Detected failed test or user request no teardown" - print "Leaving case directory : %s"%tfile + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s"%tfile) teardown_root = False elif do_teardown: shutil.rmtree(tfile) @@ -321,7 +323,6 @@ def test_c_create_clone_keepexe(self): if os.path.exists(testdir): shutil.rmtree(testdir) prevtestdir = cls._testdirs[0] - cls._testdirs.append(testdir) user_mods_dir = os.path.join(CIME.utils.get_python_libs_root(), "..", "tests", "user_mods_test3") cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \ @@ -341,7 +342,7 @@ def test_d_create_clone_new_user(self): run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user", from_dir=prevtestdir) - fakeoutputroot = string.replace(cls._testroot, os.environ.get("USER"), "this_is_not_a_user") + fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user") run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot, from_dir=prevtestdir) @@ -518,8 +519,8 @@ def tearDownClass(cls): for tfile in cls._testdirs: if tfile not in cls._do_teardown: - print "Detected failed test or user 
request no teardown" - print "Leaving case directory : %s"%tfile + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s"%tfile) elif do_teardown: shutil.rmtree(tfile) @@ -744,7 +745,7 @@ def setUp(self): self._machine = MACHINE.get_machine_name() self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp() - self._baseline_area = MACHINE.get_value("BASELINE_ROOT") + self._baseline_area = os.path.join(TEST_ROOT, "baselines") self._testroot = TEST_ROOT self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH self._do_teardown = True # Will never do teardown if test failed @@ -769,10 +770,10 @@ def tearDown(self): do_teardown = self._do_teardown and sys.exc_info() == (None, None, None) if (not do_teardown): - print "Detected failed test or user request no teardown" - print "Leaving files:" + print("Detected failed test or user request no teardown") + print("Leaving files:") for file_to_clean in files_to_clean: - print " ", file_to_clean + print(" ", file_to_clean) else: # For batch machines need to avoid race condition as batch system # finishes I/O for the case. 
@@ -790,6 +791,7 @@ def _create_test(self, extra_args, test_id=None, pre_run_errors=False, run_error ########################################################################### test_id = CIME.utils.get_timestamp() if test_id is None else test_id extra_args.append("-t {}".format(test_id)) + extra_args.append("--baseline-root {}".format(self._baseline_area)) if NO_BATCH: extra_args.append("--no-batch") if TEST_COMPILER: @@ -1175,11 +1177,11 @@ def test_bless_test_results(self): test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A" if CIME.utils.get_model() == "acme": - genargs = ["-g", "-o", "-b", self._baseline_name, test_name] - compargs = ["-c", "-b", self._baseline_name, test_name] + genargs = ["-g", "-o", "-b", self._baseline_name, test_name, "--baseline-root", self._baseline_area] + compargs = ["-c", "-b", self._baseline_name, test_name, "--baseline-root", self._baseline_area] else: - genargs = ["-g", self._baseline_name, "-o", test_name] - compargs = ["-c", self._baseline_name, test_name] + genargs = ["-g", self._baseline_name, "-o", test_name, "--baseline-root", self._baseline_area] + compargs = ["-c", self._baseline_name, test_name, "--baseline-root", self._baseline_area] self._create_test(genargs) @@ -1194,8 +1196,8 @@ def test_bless_test_results(self): self._create_test(compargs, test_id=test_id, run_errors=True) # compare_test_results should detect the fail - cpr_cmd = "%s/compare_test_results --test-root %s -b %s -t %s 2>&1" \ - % (TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id) + cpr_cmd = "{}/compare_test_results --test-root {} -b {} -t {} --baseline-root {} 2>&1" \ + .format(TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id, self._baseline_area) output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE) # use regex @@ -1205,8 +1207,8 @@ def test_bless_test_results(self): msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output)) # Bless - run_cmd_no_fail("%s/bless_test_results --test-root %s 
--hist-only --force -b %s -t %s"\ - % (TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id)) + run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -b {} -t {} --baseline-root {}" + .format(TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id,self._baseline_area)) # Hist compare should now pass again self._create_test(compargs) @@ -1217,11 +1219,15 @@ def test_rebless_namelist(self): # Generate some namelist baselines test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A" if CIME.utils.get_model() == "acme": - genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"] - compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass"] + genargs = ["-n", "-g", "-o", "-b", self._baseline_name, "cime_test_only_pass", + "--baseline-root", self._baseline_area] + compargs = ["-n", "-c", "-b", self._baseline_name, "cime_test_only_pass", + "--baseline-root", self._baseline_area] else: - genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass"] - compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass"] + genargs = ["-n", "-g", self._baseline_name, "-o", "cime_test_only_pass", + "--baseline-root", self._baseline_area] + compargs = ["-n", "-c", self._baseline_name, "cime_test_only_pass", + "--baseline-root", self._baseline_area] self._create_test(genargs) @@ -1232,11 +1238,12 @@ def test_rebless_namelist(self): # Check standalone case.cmpgen_namelists casedir = os.path.join(self._testroot, "%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id)) - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir) + run_cmd_assert_result(self, "./case.cmpgen_namelists --baseline-root {}".format(self._baseline_area) + , from_dir=casedir) # compare_test_results should pass - cpr_cmd = "%s/compare_test_results --test-root %s -n -b %s -t %s 2>&1" \ - % (TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id) + cpr_cmd = "{}/compare_test_results 
--test-root {} -n -b {} -t {} --baseline-root {} 2>&1" \ + .format(TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id, self._baseline_area) output = run_cmd_assert_result(self, cpr_cmd) # use regex @@ -1270,17 +1277,19 @@ def test_rebless_namelist(self): self._create_test(compargs, test_id=test_id, pre_run_errors=True) casedir = os.path.join(self._testroot, "%s.C.%s" % (CIME.utils.get_full_test_name(test_to_change, machine=self._machine, compiler=self._compiler), test_id)) - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100) + run_cmd_assert_result(self, "./case.cmpgen_namelists --baseline-root {}".format(self._baseline_area), + from_dir=casedir, expected_stat=100) # preview namelists should work run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir) # This should still fail - run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100) + run_cmd_assert_result(self, "./case.cmpgen_namelists --baseline-root {}".format(self._baseline_area), + from_dir=casedir, expected_stat=100) # compare_test_results should fail - cpr_cmd = "%s/compare_test_results --test-root %s -n -b %s -t %s 2>&1" \ - % (TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id) + cpr_cmd = "{}/compare_test_results --test-root {} -n -b {} -t {} --baseline-root {} 2>&1" \ + .format(TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id, self._baseline_area) output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE) # use regex @@ -1290,8 +1299,8 @@ def test_rebless_namelist(self): msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output)) # Bless - run_cmd_no_fail("%s/bless_test_results --test-root %s -n --force -b %s -t %s" \ - % (TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id)) + run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -b {} -t {} --baseline-root {}" + .format(TOOLS_DIR, TEST_ROOT, self._baseline_name, test_id, self._baseline_area)) # 
Basic namelist compare should now pass again self._create_test(compargs) @@ -1592,8 +1601,8 @@ def test_single_submit(self): self.skipTest("Skipping single submit. Not valid without batch") if CIME.utils.get_model() != "acme": self.skipTest("Skipping single submit. ACME experimental feature") - if self._machine != "skybridge": - self.skipTest("Skipping single submit. Only works on skybridge") + if self._machine not in ["skybridge", "chama"]: + self.skipTest("Skipping single submit. Only works on skybridge and chama") # Keep small enough for now that we don't have to worry about load balancing self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"], @@ -1685,8 +1694,8 @@ def get_macros(macro_maker, build_xml, build_system): """ # Build.write_macros expects file-like objects as input, so # we need to wrap the strings in StringIO objects. - xml = io.StringIO(unicode(build_xml)) - output = io.StringIO() + xml = six.StringIO(str(build_xml)) + output = six.StringIO() output_format = None if build_system == "Makefile": output_format = "make" @@ -1917,21 +1926,21 @@ class G_TestMacrosBasic(unittest.TestCase): def test_script_is_callable(self): """The test script can be called on valid output without dying.""" # This is really more a smoke test of this script than anything else. 
- maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) test_xml = _wrap_config_build_xml("FALSE") get_macros(maker, test_xml, "Makefile") def test_script_rejects_bad_xml(self): """The macro writer rejects input that's not valid XML.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) with self.assertRaises(ParseError): get_macros(maker, "This is not valid XML.", "Makefile") def test_script_rejects_bad_build_system(self): """The macro writer rejects a bad build system string.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version="2.0") + maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) bad_string = "argle-bargle." - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Unrecognized build system provided to write_macros: " + bad_string): get_macros(maker, "This string is irrelevant.", bad_string) @@ -1953,7 +1962,7 @@ class H_TestMakeMacros(unittest.TestCase): test_machine = "mymachine" def setUp(self): - self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version="2.0") + self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0) def xml_to_tester(self, xml_string): """Helper that directly converts an XML string to a MakefileTester.""" @@ -2033,7 +2042,7 @@ def test_reject_duplicate_defaults(self): """The macro writer dies if given many defaults.""" xml1 = """/path/to/default""" xml2 = """/path/to/other_default""" - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2042,7 +2051,7 @@ def test_reject_duplicates(self): """The macro writer dies if given many matches for a given configuration.""" xml1 = """/path/to/mpich""" xml2 = """/path/to/mpich2""" - with 
self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2051,7 +2060,7 @@ def test_reject_ambiguous(self): """The macro writer dies if given an ambiguous set of matches.""" xml1 = """/path/to/mpich""" xml2 = """/path/to/mpi-debug""" - with self.assertRaisesRegexp( + with assertRaisesRegex(self, SystemExit, "Variable MPI_PATH is set ambiguously in config_build.xml."): self.xml_to_tester(xml1+xml2) @@ -2183,7 +2192,7 @@ def test_env_and_shell_command(self): tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"}) xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast""" err_msg = "Nesting not allowed.*" - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1) def test_config_variable_insertion(self): @@ -2213,12 +2222,12 @@ def test_config_reject_self_references(self): # references. xml1 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+"") xml1 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+"") def test_config_reject_cyclical_references(self): @@ -2226,12 +2235,12 @@ def test_config_reject_cyclical_references(self): xml1 = """MPI_PATH""" xml2 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+xml2+"") xml1 = """${MPI_PATH}""" xml2 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." 
- with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(""+xml1+xml2+"") def test_variable_insertion_with_machine_specific_setting(self): @@ -2240,14 +2249,14 @@ def test_variable_insertion_with_machine_specific_setting(self): xml2 = """MPI_PATH""".format(self.test_machine) xml3 = """MPI_LIB_NAME""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1+xml2+xml3) xml1 = """something""" xml2 = """MPI_PATH""".format(self.test_machine) xml3 = """${MPI_LIB_NAME}""" err_msg = ".* has bad references." - with self.assertRaisesRegexp(SystemExit, err_msg): + with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1+xml2+xml3) @@ -2280,8 +2289,8 @@ def _run_and_assert_query_testlist(self, extra_args=""): files = Files() testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"}) - run_cmd_assert_result(self, "%s/query_testlists --xml-testlist %s %s"% - (SCRIPT_DIR, testlist_drv, extra_args)) + run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format( + SCRIPT_DIR, testlist_drv, extra_args)) def test_query_testlists_runs(self): """Make sure that query_testlists runs successfully @@ -2292,6 +2301,10 @@ def test_query_testlists_runs(self): """ self._run_and_assert_query_testlist(extra_args="--show-options") + def test_query_testlists_define_testtypes_runs(self): + """Make sure that query_testlists runs successfully with the --define-testtypes argument""" + self._run_and_assert_query_testlist(extra_args="--define-testtypes") + def test_query_testlists_count_runs(self): """Make sure that query_testlists runs successfully with the --count argument""" self._run_and_assert_query_testlist(extra_args="--count") @@ -2326,7 +2339,7 @@ def check_for_pylint(): major = int(pylintver.group(1)) minor = int(pylintver.group(2)) if pylint is None or major < 1 or (major == 1 
and minor < 5): - print "pylint version 1.5 or newer not found, pylint tests skipped" + print("pylint version 1.5 or newer not found, pylint tests skipped") return False return True @@ -2422,12 +2435,11 @@ def _main_func(): try: unittest.main(verbosity=2, catchbreak=True) - except SystemExit: - had_fails = sys.exc_info()[1].message - if had_fails: - print "Detected failures, leaving directory:", TEST_ROOT + except SystemExit as e: + if e.__str__() != "False": + print("Detected failures, leaving directory:", TEST_ROOT) else: - print "All pass, removing directory:", TEST_ROOT + print("All pass, removing directory:", TEST_ROOT) if os.path.exists(TEST_ROOT): shutil.rmtree(TEST_ROOT) diff --git a/src/build_scripts/buildlib.csm_share b/src/build_scripts/buildlib.csm_share index 2d86437118a..02985a8f0e3 100755 --- a/src/build_scripts/buildlib.csm_share +++ b/src/build_scripts/buildlib.csm_share @@ -11,6 +11,8 @@ if ($#ARGV < 2) { } my ($sharedlibroot, $installroot, $CASEROOT) = @ARGV; +print "$sharedlibroot <> $installroot <> $CASEROOT\n"; + chdir "${CASEROOT}" or die "Could not cd to \"$CASEROOT\""; my $CIMEROOT = `./xmlquery CIMEROOT --value`; @@ -51,6 +53,7 @@ $useesmf = "esmf" if ($USE_ESMF_LIB eq "TRUE"); my $libdir = "$sharedlibroot/$COMP_INTERFACE/$useesmf/${NINST_VALUE}/csm_share"; my $installdir = "$installroot/$COMP_INTERFACE/$useesmf/${NINST_VALUE}"; +print "installdir is $installdir\nlibdir is $libdir\n"; mkpath($libdir) unless -d $libdir; chdir($libdir) or die "Could not cd to $libdir: $!\n"; @@ -96,7 +99,7 @@ $multiinst_cppdefs = "$multiinst_cppdefs -DNUM_COMP_INST_WAV=$NINST_WAV"; $multiinst_cppdefs = "$multiinst_cppdefs -DNUM_COMP_INST_ROF=$NINST_ROF"; $multiinst_cppdefs = "$multiinst_cppdefs -DNUM_COMP_INST_ESP=$NINST_ESP"; -my $bld = "$GMAKE complib -j $GMAKE_J MODEL=csm_share COMPLIB=libcsm_share.a USER_CPPDEFS=\" $multiinst_cppdefs\" -f $CASETOOLS/Makefile "; +my $bld = "$GMAKE complib -j $GMAKE_J MODEL=csm_share COMPLIB=libcsm_share.a USER_CPPDEFS=\" 
$multiinst_cppdefs\" -f $CASETOOLS/Makefile "; my $rc = system($bld); if ($rc==0xff00){ @@ -113,8 +116,11 @@ if ($rc==0xff00){ die "signal $rc\n"; } if ( ! -d "$installdir/lib"){ + print "Creating installdir in $installdir\n"; mkpath("$installdir/lib"); mkpath("$installdir/include"); } +print "Copying libcsm_share.a to $installdir/lib\n"; system("cp -p -f libcsm_share.a $installdir/lib/"); +print "Copying modules to $installdir/include\n"; system("cp -p -f *.mod $installdir/include/"); diff --git a/src/build_scripts/buildlib.internal_components b/src/build_scripts/buildlib.internal_components index 410f7e4b049..5c23073d217 100755 --- a/src/build_scripts/buildlib.internal_components +++ b/src/build_scripts/buildlib.internal_components @@ -23,7 +23,6 @@ def buildlib(caseroot, libroot, bldroot, compname=None): if dir1 == "cime_config": compname = dir2 else: - print "HERE %s"%dir1 compname = dir1.split('.')[1] build_cime_component_lib(case, compname, libroot, bldroot) diff --git a/src/build_scripts/buildlib.pio b/src/build_scripts/buildlib.pio index 5b6909b0ddf..d8bf64203a8 100755 --- a/src/build_scripts/buildlib.pio +++ b/src/build_scripts/buildlib.pio @@ -137,7 +137,7 @@ def _main_func(description): valid_values += ",pnetcdf" if netcdf4_parallel_found: valid_values += ",netcdf4p,netcdf4c" - logger.warn("Updating valid_values for PIO_TYPENAME: %s", valid_values) + logger.warning("Updating valid_values for PIO_TYPENAME: %s", valid_values) case.set_valid_values("PIO_TYPENAME",valid_values) # nothing means use the general default @@ -148,7 +148,7 @@ def _main_func(description): case.set_valid_values(comp_pio_typename,valid_values) current_value = case.get_value(comp_pio_typename) if current_value not in valid_values: - logger.warn("Resetting PIO_TYPENAME to netcdf for component %s"%comp) + logger.warning("Resetting PIO_TYPENAME to netcdf for component %s"%comp) case.set_value(comp_pio_typename,"netcdf") diff --git a/src/components/data_comps/datm/cime_config/buildnml 
b/src/components/data_comps/datm/cime_config/buildnml index a76deb2312b..ae6434e7671 100755 --- a/src/components/data_comps/datm/cime_config/buildnml +++ b/src/components/data_comps/datm/cime_config/buildnml @@ -38,8 +38,6 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Get a bunch of information from the case. #---------------------------------------------------- - atm_domain_file = case.get_value("ATM_DOMAIN_FILE") - atm_domain_path = case.get_value("ATM_DOMAIN_PATH") datm_mode = case.get_value("DATM_MODE") datm_topo = case.get_value("DATM_TOPO") datm_presaero = case.get_value("DATM_PRESAERO") @@ -62,10 +60,10 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. #---------------------------------------------------- - logger.debug("DATM mode is %s", datm_mode) - logger.debug("DATM grid is %s", atm_grid) - logger.debug("DATM presaero mode is %s", datm_presaero) - logger.debug("DATM topo mode is %s", datm_topo) + logger.debug("DATM mode is {}".format(datm_mode)) + logger.debug("DATM grid is {}".format(atm_grid)) + logger.debug("DATM presaero mode is {}".format(datm_presaero)) + logger.debug("DATM topo mode is {}".format(datm_topo)) #---------------------------------------------------- # Clear out old data. @@ -101,13 +99,13 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # #pylint: disable=no-member if datm_presaero != "none": - streams.append("presaero.%s" % datm_presaero) + streams.append("presaero.{}".format(datm_presaero)) if datm_topo != "none": - streams.append("topo.%s" % datm_topo) + streams.append("topo.{}".format(datm_topo)) if datm_co2_tseries != "none": - streams.append("co2tseries.%s" % datm_co2_tseries) + streams.append("co2tseries.{}".format(datm_co2_tseries)) # Add bias correction stream if given in namelist. 
bias_correct = nmlgen.get_value("bias_correct") @@ -128,7 +126,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DATM stream is %s", inst_stream) + logger.debug("DATM stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "datm.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_datm.streams.txt." + inst_stream) @@ -146,16 +144,20 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # set per-stream variables nmlgen.create_shr_strdata_nml() - # set variables that are not per-stream - if datm_mode == 'CPLHISTForcingForOcnIce': - if atm_domain_file != "UNSET": - if case.get_value('ATM_DOMAIN_FILE') != 'UNSET': - nmlgen.add_default("domainfile", value=atm_domain_file, ignore_abs_path=True) + # Determine model domain filename (in datm_in) + if "CPLHIST" in datm_mode: + datm_cplhist_domain_file = case.get_value("DATM_CPLHIST_DOMAIN_FILE") + if datm_cplhist_domain_file == 'null': + logger.info(" .... Obtaining DATM model domain info from first stream file: {}".format(streams[0])) else: - nmlgen.add_default("domainfile", value='null') + logger.info(" .... Obtaining DATM model domain info from stream {}".format(streams[0])) + nmlgen.add_default("domainfile", value=datm_cplhist_domain_file) else: - full_domain_path = os.path.join(atm_domain_path, atm_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) + atm_domain_file = case.get_value("ATM_DOMAIN_FILE") + atm_domain_path = case.get_value("ATM_DOMAIN_PATH") + if atm_domain_file != "UNSET": + full_domain_path = os.path.join(atm_domain_path, atm_domain_file) + nmlgen.add_default("domainfile", value=full_domain_path) #---------------------------------------------------- # Finally, write out all the namelists. 
@@ -185,7 +187,7 @@ def buildnml(case, caseroot, compname): # Determine directory for user modified namelist_definitions.xml and namelist_defaults.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. files = Files() @@ -195,7 +197,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" % file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file, files=files) @@ -207,7 +209,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + '{:04d}'.format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -224,7 +226,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/datm/cime_config/config_component.xml b/src/components/data_comps/datm/cime_config/config_component.xml index fb676627e27..a215b529b28 100644 --- a/src/components/data_comps/datm/cime_config/config_component.xml +++ b/src/components/data_comps/datm/cime_config/config_component.xml @@ 
-17,7 +17,7 @@ CLM CRU NCEP v7 data set GSWP3 data set GSWP3v1 data set - Coupler hist data set + Coupler hist data set (in this mode, it is strongly recommended that the model domain and the coupler history forcing are on the same domain) single point tower site data set COREv2 normal year forcing COREv2 interannual forcing @@ -34,7 +34,7 @@ char - CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEPv7,CLMGSWP3v1,CPLHISTForcing + CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP,CLMCRUNCEPv7,CLMGSWP3v1,CPLHIST CORE2_NYF run_component_datm env_run.xml @@ -50,7 +50,7 @@ CLMCRUNCEPv7 CLMGSWP3v1 CLM1PT - CPLHISTForcing + CPLHIST @@ -82,12 +82,13 @@ char - none,observed + none,observed,cplhist observed none none + cplhist run_component_datm env_run.xml @@ -111,61 +112,77 @@ DATM CO2 time series + + char + + null + run_component_datm + env_run.xml + + Full pathname for domain file for datm when DATM_MODE is + CPLHIST, NOTE: if this is set to 'null' (the default), then + domain information is read in from the first coupler history + file in the target stream and it is assumed that the first + coupler stream file that is pointed to contains the domain + information for that stream. + + + char UNSET run_component_datm env_run.xml - directory for coupler history data mode (only used for CPLHIST3HrWx mode) + directory for coupler history data mode (only used for when DATM_MODE is CPLHIST) char UNSET - - b40.1850.track1.1deg.006a - run_component_datm env_run.xml - case name for coupler history data mode (only used for CPLHIST3HrWx mode) + case name used to determine stream filenames when DATM_MODE is CPLHIST integer - 1 - - 1 - + -999 run_component_datm env_run.xml - Simulation year corresponding to starting year (only used for CPLHIST3HrWx mode) + + Simulation year corresponding to DATM_CPLHIST_YR_START (only used + when DATM_MODE is CPLHIST). A common usage is to set this to + RUN_STARTDATE. 
With this setting, the forcing in the first year of + the run will be the forcing of year DATM_CPLHIST_YR_START. Another + use case is to align the calendar of transient forcing with the + model calendar. For example, setting + DATM_CPLHIST_YR_ALIGN=DATM_CPLHIST_YR_START will lead to the + forcing calendar being the same as the model calendar. The forcing + for a given model year would be the forcing of the same year. This + would be appropriate in transient runs where the model calendar is + setup to span the same year range as the forcing data. + integer -999 - - 960 - run_component_datm env_run.xml - starting year to loop data over (only used for CPLHIST3HrWx mode) + starting year to loop data over (only used when DATM_MODE is CPLHIST) integer -999 - - 1030 - run_component_datm env_run.xml - ending year to loop data over (only used for CPLHIST3HrWx mode) + ending year to loop data over (only used when DATM_MODE is CPLHIST) diff --git a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml index fcbe0f12ca9..e5d56d230bd 100644 --- a/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml +++ b/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml @@ -39,7 +39,7 @@ CLM1PT = Run with supplied single point data (force CLM) CORE2_NYF = CORE2 normal year forcing (for forcing POP and CICE) CORE2_IAF = CORE2 intra-annual year forcing (for forcing POP and CICE) - CPLHISTForcing = Streams for lnd or ocn/ice forcing used for spinup + CPLHIST = Streams for lnd or ocn/ice forcing used for spinup presaero = Prescribed aerosol forcing topo = Surface topography co2tseries = Time series of prescribed CO2 forcing @@ -108,7 +108,7 @@ Note for CORE2_IAF: The most current versions of forcing files(those recommended for use) are duplicated - below and stored at /ccsm/ocn/iaf/): + below and stored at /ccsm/ocn/iaf/): gcgcs.prec.T62.current, 
giss.lwdn.T62.current, giss.swdn.T62.current, giss.swup.T62.current, ncep.dn10.T62.current, ncep.q_10.T62.current ncep.slp_.T62.current, ncep.t_10.T62.current, ncep.u_10.T62.current, ncep.v_10.T62.current @@ -135,13 +135,14 @@ presaero.cplhist topo.observed + topo.cplhist **********IMPORTANT NOTE: ************* - In the value matches below, regular expressions are used **** + In the value matches below, regular expressions are used **** If two matches are equivalent, the FIRST one will be used, so need to make sure that matches are not equivalent if possible - As an example, say datm_mode=CLMCRUNCEPv7, + As an example, say datm_mode=CLMCRUNCEPv7, the following order would result in an INCORRECT setting of streams for this datm_mode CLMCRUNCEP.Solar,CLMCRUNCEP.Precip,CLMCRUNCEP.TPQW @@ -171,7 +172,7 @@ CORE2_NYF.GISS,CORE2_NYF.GXGXS,CORE2_NYF.NCEP CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP,CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10,CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor CORE2_IAF.NCEP.DENS.SOFS,CORE2_IAF.NCEP.PSLV.SOFS,CORE2_IAF.PREC.SOFS.DAILY,CORE2_IAF.LWDN.SOFS.DAILY,CORE2_IAF.SWDN.SOFS.DAILY,CORE2_IAF.SWUP.SOFS.DAILY,CORE2_IAF.SHUM.SOFS.6HOUR,CORE2_IAF.TBOT.SOFS.6HOUR,CORE2_IAF.U.SOFS.6HOUR,CORE2_IAF.V.SOFS.6HOUR,CORE2_IAF.CORE2.ArcFactor - CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr + CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux,CPLHISTForcing.State3hr,CPLHISTForcing.State1hr @@ -225,6 +226,7 @@ $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero $DIN_LOC_ROOT/atm/datm7/topo_forcing + null @@ -298,6 +300,7 @@ aerosoldep_rcp6.0_monthly_1849-2104_1.9x2.5_c100830.nc aerosoldep_rcp8.5_monthly_1849-2104_1.9x2.5_c100201.nc topodata_0.9x1.25_USGS_070110_stream_c151201.nc + null @@ -314,7 +317,7 @@ area area mask mask - + time time doma_lon lon doma_lat lat @@ -396,6 
+399,7 @@ $DATM_CPLHIST_DIR $DIN_LOC_ROOT/atm/cam/chem/trop_mozart_aero/aero $DIN_LOC_ROOT/atm/datm7/topo_forcing + $DATM_CPLHIST_DIR @@ -432,6 +436,7 @@ $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc $DATM_CPLHIST_CASE.cpl.ha2x1h.%ym.nc $DATM_CPLHIST_CASE.cpl.ha2x1d.%ym.nc + $DATM_CPLHIST_CASE.cpl.ha2x3h.%ym.nc clmforc.Qian.c2006.T62.Solr.%ym.nc clmforc.Qian.c2006.T62.Prec.%ym.nc clmforc.Qian.c2006.T62.TPQW.%ym.nc @@ -1206,7 +1211,7 @@ a2x3h_Sa_z z a2x3h_Sa_tbot tbot - a2x3h_Sa_ptem ptem + a2x3h_Sa_ptem ptem a2x3h_Sa_shum shum a2x3h_Sa_pbot pbot a2x3h_Sa_dens dens @@ -1438,6 +1443,9 @@ DSTX04WD dstwet4 DSTX04DD dstdry4 + + a2x3h_Sa_topo topo + TOPO topo @@ -1470,6 +1478,7 @@ 1849 1849 1 + $DATM_CPLHIST_YR_ALIGN @@ -1521,6 +1530,7 @@ 1849 1849 1 + $DATM_CPLHIST_YR_START @@ -1573,6 +1583,7 @@ 2006 2104 1 + $DATM_CPLHIST_YR_END @@ -1588,6 +1599,7 @@ 900 900 0 + 0 @@ -1603,7 +1615,7 @@ char streams shr_strdata_nml - CLMNCEP,COPYALL,CPLHIST,CORE2_NYF,CORE2_IAF,NULL + CLMNCEP,COPYALL,CORE2_NYF,CORE2_IAF,NULL general method that operates on the data. this is generally implemented in the data models but is set in the strdata method for @@ -1626,8 +1638,6 @@ scientific modes supported by the model, they are listed below. The mode is selected by a character string set in the strdata namelist variable dataMode. - datamode = "CPLHIST" - The same as COPYALL mode. datamode = "CORE2_NYF" Coordinated Ocean-ice Reference Experiments (CORE) Version 2 Normal Year Forcing. 
datamode = "CORE2_IAF" @@ -1654,7 +1664,7 @@ CLMNCEP CORE2_NYF CORE2_IAF - CPLHIST + COPYALL @@ -1828,6 +1838,7 @@ linear linear nearest + nearest coszen nearest coszen @@ -1923,6 +1934,7 @@ 3.0 3.0 3.0 + 3.0 diff --git a/src/components/data_comps/datm/datm_comp_mod.F90 b/src/components/data_comps/datm/datm_comp_mod.F90 index 4519ae97dc3..d59d1d68ca3 100644 --- a/src/components/data_comps/datm/datm_comp_mod.F90 +++ b/src/components/data_comps/datm/datm_comp_mod.F90 @@ -23,7 +23,7 @@ module datm_comp_mod use seq_timemgr_mod, only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn use datm_shr_mod , only: datm_shr_getNextRadCDay, datm_shr_esat, datm_shr_CORE2getFactors - use datm_shr_mod , only: atm_mode ! namelist input + use datm_shr_mod , only: datamode ! namelist input use datm_shr_mod , only: decomp ! namelist input use datm_shr_mod , only: wiso_datm ! namelist input use datm_shr_mod , only: rest_file ! namelist input @@ -32,7 +32,6 @@ module datm_comp_mod use datm_shr_mod , only: iradsw ! namelist input use datm_shr_mod , only: nullstr - ! ! !PUBLIC TYPES: implicit none @@ -666,9 +665,11 @@ subroutine datm_comp_run(EClock, x2a, a2x, & call t_startf('datm_strdata_advance') call shr_strdata_advance(SDATM,currentYMD,currentTOD,mpicom,'datm') call t_stopf('datm_strdata_advance') + call t_barrierf('datm_scatter_BARRIER',mpicom) + call t_startf('datm_scatter') - if (trim(atm_mode) /= 'COPYALL') then + if (trim(datamode) /= 'COPYALL') then lsize = mct_avect_lsize(a2x) do n = 1,lsize a2x%rAttr(kbid,n) = aerodep_spval @@ -717,13 +718,14 @@ subroutine datm_comp_run(EClock, x2a, a2x, & enddo call t_stopf('datm_scatter') - call t_startf('datm_mode') - select case (trim(atm_mode)) + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- - case('COPYALL') - ! 
do nothing extra + call t_startf('datm_datamode') + select case (trim(datamode)) - case('CPLHIST') + case('COPYALL') ! do nothing extra case('CORE2_NYF','CORE2_IAF') @@ -732,7 +734,7 @@ subroutine datm_comp_run(EClock, x2a, a2x, & write(logunit,F00) 'ERROR: prec and swdn must be in streams for CORE2' call shr_sys_abort(trim(subname)//'ERROR: prec and swdn must be in streams for CORE2') endif - if (trim(atm_mode) == 'CORE2_IAF' ) then + if (trim(datamode) == 'CORE2_IAF' ) then if (starcf < 1 ) then write(logunit,F00) 'ERROR: tarcf must be in an input stream for CORE2_IAF' call shr_sys_abort(trim(subname)//'tarcf must be in an input stream for CORE2_IAF') @@ -776,7 +778,7 @@ subroutine datm_comp_run(EClock, x2a, a2x, & !--- Dupont correction to NCEP Arctic air T --- !--- don't correct during summer months (July-September) !--- ONLY correct when forcing year is 1997->2004 - if (trim(atm_mode) == 'CORE2_IAF' ) then + if (trim(datamode) == 'CORE2_IAF' ) then a2x%rAttr(ktbot,n) = a2x%rAttr(ktbot,n) + avstrm%rAttr(starcf,n) a2x%rAttr(kptem,n) = a2x%rAttr(ktbot,n) end if @@ -979,7 +981,7 @@ subroutine datm_comp_run(EClock, x2a, a2x, & end select - call t_stopf('datm_mode') + call t_stopf('datm_datamode') !---------------------------------------------------------- ! bias correction / anomaly forcing ( start block ) diff --git a/src/components/data_comps/datm/datm_shr_mod.F90 b/src/components/data_comps/datm/datm_shr_mod.F90 index 9ec7e335643..9b6bec422cb 100644 --- a/src/components/data_comps/datm/datm_shr_mod.F90 +++ b/src/components/data_comps/datm/datm_shr_mod.F90 @@ -47,7 +47,7 @@ module datm_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: atm_mode ! mode + character(CL) , public :: datamode ! 
mode character(len=*), public, parameter :: nullstr = 'undefined' !-------------------------------------------------------------------------- @@ -156,19 +156,18 @@ subroutine datm_shr_read_namelists(mpicom, my_task, master_task, & ! Validate mode - atm_mode = trim(SDATM%dataMode) - if (trim(atm_mode) == 'NULL' .or. & - trim(atm_mode) == 'CORE2_NYF' .or. & - trim(atm_mode) == 'CORE2_IAF' .or. & - trim(atm_mode) == 'CLMNCEP' .or. & - trim(atm_mode) == 'CPLHIST' .or. & - trim(atm_mode) == 'COPYALL' ) then + datamode = trim(SDATM%dataMode) + if (trim(datamode) == 'NULL' .or. & + trim(datamode) == 'CORE2_NYF' .or. & + trim(datamode) == 'CORE2_IAF' .or. & + trim(datamode) == 'CLMNCEP' .or. & + trim(datamode) == 'COPYALL' ) then if (my_task == master_task) then - write(logunit,F00) ' atm mode = ',trim(atm_mode) + write(logunit,F00) ' datm datamode = ',trim(datamode) call shr_sys_flush(logunit) end if else - write(logunit,F00) ' ERROR illegal atm mode = ',trim(atm_mode) + write(logunit,F00) ' ERROR illegal datm datamode = ',trim(datamode) call shr_sys_abort() endif @@ -182,7 +181,7 @@ subroutine datm_shr_read_namelists(mpicom, my_task, master_task, & atm_present = .true. atm_prognostic = .true. endif - if (trim(atm_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then atm_present = .true. 
end if diff --git a/src/components/data_comps/datm/mct/atm_comp_mct.F90 b/src/components/data_comps/datm/mct/atm_comp_mct.F90 index 5a9c1cbbb1a..da051f2530a 100644 --- a/src/components/data_comps/datm/mct/atm_comp_mct.F90 +++ b/src/components/data_comps/datm/mct/atm_comp_mct.F90 @@ -15,7 +15,6 @@ module atm_comp_mct use shr_file_mod , only: shr_file_freeunit use datm_comp_mod , only: datm_comp_init, datm_comp_run, datm_comp_final use datm_shr_mod , only: datm_shr_read_namelists - use datm_shr_mod , only: atm_mode use datm_shr_mod , only: presaero use seq_flds_mod , only: seq_flds_a2x_fields, seq_flds_x2a_fields diff --git a/src/components/data_comps/dice/cime_config/buildnml b/src/components/data_comps/dice/cime_config/buildnml index eef42ff3925..7c054e06a15 100755 --- a/src/components/data_comps/dice/cime_config/buildnml +++ b/src/components/data_comps/dice/cime_config/buildnml @@ -55,8 +55,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. #---------------------------------------------------- - logger.debug("DICE mode is %s", dice_mode) - logger.debug("DICE grid is %s", ice_grid) + logger.debug("DICE mode is {}".format(dice_mode)) + logger.debug("DICE grid is {}".format(ice_grid)) #---------------------------------------------------- # Clear out old data. @@ -95,7 +95,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DICE stream is %s", inst_stream) + logger.debug("DICE stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "dice.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_dice.streams.txt." + inst_stream) @@ -145,7 +145,7 @@ def buildnml(case, caseroot, compname): # determine directory for user modified namelist_definitions.xml and namelist_defaults.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." 
+ compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. files = Files() @@ -155,7 +155,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" % file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file, files=files) @@ -167,7 +167,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + "{:04d}".format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -184,7 +184,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/dice/dice_comp_mod.F90 b/src/components/data_comps/dice/dice_comp_mod.F90 index 8314c471d82..9310f17db00 100644 --- a/src/components/data_comps/dice/dice_comp_mod.F90 +++ b/src/components/data_comps/dice/dice_comp_mod.F90 @@ -25,7 +25,7 @@ module dice_comp_mod use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - use dice_shr_mod , only: ice_mode ! 
namelist input + use dice_shr_mod , only: datamode ! namelist input use dice_shr_mod , only: decomp ! namelist input use dice_shr_mod , only: rest_file ! namelist input use dice_shr_mod , only: rest_file_strm ! namelist input @@ -458,7 +458,7 @@ subroutine dice_comp_run(EClock, x2i, i2x, & !--- copy all fields from streams to i2x as default --- - if (trim(ice_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then call t_startf('dice_strdata_advance') call shr_strdata_advance(SDICE,currentYMD,currentTOD,mpicom,'dice') call t_stopf('dice_strdata_advance') @@ -472,9 +472,12 @@ subroutine dice_comp_run(EClock, x2i, i2x, & call mct_aVect_zero(i2x) endif - call t_startf('dice_mode') + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- - select case (trim(ice_mode)) + call t_startf('dice_datamode') + select case (trim(datamode)) case('COPYALL') ! do nothing extra @@ -586,18 +589,14 @@ subroutine dice_comp_run(EClock, x2i, i2x, & end do - !---------------------------------------------------------------------------- ! compute atm/ice surface fluxes - !---------------------------------------------------------------------------- call shr_flux_atmIce(iMask ,x2i%rAttr(kz,:) ,x2i%rAttr(kua,:) ,x2i%rAttr(kva,:), & x2i%rAttr(kptem,:) ,x2i%rAttr(kshum,:) ,x2i%rAttr(kdens,:) ,x2i%rAttr(ktbot,:), & i2x%rAttr(kt,:) ,i2x%rAttr(ksen,:) ,i2x%rAttr(klat,:) ,i2x%rAttr(klwup,:), & i2x%rAttr(kevap,:) ,i2x%rAttr(ktauxa,:) ,i2x%rAttr(ktauya,:) ,i2x%rAttr(ktref,:), & i2x%rAttr(kqref,:) ) - !---------------------------------------------------------------------------- ! compute ice/oce surface fluxes (except melth & meltw, see above) - !---------------------------------------------------------------------------- do n=1,lsize if (iMask(n) == 0) then i2x%rAttr(kswpen,n) = spval @@ -623,10 +622,11 @@ subroutine dice_comp_run(EClock, x2i, i2x, & ! 
iFrac0(n) = i2x%rAttr(kiFrac,n) end do - end select + !------------------------------------------------- ! optional per thickness category fields + !------------------------------------------------- if (seq_flds_i2o_per_cat) then do n=1,lsize @@ -635,7 +635,11 @@ subroutine dice_comp_run(EClock, x2i, i2x, & end do end if - call t_stopf('dice_mode') + call t_stopf('dice_datamode') + + !-------------------- + ! Write restart + !-------------------- if (write_restart) then call t_startf('dice_restart') diff --git a/src/components/data_comps/dice/dice_shr_mod.F90 b/src/components/data_comps/dice/dice_shr_mod.F90 index f1150bb7e01..aabb317b794 100644 --- a/src/components/data_comps/dice/dice_shr_mod.F90 +++ b/src/components/data_comps/dice/dice_shr_mod.F90 @@ -36,7 +36,7 @@ module dice_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: ice_mode ! mode + character(CL) , public :: datamode ! mode character(len=*), public, parameter :: nullstr = 'undefined' !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS @@ -139,14 +139,15 @@ subroutine dice_shr_read_namelists(mpicom, my_task, master_task, & ! Validate mode - ice_mode = trim(SDICE%dataMode) - if (trim(ice_mode) == 'NULL' .or. & - trim(ice_mode) == 'SSTDATA' .or. & - trim(ice_mode) == 'COPYALL') then - if (my_task == master_task) & - write(logunit,F00) ' ice mode = ',trim(ice_mode) + datamode = trim(SDICE%dataMode) + if (trim(datamode) == 'NULL' .or. & + trim(datamode) == 'SSTDATA' .or. 
& + trim(datamode) == 'COPYALL') then + if (my_task == master_task) then + write(logunit,F00) ' dice datamode = ',trim(datamode) + end if else - write(logunit,F00) ' ERROR illegal ice mode = ',trim(ice_mode) + write(logunit,F00) ' ERROR illegal dice datamode = ',trim(datamode) call shr_sys_abort() endif @@ -160,10 +161,10 @@ subroutine dice_shr_read_namelists(mpicom, my_task, master_task, & ice_present = .true. ice_prognostic = .true. endif - if (trim(ice_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then ice_present = .true. end if - if (trim(ice_mode) == 'SSTDATA' .or. trim(ice_mode) == 'COPYALL') then + if (trim(datamode) == 'SSTDATA' .or. trim(datamode) == 'COPYALL') then ice_prognostic = .true. endif diff --git a/src/components/data_comps/dlnd/cime_config/buildnml b/src/components/data_comps/dlnd/cime_config/buildnml index 46b0d82ad1a..182b0711133 100755 --- a/src/components/data_comps/dlnd/cime_config/buildnml +++ b/src/components/data_comps/dlnd/cime_config/buildnml @@ -38,11 +38,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Get a bunch of information from the case. #---------------------------------------------------- - lnd_domain_file = case.get_value("LND_DOMAIN_FILE") - lnd_domain_path = case.get_value("LND_DOMAIN_PATH") dlnd_mode = case.get_value("DLND_MODE") lnd_grid = case.get_value("LND_GRID") - glc_nec = case.get_value("GLC_NEC") #---------------------------------------------------- @@ -56,9 +53,9 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. 
#---------------------------------------------------- - logger.debug("DLND mode is %s", dlnd_mode) - logger.debug("DLND grid is %s", lnd_grid) - logger.debug("DLND glc_nec is %s", glc_nec) + logger.debug("DLND mode is {}".format(dlnd_mode)) + logger.debug("DLND grid is {}".format(lnd_grid)) + logger.debug("DLND glc_nec is {}".format(glc_nec)) #---------------------------------------------------- # Clear out old data. @@ -84,6 +81,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # Construct the list of streams. #---------------------------------------------------- streams = nmlgen.get_streams() + #---------------------------------------------------- # For each stream, create stream text file and update # shr_strdata_nml group and input data list. @@ -94,7 +92,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DLND stream is %s", inst_stream) + logger.debug("DLND stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "dlnd.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_dlnd.streams.txt." + inst_stream) @@ -113,10 +111,20 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # set per-stream variables nmlgen.create_shr_strdata_nml() - # set variables that are not per-stream - if lnd_domain_file != "UNSET": - full_domain_path = os.path.join(lnd_domain_path, lnd_domain_file) - nmlgen.add_default("domainfile", value=full_domain_path) + # Determine model domain filename (in dlnd_in) + if "CPLHIST" in dlnd_mode: + dlnd_cplhist_domain_file = case.get_value("DLND_CPLHIST_DOMAIN_FILE") + if dlnd_cplhist_domain_file == 'null': + logger.info(" .... Obtaining DLND model domain info from first stream file: {}".format(streams[0])) + else: + logger.info(" .... 
Obtaining DLND model domain info from stream {}".format(streams[0])) + nmlgen.add_default("domainfile", value=dlnd_cplhist_domain_file) + else: + lnd_domain_file = case.get_value("LND_DOMAIN_FILE") + lnd_domain_path = case.get_value("LND_DOMAIN_PATH") + if lnd_domain_file != "UNSET": + full_domain_path = os.path.join(lnd_domain_path, lnd_domain_file) + nmlgen.add_default("domainfile", value=full_domain_path) #---------------------------------------------------- # Finally, write out all the namelists. @@ -146,7 +154,7 @@ def buildnml(case, caseroot, compname): # determine directory for user modified namelist_definitions.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. files = Files() @@ -156,7 +164,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" 
% file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file, files=files) @@ -167,7 +175,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + "{:04d}".format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -184,7 +192,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/dlnd/cime_config/config_component.xml b/src/components/data_comps/dlnd/cime_config/config_component.xml index 7ef837a0886..26e13593e44 100644 --- a/src/components/data_comps/dlnd/cime_config/config_component.xml +++ b/src/components/data_comps/dlnd/cime_config/config_component.xml @@ -44,6 +44,25 @@ set to zero and not utilized. The default is NULL. + + char + + null + + $LND_DOMAIN_PATH/$LND_DOMAIN_FILE + + run_component_dlnd + env_run.xml + + Full pathname for domain file for dlnd when DLND_MODE is CPLHIST + or GLC_CPLHIST. NOTE: if this is set to 'null' (the default), + then domain information is read in from the first coupler + history file in the target stream and it is assumed that the + first coupler stream file that is pointed to contains the domain + information for that stream. 
+ + + char UNSET @@ -76,7 +95,7 @@ integer - 1 + -999 1 1 @@ -86,12 +105,24 @@ run_component_dlnd env_run.xml - year align (only used for CPLHIST mode) + + Simulation year corresponding to DLND_CPLHIST_YR_START (only used + when DLND_MODE is CPLHIST or GLC_CPLHIST). A common usage is to + set this to RUN_STARTDATE. With this setting, the forcing in the + first year of the run will be the forcing of year + DLND_CPLHIST_YR_START. Another use case is to align the calendar + of transient forcing with the model calendar. For example, setting + DLND_CPLHIST_YR_ALIGN=DLND_CPLHIST_YR_START will lead to the + forcing calendar being the same as the model calendar. The forcing + for a given model year would be the forcing of the same year. This + would be appropriate in transient runs where the model calendar is + setup to span the same year range as the forcing data. + integer - 1 + -999 1976 26 @@ -101,12 +132,12 @@ run_component_dlnd env_run.xml - starting year to loop data over (only used for CPLHIST mode) + starting year to loop data over (only used when DLND_MODE is CPLHIST or GLC_CPLHIST) integer - 1 + -999 2005 100 @@ -116,7 +147,7 @@ run_component_dlnd env_run.xml - ending year to loop data over (only used for CPLHIST mode) + ending year to loop data over (only used when DLND_MODE is CPLHIST or GLC_CPLHIST) diff --git a/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml b/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml index 03a9d31e844..5d008e0dfb0 100644 --- a/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml +++ b/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml @@ -43,7 +43,7 @@ lnd.cplhist sno.cplhist - **** NOTE: in the value matches below, regular expressions are used **** + **** NOTE: in the value matches below, regular expressions are used **** If two matches are equivalent, the FIRST one will be used As an example, say dlnd_mode=GLC_CPLHIST @@ -51,7 +51,7 @@ the 
following will results in the CORRECT set of streams for this dlnd_mode COPYALL COPYALL - + the following order would result in an INCORRECT setting of streams for this datm_mode COPYALL COPYALL @@ -205,10 +205,7 @@ dataMode = "COPYALL" Copies all fields directly from the input data streams Any required fields not found on an input stream will be set to zero. - - Set by the following xml variables in env_run.xml - DLND_MODE - default value: NULL + Set by the DLND_MODE xml variable in env_run.xml NULL diff --git a/src/components/data_comps/dlnd/dlnd_comp_mod.F90 b/src/components/data_comps/dlnd/dlnd_comp_mod.F90 index 9874b0337c0..83b2d014588 100644 --- a/src/components/data_comps/dlnd/dlnd_comp_mod.F90 +++ b/src/components/data_comps/dlnd/dlnd_comp_mod.F90 @@ -22,7 +22,7 @@ module dlnd_comp_mod use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn use glc_elevclass_mod , only: glc_get_num_elevation_classes, glc_elevclass_as_string - use dlnd_shr_mod , only: lnd_mode ! namelist input + use dlnd_shr_mod , only: datamode ! namelist input use dlnd_shr_mod , only: decomp ! namelist input use dlnd_shr_mod , only: rest_file ! namelist input use dlnd_shr_mod , only: rest_file_strm ! namelist input @@ -211,12 +211,12 @@ subroutine dlnd_comp_init(Eclock, x2l, l2x, & call shr_sys_flush(logunit) ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the docn_in namelist + ! NOTE: gsmap is initialized using the decomp read in from the dlnd_in namelist ! (which by default is "1d") call shr_dmodel_gsmapcreate(gsmap,SDLND%nxg*SDLND%nyg,compid,mpicom,decomp) lsize = mct_gsmap_lsize(gsmap,mpicom) - ! create a rearranger from the data model SDOCN%gsmap to gsmap + ! 
create a rearranger from the data model DLND%gsmap to gsmap call mct_rearr_init(SDLND%gsmap, gsmap, mpicom, rearr) call t_stopf('dlnd_initgsmaps') @@ -385,6 +385,19 @@ subroutine dlnd_comp_run(EClock, x2l, l2x, & call t_stopf('dlnd') + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- + + call t_startf('dlnd_datamode') + select case (trim(datamode)) + + case('COPYALL') + ! do nothing extra + + end select + call t_stopf('dlnd_datamode') + !-------------------- ! Write restart !-------------------- diff --git a/src/components/data_comps/dlnd/dlnd_shr_mod.F90 b/src/components/data_comps/dlnd/dlnd_shr_mod.F90 index 7bd4ba9b896..34714d144bc 100644 --- a/src/components/data_comps/dlnd/dlnd_shr_mod.F90 +++ b/src/components/data_comps/dlnd/dlnd_shr_mod.F90 @@ -32,7 +32,7 @@ module dlnd_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: lnd_mode ! mode + character(CL) , public :: datamode ! mode character(len=*), public, parameter :: nullstr = 'undefined' !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS @@ -121,18 +121,18 @@ subroutine dlnd_shr_read_namelists(mpicom, my_task, master_task, & call shr_strdata_readnml(SDLND,trim(filename),mpicom=mpicom) !---------------------------------------------------------------------------- - ! Determine and validate lnd_mode + ! Determine and validate datamode !---------------------------------------------------------------------------- - lnd_mode = trim(SDLND%dataMode) + datamode = trim(SDLND%dataMode) - if (trim(lnd_mode) == 'NULL' .or. & - trim(lnd_mode) == 'COPYALL') then + if (trim(datamode) == 'NULL' .or. 
& + trim(datamode) == 'COPYALL') then if (my_task == master_task) then - write(logunit,F00) 'lnd mode = ',trim(lnd_mode) + write(logunit,F00) 'dlnd datamode = ',trim(datamode) end if else - write(logunit,F00) ' ERROR illegal lnd mode = ',trim(lnd_mode) + write(logunit,F00) ' ERROR illegal dlnd datamode = ',trim(datamode) call shr_sys_abort() end if @@ -146,7 +146,7 @@ subroutine dlnd_shr_read_namelists(mpicom, my_task, master_task, & lnd_present = .true. lnd_prognostic = .true. endif - if (trim(lnd_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then lnd_present = .true. end if diff --git a/src/components/data_comps/docn/cime_config/buildnml b/src/components/data_comps/docn/cime_config/buildnml index 360c84e2db2..edc871c4f2d 100755 --- a/src/components/data_comps/docn/cime_config/buildnml +++ b/src/components/data_comps/docn/cime_config/buildnml @@ -55,8 +55,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. #---------------------------------------------------- - logger.debug("DOCN mode is %s", docn_mode) - logger.debug("DOCN grid is %s", ocn_grid) + logger.debug("DOCN mode is {}".format(docn_mode)) + logger.debug("DOCN grid is {}".format(ocn_grid)) #---------------------------------------------------- # Clear out old data. @@ -95,7 +95,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DOCN stream is %s", inst_stream) + logger.debug("DOCN stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "docn.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_docn.streams.txt." + inst_stream) @@ -154,7 +154,7 @@ def buildnml(case, caseroot, compname): # determine directory for user modified namelist_definitions.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." 
+ compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. files = Files() @@ -164,7 +164,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" % file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file, files=files) @@ -176,7 +176,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + "{:04d}".format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -193,7 +193,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/docn/cime_config/config_component.xml b/src/components/data_comps/docn/cime_config/config_component.xml index 96710214b19..a818266b082 100644 --- a/src/components/data_comps/docn/cime_config/config_component.xml +++ b/src/components/data_comps/docn/cime_config/config_component.xml @@ -131,7 +131,7 @@ run_component_docn env_run.xml Sets aquaplanet forcing filename instead of using an analytic form. - This is only used when DOCN_MODE=sst_aqpfile. 
+ This is only used when DOCN_MODE=sst_aquapfile. diff --git a/src/components/data_comps/docn/docn_comp_mod.F90 b/src/components/data_comps/docn/docn_comp_mod.F90 index 3627f04e8fa..8df7ae0bc9c 100644 --- a/src/components/data_comps/docn/docn_comp_mod.F90 +++ b/src/components/data_comps/docn/docn_comp_mod.F90 @@ -22,8 +22,8 @@ module docn_comp_mod use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - use docn_shr_mod , only: ocn_mode ! namelist input - use docn_shr_mod , only: aquap_option ! derived from ocn_mode namelist input + use docn_shr_mod , only: datamode ! namelist input + use docn_shr_mod , only: aquap_option ! derived from datamode namelist input use docn_shr_mod , only: decomp ! namelist input use docn_shr_mod , only: rest_file ! namelist input use docn_shr_mod , only: rest_file_strm ! namelist input @@ -94,6 +94,8 @@ module docn_comp_mod !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS + !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + !=============================================================================== subroutine docn_comp_init(Eclock, x2o, o2x, & seq_flds_x2o_fields, seq_flds_o2x_fields, & @@ -215,7 +217,7 @@ subroutine docn_comp_init(Eclock, x2o, o2x, & ! Special logic for either prescribed or som aquaplanet - overwrite and ! set mask/frac to 1 - if (ocn_mode == 'SST_AQUAPANAL' .or. ocn_mode == 'SST_AQUAPFILE' .or. ocn_mode == 'SOM_AQUAP') then + if (datamode == 'SST_AQUAPANAL' .or. datamode == 'SST_AQUAPFILE' .or. datamode == 'SOM_AQUAP') then kmask = mct_aVect_indexRA(ggrid%data,'mask') ggrid%data%rattr(kmask,:) = 1 @@ -315,7 +317,7 @@ subroutine docn_comp_init(Eclock, x2o, o2x, & endif endif call shr_mpi_bcast(exists,mpicom,'exists') - if (trim(ocn_mode) == 'SOM' .or. 
trim(ocn_mode) == 'SOM_AQUAP') then + if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then if (my_task == master_task) write(logunit,F00) ' reading ',trim(rest_file) call shr_pcdf_readwrite('read',SDOCN%pio_subsystem, SDOCN%io_type, & trim(rest_file), mpicom, gsmap=gsmap, rf1=somtp, rf1n='somtp', io_format=SDOCN%io_format) @@ -438,10 +440,12 @@ subroutine docn_comp_run(EClock, x2o, o2x, & enddo call t_stopf('docn_scatter') - ! --- handle the docn modes + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- - call t_startf('docn_mode') - select case (trim(ocn_mode)) + call t_startf('docn_datamode') + select case (trim(datamode)) case('COPYALL') ! do nothing extra @@ -570,7 +574,7 @@ subroutine docn_comp_run(EClock, x2o, o2x, & end select - call t_stopf('docn_mode') + call t_stopf('docn_datamode') !-------------------- ! Write restart @@ -592,7 +596,7 @@ subroutine docn_comp_run(EClock, x2o, o2x, & close(nu) call shr_file_freeUnit(nu) endif - if (trim(ocn_mode) == 'SOM' .or. trim(ocn_mode) == 'SOM_AQUAP') then + if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then if (my_task == master_task) write(logunit,F04) ' writing ',trim(rest_file),currentYMD,currentTOD call shr_pcdf_readwrite('write', SDOCN%pio_subsystem, SDOCN%io_type,& trim(rest_file), mpicom, gsmap, clobber=.true., rf1=somtp,rf1n='somtp') diff --git a/src/components/data_comps/docn/docn_shr_mod.F90 b/src/components/data_comps/docn/docn_shr_mod.F90 index 1ccf2754ec1..7c3d34713c9 100644 --- a/src/components/data_comps/docn/docn_shr_mod.F90 +++ b/src/components/data_comps/docn/docn_shr_mod.F90 @@ -32,7 +32,7 @@ module docn_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: ocn_mode ! mode + character(CL) , public :: datamode ! 
mode integer(IN) , public :: aquap_option character(len=*), public, parameter :: nullstr = 'undefined' !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -122,37 +122,37 @@ subroutine docn_shr_read_namelists(mpicom, my_task, master_task, & call shr_strdata_readnml(SDOCN,trim(filename),mpicom=mpicom) - ocn_mode = trim(SDOCN%dataMode) + datamode = trim(SDOCN%dataMode) ! Special logic for prescribed aquaplanet - if (ocn_mode(1:9) == 'SST_AQUAP' .and. trim(ocn_mode) /= 'SST_AQUAPFILE') then + if (datamode(1:9) == 'SST_AQUAP' .and. trim(datamode) /= 'SST_AQUAPFILE') then ! First determine the prescribed aquaplanet option - if (len_trim(ocn_mode) == 10) then - read(ocn_mode(10:10),'(i1)') aquap_option - else if (len_trim(ocn_mode) == 11) then - read(ocn_mode(10:11),'(i2)') aquap_option + if (len_trim(datamode) == 10) then + read(datamode(10:10),'(i1)') aquap_option + else if (len_trim(datamode) == 11) then + read(datamode(10:11),'(i2)') aquap_option end if - ! Now remove the index from the ocn_mode value, to have a generic setting + ! Now remove the index from the datamode value, to have a generic setting ! for use below - ocn_mode = "SST_AQUAPANAL" + datamode = "SST_AQUAPANAL" end if ! Validate mode - if (trim(ocn_mode) == 'NULL' .or. & - trim(ocn_mode) == 'SSTDATA' .or. & - trim(ocn_mode) == 'SST_AQUAPANAL' .or. & - trim(ocn_mode) == 'SST_AQUAPFILE' .or. & - trim(ocn_mode) == 'COPYALL' .or. & - trim(ocn_mode) == 'IAF' .or. & - trim(ocn_mode) == 'SOM' .or. & - trim(ocn_mode) == 'SOM_AQUAP') then + if (trim(datamode) == 'NULL' .or. & + trim(datamode) == 'SSTDATA' .or. & + trim(datamode) == 'SST_AQUAPANAL' .or. & + trim(datamode) == 'SST_AQUAPFILE' .or. & + trim(datamode) == 'COPYALL' .or. & + trim(datamode) == 'IAF' .or. & + trim(datamode) == 'SOM' .or. 
& + trim(datamode) == 'SOM_AQUAP') then if (my_task == master_task) then - write(logunit,F00) ' ocn mode = ',trim(ocn_mode) + write(logunit,F00) ' docn datamode = ',trim(datamode) end if else - write(logunit,F00) ' ERROR illegal ocn mode = ',trim(ocn_mode) + write(logunit,F00) ' ERROR illegal docn datamode = ',trim(datamode) call shr_sys_abort() endif @@ -168,19 +168,16 @@ subroutine docn_shr_read_namelists(mpicom, my_task, master_task, & ocn_prognostic = .true. ocnrof_prognostic = .true. endif - if (trim(ocn_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then ocn_present = .true. end if - if (trim(ocn_mode) == 'IAF') then + if (trim(datamode) == 'IAF') then ocn_prognostic = .true. ocnrof_prognostic = .true. endif - if (trim(ocn_mode) == 'SOM' .or. trim(ocn_mode) == 'SOM_AQUAP') then + if (trim(datamode) == 'SOM' .or. trim(datamode) == 'SOM_AQUAP') then ocn_prognostic = .true. endif - write(6,*)'DEBUG: ocn_present is ',ocn_present - write(6,*)'DEBUG: ocn_prognostic is ',ocn_prognostic - end subroutine docn_shr_read_namelists diff --git a/src/components/data_comps/drof/cime_config/buildnml b/src/components/data_comps/drof/cime_config/buildnml index 37b0a3a7a3f..2b4ece2ced3 100755 --- a/src/components/data_comps/drof/cime_config/buildnml +++ b/src/components/data_comps/drof/cime_config/buildnml @@ -54,8 +54,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. #---------------------------------------------------- - logger.debug("DROF mode is %s", drof_mode) - logger.debug("DROF grid is %s", rof_grid) + logger.debug("DROF mode is {}".format(drof_mode)) + logger.debug("DROF grid is {}".format(rof_grid)) #---------------------------------------------------- # Clear out old data. 
@@ -93,7 +93,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DROF stream is %s", inst_stream) + logger.debug("DROF stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "drof.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_drof.streams.txt." + inst_stream) @@ -112,6 +112,21 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): # set per-stream variables nmlgen.create_shr_strdata_nml() + # Determine model domain filename (in drof_in) + if "CPLHIST" in drof_mode: + drof_cplhist_domain_file = case.get_value("DROF_CPLHIST_DOMAIN_FILE") + if drof_cplhist_domain_file == 'null': + logger.info(" .... Obtaining DROF model domain info from first stream file: {}".format(streams[0])) + else: + logger.info(" .... Obtaining DROF model domain info from stream {}".format(streams[0])) + nmlgen.add_default("domainfile", value=drof_cplhist_domain_file) + else: + rof_domain_file = case.get_value("ROF_DOMAIN_FILE") + rof_domain_path = case.get_value("ROF_DOMAIN_PATH") + if rof_domain_file != "UNSET": + full_domain_path = os.path.join(rof_domain_path, rof_domain_file) + nmlgen.add_default("domainfile", value=full_domain_path) + # set variables that are not per-stream if rof_domain_file != "UNSET": full_domain_path = os.path.join(rof_domain_path, rof_domain_file) @@ -145,7 +160,7 @@ def buildnml(case, caseroot, compname): # determine directory for user modified namelist_definitions.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. 
files = Files() @@ -155,7 +170,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" % file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file) @@ -167,7 +182,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + "{:04d}".format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -184,7 +199,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/drof/cime_config/config_component.xml b/src/components/data_comps/drof/cime_config/config_component.xml index dc2b86b0eea..54e23c9d2ef 100644 --- a/src/components/data_comps/drof/cime_config/config_component.xml +++ b/src/components/data_comps/drof/cime_config/config_component.xml @@ -52,13 +52,29 @@ default is DIATREN_ANN_RX1. + + char + + null + run_component_drof + env_run.xml + + Full pathname for domain file for drof when DROF_MODE is + CPLHIST. NOTE: if this is set to 'null' (the default), then + domain information is read in from the first coupler history + file in the target stream and it is assumed that the first + coupler stream file that is pointed to contains the domain + information for that stream. 
+ + + char UNSET run_component_drof env_run.xml - directory for coupler history data mode (only used for CPLHIST mode) + directory for coupler history data mode (only used when DROF_MODE is CPLHIST mode) @@ -67,34 +83,46 @@ UNSET run_component_drof env_run.xml - case name for coupler history data mode (only used for CPLHIST mode) + case name for coupler history data mode (only used when DROF_MODE is CPLHIST mode) integer - 1 + -999 run_component_drof env_run.xml - year align (only used for CPLHIST mode) + + Simulation year corresponding to DROF_CPLHIST_YR_START (only used + when DROF_MODE is CPLHIST). A common usage is to set this to + RUN_STARTDATE. With this setting, the forcing in the first year of + the run will be the forcing of year DROF_CPLHIST_YR_START. Another + use case is to align the calendar of transient forcing with the + model calendar. For example, setting + DROF_CPLHIST_YR_ALIGN=DROF_CPLHIST_YR_START will lead to the + forcing calendar being the same as the model calendar. The forcing + for a given model year would be the forcing of the same year. This + would be appropriate in transient runs where the model calendar is + setup to span the same year range as the forcing data. 
+ integer - 1 + -999 run_component_drof env_run.xml - starting year to loop data over (only used for CPLHIST mode) + starting year to loop data over (only used when DROF_MODE is CPLHIST) integer - 1 + -999 run_component_drof env_run.xml - ending year to loop data over (only used for CPLHIST mode) + ending year to loop data over (only used when DROF_MODE is CPLHIST) diff --git a/src/components/data_comps/drof/drof_comp_mod.F90 b/src/components/data_comps/drof/drof_comp_mod.F90 index 6a984a58ff3..7152af4a6e4 100644 --- a/src/components/data_comps/drof/drof_comp_mod.F90 +++ b/src/components/data_comps/drof/drof_comp_mod.F90 @@ -19,7 +19,7 @@ module drof_comp_mod use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - use drof_shr_mod , only: rof_mode ! namelist input + use drof_shr_mod , only: datamode ! namelist input use drof_shr_mod , only: decomp ! namelist input use drof_shr_mod , only: rest_file ! namelist input use drof_shr_mod , only: rest_file_strm ! namelist input @@ -180,6 +180,19 @@ subroutine drof_comp_init(Eclock, x2r, r2x, & call mct_aVect_zero(r2x) call t_stopf('drof_initmctavs') + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- + + call t_startf('drof_datamode') + select case (trim(datamode)) + + case('COPYALL') + ! do nothing extra + + end select + call t_stopf('dlnd_datamode') + !---------------------------------------------------------------------------- ! Read restart !---------------------------------------------------------------------------- @@ -330,6 +343,19 @@ subroutine drof_comp_run(EClock, x2r, r2x, & call t_stopf('drof_r') + !------------------------------------------------- + ! 
Determine data model behavior based on the mode + !------------------------------------------------- + + call t_startf('drof_datamode') + select case (trim(datamode)) + + case('COPYALL') + ! do nothing extra + + end select + call t_stopf('drof_datamode') + !-------------------- ! Write restart !-------------------- diff --git a/src/components/data_comps/drof/drof_shr_mod.F90 b/src/components/data_comps/drof/drof_shr_mod.F90 index f574cbadde9..b519984e6bd 100644 --- a/src/components/data_comps/drof/drof_shr_mod.F90 +++ b/src/components/data_comps/drof/drof_shr_mod.F90 @@ -32,7 +32,7 @@ module drof_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: rof_mode ! mode + character(CL) , public :: datamode ! mode character(len=*), public, parameter :: nullstr = 'undefined' !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS @@ -122,17 +122,17 @@ subroutine drof_shr_read_namelists(mpicom, my_task, master_task, & call shr_strdata_readnml(SDROF,trim(filename),mpicom=mpicom) - rof_mode = trim(SDROF%dataMode) + datamode = trim(SDROF%dataMode) ! Validate mode - if (trim(rof_mode) == 'NULL' .or. & - trim(rof_mode) == 'COPYALL') then + if (trim(datamode) == 'NULL' .or. & + trim(datamode) == 'COPYALL') then if (my_task == master_task) then - write(logunit,F00) 'rof mode = ',trim(rof_mode) + write(logunit,F00) 'drof datamode = ',trim(datamode) end if else - write(logunit,F00) ' ERROR illegal rof mode = ',trim(rof_mode) + write(logunit,F00) ' ERROR illegal drof datamode = ',trim(datamode) call shr_sys_abort() end if @@ -148,7 +148,7 @@ subroutine drof_shr_read_namelists(mpicom, my_task, master_task, & rof_present = .true. rof_prognostic = .true. endif - if (trim(rof_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then rof_present = .true. rofice_present = .true. 
end if diff --git a/src/components/data_comps/dwav/cime_config/buildnml b/src/components/data_comps/dwav/cime_config/buildnml index 3aeffe5afbb..53f40c816d7 100755 --- a/src/components/data_comps/dwav/cime_config/buildnml +++ b/src/components/data_comps/dwav/cime_config/buildnml @@ -53,8 +53,8 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): #---------------------------------------------------- # Log some settings. #---------------------------------------------------- - logger.debug("DWAV mode is %s", dwav_mode) - logger.debug("DWAV grid is %s", wav_grid) + logger.debug("DWAV mode is {}".format(dwav_mode)) + logger.debug("DWAV grid is {}".format(wav_grid)) #---------------------------------------------------- # Clear out old data. @@ -91,7 +91,7 @@ def _create_namelists(case, confdir, inst_string, infile, nmlgen): continue inst_stream = stream + inst_string - logger.debug("DWAV stream is %s", inst_stream) + logger.debug("DWAV stream is {}".format(inst_stream)) stream_path = os.path.join(confdir, "dwav.streams.txt." + inst_stream) user_stream_path = os.path.join(case.get_case_root(), "user_dwav.streams.txt." + inst_stream) @@ -144,7 +144,7 @@ def buildnml(case, caseroot, compname): # determine directory for user modified namelist_definitions.xml user_xml_dir = os.path.join(caseroot, "SourceMods", "src." + compname) expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) # NOTE: User definition *replaces* existing definition. files = Files() @@ -154,7 +154,7 @@ def buildnml(case, caseroot, compname): if os.path.isfile(user_definition): definition_file = [user_definition] for file_ in definition_file: - expect(os.path.isfile(file_), "Namelist XML file %s not found!" 
% file_) + expect(os.path.isfile(file_), "Namelist XML file {} not found!".format(file_)) # Create the namelist generator object - independent of instance nmlgen = NamelistGenerator(case, definition_file) @@ -166,7 +166,7 @@ def buildnml(case, caseroot, compname): # determine instance string inst_string = "" if ninst > 1: - inst_string = '_' + '%04d' % inst_counter + inst_string = '_' + "{:04d}".format(inst_counter) # If multi-instance case does not have restart file, use # single-case restart for each instance @@ -183,7 +183,7 @@ def buildnml(case, caseroot, compname): # create namelist output infile using user_nl_file as input user_nl_file = os.path.join(caseroot, "user_nl_" + compname + inst_string) expect(os.path.isfile(user_nl_file), - "Missing required user_nl_file %s " %(user_nl_file)) + "Missing required user_nl_file {} ".format(user_nl_file)) infile = os.path.join(confdir, "namelist_infile") create_namelist_infile(case, user_nl_file, infile) namelist_infile = [infile] diff --git a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml b/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml index 745ebb789f8..6dc458ad941 100644 --- a/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml +++ b/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml @@ -1,3 +1,4 @@ + @@ -176,22 +177,22 @@ char streams shr_strdata_nml - null,copyall + NULL,COPYALL The wave data is associated with the wave model and is normally on a different grid than the ocean data. - datamode = "null" - null is always a valid option and means no data will be generated. + datamode = "NULL" + Is always a valid option and means no data will be generated. Turns off the data model as a provider of data to the coupler. - datamode = "copyall" + datamode = "COPYALL" Copies all fields directly from the input data streams Any required fields not found on an input stream will be set to zero. 
Set by the following xml variables in env_run.xml DWAV_MODE specifies values for wav mode: copyall,null - copyall - null + COPYALL + NULL diff --git a/src/components/data_comps/dwav/dwav_comp_mod.F90 b/src/components/data_comps/dwav/dwav_comp_mod.F90 index 2006f1ada40..5999cae4ffc 100644 --- a/src/components/data_comps/dwav/dwav_comp_mod.F90 +++ b/src/components/data_comps/dwav/dwav_comp_mod.F90 @@ -4,7 +4,7 @@ module dwav_comp_mod ! !USES: - + use esmf use mct_mod use perf_mod @@ -20,7 +20,7 @@ module dwav_comp_mod use shr_dmodel_mod , only: shr_dmodel_translate_list, shr_dmodel_translateAV_list, shr_dmodel_translateAV use seq_timemgr_mod , only: seq_timemgr_EClockGetData, seq_timemgr_RestartAlarmIsOn - use dwav_shr_mod , only: wav_mode ! namelist input + use dwav_shr_mod , only: datamode ! namelist input use dwav_shr_mod , only: decomp ! namelist input use dwav_shr_mod , only: rest_file ! namelist input use dwav_shr_mod , only: rest_file_strm ! namelist input @@ -71,7 +71,7 @@ subroutine dwav_comp_init(Eclock, x2w, w2x, & ! !INPUT/OUTPUT PARAMETERS: type(ESMF_Clock) , intent(in) :: EClock type(mct_aVect) , intent(inout) :: x2w, w2x ! input/output attribute vectors - type(shr_strdata_type) , intent(inout) :: SDWAV ! model + type(shr_strdata_type) , intent(inout) :: SDWAV ! model type(mct_gsMap) , pointer :: gsMap ! model global seg map (output) type(mct_gGrid) , pointer :: ggrid ! model ggrid (output) integer(IN) , intent(in) :: mpicom ! mpi communicator @@ -146,12 +146,12 @@ subroutine dwav_comp_init(Eclock, x2w, w2x, & call shr_sys_flush(logunit) ! create a data model global seqmap (gsmap) given the data model global grid sizes - ! NOTE: gsmap is initialized using the decomp read in from the docn_in namelist + ! NOTE: gsmap is initialized using the decomp read in from the docn_in namelist ! (which by default is "1d") call shr_dmodel_gsmapcreate(gsmap,SDWAV%nxg*SDWAV%nyg,compid,mpicom,decomp) lsize = mct_gsmap_lsize(gsmap,mpicom) - ! 
create a rearranger from the data model SDOCN%gsmap to gsmap + ! create a rearranger from the data model SDOCN%gsmap to gsmap call mct_rearr_init(SDWAV%gsmap,gsmap,mpicom,rearr) write(logunit,*)'lsize= ',lsize @@ -261,7 +261,7 @@ subroutine dwav_comp_run(EClock, x2w, w2x, & ! !INPUT/OUTPUT PARAMETERS: type(ESMF_Clock) , intent(in) :: EClock type(mct_aVect) , intent(inout) :: x2w - type(mct_aVect) , intent(inout) :: w2x + type(mct_aVect) , intent(inout) :: w2x type(shr_strdata_type) , intent(inout) :: SDWAV type(mct_gsMap) , pointer :: gsMap type(mct_gGrid) , pointer :: ggrid @@ -326,8 +326,19 @@ subroutine dwav_comp_run(EClock, x2w, w2x, & enddo call t_stopf('dwav_scatter') - call t_startf('dwav_mode') - call t_stopf('dwav_mode') + !------------------------------------------------- + ! Determine data model behavior based on the mode + !------------------------------------------------- + + call t_startf('datamode') + select case (trim(datamode)) + + case('COPYALL') + ! do nothing extra + + end select + + call t_stopf('datamode') !-------------------- ! Write restart @@ -393,9 +404,9 @@ subroutine dwav_comp_final(my_task, master_task, logunit) call t_startf('DWAV_FINAL') if (my_task == master_task) then - write(logunit,F91) + write(logunit,F91) write(logunit,F00) trim(myModelName),': end of main integration loop' - write(logunit,F91) + write(logunit,F91) end if call t_stopf('DWAV_FINAL') diff --git a/src/components/data_comps/dwav/dwav_shr_mod.F90 b/src/components/data_comps/dwav/dwav_shr_mod.F90 index f8dd6265857..8749f9216e5 100644 --- a/src/components/data_comps/dwav/dwav_shr_mod.F90 +++ b/src/components/data_comps/dwav/dwav_shr_mod.F90 @@ -1,13 +1,13 @@ module dwav_shr_mod ! 
!USES: - - use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 + + use shr_kind_mod , only : IN=>SHR_KIND_IN, R8=>SHR_KIND_R8 use shr_kind_mod , only : CS=>SHR_KIND_CS, CL=>SHR_KIND_CL use shr_file_mod , only : shr_file_getunit, shr_file_freeunit use shr_sys_mod , only : shr_sys_flush, shr_sys_abort use shr_strdata_mod, only : shr_strdata_type, shr_strdata_readnml - use shr_mpi_mod , only : shr_mpi_bcast + use shr_mpi_mod , only : shr_mpi_bcast ! !PUBLIC TYPES: implicit none @@ -32,7 +32,7 @@ module dwav_shr_mod ! variables obtained from namelist read character(CL) , public :: rest_file ! restart filename character(CL) , public :: rest_file_strm ! restart filename for streams - character(CL) , public :: wav_mode ! mode + character(CL) , public :: datamode ! mode character(len=*), public, parameter :: nullstr = 'undefined' !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINS @@ -121,18 +121,18 @@ subroutine dwav_shr_read_namelists(mpicom, my_task, master_task, & call shr_strdata_readnml(SDWAV,trim(filename),mpicom=mpicom) !---------------------------------------------------------------------------- - ! Determine and validate wav_mode + ! Determine and validate datamode !---------------------------------------------------------------------------- - wav_mode = trim(SDWAV%dataMode) + datamode = trim(SDWAV%dataMode) - if (trim(wav_mode) == 'null' .or. & - trim(wav_mode) == 'copyall') then + if (trim(datamode) == 'NULL' .or. & + trim(datamode) == 'COPYALL') then if (my_task == master_task) then - write(logunit,F00) 'wav mode = ',trim(wav_mode) + write(logunit,F00) 'dwav datamode = ',trim(datamode) end if else - write(logunit,F00) ' ERROR illegal wav mode = ',trim(wav_mode) + write(logunit,F00) ' ERROR illegal dwav datamode = ',trim(datamode) call shr_sys_abort() end if @@ -146,7 +146,7 @@ subroutine dwav_shr_read_namelists(mpicom, my_task, master_task, & wav_present = .true. wav_prognostic = .true. 
endif - if (trim(wav_mode) /= 'NULL') then + if (trim(datamode) /= 'NULL') then wav_present = .true. end if diff --git a/src/drivers/mct/cime_config/buildnml b/src/drivers/mct/cime_config/buildnml index 5d6d57ee7a2..970efa691c0 100755 --- a/src/drivers/mct/cime_config/buildnml +++ b/src/drivers/mct/cime_config/buildnml @@ -7,7 +7,7 @@ # Disable these because this is our standard setup # pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-position -import os, shutil, sys, glob, itertools +import os, shutil, sys, glob, itertools, re _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..") sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) @@ -19,6 +19,7 @@ from CIME.utils import expect from CIME.utils import get_model, get_time_in_seconds, get_timestamp from CIME.buildnml import create_namelist_infile, parse_input from CIME.XML.files import Files +from CIME.XML.grids import Grids logger = logging.getLogger(__name__) @@ -81,30 +82,29 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): if case.get_value('CALENDAR') == 'NO_LEAP': basedt = 3600 * 24 * 365 else: - expect(False, "Invalid CALENDAR for NCPL_BASE_PERIOD %s " %ncpl_base_period) + expect(False, "Invalid CALENDAR for NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) elif ncpl_base_period == 'decade': if case.get_value('CALENDAR') == 'NO_LEAP': basedt = 3600 * 24 * 365 * 10 else: - expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD %s " %ncpl_base_period) + expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) else: - expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD %s " %ncpl_base_period) + expect(False, "invalid NCPL_BASE_PERIOD NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) if basedt < 0: - expect(False, "basedt invalid overflow for NCPL_BASE_PERIOD %s " %ncpl_base_period) + expect(False, "basedt invalid overflow for NCPL_BASE_PERIOD {} ".format(ncpl_base_period)) comps = 
case.get_values("COMP_CLASSES") mindt = basedt for comp in comps: ncpl = case.get_value(comp.upper() + '_NCPL') if ncpl is not None: - cpl_dt = basedt / int(ncpl) + cpl_dt = int(basedt / int(ncpl)) totaldt = cpl_dt * int(ncpl) if totaldt != basedt: - expect(False, " %s ncpl doesn't divide base dt evenly" %comp) + expect(False, " {} ncpl doesn't divide base dt evenly".format(comp)) nmlgen.add_default(comp.lower() + '_cpl_dt', value=cpl_dt) mindt = min(mindt, cpl_dt) -# elif comp.lower() is not 'cpl': #-------------------------------- # Overwrite: set start_ymd @@ -155,7 +155,7 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): comp_classes = case.get_values("COMP_CLASSES") for comp in pause_comps: expect(comp == 'drv' or comp.upper() in comp_classes, - "Invalid PAUSE_COMPONENT_LIST, %s is not a valid component type"%comp) + "Invalid PAUSE_COMPONENT_LIST, {} is not a valid component type".format(comp)) # End for # End if # Set esp interval @@ -170,28 +170,82 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): #-------------------------------- # (1) Write output namelist file drv_in and input dataset list. 
#-------------------------------- + write_drv_in_file(case, nmlgen, confdir) + + #-------------------------------- + # (2) Write out seq_map.rc file + #-------------------------------- + write_seq_maps_file(case, nmlgen, confdir) + + #-------------------------------- + # (3) Construct and write out drv_flds_in + #-------------------------------- + write_drv_flds_in_file(case, nmlgen, files) + +############################################################################### +def write_drv_in_file(case, nmlgen, confdir): +############################################################################### data_list_path = os.path.join(case.get_case_root(), "Buildconf", "cpl.input_data_list") if os.path.exists(data_list_path): os.remove(data_list_path) namelist_file = os.path.join(confdir, "drv_in") nmlgen.write_output_file(namelist_file, data_list_path ) - #-------------------------------- - # (2) Write out seq_map.rc file - #-------------------------------- +############################################################################### +def write_seq_maps_file(case, nmlgen, confdir): +############################################################################### + # first determine if there are invalid idmap settings + # if source and destination grid are different, mapping file must not be "idmap" + gridvalue = {} + ignore_component = {} + exclude_list = ["CPL","ESP"] + for comp_class in case.get_values("COMP_CLASSES"): + if comp_class not in exclude_list: + gridvalue[comp_class.lower()] = case.get_value(comp_class + "_GRID" ) + if case.get_value(comp_class + "_GRID" ) == 'null': + ignore_component[comp_class.lower()] = True + else: + ignore_component[comp_class.lower()] = False + + # Currently, hard-wire values of mapping file names to ignore + # TODO: for rof2ocn_fmapname -needs to be resolved since this is currently + # used in prep_ocn_mod.F90 if flood_present is True - this is in issue #1908. 
+ # The following is only approriate for config_grids.xml version 2.0 or later + grid_version = Grids().get_version() + if grid_version >= 2.0: + ignore_idmaps = ["rof2ocn_fmapname", "glc2ice_rmapname", "glc2ocn_rmapname"] + group_variables = nmlgen.get_group_variables("seq_maps") + for name in group_variables: + value = group_variables[name] + if "mapname" in name: + value = re.sub('\"', '', value) + if 'idmap' == value: + component1 = name[0:3] + component2 = name[4:7] + if not ignore_component[component1] and not ignore_component[component2]: + if name in ignore_idmaps: + logger.warning(" NOTE: ignoring setting of {}=idmap in seq_maps.rc".format(name)) + else: + if "rof2ocn_" in name: + if case.get_value("COMP_OCN") == 'docn': + logger.warning(" NOTE: ignoring setting of {}=idmap in seq_maps.rc".format(name)) + else: + expect(gridvalue[component1] == gridvalue[component2], + "Need to provide valid mapping file between {} and {} in xml variable {} ".\ + format(component1, component2, name)) + + # now write out the file seq_maps_file = os.path.join(confdir, "seq_maps.rc") nmlgen.write_seq_maps(seq_maps_file) - #-------------------------------- - # (3) Construct and write out drv_flds_in - #-------------------------------- +############################################################################### +def write_drv_flds_in_file(case, nmlgen, files): +############################################################################### # In thte following, all values come simply from the infiles - no default values need to be added # FIXME - do want to add the possibility that will use a user definition file for drv_flds_in caseroot = case.get_value('CASEROOT') - namelist_file = os.path.join(confdir, "drv_flds_in") - nmlgen.add_default('drv_flds_in_files') drvflds_files = nmlgen.get_default('drv_flds_in_files') infiles = [] @@ -201,7 +255,6 @@ def _create_drv_namelists(case, infile, confdir, nmlgen, files): infiles.append(infile) if len(infiles) != 0: - # First read the 
drv_flds_in files and make sure that # for any key there are not two conflicting values dicts = {} @@ -235,8 +288,7 @@ def compare_drv_flds_in(first, second, infile1, infile2): for key in sharedKeys: if first[key] != second[key]: print('Key: {}, \n Value 1: {}, \n Value 2: {}'.format(key, first[key], second[key])) - expect(False, "incompatible settings in drv_flds_in from \n %s \n and \n %s" - % (infile1, infile2)) + expect(False, "incompatible settings in drv_flds_in from \n {} \n and \n {}".format(infile1, infile2)) ############################################################################### def _create_component_modelio_namelists(case, files): @@ -248,14 +300,14 @@ def _create_component_modelio_namelists(case, files): confdir = os.path.join(case.get_value("CASEBUILD"), "cplconf") lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") - models = case.get_values("COMP_CLASSES") + #if we are in multi-coupler mode the number of instances of cpl will be the max # of any NINST_* value maxinst = 1 if case.get_value("MULTI_DRIVER"): maxinst = case.get_value("NINST_MAX") - for model in models: + for model in case.get_values("COMP_CLASSES"): model = model.lower() with NamelistGenerator(case, definition_file) as nmlgen: config = {} @@ -308,7 +360,7 @@ def buildnml(case, caseroot, component): user_xml_dir = os.path.join(caseroot, "SourceMods", "src.drv") expect (os.path.isdir(user_xml_dir), - "user_xml_dir %s does not exist " %user_xml_dir) + "user_xml_dir {} does not exist ".format(user_xml_dir)) files = Files() definition_file = [files.get_value("NAMELIST_DEFINITION_FILE", {"component": "drv"})] diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml index 7ad0589011d..0aa35b2968a 100644 --- a/src/drivers/mct/cime_config/config_component.xml +++ b/src/drivers/mct/cime_config/config_component.xml @@ -976,7 +976,7 @@ 0 mach_pes_last env_mach_pes.xml - pes or cores used relative to 
PES_PER_NODE for accounting (0 means TOTALPES is valid) + pes or cores used relative to MAX_MPITASKS_PER_NODE for accounting (0 means TOTALPES is valid) @@ -1993,33 +1993,50 @@ integer - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 mach_pes env_mach_pes.xml number of tasks for each component + + integer + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + mach_pes + env_mach_pes.xml + Number of tasks per instance for each component. DO NOT EDIT: Set automatically by case.setup based on NTASKS, NINST and MULTI_DRIVER + + integer - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 mach_pes env_mach_pes.xml @@ -2029,15 +2046,15 @@ integer - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 - 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 mach_pes env_mach_pes.xml @@ -2059,14 +2076,14 @@ integer - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 mach_pes env_mach_pes.xml @@ -2079,14 +2096,14 @@ char sequential,concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent - concurrent + concurrent + concurrent + concurrent + concurrent + concurrent + concurrent + concurrent + concurrent mach_pes env_mach_pes.xml @@ -2096,15 +2113,15 @@ integer - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 mach_pes env_mach_pes.xml @@ -2127,7 +2144,7 @@ maximum number of tasks/ threads allowed per node - + integer 0 mach_pes_last @@ -2271,15 +2288,15 @@ env_run.xml pio io type - default - default - default - default - default - default - default - default - default + default + default + default + default + default + default + default + default + default @@ -2292,15 +2309,15 @@ https://www.unidata.ucar.edu/software/netcdf/docs/data_type.html - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset - 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset + 64bit_offset @@ 
-2313,15 +2330,15 @@ be computed based on PIO_NUMTASKS and number of compute tasks - - - - - - - - - + + + + + + + + + @@ -2333,15 +2350,15 @@ pio rearranger choice box=1, subset=2 $PIO_VERSION - - - - - - - - - + + + + + + + + + @@ -2351,15 +2368,15 @@ env_run.xml pio root processor relative to component root - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 @@ -2372,15 +2389,15 @@ number of tasks - -99 - -99 - -99 - -99 - -99 - -99 - -99 - -99 - -99 + -99 + -99 + -99 + -99 + -99 + -99 + -99 + -99 + -99 @@ -2667,6 +2684,20 @@ + + char + run_desc + env_run.xml + Store user override for queue + + + + char + run_desc + env_run.xml + Store user override for walltime + + char diff --git a/src/drivers/mct/cime_config/config_component_cesm.xml b/src/drivers/mct/cime_config/config_component_cesm.xml index c6a4e4f0a6f..7ebf568ebc9 100644 --- a/src/drivers/mct/cime_config/config_component_cesm.xml +++ b/src/drivers/mct/cime_config/config_component_cesm.xml @@ -188,8 +188,16 @@ 1 96 96 + 96 + 96 96 - 96 + 96 + 96 + 96 + 144 + 144 + 144 + 144 72 144 288 diff --git a/src/drivers/mct/main/cime_comp_mod.F90 b/src/drivers/mct/main/cime_comp_mod.F90 index c591090425a..bd2239fafa4 100644 --- a/src/drivers/mct/main/cime_comp_mod.F90 +++ b/src/drivers/mct/main/cime_comp_mod.F90 @@ -3740,10 +3740,9 @@ subroutine cime_run() !---------------------------------------------------------- if (esp_present .and. esprun_alarm) then ! 
Make sure that all couplers are here in multicoupler mode before running ESP component - if (num_inst_driver > 1) then - call mpi_barrier(global_comm, ierr) - endif - + if (num_inst_driver > 1) then + call mpi_barrier(global_comm, ierr) + endif call component_run(Eclock_e, esp, esp_run, infodata, & comp_prognostic=esp_prognostic, comp_num=comp_num_esp, & timer_barrier= 'CPL:ESP_RUN_BARRIER', timer_comp_run='CPL:ESP_RUN', & diff --git a/src/externals/mct/mpi-serial/Makefile b/src/externals/mct/mpi-serial/Makefile index 7122f7423f4..0b1ca1db6c2 100644 --- a/src/externals/mct/mpi-serial/Makefile +++ b/src/externals/mct/mpi-serial/Makefile @@ -88,3 +88,6 @@ install: lib $(INSTALL) lib$(MODULE).a -m 644 $(libdir) $(INSTALL) mpi.h -m 644 $(includedir) $(INSTALL) mpif.h -m 644 $(includedir) + + + diff --git a/src/externals/mct/mpi-serial/mpi.c b/src/externals/mct/mpi-serial/mpi.c index d6f58adbce1..0353f477f81 100644 --- a/src/externals/mct/mpi-serial/mpi.c +++ b/src/externals/mct/mpi-serial/mpi.c @@ -323,7 +323,21 @@ int MPI_Get_library_version(char *version, int *resultlen) return(MPI_SUCCESS); } +/**********/ +void FC_FUNC( mpi_get_version, MPI_GET_VERSION )(int *mpi_vers, int *mpi_subvers, int *ierror) +{ + MPI_Get_Version(mpi_vers, mpi_subvers); + + *ierror=MPI_SUCCESS; +} + +int MPI_Get_Version(int *mpi_vers, int *mpi_subvers) +{ + *mpi_vers = 1; + *mpi_subvers = 0; + return (MPI_SUCCESS); +} /**********/ diff --git a/src/externals/mct/mpi-serial/mpif.h b/src/externals/mct/mpi-serial/mpif.h index b4537b5d4a2..678ad9e9fdd 100644 --- a/src/externals/mct/mpi-serial/mpif.h +++ b/src/externals/mct/mpi-serial/mpif.h @@ -325,3 +325,11 @@ parameter (MPI_BOTTOM=0) INTEGER MPI_MAX_LIBRARY_VERSION_STRING PARAMETER (MPI_MAX_LIBRARY_VERSION_STRING=80) + + ! + ! MPI Version + ! 
+ INTEGER MPI_VERSION + PARAMETER (MPI_VERSION=1) + INTEGER MPI_SUBVERSION + PARAMETER (MPI_SUBVERSION=0) diff --git a/src/externals/mct/mpi-serial/tests/ftest.F90 b/src/externals/mct/mpi-serial/tests/ftest.F90 index b292b8b73cd..ef8681a35d1 100644 --- a/src/externals/mct/mpi-serial/tests/ftest.F90 +++ b/src/externals/mct/mpi-serial/tests/ftest.F90 @@ -7,8 +7,8 @@ program test implicit none integer ierr integer ec - character*(MPI_MAX_LIBRARY_VERSION_STRING) version - integer vlen + character*(MPI_MAX_LIBRARY_VERSION_STRING) version + integer vlen ec = 0 #ifdef TEST_INTERNAL @@ -17,8 +17,8 @@ program test call mpi_init(ierr) - call MPI_GET_LIBRARY_VERSION(version,vlen,ierr) - print *,"MPI Version '",version,"' len=",vlen + call MPI_GET_LIBRARY_VERSION(version,vlen,ierr) + print *,"MPI Version '",version,"' len=",vlen call test_contiguous(ec) call test_vector(ec) @@ -31,6 +31,7 @@ program test call test_multiple(ec) call test_multiple_indexed(ec) call test_collectives(ec) + call test_mpi_version(ec) call mpi_finalize(ierr) if (ec .eq. 0) then @@ -678,3 +679,31 @@ subroutine test_collectives(ec) end do end subroutine +!!!!!!!!!!!!!!!!!!!!!!!! +! Test MPI_VERSION +!!!!!!!!!!!!!!!!!!!!!!!! 
+ + subroutine test_mpi_version(ec) + use mpi + integer ec + integer ierr + integer mpiv + integer mpisv + + print *, "Testing MPI_Get_Version" + + call mpi_get_version(mpiv, mpisv, ierr) + if (ierr /= MPI_SUCCESS) then + print *, "MPI_get_VERSION ierr not zero (",ierr,")" + ec = ec + 1 + else + if (mpiv /= MPI_VERSION) then + print *, "MPI_VERSION mismatch, should be ",MPI_VERSION,", found ",mpiv + ec = ec + 1 + end if + if (mpisv /= MPI_SUBVERSION) then + print *, "MPI_SUBVERSION mismatch, should be ",MPI_SUBVERSION,", found ",mpisv + ec = ec + 1 + end if + end if + end subroutine test_mpi_version diff --git a/src/externals/mct/mpi-serial/tests/ftest_old.F90 b/src/externals/mct/mpi-serial/tests/ftest_old.F90 index 1a35d2ef3ad..938d4472a94 100644 --- a/src/externals/mct/mpi-serial/tests/ftest_old.F90 +++ b/src/externals/mct/mpi-serial/tests/ftest_old.F90 @@ -1,163 +1,165 @@ -program test - implicit none - include "mpif.h" - integer ier + program test + implicit none + include "mpif.h" - integer sreq(10), sreq2(10), rreq(10), rreq2(10) - integer sbuf(10), sbuf2(10), rbuf(10), rbuf2(10) - integer tag - integer status(MPI_STATUS_SIZE,10) - integer i - integer comm2; - logical flag; - character pname(MPI_MAX_PROCESSOR_NAME) - integer pnamesize + integer ier - integer temp,position - integer errcount + integer sreq(10), sreq2(10), rreq(10), rreq2(10) + integer sbuf(10), sbuf2(10), rbuf(10), rbuf2(10) + integer tag + integer status(MPI_STATUS_SIZE,10) + integer i + integer comm2; + logical flag; + character pname(MPI_MAX_PROCESSOR_NAME) + integer pnamesize - errcount = 0 + integer temp,position + integer errcount - print *, 'Time=',mpi_wtime() + errcount = 0 - call mpi_initialized(flag,ier) - print *, 'MPI is initialized=',flag + print *, 'Time=',mpi_wtime() - call mpi_init(ier) + call mpi_initialized(flag,ier) + print *, 'MPI is initialized=',flag - call mpi_get_processor_name(pname,pnamesize,ier) - print *, 'proc name: "',pname(1:pnamesize),'" size:',pnamesize + call 
mpi_init(ier) + call mpi_get_processor_name(pname,pnamesize,ier) + print *, 'proc name: "',pname(1:pnamesize),'" size:',pnamesize - call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) - call mpi_initialized(flag,ier) - print *, 'MPI is initialized=',flag + call mpi_comm_dup(MPI_COMM_WORLD,comm2,ier) + call mpi_initialized(flag,ier) + print *, 'MPI is initialized=',flag - do i=1,5 - tag= 100+i - print *, 'Post receive tag ',tag - call mpi_irecv( rbuf(i),1,MPI_INTEGER,0,tag, & - MPI_COMM_WORLD,rreq(i),ier) + do i=1,5 + tag= 100+i + print *, 'Post receive tag ',tag - end do - do i=1,5 - ! tag=1100+i - ! print *, 'Post receive tag ',tag + call mpi_irecv( rbuf(i),1,MPI_INTEGER,0,tag, & + MPI_COMM_WORLD,rreq(i),ier) - call mpi_irecv( rbuf2(i),1,MPI_INTEGER, & - MPI_ANY_SOURCE, MPI_ANY_TAG, & - comm2,rreq2(i),ier) + end do + do i=1,5 +! tag=1100+i +! print *, 'Post receive tag ',tag - end do + call mpi_irecv( rbuf2(i),1,MPI_INTEGER, & + MPI_ANY_SOURCE, MPI_ANY_TAG, & + comm2,rreq2(i),ier) + end do - do i=1,5 - sbuf(i)=10*i - tag=100+i - print *, 'Send ',sbuf(i),' tag ',tag - call mpi_isend( sbuf(i),1,MPI_INTEGER,0,tag, & - MPI_COMM_WORLD,sreq(i),ier) - end do + do i=1,5 + sbuf(i)=10*i + tag=100+i + print *, 'Send ',sbuf(i),' tag ',tag + call mpi_isend( sbuf(i),1,MPI_INTEGER,0,tag, & + MPI_COMM_WORLD,sreq(i),ier) + end do - do i=1,5 - sbuf2(i)=1000+10*i - tag=1100+i - print *, 'Send ',sbuf2(i),' tag ',tag - call mpi_isend( sbuf2(i),1,MPI_INTEGER,0,tag, & - comm2,sreq2(i),ier) - end do + do i=1,5 + sbuf2(i)=1000+10*i + tag=1100+i + print *, 'Send ',sbuf2(i),' tag ',tag - do i=1,5 - if (sbuf(i) .ne. rbuf(i)) then - errcount = errcount+1 - print *, 'error on Send2' - print *, 'found ',sbuf2(i),' should be ',rbuf2(i) - end if - end do + call mpi_isend( sbuf2(i),1,MPI_INTEGER,0,tag, & + comm2,sreq2(i),ier) + end do - do i=1,5 - if (sbuf2(i) .ne. 
rbuf2(i)) then - errcount = errcount+1 - print *, 'error on Send2' - print *, 'found ',sbuf2(i),' should be ',rbuf2(i) - end if - end do + do i=1,5 + if (sbuf(i) .ne. rbuf(i)) then + errcount = errcount+1 + print *, 'error on Send2' + print *, 'found ',sbuf2(i),' should be ',rbuf2(i) + end if + end do - print *, 'Time=',mpi_wtime() - call mpi_waitall(5,sreq,status,ier) - print *,'sends on MPI_COMM_WORLD done' + do i=1,5 + if (sbuf2(i) .ne. rbuf2(i)) then + errcount = errcount+1 + print *, 'error on Send2' + print *, 'found ',sbuf2(i),' should be ',rbuf2(i) + end if + end do - call mpi_waitall(5,rreq,status,ier) - print *,'recvs on MPI_COMM_WORLD done' + print *, 'Time=',mpi_wtime() + call mpi_waitall(5,sreq,status,ier) + print *,'sends on MPI_COMM_WORLD done' - do i=1,5 - print *, 'Status source=',status(MPI_SOURCE,i), & - ' tag=',status(MPI_TAG,i) - end do + call mpi_waitall(5,rreq,status,ier) + print *,'recvs on MPI_COMM_WORLD done' + + do i=1,5 + print *, 'Status source=',status(MPI_SOURCE,i), & + ' tag=',status(MPI_TAG,i) + end do - call mpi_waitall(5,sreq2,status,ier) - print *,'sends on comm2 done' + call mpi_waitall(5,sreq2,status,ier) + print *,'sends on comm2 done' - call mpi_waitall(5,rreq2,status,ier) - print *,'recvs on comm2 done' + call mpi_waitall(5,rreq2,status,ier) + print *,'recvs on comm2 done' - do i=1,5 - print *, 'Status source=',status(MPI_SOURCE,i), & - ' tag=',status(MPI_TAG,i) - end do + do i=1,5 + print *, 'Status source=',status(MPI_SOURCE,i), & + ' tag=',status(MPI_TAG,i) + end do - ! pack/unpack +! 
pack/unpack - position=0 - do i=1,5 - temp=100+i - call mpi_pack(temp,1,MPI_INTEGER,sbuf,20,position,MPI_COMM_WORLD,ier) - end do + position=0 + do i=1,5 + temp=100+i + call mpi_pack(temp,1,MPI_INTEGER,sbuf,20,position,MPI_COMM_WORLD,ier) + end do - call mpi_isend(sbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,sreq(1),ier) - call mpi_irecv(rbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,rreq(1),ier) - call mpi_waitall(1,rreq,status,ier) + call mpi_isend(sbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,sreq(1),ier) + call mpi_irecv(rbuf,position,MPI_PACKED,0,0,MPI_COMM_WORLD,rreq(1),ier) + call mpi_waitall(1,rreq,status,ier) - print *,"Pack/send/unpack:" + print *,"Pack/send/unpack:" - position=0 - do i=1,5 - call mpi_unpack( rbuf,20,position,temp,1,MPI_INTEGER, & - MPI_COMM_WORLD) - print *,temp - end do + position=0 + do i=1,5 + call mpi_unpack( rbuf,20,position,temp,1,MPI_INTEGER, & + MPI_COMM_WORLD) + print *,temp + end do + + do i=1,5 + if (rbuf(i) .ne. sbuf(i)) then + errcount = errcount + 1 + print *,"Error for pack/send/unpack" + print *,"found ",rbuf(i)," should be ",sbuf(i) + end if + end do +! + + + call mpi_finalize(ier) + + do i=1,5 + print *, 'Time=',mpi_wtime() + call sleep(1) + end do + + if (errcount .gt. 0) then + print *,errcount," errors" + else + print *,"No errors" + end if + + end - do i=1,5 - if (rbuf(i) .ne. sbuf(i)) then - errcount = errcount + 1 - print *,"Error for pack/send/unpack" - print *,"found ",rbuf(i)," should be ",sbuf(i) - end if - end do - ! - - - call mpi_finalize(ier) - - do i=1,5 - print *, 'Time=',mpi_wtime() - call sleep(1) - end do - - if (errcount .gt. 
0) then - print *,errcount," errors" - else - print *,"No errors" - end if - -end program test diff --git a/src/externals/mct/mpi-serial/type.c b/src/externals/mct/mpi-serial/type.c index ac3b8400e63..8dd93f27414 100644 --- a/src/externals/mct/mpi-serial/type.c +++ b/src/externals/mct/mpi-serial/type.c @@ -843,3 +843,4 @@ int Pprint_typemap(Datatype type) return MPI_SUCCESS; } #endif //TEST_INTERNAL + diff --git a/src/externals/pio1/pio/CMakeLists.txt b/src/externals/pio1/pio/CMakeLists.txt index d2c162bb71d..facdbaceb25 100644 --- a/src/externals/pio1/pio/CMakeLists.txt +++ b/src/externals/pio1/pio/CMakeLists.txt @@ -175,12 +175,8 @@ if("${CMAKE_Fortran_COMPILER_ID}" STREQUAL "GNU") endif() ADD_LIBRARY(pio ${SRCS_F90} ${SRCS_C} ${TEMPSRCF90}) -if(PNETCDF_LIBRARY) - TARGET_LINK_LIBRARIES(pio ${PNETCDF_LIBRARIES}) -ENDIF() -if(NETCDF_LIBRARIES) - TARGET_LINK_LIBRARIES(pio ${NETCDF_LIBRARIES}) -ENDIF() +TARGET_LINK_LIBRARIES(pio ${PnetCDF_Fortran_LIBRARIES}) +TARGET_LINK_LIBRARIES(pio ${NetCDF_Fortran_LIBRARIES}) TARGET_LINK_LIBRARIES(pio ${ADDITIONAL_LIBS}) if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../unittests) @@ -189,3 +185,5 @@ endif() if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../testpio) ADD_SUBDIRECTORY(../testpio testpio) endif() + + diff --git a/src/externals/pio1/pio/pionfput_mod.F90.in b/src/externals/pio1/pio/pionfput_mod.F90.in index 48cd56d0df6..02e87db6000 100644 --- a/src/externals/pio1/pio/pionfput_mod.F90.in +++ b/src/externals/pio1/pio/pionfput_mod.F90.in @@ -696,11 +696,9 @@ contains #endif #endif #ifdef _NETCDF -#ifdef _NETCDF4 case(pio_iotype_netcdf4p) ierr=nf90_var_par_access(File%fh, varid, NF90_COLLECTIVE) ierr = nf90_put_var(File%fh, varid, ival, start=int(pstart), count=int(pcount)) -#endif case(pio_iotype_netcdf, pio_iotype_netcdf4c) ! 
Only io proc 0 will do writing if (Ios%io_rank == 0) then diff --git a/src/externals/pio1/tests/testpio/perl5lib/ChangeLog b/src/externals/pio1/tests/testpio/perl5lib/ChangeLog index a5bbed9f87b..d5bfab83683 100644 --- a/src/externals/pio1/tests/testpio/perl5lib/ChangeLog +++ b/src/externals/pio1/tests/testpio/perl5lib/ChangeLog @@ -6,7 +6,7 @@ Originator(s): erik Date: Sat Jun 13, 2009 One-line Summary: Add %ymd indicator for streams so can do year-month-days -M Streams/Template.pm ---- Add ability to write out %ymd year-month-day +M Streams/Template.pm ---- Add ability to write out %ymd year-month-day for filenames in streams. It assumes a noleap calendar -- could easily be extended to make Gregorian optional. @@ -14,7 +14,7 @@ M t/01.t ---- Change formatting of successful test M t/02.t ---- Add more tests for %ymd, and offset M t/03.t ---- Change formatting of successful test M t/04.t ---- Change formatting of successful test -M t/datm.streams.txt ---------- Add another year and the last-month +M t/datm.streams.txt ---------- Add another year and the last-month to start for testing A t/datm.ymd.streams.txt ------ Add streams test file with %ymd M t/datm.template.streams.xml - Add CPLHIST test section with %ymd @@ -27,7 +27,7 @@ Date: Tue Jun 9, 2009 One-line Summary: add offset support for streams template M Streams/Template.pm - + ============================================================== Tag name: perl5lib_090424 Originator(s): erik @@ -79,7 +79,7 @@ Build/Namelist.pm . Change validate_variable_value() from an object method to a class method, and remove the unused argument. . add fix to _split_namelist_value method to replace embedded newlines by - spaces. + spaces. Build/NamelistDefaults.pm . 
make the method interfaces case insensitive by converting all variable @@ -146,7 +146,7 @@ Originator(s): erik (KLUZEK ERIK 1326 CGD) Date: Mon Aug 11 10:44:52 MDT 2008 One-line Summary: Turn off printing of file existance if NOT -verbose -M Streams/Template.pm ----------- Turn off printing of file +M Streams/Template.pm ----------- Turn off printing of file checking if NOT $printing; ============================================================== @@ -190,8 +190,8 @@ about needing to do validation as is done now. Change the validate methods a bit and make them more robust. M Build/Config.pm --------------- Add get_valid_values method and use it internally. -M Build/NamelistDefinition.pm --- Add namelist validate_variable_value to validate - method. Add option to return without quotes to +M Build/NamelistDefinition.pm --- Add namelist validate_variable_value to validate + method. Add option to return without quotes to get_valid_values method. M Build/Namelist.pm ------------- Make validate_variable_value more robust. diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Changes b/src/externals/pio1/tests/testpio/perl5lib/XML/Changes index d7ad4ec1388..d0be5104f77 100644 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Changes +++ b/src/externals/pio1/tests/testpio/perl5lib/XML/Changes @@ -1,27 +1,27 @@ -Revision history for Perl extension XML::Lite. - -0.14 31 January 2003 - - Fixed a major bug in parsing empty elements - - Fixed some typos in documenation - - Fixed error in documentation of XML::Element::get_attributes interface -0.13 13 November 2001 - - Minor bug fixes? 
-0.12 15 November 2001 - - Fixed bugs in test that failed on CPAN Testers - - Fixed warnings in XML::Lite::Element->_find_self - - Fixed bug where mutiple child lists failed (problem in opt code) - - Added tests for above - - Removed from CPAN because Matt Sergeant got upset -0.11 6 November 2001 - - XML::Lite::Element->get_text() now removes CDATA tags (but leaves content) -0.10 6 November 2001 - - Fixed children() and text() methods by re-vamping the - tree. - - Built tests for all exposed methods of all objects - - Built tests for all contructor calls -0.05 4 November 2001 - - Added get_text method -0.01 Sat Aug 25 13:31:48 2001 - - original version; created by h2xs 1.20 with options - -XA -n XML::Lite - +Revision history for Perl extension XML::Lite. + +0.14 31 January 2003 + - Fixed a major bug in parsing empty elements + - Fixed some typos in documenation + - Fixed error in documentation of XML::Element::get_attributes interface +0.13 13 November 2001 + - Minor bug fixes? +0.12 15 November 2001 + - Fixed bugs in test that failed on CPAN Testers + - Fixed warnings in XML::Lite::Element->_find_self + - Fixed bug where mutiple child lists failed (problem in opt code) + - Added tests for above + - Removed from CPAN because Matt Sergeant got upset +0.11 6 November 2001 + - XML::Lite::Element->get_text() now removes CDATA tags (but leaves content) +0.10 6 November 2001 + - Fixed children() and text() methods by re-vamping the + tree. 
+ - Built tests for all exposed methods of all objects + - Built tests for all contructor calls +0.05 4 November 2001 + - Added get_text method +0.01 Sat Aug 25 13:31:48 2001 + - original version; created by h2xs 1.20 with options + -XA -n XML::Lite + diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm index c1f7c821eae..d6aa32e978c 100644 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm +++ b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite.pm @@ -35,12 +35,12 @@ my $xml = new XML::Lite( xml => 'a_file.xml' ); =head1 DESCRIPTION -XML::Lite is a lightweight XML parser, with basic element traversing -methods. It is entirely self-contained, pure Perl (i.e. I based on -expat). It provides useful methods for reading most XML files, including -traversing and finding elements, reading attributes and such. It is -designed to take advantage of Perl-isms (Attribute lists are returned as -hashes, rather than, say, lists of objects). It provides only methods +XML::Lite is a lightweight XML parser, with basic element traversing +methods. It is entirely self-contained, pure Perl (i.e. I based on +expat). It provides useful methods for reading most XML files, including +traversing and finding elements, reading attributes and such. It is +designed to take advantage of Perl-isms (Attribute lists are returned as +hashes, rather than, say, lists of objects). It provides only methods for reading a file, currently. =head1 METHODS @@ -50,7 +50,7 @@ The following methods are available: =over 4 =cut - + use XML::Lite::Element; BEGIN { use vars qw( $VERSION @ISA ); @@ -75,17 +75,17 @@ use vars qw( %ERRORS ); =item my $xml = new XML::Lite( xml => $source[, ...] ); Creates a new XML::Lite object. The XML::Lite object acts as the document -object for the $source that is sent to it to parse. This means that you -create a new object for each document (or document sub-section). 
As the +object for the $source that is sent to it to parse. This means that you +create a new object for each document (or document sub-section). As the objects are lightweight this should not be a performance consideration. The object constructor can take several named parameters. Parameter names -may begin with a '-' (as in the example above) but are not required to. The +may begin with a '-' (as in the example above) but are not required to. The following parameters are recognized. - xml The source XML to parse. This can be a filename, a scalar that + xml The source XML to parse. This can be a filename, a scalar that contains the document (or document fragment), or an IO handle. - + As a convenince, if only on parameter is given, it is assumed to be the source. So you can use this, if you wish: @@ -99,7 +99,7 @@ sub new { my $proto = shift; my %parms; my $class = ref($proto) || $proto; - + # Parse parameters $self->{settings} = {}; if( @_ > 1 ) { @@ -109,7 +109,7 @@ sub new { while( ($k, $v) = each %parms ) { $k =~ s/^-//; # Removed leading '-' if it exists. (Why do Perl programmers use this?) $self->{settings}{$k} = $v; - } # end while + } # end while } else { $self->{settings}{xml} = $_[0]; } # end if; @@ -121,10 +121,10 @@ sub new { $self->{doc} = ''; $self->{_CDATA} = []; $self->{handlers} = {}; - + # Refer to global error messages $self->{ERRORS} = $self->{settings}{error_messages} || \%ERRORS; - + # Now parse the XML document and build look-up tables return undef unless $self->_parse_it(); @@ -181,8 +181,8 @@ sub root_element { Returns a list of all elements that match C<$name>. C<@list> is a list of L objects If called in a scalar context, this will return the -first element found that matches (it's more efficient -to call in a scalar context than assign the results +first element found that matches (it's more efficient +to call in a scalar context than assign the results to a list of one scalar). 
If no matching elements are found then returns C @@ -201,7 +201,7 @@ sub element_by_name; sub elements_by_name { my $self = shift; my( $name ) = @_; - + if( wantarray ) { my @list = (); foreach( @{$self->{elements}{$name}} ) { @@ -241,7 +241,7 @@ sub elements_by_name { # ---------------------------------------------------------- sub _parse_it { my $self = shift; - + # Get the xml content if( $self->{settings}{xml} =~ /^\s*{doc} = $self->{settings}{xml}; @@ -268,26 +268,26 @@ sub _parse_it { $self->{doc_offset} = length $1; # Store the number of removed chars for messages } # end if $self->{doc} =~ s/\s+$//; - - + + # Build lookup tables $self->{elements} = {}; $self->{tree} = []; # - These are used in the building process my $element_list = []; my $current_element = $self->{tree}; - + # Call init handler if defined &{$self->{handlers}{init}}($self) if defined $self->{handlers}{init}; - + # Make a table of offsets to each element start and end point # Table is a hash of element names to lists of offsets: # [start_tag_start, start_tag_end, end_tag_start, end_tag_end] # where tags include the '<' and '>' - - # Also make a tree of linked lists. List contains root element + + # Also make a tree of linked lists. List contains root element # and other nodes. Each node consits of a list ref (the position list) - # and a following list containing the child element. Text nodes are + # and a following list containing the child element. Text nodes are # a list ref (with just two positions). 
# Find the opening and closing of the XML, giving errors if not well-formed @@ -297,22 +297,22 @@ sub _parse_it { $self->_error( 'ROOT_NOT_CLOSED', $start_pos + $self->{doc_offset} ) if $end_pos == -1; my $doc_end = rindex( $self->{doc}, '>' ); $self->_error( 'ROOT_NOT_CLOSED' ) if $doc_end == -1; - + # Now walk through the document, one tag at a time, building up our # lookup tables while( $end_pos <= $doc_end ) { - + # Get a tag my $tag = substr( $self->{doc}, $start_pos, $end_pos - $start_pos + 1 ); # Get the tag name and see if it's an end tag (starts with \s]+)}; - + if( $end ) { # If there is no start tag for this end tag then throw an error $self->_error( 'NO_START', $start_pos + $self->{doc_offset}, $tag ) unless defined $self->{elements}{$name}; - - # Otherwise, add the end point to the array for the last element in + + # Otherwise, add the end point to the array for the last element in # the by-name lookup hash my( $x, $found ) = (@{$self->{elements}{$name}} - 1, 0); while( $x >= 0 ) { @@ -329,24 +329,24 @@ sub _parse_it { # If we didn't find an open element then throw an error $self->_error( 'NO_START', $start_pos + $self->{doc_offset}, $tag ) unless $found; - + # Call an end-tag handler if defined (not yet exposed) &{$self->{handlers}{end}}($self, $name) if defined $self->{handlers}{end}; - + # Close element in linked list (tree) $current_element = pop @$element_list; - + } else { - # Make a new list in the by-name lookup hash if none found by this name yet + # Make a new list in the by-name lookup hash if none found by this name yet $self->{elements}{$name} = [] unless defined $self->{elements}{$name}; - + # Add start points to the array of positions and push it on the hash my $pos_list = [$start_pos, $end_pos]; push @{$self->{elements}{$name}}, $pos_list; - + # Call start-tag handler if defined (not yet exposed) &{$self->{handlers}{start}}($self, $name) if defined $self->{handlers}{start}; - + # If this is a single-tag element (e.g. 
<.../>) then close it immediately if( $tag =~ m{/\s*>$} ) { push @$current_element, $pos_list; @@ -364,7 +364,7 @@ sub _parse_it { } # end if } # end if - + # Move the start pointer to beginning of next element $start_pos = index( $self->{doc}, '<', $start_pos + 1 ); last if $start_pos == -1 || $end_pos == $doc_end; @@ -372,16 +372,16 @@ sub _parse_it { # Now $end_pos is end of old tag and $start_pos is start of new # So do things on the data between the tags as needed if( $start_pos - $end_pos > 1 ) { - # Call any character data handler + # Call any character data handler &{$self->{handlers}{char}}($self, substr($self->{doc}, $end_pos + 1, $start_pos - $end_pos - 1)) if defined $self->{handlers}{char}; # Inserting the text into the linked list as well # push @$current_element, [$end_pos + 1, $start_pos - 1]; } # end if - + # Now finish by incrementing the parser to the next element $end_pos = index( $self->{doc}, '>', $start_pos + 1 ); - + # If there is no next element, and we're not at the end of the document, # then throw an error $self->_error( 'ELM_NOT_CLOSED', $start_pos + $self->{doc_offset} ) if $end_pos == -1; @@ -401,7 +401,7 @@ sub _parse_it { # # Returns: Scalar content of $file, undef on error # -# Description: Reads from $file and returns the content. +# Description: Reads from $file and returns the content. 
# $file may be either a filename or an IO handle # ---------------------------------------------------------- # Date Modification Author @@ -412,7 +412,7 @@ sub _get_a_file { my $self = shift; my $file = shift; my $content = undef; - + # If it's a ref and a handle, then read that if( ref($file) ) { $content = join '', <$file>; @@ -422,12 +422,12 @@ sub _get_a_file { open( XML, $file ) || return undef; $content = join '', ; close XML || return undef; - } + } # Don't know how to handle this type of parameter else { return undef; } # end if - + return $content; } # end _get_a_file @@ -448,10 +448,10 @@ sub _error { my $self = shift; my( $code, @args ) = @_; my $msg = $self->{ERRORS}{$code}; - + # Handle replacement codes $msg =~ s/\%(\d+)/$args[$1]/g; - + # Throw exception die ref($self) . ":$msg\n"; } # end _error @@ -462,7 +462,7 @@ sub _error { # # Args: $content # -# Returns: A reference to the CDATA element, padded to +# Returns: A reference to the CDATA element, padded to # original size. # # Description: Stores the CDATA element in the internal @@ -498,13 +498,13 @@ sub _store_cdata { sub _dump_tree { my $self = shift; my $node = shift || $self->{tree}; - + my $tree = ''; for( my $i = 0; $i < scalar(@$node) && defined $node->[$i]; $i++ ) { if( (scalar(@{$node->[$i]}) == 4) && (defined $node->[$i][2]) ) { $tree .= '[' . join( ',', @{$node->[$i]} ) . "] " - . substr($self->{doc}, $node->[$i][0], $node->[$i][1] - $node->[$i][0] + 1) - . "..." + . substr($self->{doc}, $node->[$i][0], $node->[$i][1] - $node->[$i][0] + 1) + . "..." . substr($self->{doc}, $node->[$i][2], $node->[$i][3] - $node->[$i][2] + 1) . " (child $i)\n"; # Do child list $i++; @@ -530,7 +530,7 @@ END { } =head1 BUGS Lots. This 'parser' (Matt Sergeant takes umbrance to my us of that word) will handle some XML -documents, but not all. +documents, but not all. 
=head1 VERSION diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm index c611d6cd17f..388511d89a0 100644 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm +++ b/src/externals/pio1/tests/testpio/perl5lib/XML/Lite/Element.pm @@ -33,18 +33,18 @@ print $elm->get_attribute( 'attribute_name' ); =head1 DESCRIPTION -C objects contain rudimentary methods for querying XML -elements in an XML document as parsed by XML::Lite. Usually these objects +C objects contain rudimentary methods for querying XML +elements in an XML document as parsed by XML::Lite. Usually these objects are returned by method calls in XML::Lite. =head1 METHODS -The following methods are available. All methods like 'get_name' can be +The following methods are available. All methods like 'get_name' can be abbeviated as 'name.' =over 4 -=cut +=cut use strict; BEGIN { @@ -63,8 +63,8 @@ use vars qw(); Creates a new XML::Lite::Element object from the XML::Lite object, C<$owner_document>. -Currently, you must not call this manually. You can create an object with one of -the 'factory' methods in XML::Lite, such as C or C +Currently, you must not call this manually. You can create an object with one of +the 'factory' methods in XML::Lite, such as C or C or with one of the XML::Lite::Element 'factory' methods below, like C. 
=cut @@ -77,15 +77,15 @@ sub new { # The arguments are as follows: # $owner_document is an XML::Lite object within which this element lives # \@pointers is a two or four element array ref containing the offsets - # into the original document of the start and end points of + # into the original document of the start and end points of # the opening and closing (when it exists) tags for the element - + # Validate arguments return undef unless @_ >= 2; return undef unless ref($_[0]) && (ref($_[1]) eq 'ARRAY'); - + # Load 'em up - + # The data structure for the ::Element object has these properties # doc A reference to the containing XML::Lite object # node A reference to an array of pointers to our element in the document @@ -94,11 +94,11 @@ sub new { # name The name on our tag # _attrs A string of the attibutes in our tag (unparsed) # attrs A hash ref of attributes in our tag - + $self->{doc} = $_[0]; $self->{node} = $_[1]; - - # Using the pointers, find out tag name, and attribute list from the + + # Using the pointers, find out tag name, and attribute list from the # opening tag (if there are any attributes). my $tag = substr( $self->{doc}{doc}, $self->{node}[0], $self->{node}[1] - $self->{node}[0] + 1 ); if( $tag =~ m{^<\s*([^/>\s]+)\s+([^>]+)\s*/?\s*>$} ) { @@ -111,7 +111,7 @@ sub new { # Should have been caught in the parsing! maybe an assert? $self->{doc}->_error( 'ELM_NOT_CLOSED', $self->{node}[0] + $self->{doc}->{doc_offset} ); } # end if - + # Good. Now returns it. bless ($self, $class); return $self; @@ -142,16 +142,16 @@ sub content; sub get_content { my $self = shift; - # If we don't have any content, then we should return + # If we don't have any content, then we should return # '' right away. 
return '' unless defined $self->{node}[2]; - + # Using our pointers, find everything between our tags my $content = substr( $self->{doc}{doc}, $self->{node}[1] + 1, $self->{node}[2] - $self->{node}[1] - 1 ); - + # Now, restore any CDATA chunks that may have been pulled out $content =~ s//{doc}{_CDATA}[$1]]]>/g; - + # And return the content return $content; } # end get_content @@ -173,11 +173,11 @@ sub attributes; *attributes = \&get_attributes; sub get_attributes { my $self = shift; - + # Parse the attribute string into a hash of name-value pairs # unless we've already done that. $self->_parse_attrs() unless defined $self->{attrs}; - + # Just return a *copy* of the hash (this is read-only after all!) if ( defined($self->{attrs}) ) { return %{$self->{attrs}}; @@ -202,10 +202,10 @@ sub attribute; sub get_attribute { my $self = shift; my( $name ) = @_; - + # If we haven't parsed the attribute string into a hash, then do that. $self->_parse_attrs() unless defined $self->{attrs}; - + # Now return the requested attribute. If it's not there # then 'undef' is returned return $self->{attrs}{$name}; @@ -233,9 +233,9 @@ sub get_name { =item my @children = $element->get_children() -Returns a list of XML::Lite::Element objects for each element contained -within the current element. This does not return any text or CDATA in -the content of this element. You can parse that through the L +Returns a list of XML::Lite::Element objects for each element contained +within the current element. This does not return any text or CDATA in +the content of this element. You can parse that through the L method. If no child elements exist then an empty list is returned. @@ -256,7 +256,7 @@ sub get_children { my $self = shift; my @children = (); - # If we don't have any content, then we should return an emtpty + # If we don't have any content, then we should return an emtpty # list right away -- we have no children. 
return @children unless defined $self->{node}[2]; @@ -264,8 +264,8 @@ sub get_children { # This will also load {children} and {parent} as well $self->_find_self() unless defined $self->{self}; - # Now that we know who we are (if this didn't fail) we can - # iterate through the sub nodes (our child list) and make + # Now that we know who we are (if this didn't fail) we can + # iterate through the sub nodes (our child list) and make # XML::Lite::Elements objects for each child if( defined $self->{children} ) { my $i = 0; @@ -276,7 +276,7 @@ sub get_children { $node = $self->{children}[++$i]; } # end while } # end if - + return @children; } # end get_children @@ -304,14 +304,14 @@ sub get_text { my $self = shift; my $content = ''; - # If we don't have any content, then we should return + # If we don't have any content, then we should return # $content right away -- we have no text return $content unless defined $self->{node}[2]; # Otherwise get out content and children my @children = $self->get_children; my $orig_content = $self->get_content; - + # Then remove the child elements from our content my $start = 0; foreach( @children ) { @@ -320,10 +320,10 @@ sub get_text { $start = ($_->{node}[3] || $_->{node}[1]) - $self->{node}[1]; } # end foreach $content .= substr( $orig_content, $start ) if $start < length($orig_content); - + # Remove the CDATA wrapper, preserving the content $content =~ s//$1/g; - + # Return the left-over text return $content; } # end get_text @@ -352,7 +352,7 @@ sub get_text { # ---------------------------------------------------------- sub _parse_attrs { my $self = shift; - + my $attrs = $self->{_attrs}; if ( defined($attrs) ) { $attrs =~ s/^\s+//; @@ -364,7 +364,7 @@ sub _parse_attrs { $attrs =~ s/^\s+//; } # end while } - + return 1; } # end _parse_atttrs @@ -376,7 +376,7 @@ sub _parse_attrs { # Returns: A reference to our node or undef on error # # Description: Traverses the owner document's tree to find -# the node that references the current 
element. Sets +# the node that references the current element. Sets # $self-{self} as a side-effect. Even if this is already set, # _find_self will traverse again, so don't call unless needed. # ---------------------------------------------------------- @@ -387,8 +387,8 @@ sub _parse_attrs { # ---------------------------------------------------------- sub _find_self { my $self = shift; - - # We actually just call this recusively, so the first + + # We actually just call this recusively, so the first # argument can be a starting point to descend from # but we don't doc that above my $node = shift || $self->{doc}{tree}; @@ -405,10 +405,10 @@ sub _find_self { # If this is our self, then we're done! # NOTE: Since the list references are the same in the by-name hash # and tree objects, we can just do a reference compare here. - # If objects are ever created with non-factory methods then we need to + # If objects are ever created with non-factory methods then we need to # use a _compare_lists call. -# if( _compare_lists( $node->[$i], $self->{node} ) ) { - if( $node->[$i] eq $self->{node} ) { +# if( _compare_lists( $node->[$i], $self->{node} ) ) { + if( $node->[$i] eq $self->{node} ) { $self->{parent} = $node; $self->{self} = $node->[$i]; # If this list has children, then add a pointer to that list @@ -453,16 +453,16 @@ sub _find_self { # ---------------------------------------------------------- sub _compare_lists { my( $rA, $rB ) = @_; - + # Lists are not equal unless same size return 0 unless scalar(@$rA) == scalar(@$rB); - + # Now compare item by item. 
my $i; for( $i = 0; $i < scalar(@$rA); $i++ ) { return 0 unless $rA->[$i] eq $rB->[$i]; } # end for - + return 1; } # end _compare_lists diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/README b/src/externals/pio1/tests/testpio/perl5lib/XML/README index fa16ec05438..6234a760cec 100644 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/README +++ b/src/externals/pio1/tests/testpio/perl5lib/XML/README @@ -7,7 +7,7 @@ for most things you need to do with XML files. It is not dependent on any other modules or external programs for installation. -NOTE that this parser will do many things that you want with XML but +NOTE that this parser will do many things that you want with XML but not everything. It is not a validating parser! It will not handle international characters (unless run on those systems). Use at your own risk. diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite.3 b/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite.3 deleted file mode 100644 index f16455c6713..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite.3 +++ /dev/null @@ -1,213 +0,0 @@ -.\" Automatically generated by Pod::Man version 1.02 -.\" Tue Mar 18 09:37:35 2003 -.\" -.\" Standard preamble: -.\" ====================================================================== -.de Sh \" Subsection heading -.br -.if t .Sp -.ne 5 -.PP -\fB\\$1\fR -.PP -.. -.de Sp \" Vertical space (when we can't use .PP) -.if t .sp .5v -.if n .sp -.. -.de Ip \" List item -.br -.ie \\n(.$>=3 .ne \\$3 -.el .ne 3 -.IP "\\$1" \\$2 -.. -.de Vb \" Begin verbatim text -.ft CW -.nf -.ne \\$1 -.. -.de Ve \" End verbatim text -.ft R - -.fi -.. -.\" Set up some character translations and predefined strings. \*(-- will -.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left -.\" double quote, and \*(R" will give a right double quote. | will give a -.\" real vertical bar. \*(C+ will give a nicer C++. 
Capital omega is used -.\" to do unbreakable dashes and therefore won't be available. \*(C` and -.\" \*(C' expand to `' in nroff, nothing in troff, for use with C<> -.tr \(*W-|\(bv\*(Tr -.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' -.ie n \{\ -. ds -- \(*W- -. ds PI pi -. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch -. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch -. ds L" "" -. ds R" "" -. ds C` ` -. ds C' ' -'br\} -.el\{\ -. ds -- \|\(em\| -. ds PI \(*p -. ds L" `` -. ds R" '' -'br\} -.\" -.\" If the F register is turned on, we'll generate index entries on stderr -.\" for titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and -.\" index entries marked with X<> in POD. Of course, you'll have to process -.\" the output yourself in some meaningful fashion. -.if \nF \{\ -. de IX -. tm Index:\\$1\t\\n%\t"\\$2" -. . -. nr % 0 -. rr F -.\} -.\" -.\" For nroff, turn off justification. Always turn off hyphenation; it -.\" makes way too many mistakes in technical documents. -.hy 0 -.if n .na -.\" -.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). -.\" Fear. Run. Save yourself. No user-serviceable parts. -.bd B 3 -. \" fudge factors for nroff and troff -.if n \{\ -. ds #H 0 -. ds #V .8m -. ds #F .3m -. ds #[ \f1 -. ds #] \fP -.\} -.if t \{\ -. ds #H ((1u-(\\\\n(.fu%2u))*.13m) -. ds #V .6m -. ds #F 0 -. ds #[ \& -. ds #] \& -.\} -. \" simple accents for nroff and troff -.if n \{\ -. ds ' \& -. ds ` \& -. ds ^ \& -. ds , \& -. ds ~ ~ -. ds / -.\} -.if t \{\ -. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" -. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' -. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' -. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' -. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' -. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' -.\} -. 
\" troff and (daisy-wheel) nroff accents -.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' -.ds 8 \h'\*(#H'\(*b\h'-\*(#H' -.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] -.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' -.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' -.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] -.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] -.ds ae a\h'-(\w'a'u*4/10)'e -.ds Ae A\h'-(\w'A'u*4/10)'E -. \" corrections for vroff -.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' -.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' -. \" for low resolution devices (crt and lpr) -.if \n(.H>23 .if \n(.V>19 \ -\{\ -. ds : e -. ds 8 ss -. ds o a -. ds d- d\h'-1'\(ga -. ds D- D\h'-1'\(hy -. ds th \o'bp' -. ds Th \o'LP' -. ds ae ae -. ds Ae AE -.\} -.rm #[ #] #H #V #F C -.\" ====================================================================== -.\" -.IX Title "Lite 3" -.TH Lite 3 "perl v5.6.0" "2003-03-17" "User Contributed Perl Documentation" -.UC -.SH "NAME" -\&\s-1XML:\s0:Lite \- A lightweight \s-1XML\s0 parser for simple files -.SH "SYNOPSIS" -.IX Header "SYNOPSIS" -use \s-1XML:\s0:Lite; -my \f(CW$xml\fR = new \s-1XML:\s0:Lite( xml => 'a_file.xml' ); -.SH "DESCRIPTION" -.IX Header "DESCRIPTION" -\&\s-1XML:\s0:Lite is a lightweight \s-1XML\s0 parser, with basic element traversing -methods. It is entirely self-contained, pure Perl (i.e. \fInot\fR based on -expat). It provides useful methods for reading most \s-1XML\s0 files, including -traversing and finding elements, reading attributes and such. It is -designed to take advantage of Perl-isms (Attribute lists are returned as -hashes, rather than, say, lists of objects). It provides only methods -for reading a file, currently. 
-.SH "METHODS" -.IX Header "METHODS" -The following methods are available: -.Ip "my \f(CW$xml\fR = new \s-1XML:\s0:Lite( xml => \f(CW$source\fR[, ...] );" 4 -.IX Item "my $xml = new XML::Lite( xml => $source[, ...] );" -Creates a new \s-1XML:\s0:Lite object. The \s-1XML:\s0:Lite object acts as the document -object for the \f(CW$source\fR that is sent to it to parse. This means that you -create a new object for each document (or document sub-section). As the -objects are lightweight this should not be a performance consideration. -.Sp -The object constructor can take several named parameters. Parameter names -may begin with a '\-' (as in the example above) but are not required to. The -following parameters are recognized. -.Sp -.Vb 2 -\& xml The source XML to parse. This can be a filename, a scalar that -\& contains the document (or document fragment), or an IO handle. -.Ve -As a convenince, if only on parameter is given, it is assumed to be the source. -So you can use this, if you wish: -.Sp -.Vb 1 -\& my $xml = new XML::Lite( 'file.xml' ); -.Ve -.Ip "my \f(CW$elm\fR = \f(CW$xml\fR->\fIroot_element()\fR" 4 -.IX Item "my $elm = $xml->root_element()" -Returns a reference to an \s-1XML:\s0:Lite::Element object that represents -the root element of the document. -.Sp -Returns \f(CW\*(C`undef\*(C'\fR on errors. -.Ip "@list = \f(CW$xml\fR->elements_by_name( \f(CW$name\fR )" 4 -.IX Item "@list = $xml->elements_by_name( $name )" -Returns a list of all elements that match \f(CW\*(C`$name\*(C'\fR. -\&\f(CW\*(C`@list\*(C'\fR is a list of the XML::Lite::Element manpage objects -If called in a scalar context, this will return the -first element found that matches (it's more efficient -to call in a scalar context than assign the results -to a list of one scalar). -.Sp -If no matching elements are found then returns \f(CW\*(C`undef\*(C'\fR -in scalar context or an empty list in array context. -.SH "BUGS" -.IX Header "BUGS" -Lots. 
This 'parser' (Matt Sergeant takes umbrance to my us of that word) will handle some \s-1XML\s0 -documents, but not all. -.SH "VERSION" -.IX Header "VERSION" -0.14 -.SH "AUTHOR" -.IX Header "AUTHOR" -Jeremy Wadsack for Wadsack-Allen Digital Group (dgsupport@wadsack-allen.com) -.SH "COPYRIGHT" -.IX Header "COPYRIGHT" -Copyright 2001\-2003 Wadsack-Allen. All rights reserved. -This library is free software; you can redistribute it and/or -modify it under the same terms as Perl itself. diff --git a/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite_Element.3 b/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite_Element.3 deleted file mode 100644 index f31d1336e46..00000000000 --- a/src/externals/pio1/tests/testpio/perl5lib/XML/man3/XML_Lite_Element.3 +++ /dev/null @@ -1,206 +0,0 @@ -.\" Automatically generated by Pod::Man version 1.02 -.\" Tue Mar 18 09:37:36 2003 -.\" -.\" Standard preamble: -.\" ====================================================================== -.de Sh \" Subsection heading -.br -.if t .Sp -.ne 5 -.PP -\fB\\$1\fR -.PP -.. -.de Sp \" Vertical space (when we can't use .PP) -.if t .sp .5v -.if n .sp -.. -.de Ip \" List item -.br -.ie \\n(.$>=3 .ne \\$3 -.el .ne 3 -.IP "\\$1" \\$2 -.. -.de Vb \" Begin verbatim text -.ft CW -.nf -.ne \\$1 -.. -.de Ve \" End verbatim text -.ft R - -.fi -.. -.\" Set up some character translations and predefined strings. \*(-- will -.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left -.\" double quote, and \*(R" will give a right double quote. | will give a -.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used -.\" to do unbreakable dashes and therefore won't be available. \*(C` and -.\" \*(C' expand to `' in nroff, nothing in troff, for use with C<> -.tr \(*W-|\(bv\*(Tr -.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' -.ie n \{\ -. ds -- \(*W- -. ds PI pi -. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch -. 
if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch -. ds L" "" -. ds R" "" -. ds C` ` -. ds C' ' -'br\} -.el\{\ -. ds -- \|\(em\| -. ds PI \(*p -. ds L" `` -. ds R" '' -'br\} -.\" -.\" If the F register is turned on, we'll generate index entries on stderr -.\" for titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and -.\" index entries marked with X<> in POD. Of course, you'll have to process -.\" the output yourself in some meaningful fashion. -.if \nF \{\ -. de IX -. tm Index:\\$1\t\\n%\t"\\$2" -. . -. nr % 0 -. rr F -.\} -.\" -.\" For nroff, turn off justification. Always turn off hyphenation; it -.\" makes way too many mistakes in technical documents. -.hy 0 -.if n .na -.\" -.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). -.\" Fear. Run. Save yourself. No user-serviceable parts. -.bd B 3 -. \" fudge factors for nroff and troff -.if n \{\ -. ds #H 0 -. ds #V .8m -. ds #F .3m -. ds #[ \f1 -. ds #] \fP -.\} -.if t \{\ -. ds #H ((1u-(\\\\n(.fu%2u))*.13m) -. ds #V .6m -. ds #F 0 -. ds #[ \& -. ds #] \& -.\} -. \" simple accents for nroff and troff -.if n \{\ -. ds ' \& -. ds ` \& -. ds ^ \& -. ds , \& -. ds ~ ~ -. ds / -.\} -.if t \{\ -. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" -. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' -. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' -. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' -. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' -. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' -.\} -. 
\" troff and (daisy-wheel) nroff accents -.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' -.ds 8 \h'\*(#H'\(*b\h'-\*(#H' -.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] -.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' -.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' -.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] -.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] -.ds ae a\h'-(\w'a'u*4/10)'e -.ds Ae A\h'-(\w'A'u*4/10)'E -. \" corrections for vroff -.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' -.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' -. \" for low resolution devices (crt and lpr) -.if \n(.H>23 .if \n(.V>19 \ -\{\ -. ds : e -. ds 8 ss -. ds o a -. ds d- d\h'-1'\(ga -. ds D- D\h'-1'\(hy -. ds th \o'bp' -. ds Th \o'LP' -. ds ae ae -. ds Ae AE -.\} -.rm #[ #] #H #V #F C -.\" ====================================================================== -.\" -.IX Title "Lite::Element 3" -.TH Lite::Element 3 "perl v5.6.0" "2003-01-31" "User Contributed Perl Documentation" -.UC -.SH "NAME" -\&\s-1XML:\s0:Lite::Element \- A class representing an \s-1XML\s0 element in an \s-1XML:\s0:Lite -document -.SH "SYNOPSIS" -.IX Header "SYNOPSIS" -use \s-1XML:\s0:Lite; -my \f(CW$xml\fR = new \s-1XML:\s0:Lite( \-xml => 'a_file.xml' ); -my \f(CW$elm\fR = \f(CW$xml\fR->elements_by_name( 'element_name' ); -print \f(CW$elm\fR->get_attribute( 'attribute_name' ); -.SH "DESCRIPTION" -.IX Header "DESCRIPTION" -\&\f(CW\*(C`XML::Lite::Element\*(C'\fR objects contain rudimentary methods for querying \s-1XML\s0 -elements in an \s-1XML\s0 document as parsed by \s-1XML:\s0:Lite. Usually these objects -are returned by method calls in \s-1XML:\s0:Lite. -.SH "METHODS" -.IX Header "METHODS" -The following methods are available. All methods like 'get_name' can be -abbeviated as 'name.' 
-.Ip "my \f(CW$element\fR = new \s-1XML:\s0:Lite::Element( \f(CW$owner_document\fR, \e@pointers );" 4 -.IX Item "my $element = new XML::Lite::Element( $owner_document, @pointers );" -Creates a new \s-1XML:\s0:Lite::Element object from the \s-1XML:\s0:Lite object, \f(CW\*(C`$owner_document\*(C'\fR. -.Sp -Currently, you must not call this manually. You can create an object with one of -the 'factory' methods in \s-1XML:\s0:Lite, such as \f(CW\*(C`element_by_name\*(C'\fR or \f(CW\*(C`root_element\*(C'\fR -or with one of the \s-1XML:\s0:Lite::Element 'factory' methods below, like \f(CW\*(C`get_children\*(C'\fR. -.Ip "my \f(CW$content\fR = \f(CW$element\fR->\fIget_content()\fR" 4 -.IX Item "my $content = $element->get_content()" -Returns the content of the \s-1XML\s0 element. This may include other \s-1XML\s0 tags. The -entire content is returned as a scalar. -.Ip "my \f(CW%attributes\fR = \f(CW$element\fR->\fIget_attributes()\fR" 4 -.IX Item "my %attributes = $element->get_attributes()" -Returns a hash of name \- value pairs for the attributes in this element. -.Ip "my \f(CW$value\fR = \f(CW$element\fR->get_attribute( \f(CW$name\fR )" 4 -.IX Item "my $value = $element->get_attribute( $name )" -Returns the value of the named attribute for this element. -.Ip "my \f(CW$name\fR = \f(CW$element\fR->\fIget_name()\fR" 4 -.IX Item "my $name = $element->get_name()" -Returns the name of the element tag -.Ip "my \f(CW@children\fR = \f(CW$element\fR->\fIget_children()\fR" 4 -.IX Item "my @children = $element->get_children()" -Returns a list of \s-1XML:\s0:Lite::Element objects for each element contained -within the current element. This does not return any text or \s-1CDATA\s0 in -the content of this element. You can parse that through the the get_content manpage -method. -.Sp -If no child elements exist then an empty list is returned. 
-.Ip "my \f(CW$text\fR = \f(CW$element\fR->\fIget_text()\fR" 4 -.IX Item "my $text = $element->get_text()" -Returns a scalar of the text within an element sans children elements. -This effectively takes the content of the element and strips all \s-1XML\s0 -elements. All text is concatenated into a single string. White space -is preserved. \s-1CDATA\s0 elements are included without the + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/externals/pio2/.gitignore b/src/externals/pio2/.gitignore index dc7b21644c8..8a7309999b8 100644 --- a/src/externals/pio2/.gitignore +++ b/src/externals/pio2/.gitignore @@ -2,5 +2,30 @@ html/ *~ \#*\# +*.o +*.in +*.lo +*.la +Makefile +acinclude.m4 +aclocal.m4 +atconfig +autom4te.cache +config-h.in +config.* +configure +stamp-h1 +conftest* +missing +libtool +install-sh +ltmain.sh +compile +depcomp +build/ +.deps/ +.libs/ +m4/ +*.nc diff --git a/src/externals/pio2/CMakeLists.txt b/src/externals/pio2/CMakeLists.txt index 514384a5a9e..c50087ad740 100644 --- a/src/externals/pio2/CMakeLists.txt +++ b/src/externals/pio2/CMakeLists.txt @@ -4,10 +4,13 @@ project (PIO C Fortran) # The project version number. set(VERSION_MAJOR 2 CACHE STRING "Project major version number.") -set(VERSION_MINOR 0 CACHE STRING "Project minor version number.") -set(VERSION_PATCH 28 CACHE STRING "Project patch version number.") +set(VERSION_MINOR 3 CACHE STRING "Project minor version number.") +set(VERSION_PATCH 0 CACHE STRING "Project patch version number.") mark_as_advanced(VERSION_MAJOR VERSION_MINOR VERSION_PATCH) +# The size of the data buffer for write/read_darray(). 
+set(PIO_BUFFER_SIZE 134217728) + #============================================================================== # USER-DEFINED OPTIONS (set with "-DOPT=VAL" from command line) #============================================================================== @@ -142,8 +145,8 @@ set (CMAKE_Fortran_COMPILER_DIRECTIVE "CPR${CMAKE_Fortran_COMPILER_NAME}" # configure a header file to pass some of the CMake settings # to the source code configure_file ( - "${PROJECT_SOURCE_DIR}/src/clib/config.h.in" - "${PROJECT_BINARY_DIR}/src/clib/config.h" + "${PROJECT_SOURCE_DIR}/cmake_config.h.in" + "${PROJECT_BINARY_DIR}/config.h" ) #============================================================================== diff --git a/src/externals/pio2/CTestScript.cmake b/src/externals/pio2/CTestScript.cmake index e817f422d15..23d2a25a418 100644 --- a/src/externals/pio2/CTestScript.cmake +++ b/src/externals/pio2/CTestScript.cmake @@ -50,7 +50,8 @@ if (HOSTNAME MATCHES "^yslogin" OR set (HOSTNAME_ID "nwsc") # New UCAR/NWSC SGI Machines elseif (HOSTNAME MATCHES "^laramie" OR - HOSTNAME MATCHES "^chadmin") + HOSTNAME MATCHES "^chadmin" OR + HOSTNAME MATCHES "^cheyenne") set (HOSTNAME_ID "nwscla") # ALCF/Argonne Machines elseif (HOSTNAME MATCHES "^mira" OR @@ -159,7 +160,7 @@ set (CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}") ctest_update () ## -- Configure -message (" -- Configure build - ${CTEST_BUILD_NAME} --") +message (" -- Configure build - ${CTEST_BUILD_NAME} -- with options ${CTEST_CONFIGURE_OPTIONS}") include (CTestEnvironment-${HOSTNAME_ID}) set (CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} ${CTEST_CONFIGURE_OPTIONS} ${CTEST_SOURCE_DIRECTORY}") ctest_configure () diff --git a/src/externals/pio2/Makefile.am b/src/externals/pio2/Makefile.am new file mode 100644 index 00000000000..990da3e474e --- /dev/null +++ b/src/externals/pio2/Makefile.am @@ -0,0 +1,4 @@ + +SUBDIRS = src tests +#Recommended by libtoolize +ACLOCAL_AMFLAGS= -I m4 diff --git a/src/externals/pio2/src/clib/config.h.in 
b/src/externals/pio2/cmake_config.h.in similarity index 91% rename from src/externals/pio2/src/clib/config.h.in rename to src/externals/pio2/cmake_config.h.in index 8d80d37fd51..ba817fe5d38 100644 --- a/src/externals/pio2/src/clib/config.h.in +++ b/src/externals/pio2/cmake_config.h.in @@ -25,4 +25,7 @@ /** Size of MPI_Offset type. */ #define SIZEOF_MPI_OFFSET @SIZEOF_MPI_OFFSET@ +/* buffer size for darray data. */ +#define PIO_BUFFER_SIZE @PIO_BUFFER_SIZE@ + #endif /* _PIO_CONFIG_ */ diff --git a/src/externals/pio2/configure.ac b/src/externals/pio2/configure.ac new file mode 100644 index 00000000000..bfddcde9606 --- /dev/null +++ b/src/externals/pio2/configure.ac @@ -0,0 +1,100 @@ +## This is the autoconf file for the PIO library. +## Ed Hartnett 8/16/17 + +# Initialize autoconf and automake. +AC_INIT(pio, 2.3.0) +AC_CONFIG_SRCDIR(src/clib/pio_darray.c) +AM_INIT_AUTOMAKE([foreign serial-tests]) + +# The m4 directory holds macros for autoconf. +AC_CONFIG_MACRO_DIR([m4]) + +# Find and learn about the C compiler. +AC_PROG_CC + +# Libtool initialisation. +AC_PROG_LIBTOOL + +# Always use malloc in autotools builds. +AC_DEFINE([PIO_USE_MALLOC], [1], [use malloc for memory]) + +AC_MSG_CHECKING([whether a PIO_BUFFER_SIZE was specified]) +AC_ARG_WITH([piobuffersize], + [AS_HELP_STRING([--with-piobuffersize=], + [Specify buffer size for PIO.])], + [PIO_BUFFER_SIZE=$with_piobuffersize], [PIO_BUFFER_SIZE=134217728]) +AC_MSG_RESULT([$PIO_BUFFER_SIZE]) +AC_DEFINE_UNQUOTED([PIO_BUFFER_SIZE], [$PIO_BUFFER_SIZE], [buffer size for darray data.]) + +# Need to allow user to set this. +AC_DEFINE([PIO_ENABLE_LOGGING], [1], [log messages from library]) + +# NetCDF (at least classic) is required for PIO to build. +AC_DEFINE([_NETCDF], [1], [netCDF classic library available]) + +# Is parallel-netcdf library available? +#AC_DEFINE([_PNETCDF], [1], [parallel-netcdf library available]) + +# The PIO version, again. 
+AC_DEFINE([PIO_VERSION_MAJOR], [2], [PIO major version]) +AC_DEFINE([PIO_VERSION_MINOR], [3], [PIO minor version]) +AC_DEFINE([PIO_VERSION_PATCH], [0], [PIO patch version]) + +# ??? +AC_DEFINE([CPRGNU], [1], [defined by CMake build]) + +# We must have MPI to build PIO. +AC_DEFINE([HAVE_MPI], [1], [defined by CMake build]) + +# ??? +AC_DEFINE([INCLUDE_CMAKE_FCI], [1], [defined by CMake build]) + +# All builds are on LINUX. +AC_DEFINE([LINUX], [1], [defined by CMake build]) + +# Check for netCDF library. +AC_CHECK_LIB([netcdf], [nc_create], [], [AC_MSG_ERROR([Can't find or link to the netcdf library.])]) + +# Check for pnetcdf library. +AC_CHECK_LIB([pnetcdf], [ncmpi_create], [], []) + +# If we have parallel-netcdf, then set these as well. +if test x$ac_cv_lib_pnetcdf_ncmpi_create = xyes; then + AC_DEFINE([_PNETCDF], [1], [parallel-netcdf library available]) + AC_DEFINE([USE_PNETCDF_VARN], [1], [defined by CMake build]) + AC_DEFINE([USE_PNETCDF_VARN_ON_READ], [1], [defined by CMake build]) +fi + +# Do we have a parallel build of netCDF-4? +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include "netcdf_meta.h"], +[[#if !NC_HAS_PARALLEL +# error +#endif] +])], [have_netcdf_par=yes], [have_netcdf_par=no]) + +AC_MSG_CHECKING([whether netCDF provides parallel IO]) +AC_MSG_RESULT([${have_netcdf_par}]) +if test x$have_netcdf_par = xyes; then + AC_DEFINE([_NETCDF4],[1],[Does netCDF library provide netCDF-4 with parallel access]) +fi + +# Not working for some reason, so I will just set it... +AC_CHECK_TYPE([MPI_Offset], [], [], [#include ]) +if test "x${ac_cv_type_MPI_Offset}" = xyes; then + AC_CHECK_SIZEOF([MPI_Offset], [], [#include ]) +else + AC_MSG_ERROR([Unable to find type MPI_Offset in mpi.h]) +fi + +#AC_CHECK_SIZEOF([MPI_Offset], [], [[#include ]]) +#AC_DEFINE([SIZEOF_MPI_OFFSET], [8], [netCDF classic library available]) + +# Create the config.h file. +AC_CONFIG_HEADERS([config.h]) + +# Create the makefiles. 
+AC_OUTPUT(Makefile + src/Makefile + src/clib/Makefile + tests/Makefile + tests/cunit/Makefile) diff --git a/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake b/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake index 203e543686c..eb8606e46d0 100644 --- a/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake +++ b/src/externals/pio2/ctest/CTestEnvironment-cgd.cmake @@ -9,7 +9,7 @@ # set with existing environment variables: NETCDF, PNETCDF, HDF5, etc. # Define the extra CMake configure options -set (CTEST_CONFIGURE_OPTIONS "-DCMAKE_VERBOSE_MAKEFILE=TRUE -DPNETCDF_DIR=$ENV{PNETCDF_PATH} -DNETCDF_DIR=$ENV{NETCDF_PATH}") +set (CTEST_CONFIGURE_OPTIONS "${CTEST_CONFIGURE_OPTIONS} -DCMAKE_VERBOSE_MAKEFILE=TRUE -DPNETCDF_DIR=$ENV{PNETCDF_PATH} -DNETCDF_DIR=$ENV{NETCDF_PATH}") # If MPISERIAL environment variable is set, then enable MPISERIAL if (DEFINED ENV{MPISERIAL}) diff --git a/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh b/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh index 56407d9c425..57cad940539 100755 --- a/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh +++ b/src/externals/pio2/ctest/runcdash-cgd-gnu-openmpi.sh @@ -35,4 +35,4 @@ git checkout develop git pull origin develop -ctest -S CTestScript.cmake,${model} -VV +ctest -S CTestScript.cmake,${model} -VV -DCTEST_CONFIGURE_OPTIONS="-DCMAKE_EXE_LINKER_FLAGS=-ldl" diff --git a/src/externals/pio2/doc/source/Decomp.txt b/src/externals/pio2/doc/source/Decomp.txt index 8eac8a7138b..9ccce62a58f 100644 --- a/src/externals/pio2/doc/source/Decomp.txt +++ b/src/externals/pio2/doc/source/Decomp.txt @@ -67,7 +67,7 @@ If we have 2 io tasks the Box rearranger would give: While the subset rearranger would give:
 0: { 0  1  4  5  8  9  12 16 }
-1: { 2  3  6  7  10 11 13 14 17 18 19 }
+1: { 2  3  6  7  10 11 13 14 15 17 18 19 }
 
Note that while the box rearranger gives a data layout which is well diff --git a/src/externals/pio2/doc/source/base.txt b/src/externals/pio2/doc/source/base.txt index 9a0401076a9..210c9bbfbf3 100644 --- a/src/externals/pio2/doc/source/base.txt +++ b/src/externals/pio2/doc/source/base.txt @@ -21,6 +21,7 @@ \author Jim Edwards \author John M. Dennis \author Mariana Vertenstein +\author Edward Hartnett The Parallel I/O (PIO) library has been developed over several years to improve the ability of component models of the Community Earth diff --git a/src/externals/pio2/doc/source/contributing_code.txt b/src/externals/pio2/doc/source/contributing_code.txt index c811777a02b..264484f2951 100644 --- a/src/externals/pio2/doc/source/contributing_code.txt +++ b/src/externals/pio2/doc/source/contributing_code.txt @@ -72,6 +72,10 @@ group. process.
  • Document in complete sentences.
  • Use C (not C++) comment delimiters. +
  • Use the author tag to indicate which programmers have worked on +each function. When adding or changing a function in a non-trivial +way, programmers should add their name to the end of the list of +authors for that function. ## Emacs ## diff --git a/src/externals/pio2/doc/source/mach_walkthrough.txt b/src/externals/pio2/doc/source/mach_walkthrough.txt index 820a6a3f288..bae5540da85 100644 --- a/src/externals/pio2/doc/source/mach_walkthrough.txt +++ b/src/externals/pio2/doc/source/mach_walkthrough.txt @@ -322,13 +322,13 @@ installed.

    Download from the zlib downloads page. (These instructions were tested using version -1.2.8). Untar with: -

    tar zxf zlib-1.2.8.tar.gz
    +1.2.11). Untar with: +
    tar zxf zlib-1.2.11.tar.gz

    Build with: -

    cd zlib-1.2.8
    -CC=mpicc ./configure --prefix=/usr/local
    +
    cd zlib-1.2.11
    +CC=mpicc ./configure --prefix=/usr/local/zlib-1.2.11_mpich-3.2
     make all check
     sudo make install 
    @@ -342,70 +342,75 @@ with:
    tar zxf szip-2.1.tar.gz

    Build with:

    cd slib-2.1
    -CC=mpicc ./configure --prefix=/usr/local
    +CC=mpicc ./configure --prefix=/usr/local/szip-2.1_mpich-3.2
     make all check
     sudo make install 
  • Installing pNetCDF

    Download parallel-netcdf from the parallel-netcdf download -page. (These instructions were tested using version 1.7.0). Untar -with:

    tar zxf parallel-netcdf-1.7.0.tar.gz
    - +href="http://cucis.ece.northwestern.edu/projects/PnetCDF/download.html">parallel-netcdf +download page. (These instructions were tested using version +1.8.1). Untar with:
    tar zxf parallel-netcdf-1.8.1.tar.gz

    Build with: -

    cd parallel-netcdf-1.7.0
    -FC=mpifort CC=mpicc CFLAGS=-fPIC ./configure --prefix=/usr/local/pnetcdf-1.7.0
    +
    cd parallel-netcdf-1.8.1
    +FC=mpifort CC=mpicc CFLAGS=-fPIC ./configure --prefix=/usr/local/pnetcdf-1.8.1_mpich-3.2
     make all check
     sudo make install 
    -(This was also tested with version 1.6.1.) -
  • Installing HDF5

    Download HDF5 from the HDF5 download +href="https://www.hdfgroup.org/downloads/hdf5/source-code/">HDF5 download page. -

    1.10.0-patch1

    - -

    These instructions were tested using version 1.10.0-patch1. Untar -with:

    tar zxf hdf5-1.10.0-patch1.tar.gz
    +

    These instructions were tested using version 1.10.1. Untar with: +

    tar zxf hdf5-1.10.1.tar.gz
    . Note that in my case I need to +add /usr/local/bin to the PATH for su, because that is where mpicc is +found.

    Build with: -

    cd hdf5-1.10.0-patch1
    -CC=mpicc ./configure --with-zlib=/usr/local --prefix=/usr/local --with-szlib=/usr/local --enable-parallel
    +
    cd hdf5-1.10.1
    +CC=mpicc ./configure --with-zlib=/usr/local/zlib-1.2.11_mpich-3.2 --with-szlib=/usr/local/szip-2.1_mpich-3.2 --prefix=/usr/local/hdf5-1.10.1_mpich-3.2 --enable-parallel
     make all check
    -sudo make install 
    +sudo PATH=$PATH:/usr/local/bin make install
  • Installing NetCDF-4 C Library

    Download the netcdf C library from the NetCDF download -page. (These instructions were tested using version 4.4.0). Untar -with:

    tar zxf netcdf-c-4.4.0.tar.gz
    +page. Untar with:
    tar zxf netcdf-c-4.5.0-rc1.tar.gz

    Build with: -

    cd netcdf-c-4.4.1
    -CC=mpicc ./configure --enable-parallel-tests --prefix=/usr/local/netcdf-c-4.4.1 --with-hdf5=/usr/local --with-zlib=/usr/local --with-szlib=/usr/local
    +
    cd netcdf-c-4.5.0-rc1
    +CPPFLAGS='-I/usr/local/zlib-1.2.11_mpich-3.2 -I/usr/local/szip-2.1_mpich-3.2/include -I/usr/local/hdf5-1.10.1_mpich-3.2/include' LDFLAGS='-L/usr/local/zlib-1.2.11_mpich-3.2/lib -L/usr/local/szip-2.1_mpich-3.2/lib -L/usr/local/hdf5-1.10.1_mpich-3.2/lib' CC=mpicc ./configure --enable-parallel-tests --prefix=/usr/local/netcdf-4.4.1_mpich-3.2
     make all check
     sudo make install 
    +

    Note that you may not build netCDF with it's built-in +parallel-netCDF support, if you are also planning to use the +parallel-netCDF library with PIO. For PIO, parallel-netCDF must be +installed independently of netCDF. +

  • Installing NetCDF-4 Fortran Library

    Download the netcdf Fortran library from the NetCDF download -page. (These instructions were tested using version 4.4.3). Untar -with:

    tar zxf netcdf-fortran-4.4.3.tar.gz
    +page. Untar +with:
    tar zxf netcdf-fortran-4.4.4.tar.gz

    Build with: -

    cd netcdf-fortran-4.4.3
    -make distclean; LD_LIBRARY_PATH=/usr/local/netcdf-c-4.4.1/lib FC=mpifort CPPFLAGS=-I/usr/local/netcdf-c-4.4.1/include LDFLAGS=-L/usr/local/netcdf-c-4.4.1/lib CC=mpicc ./configure --enable-parallel-tests --prefix=/usr/local/netcdf-c-4.4.1 && make check
    +
    cd netcdf-fortran-4.4.4
    +CC=mpicc LD_LIBRARY_PATH=/usr/local/netcdf-4.4.1_mpich-3.2/lib FC=mpifort CPPFLAGS=-I/usr/local/netcdf-4.4.1_mpich-3.2/include LDFLAGS=-L/usr/local/netcdf-4.4.1_mpich-3.2/lib ./configure --enable-parallel-tests --prefix=/usr/local/netcdf-fortran-4.4.4_c_4.4.1_mpich-3.2
    +make all
     sudo make install 
    +

    Note that make check did not work because of a failure in a +test. This has been reported to the netCDF team. +

  • Installing ParallelIO Library

    Clone the ParallelIO library. @@ -415,7 +420,7 @@ sudo make install

  • cd ParallelIO mkdir build cd build -CC=mpicc FC=mpifort cmake -DNetCDF_C_PATH=/usr/local -DNetCDF_Fortran_PATH=/usr/local -DPnetCDF_PATH=/usr/local -DPIO_HDF5_LOGGING=On .. +CC=mpicc FC=mpifort cmake -DNetCDF_C_PATH=/usr/local/netcdf-4.4.1_mpich-3.2 -DNetCDF_Fortran_PATH=/usr/local/netcdf-fortran-4.4.4_c_4.4.1_mpich-3.2 -DPnetCDF_PATH=/usr/local/pnetcdf-1.8.1_mpich-3.2 -DPIO_HDF5_LOGGING=On -DPIO_USE_MALLOC=On .. make make check sudo make install @@ -426,7 +431,13 @@ build from the command line, including tests:
     cd build
    -rm -rf * && CFLAGS='-Wall -g' FFLAGS=-g CC=mpicc FC=mpifort cmake -DNetCDF_C_PATH=/usr/local/netcdf-4.4.1 -DNetCDF_Fortran_PATH=/usr/local/netcdf-fortran-4.4.4 -DPnetCDF_PATH=/usr/local/pnetcdf-1.8.1 -DPIO_ENABLE_LOGGING=On .. && make VERBOSE=1 all tests check
    +rm -rf * && CFLAGS='-Wall -g' FFLAGS=-g CC=mpicc FC=mpifort cmake -DNetCDF_C_PATH=/usr/local/netcdf-4.4.1_mpich-3.2 -DNetCDF_Fortran_PATH=/usr/local/netcdf-fortran-4.4.4_c_4.4.1_mpich-3.2 -DPnetCDF_PATH=/usr/local/pnetcdf-1.8.1_mpich-3.2 -DPIO_HDF5_LOGGING=On -DPIO_USE_MALLOC=On -DPIO_ENABLE_LOGGING=On .. && make VERBOSE=1 all tests check
    +
    + +

    To build with address sanitizer: + +

    +rm -rf * && CFLAGS='-Wall -g  -fsanitize=address -fno-omit-frame-pointer' FFLAGS='-g  -fsanitize=address -fno-omit-frame-pointer' CC=mpicc FC=mpifort cmake -DNetCDF_C_PATH=/usr/local/netcdf-4.4.1_mpich-3.2 -DNetCDF_Fortran_PATH=/usr/local/netcdf-fortran-4.4.4_c_4.4.1_mpich-3.2 -DPnetCDF_PATH=/usr/local/pnetcdf-1.8.1_mpich-3.2 -DPIO_HDF5_LOGGING=On -DPIO_USE_MALLOC=On -DPIO_ENABLE_LOGGING=On .. && make VERBOSE=1 all tests check
     

    Note the optional CFLAGS=-g which allows the use of a debugger @@ -435,6 +446,20 @@ which causes the build commands to be made visible.

    Note also the -DPIO_ENABLE_LOGGING=On which is helpful for debugging but should probably not be used in production builds. +

  • Building PIO with autotools. + +

    To build the PIO library with autotools, clone the repo and use a command like this: + +

    +autoreconf -i && LD_LIBRARY_PATH=/usr/local/netcdf-4.4.1_mpich-3.2/lib CC=mpicc CFLAGS='-g' CPPFLAGS='-I/usr/local/netcdf-4.4.1_mpich-3.2/include/ -I/usr/local/pnetcdf-1.8.1_mpich-3.2/include' LDFLAGS='-L/usr/local/netcdf-4.4.1_mpich-3.2/lib -L/usr/local/pnetcdf-1.8.1_mpich-3.2/lib' ./configure && make check
    +
    + +

    To build with the address sanitizer for memory checking (debugging builds only!): + +

    +autoreconf -i && LD_LIBRARY_PATH=/usr/local/netcdf-4.4.1_mpich-3.2/lib CC=mpicc CFLAGS='-g -fsanitize=address -fno-omit-frame-pointer' CPPFLAGS='-I/usr/local/netcdf-4.4.1_mpich-3.2/include/ -I/usr/local/pnetcdf-1.8.1_mpich-3.2/include' LDFLAGS='-L/usr/local/netcdf-4.4.1_mpich-3.2/lib -L/usr/local/pnetcdf-1.8.1_mpich-3.2/lib' ./configure && make check
    + 
    +
  • Building and Running Performance Tests

    Download a decomp file from our =3 .ne \\$3 -.el .ne 3 -.IP "\\$1" \\$2 -.. -.de Vb \" Begin verbatim text -.ft CW -.nf -.ne \\$1 -.. -.de Ve \" End verbatim text -.ft R - -.fi -.. -.\" Set up some character translations and predefined strings. \*(-- will -.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left -.\" double quote, and \*(R" will give a right double quote. | will give a -.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used -.\" to do unbreakable dashes and therefore won't be available. \*(C` and -.\" \*(C' expand to `' in nroff, nothing in troff, for use with C<> -.tr \(*W-|\(bv\*(Tr -.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' -.ie n \{\ -. ds -- \(*W- -. ds PI pi -. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch -. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch -. ds L" "" -. ds R" "" -. ds C` ` -. ds C' ' -'br\} -.el\{\ -. ds -- \|\(em\| -. ds PI \(*p -. ds L" `` -. ds R" '' -'br\} -.\" -.\" If the F register is turned on, we'll generate index entries on stderr -.\" for titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and -.\" index entries marked with X<> in POD. Of course, you'll have to process -.\" the output yourself in some meaningful fashion. -.if \nF \{\ -. de IX -. tm Index:\\$1\t\\n%\t"\\$2" -. . -. nr % 0 -. rr F -.\} -.\" -.\" For nroff, turn off justification. Always turn off hyphenation; it -.\" makes way too many mistakes in technical documents. -.hy 0 -.if n .na -.\" -.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). -.\" Fear. Run. Save yourself. No user-serviceable parts. -.bd B 3 -. \" fudge factors for nroff and troff -.if n \{\ -. ds #H 0 -. ds #V .8m -. ds #F .3m -. ds #[ \f1 -. ds #] \fP -.\} -.if t \{\ -. ds #H ((1u-(\\\\n(.fu%2u))*.13m) -. ds #V .6m -. ds #F 0 -. ds #[ \& -. ds #] \& -.\} -. \" simple accents for nroff and troff -.if n \{\ -. ds ' \& -. ds ` \& -. ds ^ \& -. ds , \& -. 
ds ~ ~ -. ds / -.\} -.if t \{\ -. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" -. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' -. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' -. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' -. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' -. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' -.\} -. \" troff and (daisy-wheel) nroff accents -.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' -.ds 8 \h'\*(#H'\(*b\h'-\*(#H' -.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] -.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' -.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' -.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] -.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] -.ds ae a\h'-(\w'a'u*4/10)'e -.ds Ae A\h'-(\w'A'u*4/10)'E -. \" corrections for vroff -.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' -.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' -. \" for low resolution devices (crt and lpr) -.if \n(.H>23 .if \n(.V>19 \ -\{\ -. ds : e -. ds 8 ss -. ds o a -. ds d- d\h'-1'\(ga -. ds D- D\h'-1'\(hy -. ds th \o'bp' -. ds Th \o'LP' -. ds ae ae -. ds Ae AE -.\} -.rm #[ #] #H #V #F C -.\" ====================================================================== -.\" -.IX Title "Lite 3" -.TH Lite 3 "perl v5.6.0" "2003-03-17" "User Contributed Perl Documentation" -.UC -.SH "NAME" -\&\s-1XML:\s0:Lite \- A lightweight \s-1XML\s0 parser for simple files -.SH "SYNOPSIS" -.IX Header "SYNOPSIS" -use \s-1XML:\s0:Lite; -my \f(CW$xml\fR = new \s-1XML:\s0:Lite( xml => 'a_file.xml' ); -.SH "DESCRIPTION" -.IX Header "DESCRIPTION" -\&\s-1XML:\s0:Lite is a lightweight \s-1XML\s0 parser, with basic element traversing -methods. It is entirely self-contained, pure Perl (i.e. \fInot\fR based on -expat). 
It provides useful methods for reading most \s-1XML\s0 files, including -traversing and finding elements, reading attributes and such. It is -designed to take advantage of Perl-isms (Attribute lists are returned as -hashes, rather than, say, lists of objects). It provides only methods -for reading a file, currently. -.SH "METHODS" -.IX Header "METHODS" -The following methods are available: -.Ip "my \f(CW$xml\fR = new \s-1XML:\s0:Lite( xml => \f(CW$source\fR[, ...] );" 4 -.IX Item "my $xml = new XML::Lite( xml => $source[, ...] );" -Creates a new \s-1XML:\s0:Lite object. The \s-1XML:\s0:Lite object acts as the document -object for the \f(CW$source\fR that is sent to it to parse. This means that you -create a new object for each document (or document sub-section). As the -objects are lightweight this should not be a performance consideration. -.Sp -The object constructor can take several named parameters. Parameter names -may begin with a '\-' (as in the example above) but are not required to. The -following parameters are recognized. -.Sp -.Vb 2 -\& xml The source XML to parse. This can be a filename, a scalar that -\& contains the document (or document fragment), or an IO handle. -.Ve -As a convenince, if only on parameter is given, it is assumed to be the source. -So you can use this, if you wish: -.Sp -.Vb 1 -\& my $xml = new XML::Lite( 'file.xml' ); -.Ve -.Ip "my \f(CW$elm\fR = \f(CW$xml\fR->\fIroot_element()\fR" 4 -.IX Item "my $elm = $xml->root_element()" -Returns a reference to an \s-1XML:\s0:Lite::Element object that represents -the root element of the document. -.Sp -Returns \f(CW\*(C`undef\*(C'\fR on errors. -.Ip "@list = \f(CW$xml\fR->elements_by_name( \f(CW$name\fR )" 4 -.IX Item "@list = $xml->elements_by_name( $name )" -Returns a list of all elements that match \f(CW\*(C`$name\*(C'\fR. 
-\&\f(CW\*(C`@list\*(C'\fR is a list of the XML::Lite::Element manpage objects -If called in a scalar context, this will return the -first element found that matches (it's more efficient -to call in a scalar context than assign the results -to a list of one scalar). -.Sp -If no matching elements are found then returns \f(CW\*(C`undef\*(C'\fR -in scalar context or an empty list in array context. -.SH "BUGS" -.IX Header "BUGS" -Lots. This 'parser' (Matt Sergeant takes umbrance to my us of that word) will handle some \s-1XML\s0 -documents, but not all. -.SH "VERSION" -.IX Header "VERSION" -0.14 -.SH "AUTHOR" -.IX Header "AUTHOR" -Jeremy Wadsack for Wadsack-Allen Digital Group (dgsupport@wadsack-allen.com) -.SH "COPYRIGHT" -.IX Header "COPYRIGHT" -Copyright 2001\-2003 Wadsack-Allen. All rights reserved. -This library is free software; you can redistribute it and/or -modify it under the same terms as Perl itself. diff --git a/src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 b/src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 deleted file mode 100644 index 5eaf684214b..00000000000 --- a/src/externals/pio2/examples/basic/perl5lib/XML/man3/XML_Lite_Element.3 +++ /dev/null @@ -1,206 +0,0 @@ -.\" Automatically generated by Pod::Man version 1.02 -.\" Tue Mar 18 09:37:36 2003 -.\" -.\" Standard preamble: -.\" ====================================================================== -.de Sh \" Subsection heading -.br -.if t .Sp -.ne 5 -.PP -\fB\\$1\fR -.PP -.. -.de Sp \" Vertical space (when we can't use .PP) -.if t .sp .5v -.if n .sp -.. -.de Ip \" List item -.br -.ie \\n(.$>=3 .ne \\$3 -.el .ne 3 -.IP "\\$1" \\$2 -.. -.de Vb \" Begin verbatim text -.ft CW -.nf -.ne \\$1 -.. -.de Ve \" End verbatim text -.ft R - -.fi -.. -.\" Set up some character translations and predefined strings. \*(-- will -.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left -.\" double quote, and \*(R" will give a right double quote. 
| will give a -.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used -.\" to do unbreakable dashes and therefore won't be available. \*(C` and -.\" \*(C' expand to `' in nroff, nothing in troff, for use with C<> -.tr \(*W-|\(bv\*(Tr -.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' -.ie n \{\ -. ds -- \(*W- -. ds PI pi -. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch -. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch -. ds L" "" -. ds R" "" -. ds C` ` -. ds C' ' -'br\} -.el\{\ -. ds -- \|\(em\| -. ds PI \(*p -. ds L" `` -. ds R" '' -'br\} -.\" -.\" If the F register is turned on, we'll generate index entries on stderr -.\" for titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and -.\" index entries marked with X<> in POD. Of course, you'll have to process -.\" the output yourself in some meaningful fashion. -.if \nF \{\ -. de IX -. tm Index:\\$1\t\\n%\t"\\$2" -. . -. nr % 0 -. rr F -.\} -.\" -.\" For nroff, turn off justification. Always turn off hyphenation; it -.\" makes way too many mistakes in technical documents. -.hy 0 -.if n .na -.\" -.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). -.\" Fear. Run. Save yourself. No user-serviceable parts. -.bd B 3 -. \" fudge factors for nroff and troff -.if n \{\ -. ds #H 0 -. ds #V .8m -. ds #F .3m -. ds #[ \f1 -. ds #] \fP -.\} -.if t \{\ -. ds #H ((1u-(\\\\n(.fu%2u))*.13m) -. ds #V .6m -. ds #F 0 -. ds #[ \& -. ds #] \& -.\} -. \" simple accents for nroff and troff -.if n \{\ -. ds ' \& -. ds ` \& -. ds ^ \& -. ds , \& -. ds ~ ~ -. ds / -.\} -.if t \{\ -. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" -. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' -. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' -. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' -. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' -. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' -.\} -. 
\" troff and (daisy-wheel) nroff accents -.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' -.ds 8 \h'\*(#H'\(*b\h'-\*(#H' -.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] -.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' -.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' -.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] -.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] -.ds ae a\h'-(\w'a'u*4/10)'e -.ds Ae A\h'-(\w'A'u*4/10)'E -. \" corrections for vroff -.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' -.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' -. \" for low resolution devices (crt and lpr) -.if \n(.H>23 .if \n(.V>19 \ -\{\ -. ds : e -. ds 8 ss -. ds o a -. ds d- d\h'-1'\(ga -. ds D- D\h'-1'\(hy -. ds th \o'bp' -. ds Th \o'LP' -. ds ae ae -. ds Ae AE -.\} -.rm #[ #] #H #V #F C -.\" ====================================================================== -.\" -.IX Title "Lite::Element 3" -.TH Lite::Element 3 "perl v5.6.0" "2003-01-31" "User Contributed Perl Documentation" -.UC -.SH "NAME" -\&\s-1XML:\s0:Lite::Element \- A class representing an \s-1XML\s0 element in an \s-1XML:\s0:Lite -document -.SH "SYNOPSIS" -.IX Header "SYNOPSIS" -use \s-1XML:\s0:Lite; -my \f(CW$xml\fR = new \s-1XML:\s0:Lite( \-xml => 'a_file.xml' ); -my \f(CW$elm\fR = \f(CW$xml\fR->elements_by_name( 'element_name' ); -print \f(CW$elm\fR->get_attribute( 'attribute_name' ); -.SH "DESCRIPTION" -.IX Header "DESCRIPTION" -\&\f(CW\*(C`XML::Lite::Element\*(C'\fR objects contain rudimentary methods for querying \s-1XML\s0 -elements in an \s-1XML\s0 document as parsed by \s-1XML:\s0:Lite. Usually these objects -are returned by method calls in \s-1XML:\s0:Lite. -.SH "METHODS" -.IX Header "METHODS" -The following methods are available. All methods like 'get_name' can be -abbeviated as 'name.' 
-.Ip "my \f(CW$element\fR = new \s-1XML:\s0:Lite::Element( \f(CW$owner_document\fR, \e@pointers );" 4 -.IX Item "my $element = new XML::Lite::Element( $owner_document, @pointers );" -Creates a new \s-1XML:\s0:Lite::Element object from the \s-1XML:\s0:Lite object, \f(CW\*(C`$owner_document\*(C'\fR. -.Sp -Currently, you must not call this manually. You can create an object with one of -the 'factory' methods in \s-1XML:\s0:Lite, such as \f(CW\*(C`element_by_name\*(C'\fR or \f(CW\*(C`root_element\*(C'\fR -or with one of the \s-1XML:\s0:Lite::Element 'factory' methods below, like \f(CW\*(C`get_children\*(C'\fR. -.Ip "my \f(CW$content\fR = \f(CW$element\fR->\fIget_content()\fR" 4 -.IX Item "my $content = $element->get_content()" -Returns the content of the \s-1XML\s0 element. This may include other \s-1XML\s0 tags. The -entire content is returned as a scalar. -.Ip "my \f(CW%attributes\fR = \f(CW$element\fR->\fIget_attributes()\fR" 4 -.IX Item "my %attributes = $element->get_attributes()" -Returns a hash of name \- value pairs for the attributes in this element. -.Ip "my \f(CW$value\fR = \f(CW$element\fR->get_attribute( \f(CW$name\fR )" 4 -.IX Item "my $value = $element->get_attribute( $name )" -Returns the value of the named attribute for this element. -.Ip "my \f(CW$name\fR = \f(CW$element\fR->\fIget_name()\fR" 4 -.IX Item "my $name = $element->get_name()" -Returns the name of the element tag -.Ip "my \f(CW@children\fR = \f(CW$element\fR->\fIget_children()\fR" 4 -.IX Item "my @children = $element->get_children()" -Returns a list of \s-1XML:\s0:Lite::Element objects for each element contained -within the current element. This does not return any text or \s-1CDATA\s0 in -the content of this element. You can parse that through the the get_content manpage -method. -.Sp -If no child elements exist then an empty list is returned. 
-.Ip "my \f(CW$text\fR = \f(CW$element\fR->\fIget_text()\fR" 4 -.IX Item "my $text = $element->get_text()" -Returns a scalar of the text within an element sans children elements. -This effectively takes the content of the element and strips all \s-1XML\s0 -elements. All text is concatenated into a single string. White space -is preserved. \s-1CDATA\s0 elements are included without the ; - open(F2,$nfile); - my @file2 = ; - foreach my $line (@file1){ - my $nline = shift (@file2); - if($line =~ /Obtained/){ - print "Files $file and $nfile are the same\n"; - $rmfile=1; - } - next if($line == $nline); - last; - } - close(F1); - close(F2); - unlink($nfile) if ($rmfile==1); - } +use Getopt::Long; + +my $rundir=""; +my $exe=""; +my $nargs = 0; +my $verbose = 0; + +# Reg expression that match the pio decomposition file names +my $PIO_DECOMP_FNAMES = "^piodecomp"; +my $BEGIN_STACK_TRACE = "Obtained"; + +# Remove duplicate decomposition files in "dirname" +sub rem_dup_decomp_files +{ + my($dirname) = @_; + # Find files in current directory that are + # named *piodecomp* - these are the pio + # decomposition files + opendir(F,$dirname); + #my @decompfiles = grep(/^piodecomp/,readdir(F)); + my @decompfile_info_tmp = map{ {FNAME=>$_, SIZE=>-s $_, IS_DUP=>0} } grep(/${PIO_DECOMP_FNAMES}/,readdir(F)); + closedir(F); + my @decompfile_info = sort { $a->{SIZE} <=> $b->{SIZE} } @decompfile_info_tmp; + my $ndecompfile_info = @decompfile_info; + + #for(my $i=0; $i<$ndecompfile_info; $i++){ + # print "File : $decompfile_info[$i]->{FNAME} , size = $decompfile_info[$i]->{SIZE}\n"; + #} + + my $rmfile=0; + # Compare the decomposition files to find + # duplicates - and delete the dups + for(my $i=0; $i<$ndecompfile_info; $i++){ + my $file = $decompfile_info[$i]->{FNAME}; + my $fsize = $decompfile_info[$i]->{SIZE}; + next if($decompfile_info[$i]->{IS_DUP}); + for(my $j=$i+1;$j<$ndecompfile_info;$j++){ + my $nfile = $decompfile_info[$j]->{FNAME}; + my $f2size = $decompfile_info[$j]->{SIZE}; + 
next if($decompfile_info[$j]->{IS_DUP}); + last if($fsize != $f2size); + if($verbose){ + print "Comparing $file, size=$fsize, $nfile, size=$f2size\n"; + } + if($fsize == $f2size){ + open(F1,$file); + my @file1 = ; + open(F2,$nfile); + my @file2 = ; + $rmfile = 1; + foreach my $line (@file1){ + my $nline = shift (@file2); + # Ignore stack traces when comparing files + # The stack traces start with a line containing + # "Obtained" + # Also, stack trace is the last line being + # compared + if(($line =~ /${BEGIN_STACK_TRACE}/) + && ($nline =~ /${BEGIN_STACK_TRACE}/)){ + if($verbose){ + print "Files $file and $nfile are the same (ignoring stack traces)\n"; + } + last; + } + next if($line eq $nline); + # Files are different, don't remove + $rmfile = 0; + last; + } + close(F1); + close(F2); + if($rmfile == 1){ + $decompfile_info[$j]->{IS_DUP} = 1; + } + } + } + } + for(my $i=0; $i<$ndecompfile_info; $i++){ + if($decompfile_info[$i]->{IS_DUP}){ + unlink($decompfile_info[$i]->{FNAME}); + } } } -opendir(F,$rundir); -my @decompfiles = grep(/^piodecomp/,readdir(F)); -closedir(F); -for(my $i=0; $i<= $#decompfiles; $i++){ - my $file = $decompfiles[$i]; - open(F1,$file); - my @file1 = ; - close(F1); - open(F1,">$file"); - foreach(@file1){ - if(/\[(.*)\]/){ - my $decode = `addr2line -e ../bld/cesm.exe $1`; - print F1 "$decode\n"; - print "$decode\n"; - }else{ - print F1 $_; - } - + +# Decode the stack traces in the pio decomposition files +sub decode_stack_traces +{ + # dirname => Directory that contains decomp files + # exe => executable (including path) that generated + # the decomposition files + my($dirname, $exe) = @_; + # Decode/Translate the stack trace + opendir(F,$dirname); + my @decompfiles = grep(/${PIO_DECOMP_FNAMES}/,readdir(F)); + closedir(F); + my $ndecompfiles = @decompfiles; + for(my $i=0; $i< $ndecompfiles; $i++){ + my $file = $decompfiles[$i]; + open(F1,$file); + my @file1 = ; + close(F1); + open(F1,">$file"); + foreach(@file1){ + # Find stack addresses in the 
file and use + # addrline to translate/decode the filenames and + # line numbers from it + if(/\[(.*)\]/){ + my $decode = `addr2line -e $exe $1`; + print F1 "$decode\n"; + print "$decode\n"; + }else{ + print F1 $_; + } + } + close(F1); } - close(F1); +} +sub print_usage_and_exit() +{ + print "\nUsage :\n./prune_decomps.pl --decomp-prune-dir= \n"; + print "\tOR\n"; + print "./prune_decomps.pl \n"; + print "The above commands can be used to remove duplicate decomposition\n"; + print "files in \n"; + print "Available options : \n"; + print "\t--decomp-prune-dir : Directory that contains the decomp files to be pruned\n"; + print "\t--exe : Executable that generated the decompositions \n"; + print "\t--verbose : Verbose debug output\n"; + exit; } + +# Main program + +# Read input args +GetOptions( + "decomp-prune-dir=s" => \$rundir, + "exe=s" => \$exe, + "verbose" => \$verbose +); + +$nargs = @ARGV; + +if($rundir eq ""){ + $rundir = shift; + if($rundir eq ""){ + &print_usage_and_exit(); + } +} +if($verbose){ print "Removing duplicate decomposition files from : \"", $rundir, "\"\n"; } +&rem_dup_decomp_files($rundir); + +if($exe ne ""){ + if($verbose){ print "Decoding stack traces for decomposition files from : \"", $rundir, "\"\n"; } + &decode_stack_traces($rundir, $exe); +} + diff --git a/src/externals/pio2/src/Makefile.am b/src/externals/pio2/src/Makefile.am new file mode 100644 index 00000000000..f4f0e71e6fb --- /dev/null +++ b/src/externals/pio2/src/Makefile.am @@ -0,0 +1 @@ +SUBDIRS = clib diff --git a/src/externals/pio2/src/clib/CMakeLists.txt b/src/externals/pio2/src/clib/CMakeLists.txt index 7fac8891a8e..1a0d7e17773 100644 --- a/src/externals/pio2/src/clib/CMakeLists.txt +++ b/src/externals/pio2/src/clib/CMakeLists.txt @@ -13,6 +13,7 @@ add_library (pioc topology.c pio_file.c pioc_support.c pio_lists.c # set up include-directories include_directories( + "${CMAKE_BINARY_DIR}" "${PROJECT_SOURCE_DIR}" # to find foo/foo.h "${PROJECT_BINARY_DIR}") # to find 
foo/config.h diff --git a/src/externals/pio2/src/clib/Makefile.am b/src/externals/pio2/src/clib/Makefile.am new file mode 100644 index 00000000000..756a004c35d --- /dev/null +++ b/src/externals/pio2/src/clib/Makefile.am @@ -0,0 +1,14 @@ +## This is the automake file to build the PIO C library. +# Ed Hartnett 8/19/17 + +# The library we are building. +lib_LTLIBRARIES = libpio.la + +# The header file. +include_HEADERS = pio.h + +# THe soure files. +libpio_la_SOURCES = bget.c pioc_sc.c pio_darray.c pio_file.c \ +pio_getput_int.c pio_msg.c pio_nc.c pio_rearrange.c pio_varm.c \ +pioc.c pioc_support.c pio_darray_int.c pio_get_nc.c pio_lists.c \ +pio_nc4.c pio_put_nc.c pio_spmd.c pio_internal.h bget.h diff --git a/src/externals/pio2/src/clib/dtypes.h b/src/externals/pio2/src/clib/dtypes.h deleted file mode 100644 index 9076cf0f75b..00000000000 --- a/src/externals/pio2/src/clib/dtypes.h +++ /dev/null @@ -1,5 +0,0 @@ -#define TYPEDOUBLE 102 -#define TYPEINT 103 -#define TYPETEXT 100 -#define TYPELONG 104 -#define TYPEREAL 101 diff --git a/src/externals/pio2/src/clib/pio.h b/src/externals/pio2/src/clib/pio.h index 9dace3782c9..02a51a56de5 100644 --- a/src/externals/pio2/src/clib/pio.h +++ b/src/externals/pio2/src/clib/pio.h @@ -50,7 +50,7 @@ /** Used in the decomposition netCDF file. */ /* Holds the version of the decomposition file. */ -#define DECOMP_VERSION_ATT_NAME "version" +#define DECOMP_VERSION_ATT_NAME "PIO_library_version" /* Holds the maximum length of any task map. */ #define DECOMP_MAX_MAPLEN_ATT_NAME "max_maplen" @@ -105,12 +105,16 @@ */ typedef struct var_desc_t { - /** The unlimited dimension in the netCDF file (typically the time - * dimension). -1 if there is no unlimited dimension. */ - int record; + /* Variable ID. */ + int varid; - /** Number of dimensions for this variable. */ - int ndims; + /* Non-zero if this is a record var (i.e. uses unlimited + * dimension). */ + int rec_var; + + /** The record number to be written. 
Ignored if there is no + * unlimited dimension. */ + int record; /** ID of each outstanding pnetcdf request for this variable. */ int *request; @@ -121,12 +125,6 @@ typedef struct var_desc_t /* Holds the fill value of this var. */ void *fillvalue; - /* The PIO data type (PIO_INT, PIO_FLOAT, etc.) */ - int pio_type; - - /* The size of the data type (2 for PIO_SHORT, 4 for PIO_INT, etc.) */ - PIO_Offset type_size; - /** Non-zero if fill mode is turned on for this var. */ int use_fill; @@ -134,8 +132,20 @@ typedef struct var_desc_t * missing sections of data when using the subset rearranger. */ void *fillbuf; - /** Data buffer for this variable. */ - void *iobuf; + /** The PIO data type. */ + int pio_type; + + /** The size, in bytes, of the PIO data type. */ + int pio_type_size; + + /** The MPI type of the data. */ + MPI_Datatype mpi_type; + + /** The size in bytes of a datum of MPI type mpitype. */ + int mpi_type_size; + + /** Pointer to next var in list. */ + struct var_desc_t *next; } var_desc_t; /** @@ -287,11 +297,17 @@ typedef struct io_desc_t /** The maximum number of bytes of this iodesc before flushing. */ int maxbytes; + /** The PIO type of the data. */ + int piotype; + + /** The size of one element of the piotype. */ + int piotype_size; + /** The MPI type of the data. */ - MPI_Datatype basetype; + MPI_Datatype mpitype; - /** The size in bytes of a datum of MPI type basetype. */ - int basetype_size; + /** The size in bytes of a datum of MPI type mpitype. */ + int mpitype_size; /** Length of the iobuffer on this task for a single field on the * IO node. The arrays from compute nodes gathered and rearranged @@ -391,13 +407,6 @@ typedef struct iosystem_desc_t * non-async) or the union (for async) communicator. */ MPI_Comm my_comm; - /** This MPI group contains the processors involved in - * computation. */ - MPI_Group compgroup; - - /** This MPI group contains the processors involved in I/O. 
*/ - MPI_Group iogroup; - /** The number of tasks in the IO communicator. */ int num_iotasks; @@ -531,19 +540,28 @@ typedef struct file_desc_t /** The ncid that will be returned to the user. */ int pio_ncid; - /** The PIO_TYPE value that was used to open this file. */ + /** The IOTYPE value that was used to open this file. */ int iotype; /** List of variables in this file. */ - struct var_desc_t varlist[PIO_MAX_VARS]; + struct var_desc_t *varlist; + + /** Number of variables. */ + int nvars; - /** ??? */ - int mode; + /** True if file can be written to. */ + int writable; /** The wmulti_buffer is used to aggregate multiple variables with * the same communication pattern prior to a write. */ struct wmulti_buffer buffer; + /** Data buffer for this file. */ + void *iobuf; + + /** PIO data type. */ + int pio_type; + /** Pointer to the next file_desc_t in the list of open files. */ struct file_desc_t *next; @@ -760,7 +778,7 @@ extern "C" { /* Free resources associated with a decomposition. */ int PIOc_freedecomp(int iosysid, int ioid); - + int PIOc_readmap(const char *file, int *ndims, int **gdims, PIO_Offset *fmaplen, PIO_Offset **map, MPI_Comm comm); int PIOc_readmap_from_f90(const char *file,int *ndims, int **gdims, PIO_Offset *maplen, @@ -785,7 +803,7 @@ extern "C" { int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, int component_count, int *num_procs_per_comp, int **proc_list, MPI_Comm *io_comm, MPI_Comm *comp_comm, int rearranger, int *iosysidp); - + int PIOc_Init_Intercomm(int component_count, MPI_Comm peer_comm, MPI_Comm *comp_comms, MPI_Comm io_comm, int *iosysidp); int PIOc_get_numiotasks(int iosysid, int *numiotasks); @@ -826,6 +844,7 @@ extern "C" { int PIOc_createfile(int iosysid, int *ncidp, int *iotype, const char *fname, int mode); int PIOc_create(int iosysid, const char *path, int cmode, int *ncidp); int PIOc_openfile(int iosysid, int *ncidp, int *iotype, const char *fname, int mode); + int PIOc_openfile2(int iosysid, int *ncidp, 
int *iotype, const char *fname, int mode); int PIOc_open(int iosysid, const char *path, int mode, int *ncidp); int PIOc_closefile(int ncid); int PIOc_inq_format(int ncid, int *formatp); @@ -834,6 +853,7 @@ extern "C" { int PIOc_inq_nvars(int ncid, int *nvarsp); int PIOc_inq_natts(int ncid, int *ngattsp); int PIOc_inq_unlimdim(int ncid, int *unlimdimidp); + int PIOc_inq_unlimdims(int ncid, int *nunlimdimsp, int *unlimdimidsp); int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep); int PIOc_set_blocksize(int newblocksize); int PIOc_File_is_Open(int ncid); diff --git a/src/externals/pio2/src/clib/pio_darray.c b/src/externals/pio2/src/clib/pio_darray.c index 1d6f15c7d30..a4fc6e558f7 100644 --- a/src/externals/pio2/src/clib/pio_darray.c +++ b/src/externals/pio2/src/clib/pio_darray.c @@ -13,7 +13,7 @@ #include /* 10MB default limit. */ -PIO_Offset pio_buffer_size_limit = 10485760; +PIO_Offset pio_buffer_size_limit = PIO_BUFFER_SIZE; /* Global buffer pool pointer. */ void *CN_bpool = NULL; @@ -21,6 +21,11 @@ void *CN_bpool = NULL; /* Maximum buffer usage. */ PIO_Offset maxusage = 0; +/* For write_darray_multi_serial() and write_darray_multi_par() to + * indicate whether fill or data are being written. */ +#define DARRAY_FILL 1 +#define DARRAY_DATA 0 + /** * Set the PIO IO node data buffer size limit. * @@ -29,6 +34,7 @@ PIO_Offset maxusage = 0; * * @param limit the size of the buffer on the IO nodes * @return The previous limit setting. + * @author Jim Edwards */ PIO_Offset PIOc_set_buffer_size_limit(PIO_Offset limit) { @@ -93,6 +99,7 @@ PIO_Offset PIOc_set_buffer_size_limit(PIO_Offset limit) * @param flushtodisk non-zero to cause buffers to be flushed to disk. * @return 0 for success, error code otherwise. 
* @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, PIO_Offset arraylen, void *array, const int *frame, @@ -101,8 +108,10 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, iosystem_desc_t *ios; /* Pointer to io system information. */ file_desc_t *file; /* Pointer to file information. */ io_desc_t *iodesc; /* Pointer to IO description information. */ - int rlen; /* total data buffer size. */ - var_desc_t *vdesc0; /* pointer to var_desc structure for each var. */ + int rlen; /* Total data buffer size. */ + var_desc_t *vdesc0; /* First entry in array of var_desc structure for each var. */ + int fndims; /* Number of dims in the var in the file. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function calls. */ int ierr; /* Return code. */ /* Get the file info. */ @@ -113,16 +122,13 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, /* Check inputs. */ if (nvars <= 0 || !varids) return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); - for (int v = 0; v < nvars; v++) - if (varids[v] < 0 || varids[v] > PIO_MAX_VARS) - return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); LOG((1, "PIOc_write_darray_multi ncid = %d ioid = %d nvars = %d arraylen = %ld " "flushtodisk = %d", ncid, ioid, nvars, arraylen, flushtodisk)); /* Check that we can write to this file. */ - if (!(file->mode & PIO_WRITE)) + if (!file->writable) return pio_err(ios, file, PIO_EPERM, __FILE__, __LINE__); /* Get iodesc. */ @@ -131,14 +137,94 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, pioassert(iodesc->rearranger == PIO_REARR_BOX || iodesc->rearranger == PIO_REARR_SUBSET, "unknown rearranger", __FILE__, __LINE__); + /* Check the types of all the vars. They must match the type of + * the decomposition. 
*/ + for (int v = 0; v < nvars; v++) + { + var_desc_t *vdesc; + if ((ierr = get_var_desc(varids[v], &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + if (vdesc->pio_type != iodesc->piotype) + return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); + } + /* Get a pointer to the variable info for the first variable. */ - vdesc0 = &file->varlist[varids[0]]; + if ((ierr = get_var_desc(varids[0], &file->varlist, &vdesc0))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* Run these on all tasks if async is not in use, but only on + * non-IO tasks if async is in use. */ + if (!ios->async || !ios->ioproc) + { + /* Get the number of dims for this var. */ + LOG((3, "about to call PIOc_inq_varndims varids[0] = %d", varids[0])); + if ((ierr = PIOc_inq_varndims(file->pio_ncid, varids[0], &fndims))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + LOG((3, "called PIOc_inq_varndims varids[0] = %d fndims = %d", varids[0], fndims)); + } + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_WRITEDARRAYMULTI; + char frame_present = frame ? true : false; /* Is frame non-NULL? */ + char fillvalue_present = fillvalue ? true : false; /* Is fillvalue non-NULL? */ + int flushtodisk_int = flushtodisk; /* Need this to be int not boolean. */ + + if (ios->compmaster == MPI_ROOT) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + /* Send the function parameters and associated informaiton + * to the msg handler. 
*/ + if (!mpierr) + mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&nvars, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast((void *)varids, nvars, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&ioid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&arraylen, 1, MPI_OFFSET, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(array, arraylen * iodesc->piotype_size, MPI_CHAR, ios->compmaster, + ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&frame_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && frame_present) + mpierr = MPI_Bcast((void *)frame, nvars, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&fillvalue_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr && fillvalue_present) + mpierr = MPI_Bcast((void *)fillvalue, nvars * iodesc->piotype_size, MPI_CHAR, + ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&flushtodisk_int, 1, MPI_INT, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_write_darray_multi file->pio_ncid = %d nvars = %d ioid = %d arraylen = %d " + "frame_present = %d fillvalue_present = %d flushtodisk = %d", file->pio_ncid, nvars, + ioid, arraylen, frame_present, fillvalue_present, flushtodisk)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + /* Share results known only on computation tasks with IO tasks. 
*/ + if ((mpierr = MPI_Bcast(&fndims, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + LOG((3, "shared fndims = %d", fndims)); + } /* if the buffer is already in use in pnetcdf we need to flush first */ - if (file->iotype == PIO_IOTYPE_PNETCDF && vdesc0->iobuf) - flush_output_buffer(file, 1, 0); + if (file->iotype == PIO_IOTYPE_PNETCDF && file->iobuf) + if ((ierr = flush_output_buffer(file, 1, 0))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); - pioassert(!vdesc0->iobuf, "buffer overwrite",__FILE__, __LINE__); + pioassert(!file->iobuf, "buffer overwrite",__FILE__, __LINE__); /* Determine total size of aggregated data (all vars/records). * For netcdf serial writes we collect the data on io nodes and @@ -154,30 +240,33 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, if (rlen > 0) { /* Allocate memory for the buffer for all vars/records. */ - if (!(vdesc0->iobuf = bget(iodesc->basetype_size * (size_t)rlen))) + if (!(file->iobuf = bget(iodesc->mpitype_size * (size_t)rlen))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - LOG((3, "allocated %lld bytes for variable buffer", (size_t)rlen * iodesc->basetype_size)); + LOG((3, "allocated %lld bytes for variable buffer", (size_t)rlen * iodesc->mpitype_size)); /* If fill values are desired, and we're using the BOX * rearranger, insert fill values. 
*/ if (iodesc->needsfill && iodesc->rearranger == PIO_REARR_BOX) + { + LOG((3, "inerting fill values iodesc->maxiobuflen = %d", iodesc->maxiobuflen)); for (int nv = 0; nv < nvars; nv++) for (int i = 0; i < iodesc->maxiobuflen; i++) - memcpy(&((char *)vdesc0->iobuf)[iodesc->basetype_size * (i + nv * iodesc->maxiobuflen)], - &((char *)fillvalue)[nv * iodesc->basetype_size], iodesc->basetype_size); + memcpy(&((char *)file->iobuf)[iodesc->mpitype_size * (i + nv * iodesc->maxiobuflen)], + &((char *)fillvalue)[nv * iodesc->mpitype_size], iodesc->mpitype_size); + } } else if (file->iotype == PIO_IOTYPE_PNETCDF && ios->ioproc) { /* this assures that iobuf is allocated on all iotasks thus - assuring that the flush_output_buffer call above is called - collectively (from all iotasks) */ - if (!(vdesc0->iobuf = bget(1))) + assuring that the flush_output_buffer call above is called + collectively (from all iotasks) */ + if (!(file->iobuf = bget(1))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); LOG((3, "allocated token for variable buffer")); } /* Move data from compute to IO tasks. */ - if ((ierr = rearrange_comp2io(ios, iodesc, array, vdesc0->iobuf, nvars))) + if ((ierr = rearrange_comp2io(ios, iodesc, array, file->iobuf, nvars))) return pio_err(ios, file, ierr, __FILE__, __LINE__); /* Write the darray based on the iotype. 
*/ @@ -186,14 +275,14 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, { case PIO_IOTYPE_NETCDF4P: case PIO_IOTYPE_PNETCDF: - if ((ierr = pio_write_darray_multi_nc(file, nvars, varids, iodesc->ndims, iodesc->basetype, - iodesc->maxregions, iodesc->firstregion, iodesc->llen, - iodesc->num_aiotasks, vdesc0->iobuf, frame))) + if ((ierr = write_darray_multi_par(file, nvars, fndims, varids, iodesc, + DARRAY_DATA, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; case PIO_IOTYPE_NETCDF4C: case PIO_IOTYPE_NETCDF: - if ((ierr = write_darray_multi_serial(file, nvars, varids, iodesc, 0, frame))) + if ((ierr = write_darray_multi_serial(file, nvars, fndims, varids, iodesc, + DARRAY_DATA, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; @@ -205,11 +294,11 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, if (file->iotype != PIO_IOTYPE_PNETCDF) { /* Release resources. */ - if (vdesc0->iobuf) + if (file->iobuf) { LOG((3,"freeing variable buffer in pio_darray")); - brel(vdesc0->iobuf); - vdesc0->iobuf = NULL; + brel(file->iobuf); + file->iobuf = NULL; } } @@ -232,32 +321,31 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, /* Get a buffer. */ if (ios->io_rank == 0) - vdesc0->fillbuf = bget(iodesc->maxholegridsize * iodesc->basetype_size * nvars); + vdesc0->fillbuf = bget(iodesc->maxholegridsize * iodesc->mpitype_size * nvars); else if (iodesc->holegridsize > 0) - vdesc0->fillbuf = bget(iodesc->holegridsize * iodesc->basetype_size * nvars); + vdesc0->fillbuf = bget(iodesc->holegridsize * iodesc->mpitype_size * nvars); /* copying the fill value into the data buffer for the box * rearranger. This will be overwritten with data where * provided. 
*/ for (int nv = 0; nv < nvars; nv++) for (int i = 0; i < iodesc->holegridsize; i++) - memcpy(&((char *)vdesc0->fillbuf)[iodesc->basetype_size * (i + nv * iodesc->holegridsize)], - &((char *)fillvalue)[iodesc->basetype_size * nv], iodesc->basetype_size); + memcpy(&((char *)vdesc0->fillbuf)[iodesc->mpitype_size * (i + nv * iodesc->holegridsize)], + &((char *)fillvalue)[iodesc->mpitype_size * nv], iodesc->mpitype_size); /* Write the darray based on the iotype. */ switch (file->iotype) { case PIO_IOTYPE_PNETCDF: case PIO_IOTYPE_NETCDF4P: - if ((ierr = pio_write_darray_multi_nc(file, nvars, varids, - iodesc->ndims, iodesc->basetype, iodesc->maxfillregions, - iodesc->fillregion, iodesc->holegridsize, - iodesc->num_aiotasks, vdesc0->fillbuf, frame))) + if ((ierr = write_darray_multi_par(file, nvars, fndims, varids, iodesc, + DARRAY_FILL, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; case PIO_IOTYPE_NETCDF4C: case PIO_IOTYPE_NETCDF: - if ((ierr = write_darray_multi_serial(file, nvars, varids, iodesc, 1, frame))) + if ((ierr = write_darray_multi_serial(file, nvars, fndims, varids, iodesc, + DARRAY_FILL, frame))) return pio_err(ios, file, ierr, __FILE__, __LINE__); break; default: @@ -287,38 +375,41 @@ int PIOc_write_darray_multi(int ncid, const int *varids, int ioid, int nvars, /** * Find the fillvalue that should be used for a variable. * - * @param file Info about file we are writing to. + * @param file Info about file we are writing to. * @param varid the variable ID. * @param vdesc pointer to var_desc_t info for this var. * @returns 0 for success, non-zero error code for failure. * @ingroup PIO_write_darray + * @author Ed Hartnett */ int find_var_fillvalue(file_desc_t *file, int varid, var_desc_t *vdesc) { - iosystem_desc_t *ios; /* Pointer to io system information. */ + iosystem_desc_t *ios; /* Pointer to io system information. */ + int pio_type; + PIO_Offset type_size; int no_fill; int ierr; /* Check inputs. 
*/ pioassert(file && file->iosystem && vdesc, "invalid input", __FILE__, __LINE__); ios = file->iosystem; - + LOG((3, "find_var_fillvalue file->pio_ncid = %d varid = %d", file->pio_ncid, varid)); - + /* Find out PIO data type of var. */ - if ((ierr = PIOc_inq_vartype(file->pio_ncid, varid, &vdesc->pio_type))) + if ((ierr = PIOc_inq_vartype(file->pio_ncid, varid, &pio_type))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); - + /* Find out length of type. */ - if ((ierr = PIOc_inq_type(file->pio_ncid, vdesc->pio_type, NULL, &vdesc->type_size))) + if ((ierr = PIOc_inq_type(file->pio_ncid, pio_type, NULL, &type_size))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); LOG((3, "getting fill value for varid = %d pio_type = %d type_size = %d", - varid, vdesc->pio_type, vdesc->type_size)); - + varid, pio_type, type_size)); + /* Allocate storage for the fill value. */ - if (!(vdesc->fillvalue = malloc(vdesc->type_size))) + if (!(vdesc->fillvalue = malloc(type_size))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - + /* Get the fill value. */ if ((ierr = PIOc_inq_var_fill(file->pio_ncid, varid, &no_fill, vdesc->fillvalue))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); @@ -374,6 +465,7 @@ int find_var_fillvalue(file_desc_t *file, int varid, var_desc_t *vdesc) * data. * @returns 0 for success, non-zero error code for failure. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void *array, void *fillvalue) @@ -383,14 +475,16 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * io_desc_t *iodesc; /* The IO description. */ var_desc_t *vdesc; /* Info about the var being written. */ void *bufptr; /* A data buffer. */ - MPI_Datatype vtype; /* The MPI type of the variable. */ wmulti_buffer *wmb; /* The write multi buffer for one or more vars. */ - int recordvar; /* Non-zero if this is a record variable. 
*/ int needsflush = 0; /* True if we need to flush buffer. */ +#if PIO_USE_MALLOC + void *realloc_data = NULL; +#else bufsize totfree; /* Amount of free space in the buffer. */ bufsize maxfree; /* Max amount of free space in buffer. */ +#endif int mpierr = MPI_SUCCESS; /* Return code from MPI functions. */ - int ierr = PIO_NOERR; /* Return code. */ + int ierr = PIO_NOERR; /* Return code. */ LOG((1, "PIOc_write_darray ncid = %d varid = %d ioid = %d arraylen = %d", ncid, varid, ioid, arraylen)); @@ -401,7 +495,7 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * ios = file->iosystem; /* Can we write to this file? */ - if (!(file->mode & PIO_WRITE)) + if (!file->writable) return pio_err(ios, file, PIO_EPERM, __FILE__, __LINE__); /* Get decomposition information. */ @@ -419,37 +513,42 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * arraylen, iodesc->ndof)); /* Get var description. */ - vdesc = &(file->varlist[varid]); - LOG((2, "vdesc record %d ndims %d nreqs %d", vdesc->record, vdesc->ndims, - vdesc->nreqs)); + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + + /* If the type of the var doesn't match the type of the + * decomposition, return an error. */ + if (iodesc->piotype != vdesc->pio_type) + return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); + pioassert(iodesc->mpitype_size == vdesc->mpi_type_size, "wrong mpi info", + __FILE__, __LINE__); /* If we don't know the fill value for this var, get it. */ if (!vdesc->fillvalue) if ((ierr = find_var_fillvalue(file, varid, vdesc))) - return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); - - /* Is this a record variable? The user must set the vdesc->record - * value by calling PIOc_setframe() before calling this - * function. */ - recordvar = vdesc->record >= 0 ? 
1 : 0; - LOG((3, "recordvar = %d", recordvar)); + return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + /* Check that if the user passed a fill value, it is correct. */ + if (fillvalue) + if (memcmp(fillvalue, vdesc->fillvalue, vdesc->pio_type_size)) + return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); + /* Move to end of list or the entry that matches this ioid. */ for (wmb = &file->buffer; wmb->next; wmb = wmb->next) - if (wmb->ioid == ioid && wmb->recordvar == recordvar) + if (wmb->ioid == ioid && wmb->recordvar == vdesc->rec_var) break; + LOG((3, "wmb->ioid = %d wmb->recordvar = %d", wmb->ioid, wmb->recordvar)); /* If we did not find an existing wmb entry, create a new wmb. */ - if (wmb->ioid != ioid || wmb->recordvar != recordvar) + if (wmb->ioid != ioid || wmb->recordvar != vdesc->rec_var) { /* Allocate a buffer. */ if (!(wmb->next = bget((bufsize)sizeof(wmulti_buffer)))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - LOG((3, "allocated multi-buffer")); /* Set pointer to newly allocated buffer and initialize.*/ wmb = wmb->next; - wmb->recordvar = recordvar; + wmb->recordvar = vdesc->rec_var; wmb->next = NULL; wmb->ioid = ioid; wmb->num_arrays = 0; @@ -459,16 +558,36 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * wmb->frame = NULL; wmb->fillvalue = NULL; } - LOG((2, "wmb->num_arrays = %d arraylen = %d iodesc->basetype_size = %d\n", - wmb->num_arrays, arraylen, iodesc->basetype_size)); + LOG((2, "wmb->num_arrays = %d arraylen = %d vdesc->mpi_type_size = %d\n", + wmb->num_arrays, arraylen, vdesc->mpi_type_size)); +#if PIO_USE_MALLOC + /* Try realloc first and call flush if realloc fails. */ + if (arraylen > 0) + { + size_t data_size = (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size; + + if ((realloc_data = realloc(wmb->data, data_size))) + { + needsflush = 0; + wmb->data = realloc_data; + } + else /* Failed to realloc, but wmb->data is still valid for a flush. 
*/ + { + needsflush = 1; + } + LOG((2, "realloc attempted to get %ld bytes for data, needsflush %d", data_size, + needsflush)); + } +#else /* Find out how much free, contiguous space is available. */ bfreespace(&totfree, &maxfree); /* maxfree is the available memory. If that is < 10% greater than * the size of the current request needsflush is true. */ if (needsflush == 0) - needsflush = (maxfree <= 1.1 * (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size); + needsflush = (maxfree <= 1.1 * (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size); +#endif /* Tell all tasks on the computation communicator whether we need * to flush data. */ @@ -480,26 +599,40 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * /* Flush data if needed. */ if (needsflush > 0) { +#if !PIO_USE_MALLOC #ifdef PIO_ENABLE_LOGGING /* Collect a debug report about buffer. */ cn_buffer_report(ios, true); LOG((2, "maxfree = %ld wmb->num_arrays = %d (1 + wmb->num_arrays) *" - " arraylen * iodesc->basetype_size = %ld totfree = %ld\n", maxfree, wmb->num_arrays, - (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size, totfree)); + " arraylen * vdesc->mpi_type_size = %ld totfree = %ld\n", maxfree, wmb->num_arrays, + (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size, totfree)); #endif /* PIO_ENABLE_LOGGING */ +#endif /* !PIO_USE_MALLOC */ - /* If needsflush == 2 flush to disk otherwise just flush to io node. */ + /* If needsflush == 2 flush to disk otherwise just flush to io + * node. This will cause PIOc_write_darray_multi() to be + * called. */ if ((ierr = flush_buffer(ncid, wmb, needsflush == 2))) return pio_err(ios, file, ierr, __FILE__, __LINE__); } +#if PIO_USE_MALLOC + /* Try realloc again if there is a flush. 
*/ + if (arraylen > 0 && needsflush > 0) + { + if (!(wmb->data = realloc(wmb->data, (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + LOG((2, "after a flush, realloc got %ld bytes for data", (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size)); + } +#else /* Get memory for data. */ if (arraylen > 0) { - if (!(wmb->data = bgetr(wmb->data, (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size))) + if (!(wmb->data = bgetr(wmb->data, (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - LOG((2, "got %ld bytes for data", (1 + wmb->num_arrays) * arraylen * iodesc->basetype_size)); + LOG((2, "got %ld bytes for data", (1 + wmb->num_arrays) * arraylen * vdesc->mpi_type_size)); } +#endif /* vid is an array of variable ids in the wmb list, grow the list * and add the new entry. */ @@ -519,71 +652,11 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * if (iodesc->needsfill) { /* Get memory to hold fill value. */ - if (!(wmb->fillvalue = bgetr(wmb->fillvalue, iodesc->basetype_size * (1 + wmb->num_arrays)))) + if (!(wmb->fillvalue = bgetr(wmb->fillvalue, vdesc->mpi_type_size * (1 + wmb->num_arrays)))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); - /* If the user passed a fill value, use that, otherwise use - * the default fill value of the netCDF type. Copy the fill - * value to the buffer. 
*/ - if (fillvalue) - { - memcpy((char *)wmb->fillvalue + iodesc->basetype_size * wmb->num_arrays, - fillvalue, iodesc->basetype_size); - LOG((3, "copied user-provided fill value iodesc->basetype_size = %d", - iodesc->basetype_size)); - } - else - { - void *fill; - signed char byte_fill = PIO_FILL_BYTE; - char char_fill = PIO_FILL_CHAR; - short short_fill = PIO_FILL_SHORT; - int int_fill = PIO_FILL_INT; - float float_fill = PIO_FILL_FLOAT; - double double_fill = PIO_FILL_DOUBLE; -#ifdef _NETCDF4 - unsigned char ubyte_fill = PIO_FILL_UBYTE; - unsigned short ushort_fill = PIO_FILL_USHORT; - unsigned int uint_fill = PIO_FILL_UINT; - long long int64_fill = PIO_FILL_INT64; - long long uint64_fill = PIO_FILL_UINT64; -#endif /* _NETCDF4 */ - vtype = (MPI_Datatype)iodesc->basetype; - LOG((3, "caller did not provide fill value vtype = %d", vtype)); - - /* This must be done with an if statement, not a case, or - * openmpi will not build. */ - if (vtype == MPI_BYTE) - fill = &byte_fill; - else if (vtype == MPI_CHAR) - fill = &char_fill; - else if (vtype == MPI_SHORT) - fill = &short_fill; - else if (vtype == MPI_INT) - fill = &int_fill; - else if (vtype == MPI_FLOAT) - fill = &float_fill; - else if (vtype == MPI_DOUBLE) - fill = &double_fill; -#ifdef _NETCDF4 - else if (vtype == MPI_UNSIGNED_CHAR) - fill = &ubyte_fill; - else if (vtype == MPI_UNSIGNED_SHORT) - fill = &ushort_fill; - else if (vtype == MPI_UNSIGNED) - fill = &uint_fill; - else if (vtype == MPI_LONG_LONG) - fill = &int64_fill; - else if (vtype == MPI_UNSIGNED_LONG_LONG) - fill = &uint64_fill; -#endif /* _NETCDF4 */ - else - return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); - - memcpy((char *)wmb->fillvalue + iodesc->basetype_size * wmb->num_arrays, - fill, iodesc->basetype_size); - LOG((3, "copied fill value")); - } + memcpy((char *)wmb->fillvalue + vdesc->mpi_type_size * wmb->num_arrays, + vdesc->fillvalue, vdesc->mpi_type_size); } /* Tell the buffer about the data it is getting. 
*/ @@ -593,11 +666,11 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * wmb->vid[wmb->num_arrays])); /* Copy the user-provided data to the buffer. */ - bufptr = (void *)((char *)wmb->data + arraylen * iodesc->basetype_size * wmb->num_arrays); + bufptr = (void *)((char *)wmb->data + arraylen * vdesc->mpi_type_size * wmb->num_arrays); if (arraylen > 0) { - memcpy(bufptr, array, arraylen * iodesc->basetype_size); - LOG((3, "copied %ld bytes of user data", arraylen * iodesc->basetype_size)); + memcpy(bufptr, array, arraylen * vdesc->mpi_type_size); + LOG((3, "copied %ld bytes of user data", arraylen * vdesc->mpi_type_size)); } /* Add the unlimited dimension value of this variable to the frame @@ -606,9 +679,9 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * wmb->frame[wmb->num_arrays] = vdesc->record; wmb->num_arrays++; - LOG((2, "wmb->num_arrays = %d iodesc->maxbytes / iodesc->basetype_size = %d " + LOG((2, "wmb->num_arrays = %d iodesc->maxbytes / vdesc->mpi_type_size = %d " "iodesc->ndof = %d iodesc->llen = %d", wmb->num_arrays, - iodesc->maxbytes / iodesc->basetype_size, iodesc->ndof, iodesc->llen)); + iodesc->maxbytes / vdesc->mpi_type_size, iodesc->ndof, iodesc->llen)); return PIO_NOERR; } @@ -628,6 +701,7 @@ int PIOc_write_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void * * processor. * @return 0 for success, error code otherwise. * @ingroup PIO_read_darray + * @author Jim Edwards, Ed Hartnett */ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, void *array) @@ -658,7 +732,7 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, /* Allocate a buffer for one record. */ if (ios->ioproc && rlen > 0) - if (!(iobuf = bget(iodesc->basetype_size * rlen))) + if (!(iobuf = bget(iodesc->mpitype_size * rlen))) return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); /* Call the correct darray read function based on iotype. 
*/ @@ -667,7 +741,7 @@ int PIOc_read_darray(int ncid, int varid, int ioid, PIO_Offset arraylen, case PIO_IOTYPE_NETCDF: case PIO_IOTYPE_NETCDF4C: if ((ierr = pio_read_darray_nc_serial(file, iodesc, varid, iobuf))) - return pio_err(ios, file, ierr, __FILE__, __LINE__); + return pio_err(ios, file, ierr, __FILE__, __LINE__); break; case PIO_IOTYPE_PNETCDF: case PIO_IOTYPE_NETCDF4P: diff --git a/src/externals/pio2/src/clib/pio_darray_int.c b/src/externals/pio2/src/clib/pio_darray_int.c index 79572219b91..ea4c23b2de9 100644 --- a/src/externals/pio2/src/clib/pio_darray_int.c +++ b/src/externals/pio2/src/clib/pio_darray_int.c @@ -28,13 +28,12 @@ extern PIO_Offset maxusage; /* handler for freeing the memory buffer pool */ void bpool_free(void *p) { - free(p); - if(p == CN_bpool){ - CN_bpool = NULL; - } + free(p); + if(p == CN_bpool){ + CN_bpool = NULL; + } } - /** * Initialize the compute buffer to size pio_cnbuffer_limit. * @@ -45,6 +44,7 @@ void bpool_free(void *p) * @param ios pointer to the iosystem descriptor which will use the * new buffer. * @returns 0 for success, error code otherwise. + * @author Jim Edwards */ int compute_buffer_init(iosystem_desc_t *ios) { @@ -67,6 +67,79 @@ int compute_buffer_init(iosystem_desc_t *ios) return PIO_NOERR; } +/** + * Fill start/count arrays for write_darray_multi_par(). This is an + * internal funciton. + * + * @param ndims the number of dims in the decomposition. + * @param fndims the number of dims in the file. + * @param vdesc pointer to the var_desc_t info. + * @param region pointer to a region. + * @param frame array of record values. + * @param start an already-allocated array which gets the start + * values. + * @param count an already-allocated array which gets the count + * values. + * @return 0 for success, error code otherwise. 
+ * @ingroup PIO_write_darray + * @author Ed Hartnett + */ +int find_start_count(int ndims, int fndims, var_desc_t *vdesc, + io_region *region, const int *frame, size_t *start, + size_t *count) +{ + /* Init start/count arrays to zero. */ + for (int i = 0; i < fndims; i++) + { + start[i] = 0; + count[i] = 0; + } + + if (region) + { + if (vdesc->record >= 0) + { + /* This is a record based multidimensional + * array. Figure out start/count for all but the + * record dimension (dimid 0). */ + for (int i = fndims - ndims; i < fndims; i++) + { + start[i] = region->start[i - (fndims - ndims)]; + count[i] = region->count[i - (fndims - ndims)]; + } + + /* Now figure out start/count for record dimension. */ + if (fndims > 1 && ndims < fndims && count[1] > 0) + { + count[0] = 1; + start[0] = frame[0]; + } + else if (fndims == ndims) + { + /* ??? */ + start[0] += vdesc->record; + } + } + else + { + /* This is a non record variable. */ + for (int i = 0; i < ndims; i++) + { + start[i] = region->start[i]; + count[i] = region->count[i]; + } + } + +#if PIO_ENABLE_LOGGING + /* Log arrays for debug purposes. */ + for (int i = 0; i < ndims; i++) + LOG((3, "start[%d] = %d count[%d] = %d", i, start[i], i, count[i])); +#endif /* PIO_ENABLE_LOGGING */ + } + + return PIO_NOERR; +} + /** * Write a set of one or more aggregated arrays to output file. This * function is only used with parallel-netcdf and netcdf-4 parallel @@ -77,153 +150,68 @@ int compute_buffer_init(iosystem_desc_t *ios) * @param nvars the number of variables to be written with this * decomposition. * @param vid: an array of the variable ids to be written. - * @param iodesc_ndims: the number of dimensions explicitly in the - * iodesc. - * @param basetype the basic type of the minimal data unit - * @param maxregions max number of blocks to be written from - * this iotask. - * @param firstregion pointer to the first element of a linked - * list of region descriptions. May be NULL. 
- * @param llen length of the iobuffer on this task for a single - * field. - * @param num_aiotasks actual number of iotasks participating. - * @param iobuf the buffer to be written from this mpi task. May be - * null. for example we have 8 ionodes and a distributed array with - * global size 4, then at least 4 nodes will have a null iobuf. In - * practice the box rearranger trys to have at least blocksize bytes - * on each io task and so if the total number of bytes to write is - * less than blocksize*numiotasks then some iotasks will have a NULL - * iobuf. - * @param frame the frame or record dimension for each of the nvars - * variables in iobuf. NULL if this iodesc contains non-record vars. + * @param iodesc pointer to the io_desc_t info. + * @param fill Non-zero if this write is fill data. + * @param frame the record dimension for each of the nvars variables + * in iobuf. NULL if this iodesc contains non-record vars. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ -int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int iodesc_ndims, - MPI_Datatype basetype, int maxregions, io_region *firstregion, - PIO_Offset llen, int num_aiotasks, void *iobuf, - const int *frame) +int write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *varids, + io_desc_t *iodesc, int fill, const int *frame) { iosystem_desc_t *ios; /* Pointer to io system information. */ - var_desc_t *vdesc; /* Pointer to var info struct. */ - int fndims; /* Number of dims for this var in the file. */ + var_desc_t *vdesc; /* Pointer to var info struct. */ int dsize; /* Data size (for one region). */ - int tsize; /* Size of MPI type. */ - int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ierr = PIO_NOERR; /* Check inputs. 
*/ - pioassert(file && file->iosystem && vid && vid[0] >= 0 && vid[0] <= PIO_MAX_VARS, - "invalid input", __FILE__, __LINE__); + pioassert(file && file->iosystem && varids && varids[0] >= 0 && varids[0] <= PIO_MAX_VARS && + iodesc, "invalid input", __FILE__, __LINE__); - LOG((1, "pio_write_darray_multi_nc nvars = %d iodesc_ndims = %d basetype = %d " - "maxregions = %d llen = %d num_aiotasks = %d", nvars, iodesc_ndims, - basetype, maxregions, llen, num_aiotasks)); + LOG((1, "write_darray_multi_par nvars = %d iodesc->ndims = %d iodesc->mpitype = %d " + "iodesc->maxregions = %d iodesc->llen = %d", nvars, iodesc->ndims, + iodesc->mpitype, iodesc->maxregions, iodesc->llen)); #ifdef TIMING /* Start timing this function. */ - GPTLstart("PIO:write_darray_multi_nc"); + GPTLstart("PIO:write_darray_multi_par"); #endif /* Get pointer to iosystem. */ ios = file->iosystem; /* Point to var description scruct for first var. */ - vdesc = file->varlist + vid[0]; - - /* If async is in use, send message to IO master task. */ - if (ios->async) - { - if (!ios->ioproc) - { - int msg = 0; - if (ios->compmaster == MPI_ROOT) - mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + if ((ierr = get_var_desc(varids[0], &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); - if (!mpierr) - mpierr = MPI_Bcast(&file->pio_ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); - } - - /* Handle MPI errors. */ - if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) - return check_mpi(file, mpierr2, __FILE__, __LINE__); - if (mpierr) - return check_mpi(file, mpierr, __FILE__, __LINE__); - } - - /* Find out how many dims this variable has. */ - if ((ierr = PIOc_inq_varndims(file->pio_ncid, vid[0], &fndims))) - return pio_err(ios, file, ierr, __FILE__, __LINE__); - - /* Find out the size of the MPI type. 
*/ - if ((mpierr = MPI_Type_size(basetype, &tsize))) - return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((2, "fndims = %d tsize = %d", fndims, tsize)); + /* Set these differently for data and fill writing. */ + int num_regions = fill ? iodesc->maxfillregions: iodesc->maxregions; + io_region *region = fill ? iodesc->fillregion : iodesc->firstregion; + PIO_Offset llen = fill ? iodesc->holegridsize : iodesc->llen; + void *iobuf = fill ? vdesc->fillbuf : file->iobuf; /* If this is an IO task write the data. */ if (ios->ioproc) { - io_region *region = firstregion; int rrcnt = 0; /* Number of subarray requests (pnetcdf only). */ void *bufptr; size_t start[fndims]; size_t count[fndims]; - int ndims = iodesc_ndims; - PIO_Offset *startlist[maxregions]; /* Array of start arrays for ncmpi_iput_varn(). */ - PIO_Offset *countlist[maxregions]; /* Array of count arrays for ncmpi_iput_varn(). */ + int ndims = iodesc->ndims; + PIO_Offset *startlist[num_regions]; /* Array of start arrays for ncmpi_iput_varn(). */ + PIO_Offset *countlist[num_regions]; /* Array of count arrays for ncmpi_iput_varn(). */ - LOG((3, "maxregions = %d", maxregions)); + LOG((3, "num_regions = %d", num_regions)); /* Process each region of data to be written. */ - for (int regioncnt = 0; regioncnt < maxregions; regioncnt++) + for (int regioncnt = 0; regioncnt < num_regions; regioncnt++) { - /* Init start/count arrays to zero. */ - for (int i = 0; i < fndims; i++) - { - start[i] = 0; - count[i] = 0; - } - - if (region) - { - if (vdesc->record >= 0) - { - /* This is a record based multidimensional - * array. Figure out start/count for all but the - * record dimension (dimid 0). */ - for (int i = fndims - ndims; i < fndims; i++) - { - start[i] = region->start[i - (fndims - ndims)]; - count[i] = region->count[i - (fndims - ndims)]; - } - - /* Now figure out start/count for record dimension. 
*/ - if (fndims > 1 && ndims < fndims && count[1] > 0) - { - count[0] = 1; - start[0] = frame[0]; - } - else if (fndims == ndims) - { - /* ??? */ - start[0] += vdesc->record; - } - } - else - { - /* This is a non record variable. */ - for (int i = 0; i < ndims; i++) - { - start[i] = region->start[i]; - count[i] = region->count[i]; - } - } -#if PIO_ENABLE_LOGGING - /* Log arrays for debug purposes. */ - for (int i = 0; i < ndims; i++) - LOG((3, "start[%d] = %d count[%d] = %d", i, start[i], i, count[i])); -#endif /* PIO_ENABLE_LOGGING */ - } + /* Fill the start/count arrays. */ + if ((ierr = find_start_count(iodesc->ndims, fndims, vdesc, region, frame, + start, count))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); /* IO tasks will run the netCDF/pnetcdf functions to write the data. */ switch (file->iotype) @@ -240,14 +228,14 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int /* If there is data for this region, get a pointer to it. */ if (region) - bufptr = (void *)((char *)iobuf + tsize * (nv * llen + region->loffset)); + bufptr = (void *)((char *)iobuf + iodesc->mpitype_size * (nv * llen + region->loffset)); /* Ensure collective access. */ - ierr = nc_var_par_access(file->fh, vid[nv], NC_COLLECTIVE); + ierr = nc_var_par_access(file->fh, varids[nv], NC_COLLECTIVE); /* Write the data for this variable. */ if (!ierr) - ierr = nc_put_vara(file->fh, vid[nv], (size_t *)start, (size_t *)count, bufptr); + ierr = nc_put_vara(file->fh, varids[nv], (size_t *)start, (size_t *)count, bufptr); } break; #endif @@ -283,13 +271,14 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int } /* Do this when we reach the last region. */ - if (regioncnt == maxregions - 1) + if (regioncnt == num_regions - 1) { /* For each variable to be written. */ for (int nv = 0; nv < nvars; nv++) { /* Get the var info. 
*/ - vdesc = file->varlist + vid[nv]; + if ((ierr = get_var_desc(varids[nv], &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); /* If this is a record var, set the start for * the record dimension. */ @@ -298,11 +287,9 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int startlist[rc][0] = frame[nv]; /* Get a pointer to the data. */ - bufptr = (void *)((char *)iobuf + nv * tsize * llen); + bufptr = (void *)((char *)iobuf + nv * iodesc->mpitype_size * llen); - /* ??? */ - int reqn = 0; - if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0 ) + if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0) { if (!(vdesc->request = realloc(vdesc->request, sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK)))) @@ -310,24 +297,19 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int for (int i = vdesc->nreqs; i < vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK; i++) vdesc->request[i] = NC_REQ_NULL; - reqn = vdesc->nreqs; } - else - while(vdesc->request[reqn] != NC_REQ_NULL) - reqn++; /* Write, in non-blocking fashion, a list of subarrays. */ - LOG((3, "about to call ncmpi_iput_varn() vid[%d] = %d rrcnt = %d, llen = %d", - nv, vid[nv], rrcnt, llen)); - ierr = ncmpi_iput_varn(file->fh, vid[nv], rrcnt, startlist, countlist, - bufptr, llen, basetype, vdesc->request + reqn); + LOG((3, "about to call ncmpi_iput_varn() varids[%d] = %d rrcnt = %d, llen = %d", + nv, varids[nv], rrcnt, llen)); + ierr = ncmpi_iput_varn(file->fh, varids[nv], rrcnt, startlist, countlist, + bufptr, llen, iodesc->mpitype, &vdesc->request[vdesc->nreqs]); /* keeps wait calls in sync */ - if (vdesc->request[reqn] == NC_REQ_NULL) - vdesc->request[reqn] = PIO_REQ_NULL; - - vdesc->nreqs += reqn + 1; + if (vdesc->request[vdesc->nreqs] == NC_REQ_NULL) + vdesc->request[vdesc->nreqs] = PIO_REQ_NULL; + vdesc->nreqs++; } /* Free resources. 
*/ @@ -354,7 +336,7 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int #ifdef TIMING /* Stop timing this function. */ - GPTLstop("PIO:write_darray_multi_nc"); + GPTLstop("PIO:write_darray_multi_par"); #endif return ierr; @@ -381,6 +363,7 @@ int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int * regions. * @returns 0 for success, error code otherwise. * @ingroup PIO_read_darray + * @author Jim Edwards, Ed Hartnett **/ int find_all_start_count(io_region *region, int maxregions, int fndims, int iodesc_ndims, var_desc_t *vdesc, size_t *tmp_start, @@ -447,7 +430,8 @@ int find_all_start_count(io_region *region, int maxregions, int fndims, * * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray - **/ + * @author Jim Edwards, Ed Hartnett + */ int send_all_start_count(iosystem_desc_t *ios, io_desc_t *iodesc, PIO_Offset llen, int maxregions, int nvars, int fndims, size_t *tmp_start, size_t *tmp_count, void *iobuf) @@ -483,7 +467,7 @@ int send_all_start_count(iosystem_desc_t *ios, io_desc_t *iodesc, PIO_Offset lle if ((mpierr = MPI_Send(tmp_count, maxregions * fndims, MPI_OFFSET, 0, ios->io_rank + 3 * ios->num_iotasks, ios->io_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Send(iobuf, nvars * llen, iodesc->basetype, 0, + if ((mpierr = MPI_Send(iobuf, nvars * llen, iodesc->mpitype, 0, ios->io_rank + 4 * ios->num_iotasks, ios->io_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((3, "sent data for maxregions = %d", maxregions)); @@ -499,7 +483,7 @@ int send_all_start_count(iosystem_desc_t *ios, io_desc_t *iodesc, PIO_Offset lle * * @param file a pointer to the open file descriptor for the file * that will be written to. - * @param vid an array of the variable ids to be written + * @param varids an array of the variable ids to be written * @param frame the record dimension for each of the nvars variables * in iobuf. 
NULL if this iodesc contains non-record vars. * @param iodesc pointer to the decomposition info. @@ -525,8 +509,9 @@ int send_all_start_count(iosystem_desc_t *ios, io_desc_t *iodesc, PIO_Offset lle * iobuf. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ -int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, +int recv_and_write_data(file_desc_t *file, const int *varids, const int *frame, io_desc_t *iodesc, PIO_Offset llen, int maxregions, int nvars, int fndims, size_t *tmp_start, size_t *tmp_count, void *iobuf) { @@ -536,11 +521,19 @@ int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, size_t start[fndims], count[fndims]; size_t loffset; void *bufptr; - var_desc_t *vdesc; /* Contains info about the variable. */ + var_desc_t *vdesc; /* Contains info about the variable. */ MPI_Status status; /* Recv status for MPI. */ int mpierr; /* Return code from MPI function codes. */ int ierr; /* Return code. */ + /* Check inputs. */ + pioassert(file && varids && iodesc && tmp_start && tmp_count, "invalid input", + __FILE__, __LINE__); + + LOG((2, "recv_and_write_data llen = %d maxregions = %d nvars = %d fndims = %d", + llen, maxregions, nvars, fndims)); + + /* Get pointer to IO system. 
*/ ios = file->iosystem; /* For each of the other tasks that are using this task @@ -575,7 +568,7 @@ int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, if ((mpierr = MPI_Recv(tmp_count, rregions * fndims, MPI_OFFSET, rtask, rtask + 3 * ios->num_iotasks, ios->io_comm, &status))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Recv(iobuf, nvars * rlen, iodesc->basetype, rtask, + if ((mpierr = MPI_Recv(iobuf, nvars * rlen, iodesc->mpitype, rtask, rtask + 4 * ios->num_iotasks, ios->io_comm, &status))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((3, "received data rregions = %d fndims = %d", rregions, fndims)); @@ -608,10 +601,11 @@ int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, for (int nv = 0; nv < nvars; nv++) { LOG((3, "writing buffer var %d", nv)); - vdesc = file->varlist + vid[0]; + if ((ierr = get_var_desc(varids[0], &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); /* Get a pointer to the correct part of the buffer. */ - bufptr = (void *)((char *)iobuf + iodesc->basetype_size * (nv * rlen + loffset)); + bufptr = (void *)((char *)iobuf + iodesc->mpitype_size * (nv * rlen + loffset)); /* If this var has an unlimited dim, set * the start on that dim to the frame @@ -630,7 +624,7 @@ int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, } /* Call the netCDF functions to write the data. */ - if ((ierr = nc_put_vara(file->fh, vid[nv], start, count, bufptr))) + if ((ierr = nc_put_vara(file->fh, varids[nv], start, count, bufptr))) return check_netcdf2(ios, NULL, ierr, __FILE__, __LINE__); } /* next var */ @@ -656,92 +650,54 @@ int recv_and_write_data(file_desc_t *file, const int *vid, const int *frame, * Write a set of one or more aggregated arrays to output file in * serial mode. This function is called for netCDF classic and * netCDF-4 serial iotypes. Parallel iotypes use - * pio_write_darray_multi_nc(). 
+ * write_darray_multi_par(). * * @param file a pointer to the open file descriptor for the file * that will be written to. * @param nvars the number of variables to be written with this * decomposition. - * @param vid an array of the variable ids to be written + * @param varids an array of the variable ids to be written * @param iodesc pointer to the decomposition info. - * @param maxregions max number of blocks to be written from this - * iotask. - * @param firstregion pointer to the first element of a linked - * list of region descriptions. May be NULL. - * @param llen length of the iobuffer on this task for a single - * field. - * @param iobuf the buffer to be written from this mpi task. May be - * null. for example we have 8 ionodes and a distributed array with - * global size 4, then at least 4 nodes will have a null iobuf. In - * practice the box rearranger trys to have at least blocksize bytes - * on each io task and so if the total number of bytes to write is - * less than blocksize*numiotasks then some iotasks will have a NULL - * iobuf. + * @param fill Non-zero if this write is fill data. * @param frame the record dimension for each of the nvars variables - * in iobuf. NULL if this iodesc contains non-record vars. + * in iobuf. NULL if this iodesc contains non-record vars. * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ -int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, +int write_darray_multi_serial(file_desc_t *file, int nvars, int fndims, const int *varids, io_desc_t *iodesc, int fill, const int *frame) { iosystem_desc_t *ios; /* Pointer to io system information. */ var_desc_t *vdesc; /* Contains info about the variable. */ - int fndims; /* Number of dims in the var in the file. */ - int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ierr; /* Return code. */ /* Check inputs. 
*/ - pioassert(file && file->iosystem && file->varlist && vid && vid[0] >= 0 && - vid[0] <= PIO_MAX_VARS && iodesc, "invalid input", __FILE__, __LINE__); + pioassert(file && file->iosystem && varids && varids[0] >= 0 && + varids[0] <= PIO_MAX_VARS && iodesc, "invalid input", __FILE__, __LINE__); - LOG((1, "write_darray_multi_serial nvars = %d iodesc->ndims = %d iodesc->basetype = %d", - nvars, iodesc->ndims, iodesc->basetype)); + LOG((1, "write_darray_multi_serial nvars = %d fndims = %d iodesc->ndims = %d " + "iodesc->mpitype = %d", nvars, iodesc->ndims, fndims, iodesc->mpitype)); /* Get the iosystem info. */ ios = file->iosystem; /* Get the var info. */ - vdesc = file->varlist + vid[0]; - LOG((2, "vdesc record %d ndims %d nreqs %d ios->async = %d", vdesc->record, - vdesc->ndims, vdesc->nreqs, ios->async)); + if ((ierr = get_var_desc(varids[0], &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); - /* Set these differently for data and fill writing. */ + /* Set these differently for data and fill writing. iobuf may be + * null if array size < number of nodes. */ int num_regions = fill ? iodesc->maxfillregions: iodesc->maxregions; io_region *region = fill ? iodesc->fillregion : iodesc->firstregion; PIO_Offset llen = fill ? iodesc->holegridsize : iodesc->llen; - void *iobuf = fill ? vdesc->fillbuf : vdesc->iobuf; + void *iobuf = fill ? vdesc->fillbuf : file->iobuf; #ifdef TIMING /* Start timing this function. */ - GPTLstart("PIO:write_darray_multi_nc_serial"); + GPTLstart("PIO:write_darray_multi_serial"); #endif - /* If async is in use, and this is not an IO task, bcast the parameters. */ - if (ios->async) - { - if (!ios->ioproc) - { - int msg = 0; - - if (ios->comp_rank == 0) - mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); - - if (!mpierr) - mpierr = MPI_Bcast(&file->pio_ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); - } - - /* Handle MPI errors. 
*/ - if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) - return check_mpi(file, mpierr2, __FILE__, __LINE__); - if (mpierr) - return check_mpi(file, mpierr, __FILE__, __LINE__); - } - - /* Get the number of dimensions. */ - if ((ierr = PIOc_inq_varndims(file->pio_ncid, vid[0], &fndims))) - return pio_err(ios, file, ierr, __FILE__, __LINE__); - /* Only IO tasks participate in this code. */ if (ios->ioproc) { @@ -769,7 +725,7 @@ int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, { /* Task 0 will receive data from all other IO tasks. */ - if ((ierr = recv_and_write_data(file, vid, frame, iodesc, llen, num_regions, nvars, fndims, + if ((ierr = recv_and_write_data(file, varids, frame, iodesc, llen, num_regions, nvars, fndims, tmp_start, tmp_count, iobuf))) return pio_err(ios, file, ierr, __FILE__, __LINE__); } @@ -777,7 +733,7 @@ int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, #ifdef TIMING /* Stop timing this function. */ - GPTLstop("PIO:write_darray_multi_nc_serial"); + GPTLstop("PIO:write_darray_multi_serial"); #endif return PIO_NOERR; @@ -799,6 +755,7 @@ int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, * iobuf. * @return 0 on success, error code otherwise. * @ingroup PIO_read_darray + * @author Jim Edwards, Ed Hartnett */ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf) { @@ -821,7 +778,8 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu ios = file->iosystem; /* Get the variable info. */ - vdesc = file->varlist + vid; + if ((ierr = get_var_desc(vid, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); /* Get the number of dimensions in the decomposition. 
*/ ndims = iodesc->ndims; @@ -830,10 +788,6 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu if ((ierr = PIOc_inq_varndims(file->pio_ncid, vid, &fndims))) return pio_err(ios, file, ierr, __FILE__, __LINE__); - /* Is this a non-record var? */ - if (fndims == ndims) - vdesc->record = -1; - /* IO procs will actially read the data. */ if (ios->ioproc) { @@ -847,16 +801,14 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu PIO_Offset *countlist[iodesc->maxregions]; /* buffer is incremented by byte and loffset is in terms of - the iodessc->basetype so we need to multiply by the size of - the basetype. */ + the iodessc->mpitype so we need to multiply by the size of + the mpitype. */ region = iodesc->firstregion; /* ??? */ if (fndims > ndims) { ndims++; - if (vdesc->record < 0) - vdesc->record = 0; } /* For each regions, read the data. */ @@ -879,7 +831,7 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu if (regioncnt == 0 || region == NULL) bufptr = iobuf; else - bufptr=(void *)((char *)iobuf + iodesc->basetype_size * region->loffset); + bufptr=(void *)((char *)iobuf + iodesc->mpitype_size * region->loffset); LOG((2, "%d %d %d", iodesc->llen - region->loffset, iodesc->llen, region->loffset)); @@ -915,7 +867,48 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu { #ifdef _NETCDF4 case PIO_IOTYPE_NETCDF4P: - ierr = nc_get_vara(file->fh, vid, start, count, bufptr); + /* ierr = nc_get_vara(file->fh, vid, start, count, bufptr); */ + switch (iodesc->piotype) + { + case PIO_BYTE: + ierr = nc_get_vara_schar(file->fh, vid, start, count, (signed char*)bufptr); + break; + case PIO_CHAR: + ierr = nc_get_vara_text(file->fh, vid, start, count, (char*)bufptr); + break; + case PIO_SHORT: + ierr = nc_get_vara_short(file->fh, vid, start, count, (short*)bufptr); + break; + case PIO_INT: + ierr = nc_get_vara_int(file->fh, vid, start, count, (int*)bufptr); + 
break; + case PIO_FLOAT: + ierr = nc_get_vara_float(file->fh, vid, start, count, (float*)bufptr); + break; + case PIO_DOUBLE: + ierr = nc_get_vara_double(file->fh, vid, start, count, (double*)bufptr); + break; + case PIO_UBYTE: + ierr = nc_get_vara_uchar(file->fh, vid, start, count, (unsigned char*)bufptr); + break; + case PIO_USHORT: + ierr = nc_get_vara_ushort(file->fh, vid, start, count, (unsigned short*)bufptr); + break; + case PIO_UINT: + ierr = nc_get_vara_uint(file->fh, vid, start, count, (unsigned int*)bufptr); + break; + case PIO_INT64: + ierr = nc_get_vara_longlong(file->fh, vid, start, count, (long long*)bufptr); + break; + case PIO_UINT64: + ierr = nc_get_vara_ulonglong(file->fh, vid, start, count, (unsigned long long*)bufptr); + break; + case PIO_STRING: + ierr = nc_get_vara_string(file->fh, vid, start, count, (char**)bufptr); + break; + default: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + } break; #endif #ifdef _PNETCDF @@ -943,7 +936,7 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu { /* Read a list of subarrays. */ ierr = ncmpi_get_varn_all(file->fh, vid, rrlen, startlist, - countlist, iobuf, iodesc->llen, iodesc->basetype); + countlist, iobuf, iodesc->llen, iodesc->mpitype); /* Release the start and count arrays. */ for (int i = 0; i < rrlen; i++) @@ -995,12 +988,13 @@ int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobu * iobuf. * @returns 0 for success, error code otherwise. * @ingroup PIO_read_darray + * @author Jim Edwards, Ed Hartnett */ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf) { iosystem_desc_t *ios; /* Pointer to io system information. */ - var_desc_t *vdesc; /* Information about the variable. */ + var_desc_t *vdesc; /* Information about the variable. */ int ndims; /* Number of dims in decomposition. */ int fndims; /* Number of dims for this var in file. 
*/ MPI_Status status; @@ -1011,6 +1005,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, pioassert(file && file->iosystem && iodesc && vid >= 0 && vid <= PIO_MAX_VARS, "invalid input", __FILE__, __LINE__); + LOG((2, "pio_read_darray_nc_serial vid = %d", vid)); #ifdef TIMING /* Start timing this function. */ GPTLstart("PIO:read_darray_nc_serial"); @@ -1018,7 +1013,8 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, ios = file->iosystem; /* Get var info for this var. */ - vdesc = file->varlist + vid; + if ((ierr = get_var_desc(vid, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); /* Get the number of dims in our decomposition. */ ndims = iodesc->ndims; @@ -1027,9 +1023,10 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, if ((ierr = PIOc_inq_varndims(file->pio_ncid, vid, &fndims))) return pio_err(ios, file, ierr, __FILE__, __LINE__); - /* Is this a non-record var? */ - if (fndims == ndims) - vdesc->record = -1; + /* Confirm that we are being called with the correct ndims. */ + pioassert((fndims == ndims && vdesc->record < 0) || + (fndims == ndims + 1 && vdesc->record >= 0), + "unexpected record", __FILE__, __LINE__); if (ios->ioproc) { @@ -1042,16 +1039,10 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, void *bufptr; /* buffer is incremented by byte and loffset is in terms of - the iodessc->basetype so we need to multiply by the size of - the basetype. */ + the iodessc->mpitype so we need to multiply by the size of + the mpitype. */ region = iodesc->firstregion; - if (fndims > ndims) - { - if (vdesc->record < 0) - vdesc->record = 0; - } - /* Put together start/count arrays for all regions. 
*/ for (int regioncnt = 0; regioncnt < iodesc->maxregions; regioncnt++) { @@ -1128,7 +1119,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, return check_mpi(file, mpierr, __FILE__, __LINE__); LOG((3, "sent iodesc->maxregions = %d tmp_count and tmp_start arrays", iodesc->maxregions)); - if ((mpierr = MPI_Recv(iobuf, iodesc->llen, iodesc->basetype, 0, + if ((mpierr = MPI_Recv(iobuf, iodesc->llen, iodesc->mpitype, 0, 4 * ios->num_iotasks + ios->io_rank, ios->io_comm, &status))) return check_mpi(file, mpierr, __FILE__, __LINE__); LOG((3, "received %d elements of data", iodesc->llen)); @@ -1177,7 +1168,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, for (int regioncnt = 0; regioncnt < maxregions; regioncnt++) { /* Get pointer where data should go. */ - bufptr = (void *)((char *)iobuf + iodesc->basetype_size * loffset); + bufptr = (void *)((char *)iobuf + iodesc->mpitype_size * loffset); regionsize = 1; /* ??? */ @@ -1202,7 +1193,50 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, loffset += regionsize; /* Read the data. 
*/ - ierr = nc_get_vara(file->fh, vid, start, count, bufptr); + /* ierr = nc_get_vara(file->fh, vid, start, count, bufptr); */ + switch (iodesc->piotype) + { + case PIO_BYTE: + ierr = nc_get_vara_schar(file->fh, vid, start, count, (signed char*)bufptr); + break; + case PIO_CHAR: + ierr = nc_get_vara_text(file->fh, vid, start, count, (char*)bufptr); + break; + case PIO_SHORT: + ierr = nc_get_vara_short(file->fh, vid, start, count, (short*)bufptr); + break; + case PIO_INT: + ierr = nc_get_vara_int(file->fh, vid, start, count, (int*)bufptr); + break; + case PIO_FLOAT: + ierr = nc_get_vara_float(file->fh, vid, start, count, (float*)bufptr); + break; + case PIO_DOUBLE: + ierr = nc_get_vara_double(file->fh, vid, start, count, (double*)bufptr); + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + ierr = nc_get_vara_uchar(file->fh, vid, start, count, (unsigned char*)bufptr); + break; + case PIO_USHORT: + ierr = nc_get_vara_ushort(file->fh, vid, start, count, (unsigned short*)bufptr); + break; + case PIO_UINT: + ierr = nc_get_vara_uint(file->fh, vid, start, count, (unsigned int*)bufptr); + break; + case PIO_INT64: + ierr = nc_get_vara_longlong(file->fh, vid, start, count, (long long*)bufptr); + break; + case PIO_UINT64: + ierr = nc_get_vara_ulonglong(file->fh, vid, start, count, (unsigned long long*)bufptr); + break; + case PIO_STRING: + ierr = nc_get_vara_string(file->fh, vid, start, count, (char**)bufptr); + break; +#endif /* _NETCDF4 */ + default: + return pio_err(ios, file, PIO_EBADTYPE, __FILE__, __LINE__); + } /* Check error code of netCDF call. */ if (ierr) @@ -1214,7 +1248,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, * ios->num_iotasks is the number of iotasks actually * used in this decomposition. 
*/ if (rtask < ios->num_iotasks && tmp_bufsize > 0) - if ((mpierr = MPI_Send(iobuf, tmp_bufsize, iodesc->basetype, rtask, + if ((mpierr = MPI_Send(iobuf, tmp_bufsize, iodesc->mpitype, rtask, 4 * ios->num_iotasks + rtask, ios->io_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); } @@ -1239,6 +1273,7 @@ int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, * @param addsize additional size to add to buffer (in bytes) * @return 0 for success, error code otherwise. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) { @@ -1282,9 +1317,11 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) maxreq = 0; reqcnt = 0; rcnt = 0; - for (int i = 0; i < PIO_MAX_VARS; i++) + + for (int i = 0; i < file->nvars; i++) { - vdesc = file->varlist + i; + if ((ierr = get_var_desc(i, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); reqcnt += vdesc->nreqs; if (vdesc->nreqs > 0) maxreq = i; @@ -1294,11 +1331,12 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) for (int i = 0; i <= maxreq; i++) { - vdesc = file->varlist + i; + if ((ierr = get_var_desc(i, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); #ifdef MPIO_ONESIDED /*onesided optimization requires that all of the requests in a wait_all call represent a contiguous block of data in the file */ - if (rcnt > 0 && (prev_record != vdesc->record || vdesc->nreqs==0)) + if (rcnt > 0 && (prev_record != vdesc->record || vdesc->nreqs == 0)) { ierr = ncmpi_wait_all(file->fh, rcnt, request, status); rcnt = 0; @@ -1323,15 +1361,17 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) ierr = ncmpi_wait_all(file->fh, rcnt, request, status); /* Release resources. 
*/ - for (int i = 0; i < PIO_MAX_VARS; i++) + if (file->iobuf) { - vdesc = file->varlist + i; - if (vdesc->iobuf) - { - LOG((3,"freeing variable buffer in flush_output_buffer")); - brel(vdesc->iobuf); - vdesc->iobuf = NULL; - } + LOG((3,"freeing variable buffer in flush_output_buffer")); + brel(file->iobuf); + file->iobuf = NULL; + } + + for (int v = 0; v < file->nvars; v++) + { + if ((ierr = get_var_desc(v, &file->varlist, &vdesc))) + return pio_err(NULL, file, ierr, __FILE__, __LINE__); if (vdesc->fillbuf) { brel(vdesc->fillbuf); @@ -1351,6 +1391,7 @@ int flush_output_buffer(file_desc_t *file, bool force, PIO_Offset addsize) * @param ios pointer to the IO system structure * @param collective true if collective report is desired * @ingroup PIO_write_darray + * @author Jim Edwards */ void cn_buffer_report(iosystem_desc_t *ios, bool collective) { @@ -1399,6 +1440,7 @@ void cn_buffer_report(iosystem_desc_t *ios, bool collective) * * @param ios pointer to the IO system structure. * @ingroup PIO_write_darray + * @author Jim Edwards */ void free_cn_buffer_pool(iosystem_desc_t *ios) { @@ -1424,6 +1466,7 @@ void free_cn_buffer_pool(iosystem_desc_t *ios) * @param flushtodisk if true, then flush data to disk. * @returns 0 for success, error code otherwise. * @ingroup PIO_write_darray + * @author Jim Edwards, Ed Hartnett */ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) { @@ -1446,6 +1489,7 @@ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) ret = PIOc_write_darray_multi(ncid, wmb->vid, wmb->ioid, wmb->num_arrays, wmb->arraylen, wmb->data, wmb->frame, wmb->fillvalue, flushtodisk); + LOG((2, "return from PIOc_write_darray_multi ret = %d", ret)); wmb->num_arrays = 0; @@ -1474,49 +1518,3 @@ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk) return PIO_NOERR; } -/** - * Compute the maximum aggregate number of bytes. This is called by - * subset_rearrange_create() and box_rearrange_create(). 
- * - * @param ios pointer to the IO system structure. - * @param iodesc a pointer to decomposition description. - * @returns 0 for success, error code otherwise. - */ -int compute_maxaggregate_bytes(iosystem_desc_t *ios, io_desc_t *iodesc) -{ - int maxbytesoniotask = INT_MAX; - int maxbytesoncomputetask = INT_MAX; - int maxbytes; - int mpierr; /* Return code from MPI functions. */ - - /* Check inputs. */ - pioassert(iodesc, "invalid input", __FILE__, __LINE__); - - LOG((2, "compute_maxaggregate_bytes iodesc->maxiobuflen = %d iodesc->ndof = %d", - iodesc->maxiobuflen, iodesc->ndof)); - - /* Determine the max bytes that can be held on IO task. */ - if (ios->ioproc && iodesc->maxiobuflen > 0) - maxbytesoniotask = pio_buffer_size_limit / iodesc->maxiobuflen; - - /* Determine the max bytes that can be held on computation task. */ - if (ios->comp_rank >= 0 && iodesc->ndof > 0) - maxbytesoncomputetask = pio_cnbuffer_limit / iodesc->ndof; - - /* Take the min of the max IO and max comp bytes. */ - maxbytes = min(maxbytesoniotask, maxbytesoncomputetask); - LOG((2, "compute_maxaggregate_bytes maxbytesoniotask = %d maxbytesoncomputetask = %d", - maxbytesoniotask, maxbytesoncomputetask)); - - /* Get the min value of this on all tasks. */ - LOG((3, "before allreaduce maxbytes = %d", maxbytes)); - if ((mpierr = MPI_Allreduce(MPI_IN_PLACE, &maxbytes, 1, MPI_INT, MPI_MIN, - ios->union_comm))) - return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - LOG((3, "after allreaduce maxbytes = %d", maxbytes)); - - /* Remember the result. */ - iodesc->maxbytes = maxbytes; - - return PIO_NOERR; -} diff --git a/src/externals/pio2/src/clib/pio_file.c b/src/externals/pio2/src/clib/pio_file.c index c0523997cfd..211d546426a 100644 --- a/src/externals/pio2/src/clib/pio_file.c +++ b/src/externals/pio2/src/clib/pio_file.c @@ -28,13 +28,43 @@ int pio_next_ncid = 16; * @param mode : The netcdf mode for the open operation * @return 0 for success, error code otherwise. 
* @ingroup PIO_openfile + * @author Jim Edwards, Ed Hartnett */ int PIOc_openfile(int iosysid, int *ncidp, int *iotype, const char *filename, int mode) { + LOG((1, "PIOc_openfile iosysid %d *iotype %d filename %s mode %d", iosysid, + iotype ? *iotype: 0, filename, mode)); return PIOc_openfile_retry(iosysid, ncidp, iotype, filename, mode, 1); } +/** + * Open an existing file using PIO library. + * + * This is like PIOc_openfile(), but if the open fails, this function + * will not try to open again as netCDF serial before giving + * up. Input parameters are read on comp task 0 and ignored elsewhere. + * + * Note that the file is opened with default fill mode, NOFILL for + * pnetcdf, and FILL for netCDF classic and netCDF-4 files. + * + * @param iosysid : A defined pio system descriptor (input) + * @param ncidp : A pio file descriptor (output) + * @param iotype : A pio output format (input) + * @param filename : The filename to open + * @param mode : The netcdf mode for the open operation + * @return 0 for success, error code otherwise. + * @ingroup PIO_openfile + * @author Ed Hartnett + */ +int PIOc_openfile2(int iosysid, int *ncidp, int *iotype, const char *filename, + int mode) +{ + LOG((1, "PIOc_openfile2 iosysid %d *iotype %d filename %s mode %d", iosysid, + iotype ? *iotype : 0, filename, mode)); + return PIOc_openfile_retry(iosysid, ncidp, iotype, filename, mode, 0); +} + /** * Open an existing file using PIO library. * @@ -46,6 +76,7 @@ int PIOc_openfile(int iosysid, int *ncidp, int *iotype, const char *filename, * @param ncidp pointer to int where ncid will go * @return 0 for success, error code otherwise. * @ingroup PIO_openfile + * @author Ed Hartnett */ int PIOc_open(int iosysid, const char *path, int mode, int *ncidp) { @@ -90,6 +121,7 @@ int PIOc_open(int iosysid, const char *path, int mode, int *ncidp) * @param mode The netcdf mode for the create operation. * @returns 0 for success, error code otherwise. 
* @ingroup PIO_createfile + * @author Jim Edwards, Ed Hartnett */ int PIOc_createfile(int iosysid, int *ncidp, int *iotype, const char *filename, int mode) @@ -101,6 +133,9 @@ int PIOc_createfile(int iosysid, int *ncidp, int *iotype, const char *filename, if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); + LOG((1, "PIOc_createfile iosysid = %d iotype = %d filename = %s mode = %d", + iosysid, *iotype, filename, mode)); + /* Create the file. */ if ((ret = PIOc_createfile_int(iosysid, ncidp, iotype, filename, mode))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); @@ -130,6 +165,7 @@ int PIOc_createfile(int iosysid, int *ncidp, int *iotype, const char *filename, * @param ncidp : A pio file descriptor (output) * @return 0 for success, error code otherwise. * @ingroup PIO_create + * @author Ed Hartnett */ int PIOc_create(int iosysid, const char *filename, int cmode, int *ncidp) { @@ -159,6 +195,7 @@ int PIOc_create(int iosysid, const char *filename, int cmode, int *ncidp) * * @param ncid: the file pointer * @returns PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_closefile(int ncid) { @@ -177,7 +214,7 @@ int PIOc_closefile(int ncid) /* Sync changes before closing on all tasks if async is not in * use, but only on non-IO tasks if async is in use. */ if (!ios->async || !ios->ioproc) - if (file->mode & PIO_WRITE) + if (file->writable) PIOc_sync(ncid); /* If async is in use and this is a comp tasks, then the compmaster @@ -221,9 +258,8 @@ int PIOc_closefile(int ncid) break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - if ((file->mode & PIO_WRITE)){ + if (file->writable) ierr = ncmpi_buffer_detach(file->fh); - } ierr = ncmpi_close(file->fh); break; #endif @@ -239,7 +275,8 @@ int PIOc_closefile(int ncid) return check_netcdf(file, ierr, __FILE__, __LINE__); /* Delete file from our list of open files. 
*/ - pio_delete_file_from_list(ncid); + if ((ierr = pio_delete_file_from_list(ncid))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); return ierr; } @@ -250,13 +287,14 @@ int PIOc_closefile(int ncid) * @param iosysid a pio system handle. * @param filename a filename. * @returns PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_deletefile(int iosysid, const char *filename) { iosystem_desc_t *ios; /* Pointer to io system information. */ int ierr = PIO_NOERR; /* Return code from function calls. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ - int msg = PIO_MSG_DELETE_FILE; + int msg = PIO_MSG_DELETE_FILE; size_t len; LOG((1, "PIOc_deletefile iosysid = %d filename = %s", iosysid, filename)); @@ -299,7 +337,7 @@ int PIOc_deletefile(int iosysid, const char *filename) mpierr = MPI_Barrier(ios->io_comm); if (!mpierr && ios->io_rank == 0) - ierr = nc_delete(filename); + ierr = nc_delete(filename); if (!mpierr) mpierr = MPI_Barrier(ios->io_comm); @@ -325,20 +363,52 @@ int PIOc_deletefile(int iosysid, const char *filename) * * @param ncid the ncid of the file to sync. * @returns PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_sync(int ncid) { iosystem_desc_t *ios; /* Pointer to io system information. */ file_desc_t *file; /* Pointer to file information. */ - wmulti_buffer *wmb, *twmb; int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ierr = PIO_NOERR; /* Return code from function calls. */ + LOG((1, "PIOc_sync ncid = %d", ncid)); + /* Get the file info from the ncid. */ if ((ierr = pio_get_file(ncid, &file))) return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); ios = file->iosystem; + /* Flush data buffers on computational tasks. 
*/ + if (!ios->async || !ios->ioproc) + { + if (file->writable) + { + wmulti_buffer *wmb, *twmb; + + LOG((3, "PIOc_sync checking buffers")); + wmb = &file->buffer; + while (wmb) + { + /* If there are any data arrays waiting in the + * multibuffer, flush it. */ + if (wmb->num_arrays > 0) + flush_buffer(ncid, wmb, true); + twmb = wmb; + wmb = wmb->next; + if (twmb == &file->buffer) + { + twmb->ioid = -1; + twmb->next = NULL; + } + else + { + brel(twmb); + } + } + } + } + /* If async is in use, send message to IO master tasks. */ if (ios->async) { @@ -360,31 +430,9 @@ int PIOc_sync(int ncid) return check_mpi(file, mpierr, __FILE__, __LINE__); } - if (file->mode & PIO_WRITE) + /* Call the sync function on IO tasks. */ + if (file->writable) { - LOG((3, "PIOc_sync checking buffers")); - - /* cn_buffer_report( *ios, true); */ - wmb = &file->buffer; - while (wmb) - { - /* If there are any data arrays waiting in the - * multibuffer, flush it. */ - if (wmb->num_arrays > 0) - flush_buffer(ncid, wmb, true); - twmb = wmb; - wmb = wmb->next; - if (twmb == &file->buffer) - { - twmb->ioid = -1; - twmb->next = NULL; - } - else - { - brel(twmb); - } - } - if (ios->ioproc) { switch(file->iotype) diff --git a/src/externals/pio2/src/clib/pio_get_nc.c b/src/externals/pio2/src/clib/pio_get_nc.c index 2cd85d8f947..cb3ddc5b1c1 100644 --- a/src/externals/pio2/src/clib/pio_get_nc.c +++ b/src/externals/pio2/src/clib/pio_get_nc.c @@ -31,6 +31,7 @@ * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, char *buf) @@ -57,6 +58,7 @@ int PIOc_get_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_O * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vars_uchar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, unsigned char *buf) @@ -83,6 +85,7 @@ int PIOc_get_vars_uchar(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_schar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, signed char *buf) @@ -110,6 +113,7 @@ int PIOc_get_vars_schar(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_ushort(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, unsigned short *buf) @@ -136,6 +140,7 @@ int PIOc_get_vars_ushort(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_short(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, short *buf) @@ -162,6 +167,7 @@ int PIOc_get_vars_short(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_uint(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, unsigned int *buf) @@ -188,6 +194,7 @@ int PIOc_get_vars_uint(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, int *buf) @@ -214,6 +221,7 @@ int PIOc_get_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Of * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_long(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, long *buf) @@ -240,6 +248,7 @@ int PIOc_get_vars_long(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_float(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, float *buf) @@ -267,6 +276,7 @@ int PIOc_get_vars_float(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_double(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, double *buf) @@ -294,6 +304,7 @@ int PIOc_get_vars_double(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_ulonglong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, @@ -321,6 +332,7 @@ int PIOc_get_vars_ulonglong(int ncid, int varid, const PIO_Offset *start, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vars_longlong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, long long *buf) @@ -344,6 +356,7 @@ int PIOc_get_vars_longlong(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, char *buf) @@ -367,6 +380,7 @@ int PIOc_get_vara_text(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_uchar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, unsigned char *buf) @@ -390,6 +404,7 @@ int PIOc_get_vara_uchar(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_schar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, signed char *buf) @@ -414,6 +429,7 @@ int PIOc_get_vara_schar(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_ushort(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, unsigned short *buf) @@ -437,6 +453,7 @@ int PIOc_get_vara_ushort(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vara_short(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, short *buf) @@ -460,6 +477,7 @@ int PIOc_get_vara_short(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_long(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, long *buf) @@ -483,6 +501,7 @@ int PIOc_get_vara_long(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_uint(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, unsigned int *buf) @@ -506,6 +525,7 @@ int PIOc_get_vara_uint(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, int *buf) @@ -529,6 +549,7 @@ int PIOc_get_vara_int(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_float(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, float *buf) @@ -552,6 +573,7 @@ int PIOc_get_vara_float(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vara_double(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, double *buf) @@ -576,6 +598,7 @@ int PIOc_get_vara_double(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_ulonglong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, unsigned long long *buf) @@ -599,6 +622,7 @@ int PIOc_get_vara_ulonglong(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara_longlong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, long long *buf) @@ -616,6 +640,7 @@ int PIOc_get_vara_longlong(int ncid, int varid, const PIO_Offset *start, * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_text(int ncid, int varid, char *buf) { @@ -632,6 +657,7 @@ int PIOc_get_var_text(int ncid, int varid, char *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_uchar(int ncid, int varid, unsigned char *buf) { @@ -648,6 +674,7 @@ int PIOc_get_var_uchar(int ncid, int varid, unsigned char *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_schar(int ncid, int varid, signed char *buf) { @@ -664,6 +691,7 @@ int PIOc_get_var_schar(int ncid, int varid, signed char *buf) * @param varid the variable ID number * @param buf pointer that will get the data. 
* @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_ushort(int ncid, int varid, unsigned short *buf) { @@ -680,6 +708,7 @@ int PIOc_get_var_ushort(int ncid, int varid, unsigned short *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_short(int ncid, int varid, short *buf) { @@ -696,6 +725,7 @@ int PIOc_get_var_short(int ncid, int varid, short *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_uint(int ncid, int varid, unsigned int *buf) { @@ -712,6 +742,7 @@ int PIOc_get_var_uint(int ncid, int varid, unsigned int *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_int(int ncid, int varid, int *buf) { @@ -728,6 +759,7 @@ int PIOc_get_var_int(int ncid, int varid, int *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_long (int ncid, int varid, long *buf) { @@ -744,6 +776,7 @@ int PIOc_get_var_long (int ncid, int varid, long *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_float(int ncid, int varid, float *buf) { @@ -760,6 +793,7 @@ int PIOc_get_var_float(int ncid, int varid, float *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_var_double(int ncid, int varid, double *buf) { @@ -776,6 +810,7 @@ int PIOc_get_var_double(int ncid, int varid, double *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_ulonglong(int ncid, int varid, unsigned long long *buf) { @@ -792,6 +827,7 @@ int PIOc_get_var_ulonglong(int ncid, int varid, unsigned long long *buf) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_longlong(int ncid, int varid, long long *buf) { @@ -811,6 +847,7 @@ int PIOc_get_var_longlong(int ncid, int varid, long long *buf) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_text(int ncid, int varid, const PIO_Offset *index, char *buf) { @@ -830,6 +867,7 @@ int PIOc_get_var1_text(int ncid, int varid, const PIO_Offset *index, char *buf) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_uchar (int ncid, int varid, const PIO_Offset *index, unsigned char *buf) { @@ -849,6 +887,7 @@ int PIOc_get_var1_uchar (int ncid, int varid, const PIO_Offset *index, unsigned * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_schar(int ncid, int varid, const PIO_Offset *index, signed char *buf) { @@ -868,6 +907,7 @@ int PIOc_get_var1_schar(int ncid, int varid, const PIO_Offset *index, signed cha * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_var1_ushort(int ncid, int varid, const PIO_Offset *index, unsigned short *buf) { @@ -887,6 +927,7 @@ int PIOc_get_var1_ushort(int ncid, int varid, const PIO_Offset *index, unsigned * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_short(int ncid, int varid, const PIO_Offset *index, short *buf) { @@ -908,6 +949,7 @@ int PIOc_get_var1_short(int ncid, int varid, const PIO_Offset *index, short *buf * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_uint(int ncid, int varid, const PIO_Offset *index, unsigned int *buf) { @@ -927,6 +969,7 @@ int PIOc_get_var1_uint(int ncid, int varid, const PIO_Offset *index, unsigned in * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_long (int ncid, int varid, const PIO_Offset *index, long *buf) { @@ -946,6 +989,7 @@ int PIOc_get_var1_long (int ncid, int varid, const PIO_Offset *index, long *buf) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_int(int ncid, int varid, const PIO_Offset *index, int *buf) { @@ -965,6 +1009,7 @@ int PIOc_get_var1_int(int ncid, int varid, const PIO_Offset *index, int *buf) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_float(int ncid, int varid, const PIO_Offset *index, float *buf) { @@ -984,6 +1029,7 @@ int PIOc_get_var1_float(int ncid, int varid, const PIO_Offset *index, float *buf * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_var1_double (int ncid, int varid, const PIO_Offset *index, double *buf) { @@ -1003,6 +1049,7 @@ int PIOc_get_var1_double (int ncid, int varid, const PIO_Offset *index, double * * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_ulonglong (int ncid, int varid, const PIO_Offset *index, unsigned long long *buf) @@ -1023,6 +1070,7 @@ int PIOc_get_var1_ulonglong (int ncid, int varid, const PIO_Offset *index, * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1_longlong(int ncid, int varid, const PIO_Offset *index, long long *buf) @@ -1041,6 +1089,7 @@ int PIOc_get_var1_longlong(int ncid, int varid, const PIO_Offset *index, * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var(int ncid, int varid, void *buf) { @@ -1061,6 +1110,7 @@ int PIOc_get_var(int ncid, int varid, void *buf) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var1(int ncid, int varid, const PIO_Offset *index, void *buf) { @@ -1084,6 +1134,7 @@ int PIOc_get_var1(int ncid, int varid, const PIO_Offset *index, void *buf) * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vara(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, void *buf) @@ -1111,6 +1162,7 @@ int PIOc_get_vara(int ncid, int varid, const PIO_Offset *start, const PIO_Offset * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_vars(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, void *buf) diff --git a/src/externals/pio2/src/clib/pio_getput_int.c b/src/externals/pio2/src/clib/pio_getput_int.c index 187f74d058d..791cce1b1bd 100644 --- a/src/externals/pio2/src/clib/pio_getput_int.c +++ b/src/externals/pio2/src/clib/pio_getput_int.c @@ -27,6 +27,7 @@ * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_att_tc(int ncid, int varid, const char *name, nc_type atttype, PIO_Offset len, nc_type memtype, const void *op) @@ -233,6 +234,7 @@ int PIOc_put_att_tc(int ncid, int varid, const char *name, nc_type atttype, * of type memtype. * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void *ip) { @@ -479,6 +481,7 @@ int PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void * will be used. Use special PIO_LONG_INTERNAL for _long() functions. * @param buf pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, nc_type xtype, void *buf) @@ -744,6 +747,7 @@ int PIOc_get_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off * @param xtype the netcdf type of the variable. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, void *buf) @@ -784,6 +788,7 @@ int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype * @param xtype the netcdf type of the variable. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_get_var_tc(int ncid, int varid, nc_type xtype, void *buf) { @@ -872,6 +877,7 @@ int PIOc_get_var_tc(int ncid, int varid, nc_type xtype, void *buf) * @param buf pointer to the data to be written. * * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, nc_type xtype, const void *buf) @@ -1070,7 +1076,9 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off fake_stride = (PIO_Offset *)stride; LOG((2, "PIOc_put_vars_tc calling pnetcdf function")); - vdesc = &file->varlist[varid]; + /*vdesc = &file->varlist[varid];*/ + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs % PIO_REQUEST_ALLOC_CHUNK == 0) if (!(vdesc->request = realloc(vdesc->request, sizeof(int) * (vdesc->nreqs + PIO_REQUEST_ALLOC_CHUNK)))) @@ -1227,6 +1235,7 @@ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Off * @param op pointer to the data to be written. * * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, const void *op) @@ -1277,6 +1286,7 @@ int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype * @param op pointer to the data to be written. * * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_var_tc(int ncid, int varid, nc_type xtype, const void *op) { diff --git a/src/externals/pio2/src/clib/pio_internal.h b/src/externals/pio2/src/clib/pio_internal.h index 9f1a4a18d5d..a8947135a24 100644 --- a/src/externals/pio2/src/clib/pio_internal.h +++ b/src/externals/pio2/src/clib/pio_internal.h @@ -1,7 +1,7 @@ /** * @file * Private headers and defines for the PIO C interface. - * @author Jim Edwards + * @author Jim Edwards, Ed Hartnett * @date 2014 * * @see http://code.google.com/p/parallelio/ @@ -10,6 +10,7 @@ #ifndef __PIO_INTERNAL__ #define __PIO_INTERNAL__ +#include #include /* These are the sizes of types in netCDF files. Do not replace these @@ -99,38 +100,65 @@ extern "C" { int pio_err(iosystem_desc_t *ios, file_desc_t *file, int err_num, const char *fname, int line); + /* Check return from MPI function and print error message. */ + void CheckMPIReturn(int ierr, const char *file, int line); + + /* Print error message and abort. */ + void piodie(const char *msg, const char *fname, int line); + + /* Assert that an expression is true. */ + void pioassert(bool exp, const char *msg, const char *fname, int line); + + /* Check the return code from an MPI function call. */ + int check_mpi(file_desc_t *file, int mpierr, const char *filename, int line); + + /* Check the return code from an MPI function call. */ + int check_mpi2(iosystem_desc_t *ios, file_desc_t *file, int mpierr, const char *filename, + int line); + + /* Check the return code from a netCDF call. */ + int check_netcdf(file_desc_t *file, int status, const char *fname, int line); + + /* Check the return code from a netCDF call, with ios pointer. */ + int check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status, + const char *fname, int line); + /* For async cases, this runs on IO tasks and listens for messages. 
*/ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, MPI_Comm io_comm); - void pio_get_env(void); + /* List operations for iosystem list. */ + int pio_add_to_iosystem_list(iosystem_desc_t *ios); + int pio_delete_iosystem_from_list(int piosysid); + iosystem_desc_t *pio_get_iosystem_from_id(int iosysid); + + /* List operations for decomposition list. */ int pio_add_to_iodesc_list(io_desc_t *iodesc); io_desc_t *pio_get_iodesc_from_id(int ioid); int pio_delete_iodesc_from_list(int ioid); int pio_num_iosystem(int *niosysid); + /* Allocate and initialize storage for decomposition information. */ + int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, io_desc_t **iodesc); + + /* List operations for file_desc_t list. */ int pio_get_file(int ncid, file_desc_t **filep); int pio_delete_file_from_list(int ncid); void pio_add_to_file_list(file_desc_t *file); - void pio_push_request(file_desc_t *file, int request); + + /* List operations for var_desc_t list. */ + int add_to_varlist(int varid, int rec_var, int pio_type, int pio_type_size, + MPI_Datatype mpi_type, int mpi_type_size, var_desc_t **varlist); + int get_var_desc(int varid, var_desc_t **varlist, var_desc_t **var_desc); + int delete_var_desc(int varid, var_desc_t **varlist); /* Create a file (internal function). */ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filename, int mode); /* Open a file with optional retry as netCDF-classic if first * iotype does not work. */ - int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, - const char *filename, int mode, int retry); - - iosystem_desc_t *pio_get_iosystem_from_id(int iosysid); - int pio_add_to_iosystem_list(iosystem_desc_t *ios); - - /* Check the return code from a netCDF call. */ - int check_netcdf(file_desc_t *file, int status, const char *fname, int line); - - /* Check the return code from a netCDF call, with ios pointer. 
*/ - int check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status, - const char *fname, int line); + int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filename, int mode, + int retry); /* Given PIO type, find MPI type and type size. */ int find_mpi_type(int pio_type, MPI_Datatype *mpi_type, int *type_size); @@ -138,12 +166,6 @@ extern "C" { /* Check whether an IO type is valid for this build. */ int iotype_is_valid(int iotype); - /* Print error message and abort. */ - void piodie(const char *msg, const char *fname, int line); - - /* Assert that an expression is true. */ - void pioassert(bool exp, const char *msg, const char *fname, int line); - /* Compute start and count values for each io task for a decomposition. */ int CalcStartandCount(int pio_type, int ndims, const int *gdims, int num_io_procs, int myiorank, PIO_Offset *start, PIO_Offset *count, int *num_aiotasks); @@ -155,17 +177,16 @@ extern "C" { /* Create the MPI communicators needed by the subset rearranger. */ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc); - /* Check return from MPI function and print error message. */ - void CheckMPIReturn(int ierr, const char *file, int line); - /* Like MPI_Alltoallw(), but with flow control. */ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes, MPI_Comm comm, rearr_comm_fc_opt_t *fc); + /* Return the greatest common devisor of array ain as int_64. */ long long lgcd_array(int nain, long long* ain); - void PIO_Offset_size(MPI_Datatype *dtype, int *tsize); + /* Look for the largest block of data for io which can be + * expressed in terms of start and count. */ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in); /* Convert an index into dimension values. 
*/ @@ -178,6 +199,9 @@ extern "C" { int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gsize, const PIO_Offset *compmap); + /* Allocation memory for a data region. */ + int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **region); + /* Set start and count so that they describe the first region in map.*/ PIO_Offset find_region(int ndims, const int *gdims, int maplen, const PIO_Offset *map, PIO_Offset *start, PIO_Offset *count); @@ -192,6 +216,9 @@ extern "C" { int region_size, int region_stride, const int *max_size, PIO_Offset *count); + /* Free a region list. */ + void free_region_list(io_region *top); + /* Compare sets of rearranger options. */ bool cmp_rearr_opts(const rearr_opt_t *rearr_opts, const rearr_opt_t *exp_rearr_opts); @@ -206,12 +233,10 @@ extern "C" { int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap, const int *gsize, int ndim, io_desc_t *iodesc); - /* Create a box rearranger. */ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *compmap, const int *gsize, int ndim, io_desc_t *iodesc); - /* Move data from IO tasks to compute tasks. */ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf); @@ -219,8 +244,6 @@ extern "C" { int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf, int nvars); - /* Allocate and initialize storage for decomposition information. */ - int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, io_desc_t **iodesc); void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc); /* Flush contents of multi-buffer to disk. */ @@ -229,12 +252,6 @@ extern "C" { /* Compute the size that the IO tasks will need to hold the data. */ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc); - /* Allocation memory for a data region. */ - int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **region); - - /* Delete an entry from the lost of open IO systems. 
*/ - int pio_delete_iosystem_from_list(int piosysid); - /* Find greatest commond divisor. */ int gcd(int a, int b); @@ -244,12 +261,13 @@ extern "C" { /* Find greatest commond divisor in an array. */ int gcd_array(int nain, int *ain); - void free_region_list(io_region *top); - /* Convert a global coordinate value into a local array index. */ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset *count); + /* Returns the smallest power of 2 greater than or equal to i. */ int ceil2(int i); + + /* ??? */ int pair(int np, int p, int k); /* Create MPI datatypes used for comp2io and io2comp data transfers. */ @@ -259,48 +277,42 @@ extern "C" { * transfers. */ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, const PIO_Offset *mindex, const int *mcount, int *mfrom, MPI_Datatype *mtype); + + /* Used by subset rearranger to sort map. */ int compare_offsets(const void *a, const void *b) ; /* Print a trace statement, for debugging. */ void print_trace (FILE *fp); + /* Print diagonstic info to stdout. */ void cn_buffer_report(iosystem_desc_t *ios, bool collective); /* Initialize the compute buffer. */ int compute_buffer_init(iosystem_desc_t *ios); + /* Free the buffer pool. */ void free_cn_buffer_pool(iosystem_desc_t *ios); /* Flush PIO's data buffer. */ int flush_buffer(int ncid, wmulti_buffer *wmb, bool flushtodisk); - int compute_maxaggregate_bytes(iosystem_desc_t *ios, io_desc_t *iodesc); - /* Compute an element of start/count arrays. */ void compute_one_dim(int gdim, int ioprocs, int rank, PIO_Offset *start, PIO_Offset *count); - /* Check the return code from an MPI function call. */ - int check_mpi(file_desc_t *file, int mpierr, const char *filename, int line); - - /* Check the return code from an MPI function call. */ - int check_mpi2(iosystem_desc_t *ios, file_desc_t *file, int mpierr, const char *filename, - int line); - /* Darray support functions. 
*/ /* Write aggregated arrays to file using parallel I/O (netCDF-4 parallel/pnetcdf) */ - int pio_write_darray_multi_nc(file_desc_t *file, int nvars, const int *vid, int iodesc_ndims, - MPI_Datatype basetype, int maxregions, io_region *firstregion, - PIO_Offset llen, int num_aiotasks, void *iobuf, - const int *frame); + int write_darray_multi_par(file_desc_t *file, int nvars, int fndims, const int *vid, + io_desc_t *iodesc, int fill, const int *frame); /* Write aggregated arrays to file using serial I/O (netCDF-3/netCDF-4 serial) */ - int write_darray_multi_serial(file_desc_t *file, int nvars, const int *vid, + int write_darray_multi_serial(file_desc_t *file, int nvars, int fndims, const int *vid, io_desc_t *iodesc, int fill, const int *frame); int pio_read_darray_nc(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf); int pio_read_darray_nc_serial(file_desc_t *file, io_desc_t *iodesc, int vid, void *iobuf); + int find_var_fillvalue(file_desc_t *file, int varid, var_desc_t *vdesc); /* Read atts with type conversion. */ int PIOc_get_att_tc(int ncid, int varid, const char *name, nc_type memtype, void *ip); @@ -315,7 +327,6 @@ extern "C" { int PIOc_get_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, void *buf); int PIOc_get_var_tc(int ncid, int varid, nc_type xtype, void *buf); - /* Generalized put functions. */ int PIOc_put_vars_tc(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, @@ -323,7 +334,7 @@ extern "C" { int PIOc_put_var1_tc(int ncid, int varid, const PIO_Offset *index, nc_type xtype, const void *op); int PIOc_put_var_tc(int ncid, int varid, nc_type xtype, const void *op); - + /* An internal replacement for a function pnetcdf does not * have. 
*/ int pioc_pnetcdf_inq_type(int ncid, nc_type xtype, char *name, @@ -346,6 +357,10 @@ extern "C" { int *num_tasks, int **task_maplen, int *max_maplen, int **map, char *title, char *history, char *source, char *version, int *fortran_order); + /* Determine what tasks to use for each computational component. */ + int determine_procs(int num_io_procs, int component_count, int *num_procs_per_comp, + int **proc_list, int **my_proc_list); + #if defined(__cplusplus) } #endif @@ -550,6 +565,9 @@ enum PIO_MSG PIO_MSG_GET_VAR_CHUNK_CACHE, PIO_MSG_INITDECOMP_DOF, PIO_MSG_WRITEDARRAY, + PIO_MSG_WRITEDARRAYMULTI, + PIO_MSG_SETFRAME, + PIO_MSG_ADVANCEFRAME, PIO_MSG_READDARRAY, PIO_MSG_SETERRORHANDLING, PIO_MSG_FREEDECOMP, @@ -558,7 +576,8 @@ enum PIO_MSG PIO_MSG_EXIT, PIO_MSG_GET_ATT, PIO_MSG_PUT_ATT, - PIO_MSG_INQ_TYPE + PIO_MSG_INQ_TYPE, + PIO_MSG_INQ_UNLIMDIMS }; #endif /* __PIO_INTERNAL__ */ diff --git a/src/externals/pio2/src/clib/pio_lists.c b/src/externals/pio2/src/clib/pio_lists.c index df79302cfd1..c4edb5d5042 100644 --- a/src/externals/pio2/src/clib/pio_lists.c +++ b/src/externals/pio2/src/clib/pio_lists.c @@ -8,15 +8,17 @@ #include #include -static io_desc_t *pio_iodesc_list=NULL; -static io_desc_t *current_iodesc=NULL; -static iosystem_desc_t *pio_iosystem_list=NULL; +static io_desc_t *pio_iodesc_list = NULL; +static io_desc_t *current_iodesc = NULL; +static iosystem_desc_t *pio_iosystem_list = NULL; static file_desc_t *pio_file_list = NULL; -static file_desc_t *current_file=NULL; +static file_desc_t *current_file = NULL; -/** Add a new entry to the global list of open files. +/** + * Add a new entry to the global list of open files. * * @param file pointer to the file_desc_t struct for the new file. + * @author Jim Edwards */ void pio_add_to_file_list(file_desc_t *file) { @@ -45,7 +47,8 @@ void pio_add_to_file_list(file_desc_t *file) } } -/** Given ncid, find the file_desc_t data for an open file. 
The ncid +/** + * Given ncid, find the file_desc_t data for an open file. The ncid * used is the interally generated pio_ncid. * * @param ncid the PIO assigned ncid of the open file. @@ -53,6 +56,7 @@ void pio_add_to_file_list(file_desc_t *file) * will get a copy of the pointer to the file info. * * @returns 0 for success, error code otherwise. + * @author Ed Hartnett */ int pio_get_file(int ncid, file_desc_t **cfile1) { @@ -92,15 +96,17 @@ int pio_get_file(int ncid, file_desc_t **cfile1) return PIO_NOERR; } -/** Delete a file from the list of open files. +/** + * Delete a file from the list of open files. * * @param ncid ID of file to delete from list * @returns 0 for success, error code otherwise + * @author Jim Edwards, Ed Hartnett */ int pio_delete_file_from_list(int ncid) { - file_desc_t *cfile, *pfile = NULL; + int ret; /* Look through list of open files. */ for (cfile = pio_file_list; cfile; cfile = cfile->next) @@ -115,14 +121,14 @@ int pio_delete_file_from_list(int ncid) if (current_file == cfile) current_file = pfile; - /* Free any fill values that were allocated. */ - for (int v = 0; v < PIO_MAX_VARS; v++) - if (cfile->varlist[v].fillvalue) - free(cfile->varlist[v].fillvalue); + /* Free the varlist entries for this file. */ + while (cfile->varlist) + if ((ret = delete_var_desc(cfile->varlist->varid, &cfile->varlist))) + return pio_err(NULL, cfile, ret, __FILE__, __LINE__); /* Free the memory used for this file. */ free(cfile); - + return PIO_NOERR; } pfile = cfile; @@ -132,10 +138,12 @@ int pio_delete_file_from_list(int ncid) return PIO_EBADID; } -/** Delete iosystem info from list. +/** + * Delete iosystem info from list. 
* * @param piosysid the iosysid to delete * @returns 0 on success, error code otherwise + * @author Jim Edwards */ int pio_delete_iosystem_from_list(int piosysid) { @@ -145,6 +153,7 @@ int pio_delete_iosystem_from_list(int piosysid) for (ciosystem = pio_iosystem_list; ciosystem; ciosystem = ciosystem->next) { + LOG((3, "ciosystem->iosysid = %d", ciosystem->iosysid)); if (ciosystem->iosysid == piosysid) { if (piosystem == NULL) @@ -159,10 +168,12 @@ int pio_delete_iosystem_from_list(int piosysid) return PIO_EBADID; } -/** Add iosystem info to list. +/** + * Add iosystem info to list. * * @param ios pointer to the iosystem_desc_t info to add. * @returns 0 on success, error code otherwise + * @author Jim Edwards */ int pio_add_to_iosystem_list(iosystem_desc_t *ios) { @@ -191,10 +202,12 @@ int pio_add_to_iosystem_list(iosystem_desc_t *ios) return ios->iosysid; } -/** Get iosystem info from list. +/** + * Get iosystem info from list. * * @param iosysid id of the iosystem * @returns pointer to iosystem_desc_t, or NULL if not found. + * @author Jim Edwards */ iosystem_desc_t *pio_get_iosystem_from_id(int iosysid) { @@ -209,19 +222,23 @@ iosystem_desc_t *pio_get_iosystem_from_id(int iosysid) return NULL; } -/** Count the number of open iosystems. +/** + * Count the number of open iosystems. * * @param niosysid pointer that will get the number of open iosystems. * @returns 0 for success. + * @author Jim Edwards */ -int -pio_num_iosystem(int *niosysid) +int pio_num_iosystem(int *niosysid) { int count = 0; /* Count the elements in the list. */ for (iosystem_desc_t *c = pio_iosystem_list; c; c = c->next) + { + LOG((3, "pio_num_iosystem c->iosysid %d", c->iosysid)); count++; + } /* Return count to caller via pointer. */ if (niosysid) @@ -230,56 +247,63 @@ pio_num_iosystem(int *niosysid) return PIO_NOERR; } -/** Add an iodesc. +/** + * Add an iodesc. * * @param io_desc_t pointer to data to add to list. - * @returns the ioid of the newly added iodesc. 
+ * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int pio_add_to_iodesc_list(io_desc_t *iodesc) { io_desc_t *ciodesc; - int imax = 512; iodesc->next = NULL; if (pio_iodesc_list == NULL) pio_iodesc_list = iodesc; - else{ - imax++; - for (ciodesc = pio_iodesc_list; ciodesc->next; ciodesc=ciodesc->next, imax=ciodesc->ioid + 1); + else + { + for (ciodesc = pio_iodesc_list; ciodesc->next; ciodesc = ciodesc->next) + ; ciodesc->next = iodesc; } - iodesc->ioid = imax; current_iodesc = iodesc; - return iodesc->ioid; + return PIO_NOERR; } -/** Get an iodesc. +/** + * Get an iodesc. * * @param ioid ID of iodesc to get. * @returns pointer to the iodesc struc. + * @author Jim Edwards */ io_desc_t *pio_get_iodesc_from_id(int ioid) { io_desc_t *ciodesc = NULL; - if (current_iodesc != NULL && current_iodesc->ioid == abs(ioid)) - ciodesc = current_iodesc; - else - for (ciodesc = pio_iodesc_list; ciodesc; ciodesc = ciodesc->next) - if (ciodesc->ioid == abs(ioid)) - { - current_iodesc = ciodesc; - break; - } + /* Do we already have a pointer to it? */ + if (current_iodesc && current_iodesc->ioid == ioid) + return current_iodesc; + + /* Find the decomposition in the list. */ + for (ciodesc = pio_iodesc_list; ciodesc; ciodesc = ciodesc->next) + if (ciodesc->ioid == ioid) + { + current_iodesc = ciodesc; + break; + } return ciodesc; } -/** Delete an iodesc. +/** + * Delete an iodesc. * * @param ioid ID of iodesc to delete. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int pio_delete_iodesc_from_list(int ioid) { @@ -303,3 +327,133 @@ int pio_delete_iodesc_from_list(int ioid) } return PIO_EBADID; } + +/** + * Add var_desc_t info to the list. + * + * @param varid the varid of the variable. + * @param rec_var non-zero if this is a record var. + * @param varlist pointer to list to add to. + * @returns 0 for success, error code otherwise. 
+ * @author Ed Hartnett + */ +int add_to_varlist(int varid, int rec_var, int pio_type, int pio_type_size, MPI_Datatype mpi_type, + int mpi_type_size, var_desc_t **varlist) +{ + var_desc_t *var_desc; + + /* Check inputs. */ + pioassert(varid >= 0 && varlist, "invalid input", __FILE__, __LINE__); + + /* Allocate storage. */ + if (!(var_desc = calloc(1, sizeof(var_desc_t)))) + return PIO_ENOMEM; + + /* Set values. */ + var_desc->varid = varid; + var_desc->rec_var = rec_var; + var_desc->pio_type = pio_type; + var_desc->pio_type_size = pio_type_size; + var_desc->mpi_type = mpi_type; + var_desc->mpi_type_size = mpi_type_size; + var_desc->record = -1; + + /* Add to list. */ + if (*varlist) + { + var_desc_t *v; + + /* Move to end of list. */ + for (v = *varlist; v->next; v = v->next) + ; + v->next = var_desc; + } + else + *varlist = var_desc; + + return PIO_NOERR; +} + +/** + * Get a var_desc_t info for a variable. + * + * @param varid ID of variable to get var_desc_t of. + * @param varlist pointer to list of var_desc_t. + * @param var_desc pointer that gets pointer to var_desc_t struct. + * @returns 0 for success, error code otherwise. + * @author Ed Hartnett + */ +int get_var_desc(int varid, var_desc_t **varlist, var_desc_t **var_desc) +{ + var_desc_t *my_var; + + /* Check inputs. */ + pioassert(varlist, "invalid input", __FILE__, __LINE__); + + /* Empty varlist. */ + if (!*varlist) + return PIO_ENOTVAR; + + /* Find the var_desc_t for this varid. */ + for (my_var = *varlist; my_var; my_var = my_var->next) + if (my_var->varid == varid) + break; + + /* Did we find it? */ + if (!my_var) + return PIO_ENOTVAR; + else + *var_desc = my_var; + + return PIO_NOERR; +} + +/** + * Delete var_desc_t info for a variable. + * + * @param varid ID of variable to delete. + * @param varlist pointer to list of var_desc_t. + * @returns 0 on success, error code otherwise. 
+ * @author Ed Hartnett + */ +int delete_var_desc(int varid, var_desc_t **varlist) +{ + var_desc_t *v; + var_desc_t *prev = NULL; + + /* Check inputs. */ + pioassert(varid >= 0 && varlist, "invalid input", __FILE__, __LINE__); + + /* Null list means no variables to delete. */ + if (!*varlist) + return PIO_ENOTVAR; + + /* Find the var_desc_t for this varid. */ + for (v = *varlist; v->next; v = v->next) + { + LOG((3, "v->varid = %d", v->varid)); + if (v->varid == varid) + break; + prev = v; + } + + /* Did we find it? */ + if (v->varid != varid) + { + LOG((3, "return notvar error")); + return PIO_ENOTVAR; + } + + /* Adjust next pointer. */ + if (prev) + prev->next = v->next; + else + *varlist = v->next; + + /* Free memory. */ + if (v->fillvalue) + free(v->fillvalue); + free(v); + + return PIO_NOERR; +} diff --git a/src/externals/pio2/src/clib/pio_msg.c b/src/externals/pio2/src/clib/pio_msg.c index b8ca0e9917b..c5b10244511 100644 --- a/src/externals/pio2/src/clib/pio_msg.c +++ b/src/externals/pio2/src/clib/pio_msg.c @@ -5,6 +5,11 @@ * messages from the computation nodes, and responds to messages by * running the appropriate netCDF function. * + * Note that when calling the PIOc_* funtion, the return code should + * be ignored. It is handled within the function. Only errors in + * internal pio_msg code should return an error from the handler + * function. + * * @author Ed Hartnett */ @@ -17,13 +22,15 @@ extern int my_rank; extern int pio_log_level; #endif /* PIO_ENABLE_LOGGING */ -/** This function is run on the IO tasks to handle nc_inq_type*() +/** + * This function is run on the IO tasks to handle nc_inq_type*() * functions. * * @param ios pointer to the iosystem info. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
* @internal + * @author Ed Hartnett */ int inq_type_handler(iosystem_desc_t *ios) { @@ -33,7 +40,6 @@ int inq_type_handler(iosystem_desc_t *ios) char *namep = NULL, name[NC_MAX_NAME + 1]; PIO_Offset *sizep = NULL, size; int mpierr; - int ret; LOG((1, "inq_type_handler")); assert(ios); @@ -56,20 +62,21 @@ int inq_type_handler(iosystem_desc_t *ios) sizep = &size; /* Call the function. */ - if ((ret = PIOc_inq_type(ncid, xtype, namep, sizep))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_type(ncid, xtype, namep, sizep); LOG((1, "inq_type_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to find netCDF file +/** + * This function is run on the IO tasks to find netCDF file * format. * * @param ios pointer to the iosystem info. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_format_handler(iosystem_desc_t *ios) { @@ -77,7 +84,6 @@ int inq_format_handler(iosystem_desc_t *ios) int *formatp = NULL, format; char format_present; int mpierr; - int ret; LOG((1, "inq_format_handler")); assert(ios); @@ -96,21 +102,20 @@ int inq_format_handler(iosystem_desc_t *ios) formatp = &format; /* Call the function. */ - if ((ret = PIOc_inq_format(ncid, formatp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_format(ncid, formatp); - if (formatp) - LOG((2, "inq_format_handler format = %d", *formatp)); LOG((1, "inq_format_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to set the file fill mode. +/** + * This function is run on the IO tasks to set the file fill mode. * * @param ios pointer to the iosystem info. * @returns 0 for success, error code otherwise. 
* @internal + * @author Ed Hartnett */ int set_fill_handler(iosystem_desc_t *ios) { @@ -119,7 +124,6 @@ int set_fill_handler(iosystem_desc_t *ios) int old_modep_present; int old_mode, *old_modep = NULL; int mpierr; - int ret; LOG((1, "set_fill_handler")); assert(ios); @@ -140,20 +144,21 @@ int set_fill_handler(iosystem_desc_t *ios) old_modep = &old_mode; /* Call the function. */ - if ((ret = PIOc_set_fill(ncid, fillmode, old_modep))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_set_fill(ncid, fillmode, old_modep); LOG((1, "set_fill_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to create a netCDF file. +/** + * This function is run on the IO tasks to create a netCDF file. * * @param ios pointer to the iosystem info. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int create_file_handler(iosystem_desc_t *ios) { @@ -162,7 +167,6 @@ int create_file_handler(iosystem_desc_t *ios) int iotype; int mode; int mpierr; - int ret; LOG((1, "create_file_handler comproot = %d", ios->comproot)); assert(ios); @@ -185,26 +189,26 @@ int create_file_handler(iosystem_desc_t *ios) len, filename, iotype, mode)); /* Call the create file function. */ - if ((ret = PIOc_createfile(ios->iosysid, &ncid, &iotype, filename, mode))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - + PIOc_createfile(ios->iosysid, &ncid, &iotype, filename, mode); + LOG((1, "create_file_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to close a netCDF file. It is +/** + * This function is run on the IO tasks to close a netCDF file. It is * only ever run on the IO tasks. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
* @internal + * @author Ed Hartnett */ int close_file_handler(iosystem_desc_t *ios) { int ncid; int mpierr; - int ret; LOG((1, "close_file_handler")); assert(ios); @@ -213,23 +217,24 @@ int close_file_handler(iosystem_desc_t *ios) * task is broadcasting. */ if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - LOG((1, "create_file_handler got parameter ncid = %d", ncid)); + LOG((1, "close_file_handler got parameter ncid = %d", ncid)); /* Call the close file function. */ - if ((ret = PIOc_closefile(ncid))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_closefile(ncid); LOG((1, "close_file_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to inq a netCDF file. It is +/** + * This function is run on the IO tasks to inq a netCDF file. It is * only ever run on the IO tasks. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_handler(iosystem_desc_t *ios) { @@ -238,7 +243,6 @@ int inq_handler(iosystem_desc_t *ios) int *ndimsp = NULL, *nvarsp = NULL, *ngattsp = NULL, *unlimdimidp = NULL; char ndims_present, nvars_present, ngatts_present, unlimdimid_present; int mpierr; - int ret; LOG((1, "inq_handler")); assert(ios); @@ -271,8 +275,54 @@ int inq_handler(iosystem_desc_t *ios) unlimdimidp = &unlimdimid; /* Call the inq function to get the values. */ - if ((ret = PIOc_inq(ncid, ndimsp, nvarsp, ngattsp, unlimdimidp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq(ncid, ndimsp, nvarsp, ngattsp, unlimdimidp); + + return PIO_NOERR; +} + +/** + * This function is run on the IO tasks to inq unlimited dimension + * ids of a netCDF file. It is only ever run on the IO tasks. + * + * @param ios pointer to the iosystem_desc_t. 
+ * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code + * from netCDF base function. + * @internal + * @author Ed Hartnett + */ +int inq_unlimdims_handler(iosystem_desc_t *ios) +{ + int ncid; + int nunlimdims; + int unlimdimids; + int *nunlimdimsp = NULL, *unlimdimidsp = NULL; + char nunlimdimsp_present, unlimdimidsp_present; + int mpierr; + + LOG((1, "inq_unlimdims_handler")); + assert(ios); + + /* Get the parameters for this function that the the comp master + * task is broadcasting. */ + if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&nunlimdimsp_present, 1, MPI_CHAR, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&unlimdimidsp_present, 1, MPI_CHAR, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((1, "inq_unlimdims_handler nunlimdimsp_present = %d unlimdimidsp_present = %d", + nunlimdimsp_present, unlimdimidsp_present)); + + /* NULLs passed in to any of the pointers in the original call + * need to be matched with NULLs here. Assign pointers where + * non-NULL pointers were passed in. */ + if (nunlimdimsp_present) + nunlimdimsp = &nunlimdims; + if (unlimdimidsp_present) + unlimdimidsp = &unlimdimids; + + /* Call the inq function to get the values. */ + PIOc_inq_unlimdims(ncid, nunlimdimsp, unlimdimidsp); return PIO_NOERR; } @@ -285,6 +335,7 @@ int inq_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
* @internal + * @author Ed Hartnett */ int inq_dim_handler(iosystem_desc_t *ios, int msg) { @@ -295,9 +346,7 @@ int inq_dim_handler(iosystem_desc_t *ios, int msg) PIO_Offset *dimlenp = NULL; char dimname[NC_MAX_NAME + 1]; PIO_Offset dimlen; - int mpierr; - int ret; LOG((1, "inq_dim_handler")); assert(ios); @@ -322,8 +371,7 @@ int inq_dim_handler(iosystem_desc_t *ios, int msg) dimlenp = &dimlen; /* Call the inq function to get the values. */ - if ((ret = PIOc_inq_dim(ncid, dimid, dimnamep, dimlenp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_dim(ncid, dimid, dimnamep, dimlenp); return PIO_NOERR; } @@ -335,16 +383,16 @@ int inq_dim_handler(iosystem_desc_t *ios, int msg) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_dimid_handler(iosystem_desc_t *ios) { int ncid; int *dimidp = NULL, dimid; - int mpierr; int id_present; - int ret; int namelen; char name[PIO_MAX_NAME + 1]; + int mpierr; LOG((1, "inq_dimid_handler")); assert(ios); @@ -367,8 +415,7 @@ int inq_dimid_handler(iosystem_desc_t *ios) dimidp = &dimid; /* Call the inq_dimid function. */ - if ((ret = PIOc_inq_dimid(ncid, name, dimidp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_dimid(ncid, name, dimidp); return PIO_NOERR; } @@ -381,18 +428,18 @@ int inq_dimid_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_att_handler(iosystem_desc_t *ios) { int ncid; int varid; - int mpierr; - int ret; char name[PIO_MAX_NAME + 1]; int namelen; nc_type xtype, *xtypep = NULL; PIO_Offset len, *lenp = NULL; char xtype_present, len_present; + int mpierr; LOG((1, "inq_att_handler")); assert(ios); @@ -420,8 +467,7 @@ int inq_att_handler(iosystem_desc_t *ios) lenp = &len; /* Call the function to learn about the attribute. 
*/ - if ((ret = PIOc_inq_att(ncid, varid, name, xtypep, lenp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_att(ncid, varid, name, xtypep, lenp); return PIO_NOERR; } @@ -434,6 +480,7 @@ int inq_att_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_attname_handler(iosystem_desc_t *ios) { @@ -443,7 +490,6 @@ int inq_attname_handler(iosystem_desc_t *ios) char name[NC_MAX_NAME + 1], *namep = NULL; char name_present; int mpierr; - int ret; LOG((1, "inq_att_name_handler")); assert(ios); @@ -466,8 +512,7 @@ int inq_attname_handler(iosystem_desc_t *ios) namep = name; /* Call the function to learn about the attribute. */ - if ((ret = PIOc_inq_attname(ncid, varid, attnum, namep))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_attname(ncid, varid, attnum, namep); return PIO_NOERR; } @@ -480,6 +525,7 @@ int inq_attname_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_attid_handler(iosystem_desc_t *ios) { @@ -490,7 +536,6 @@ int inq_attid_handler(iosystem_desc_t *ios) int id, *idp = NULL; char id_present; int mpierr; - int ret; LOG((1, "inq_attid_handler")); assert(ios); @@ -515,8 +560,7 @@ int inq_attid_handler(iosystem_desc_t *ios) idp = &id; /* Call the function to learn about the attribute. */ - if ((ret = PIOc_inq_attid(ncid, varid, name, idp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_attid(ncid, varid, name, idp); return PIO_NOERR; } @@ -528,13 +572,12 @@ int inq_attid_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
* @internal + * @author Ed Hartnett */ int att_put_handler(iosystem_desc_t *ios) { int ncid; int varid; - int mpierr; - int ret; char name[PIO_MAX_NAME + 1]; int namelen; PIO_Offset attlen; /* Number of elements in att array. */ @@ -543,6 +586,7 @@ int att_put_handler(iosystem_desc_t *ios) nc_type memtype; /* Type of att data in memory. */ PIO_Offset memtype_len; /* Length of element of memtype. */ void *op; + int mpierr; LOG((1, "att_put_handler")); assert(ios); @@ -581,15 +625,11 @@ int att_put_handler(iosystem_desc_t *ios) ncid, varid, namelen, name, atttype, attlen, atttype_len, memtype, memtype_len)); /* Call the function to write the attribute. */ - ret = PIOc_put_att_tc(ncid, varid, name, atttype, attlen, memtype, op); + PIOc_put_att_tc(ncid, varid, name, atttype, attlen, memtype, op); /* Free resources. */ free(op); - /* Did it work? */ - if (ret) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - LOG((2, "att_put_handler complete!")); return PIO_NOERR; } @@ -601,6 +641,7 @@ int att_put_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int att_get_handler(iosystem_desc_t *ios) { @@ -616,7 +657,6 @@ int att_get_handler(iosystem_desc_t *ios) PIO_Offset memtype_len; /* Length in bytes of an element of memype. */ int *ip; int iotype; - int ret; LOG((1, "att_get_handler")); assert(ios); @@ -652,15 +692,11 @@ int att_get_handler(iosystem_desc_t *ios) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); /* Call the function to read the attribute. */ - ret = PIOc_get_att_tc(ncid, varid, name, memtype, ip); + PIOc_get_att_tc(ncid, varid, name, memtype, ip); /* Free resources. */ free(ip); - /* Did it work? 
*/ - if (ret) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - return PIO_NOERR; } @@ -670,6 +706,7 @@ int att_get_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int put_vars_handler(iosystem_desc_t *ios) { @@ -705,13 +742,13 @@ int put_vars_handler(iosystem_desc_t *ios) if ((mpierr = MPI_Bcast(&start_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if (start_present) + if (start_present) if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); LOG((1, "put_vars_handler getting start[0] = %d ndims = %d", start[0], ndims)); if ((mpierr = MPI_Bcast(&count_present, 1, MPI_CHAR, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - if (count_present) + if (count_present) if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(&stride_present, 1, MPI_CHAR, 0, ios->intercomm))) @@ -808,6 +845,7 @@ int put_vars_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int get_vars_handler(iosystem_desc_t *ios) { @@ -815,7 +853,8 @@ int get_vars_handler(iosystem_desc_t *ios) int varid; int mpierr; PIO_Offset typelen; /** Length (in bytes) of this type. */ - nc_type xtype; /** Type of the data being written. */ + nc_type xtype; /** + * Type of the data being written. 
*/ PIO_Offset *start; PIO_Offset *count; PIO_Offset *stride; @@ -843,7 +882,7 @@ int get_vars_handler(iosystem_desc_t *ios) if (start_present) { if (!(start = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(start, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); } @@ -852,7 +891,7 @@ int get_vars_handler(iosystem_desc_t *ios) if (count_present) { if (!(count = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(count, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); } @@ -861,7 +900,7 @@ int get_vars_handler(iosystem_desc_t *ios) if (stride_present) { if (!(stride = malloc(ndims * sizeof(PIO_Offset)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); if ((mpierr = MPI_Bcast(stride, ndims, MPI_OFFSET, 0, ios->intercomm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); } @@ -885,7 +924,7 @@ int get_vars_handler(iosystem_desc_t *ios) if (count_present) countp = count; - + if (stride_present) stridep = stride; @@ -947,7 +986,7 @@ int get_vars_handler(iosystem_desc_t *ios) free(count); if (stride_present) free(stride); - + LOG((1, "get_vars_handler succeeded!")); return PIO_NOERR; } @@ -963,13 +1002,12 @@ int inq_var_handler(iosystem_desc_t *ios) { int ncid; int varid; - int mpierr; char name_present, xtype_present, ndims_present, dimids_present, natts_present; char name[NC_MAX_NAME + 1], *namep = NULL; nc_type xtype, *xtypep = NULL; int *ndimsp = NULL, *dimidsp = NULL, *nattsp = NULL; int ndims, dimids[NC_MAX_DIMS], natts; - int ret; + int mpierr; LOG((1, "inq_var_handler")); assert(ios); @@ -1007,8 +1045,7 @@ int 
inq_var_handler(iosystem_desc_t *ios) nattsp = &natts; /* Call the inq function to get the values. */ - if ((ret = PIOc_inq_var(ncid, varid, namep, xtypep, ndimsp, dimidsp, nattsp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_var(ncid, varid, namep, xtypep, ndimsp, dimidsp, nattsp); if (ndims_present) LOG((2, "inq_var_handler ndims = %d", ndims)); @@ -1031,7 +1068,6 @@ int inq_var_chunking_handler(iosystem_desc_t *ios) int storage, *storagep = NULL; PIO_Offset chunksizes[NC_MAX_DIMS], *chunksizesp = NULL; int mpierr; - int ret; assert(ios); LOG((1, "inq_var_chunking_handler")); @@ -1056,8 +1092,7 @@ int inq_var_chunking_handler(iosystem_desc_t *ios) chunksizesp = chunksizes; /* Call the inq function to get the values. */ - if ((ret = PIOc_inq_var_chunking(ncid, varid, storagep, chunksizesp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_var_chunking(ncid, varid, storagep, chunksizesp); return PIO_NOERR; } @@ -1132,7 +1167,6 @@ int inq_var_endian_handler(iosystem_desc_t *ios) char endian_present; int endian, *endianp = NULL; int mpierr; - int ret; assert(ios); LOG((1, "inq_var_endian_handler")); @@ -1153,8 +1187,7 @@ int inq_var_endian_handler(iosystem_desc_t *ios) endianp = &endian; /* Call the inq function to get the values. */ - if ((ret = PIOc_inq_var_endian(ncid, varid, endianp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_var_endian(ncid, varid, endianp); return PIO_NOERR; } @@ -1177,7 +1210,6 @@ int inq_var_deflate_handler(iosystem_desc_t *ios) int deflate, *deflatep; int deflate_level, *deflate_levelp; int mpierr; - int ret; assert(ios); LOG((1, "inq_var_deflate_handler")); @@ -1216,8 +1248,7 @@ int inq_var_deflate_handler(iosystem_desc_t *ios) deflate_levelp = &deflate_level; /* Call the inq function to get the values. 
*/ - if ((ret = PIOc_inq_var_deflate(ncid, varid, shufflep, deflatep, deflate_levelp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_var_deflate(ncid, varid, shufflep, deflatep, deflate_levelp); return PIO_NOERR; } @@ -1229,15 +1260,15 @@ int inq_var_deflate_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int inq_varid_handler(iosystem_desc_t *ios) { int ncid; int varid; - int mpierr; - int ret; int namelen; char name[PIO_MAX_NAME + 1]; + int mpierr; assert(ios); @@ -1251,24 +1282,24 @@ int inq_varid_handler(iosystem_desc_t *ios) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); /* Call the inq_dimid function. */ - if ((ret = PIOc_inq_varid(ncid, name, &varid))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_inq_varid(ncid, name, &varid); return PIO_NOERR; } -/** This function is run on the IO tasks to sync a netCDF file. +/** + * This function is run on the IO tasks to sync a netCDF file. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int sync_file_handler(iosystem_desc_t *ios) { int ncid; int mpierr; - int ret; LOG((1, "sync_file_handler")); assert(ios); @@ -1280,19 +1311,93 @@ int sync_file_handler(iosystem_desc_t *ios) LOG((1, "sync_file_handler got parameter ncid = %d", ncid)); /* Call the sync file function. */ - if ((ret = PIOc_sync(ncid))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_sync(ncid); LOG((2, "sync_file_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to enddef a netCDF file. +/** + * This function is run on the IO tasks to set the record dimension + * value for a netCDF variable. + * + * @param ios pointer to the iosystem_desc_t. 
+ * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code + * from netCDF base function. + * @internal + * @author Ed Hartnett + */ +int setframe_handler(iosystem_desc_t *ios) +{ + int ncid; + int varid; + int frame; + int mpierr; + + LOG((1, "setframe_handler")); + assert(ios); + + /* Get the parameters for this function that the comp master + * task is broadcasting. */ + if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&frame, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((1, "setframe_handler got parameter ncid = %d varid = %d frame = %d", + ncid, varid, frame)); + + /* Call the function. */ + PIOc_setframe(ncid, varid, frame); + + LOG((2, "setframe_handler succeeded!")); + return PIO_NOERR; +} + +/** + * This function is run on the IO tasks to increment the record + * dimension value for a netCDF variable. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett + */ +int advanceframe_handler(iosystem_desc_t *ios) +{ + int ncid; + int varid; + int mpierr; + + LOG((1, "advanceframe_handler")); + assert(ios); + + /* Get the parameters for this function that the comp master + * task is broadcasting. */ + if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((1, "advanceframe_handler got parameter ncid = %d varid = %d", + ncid, varid)); + + /* Call the function. 
*/ + PIOc_advanceframe(ncid, varid); + + LOG((2, "advanceframe_handler succeeded!")); + return PIO_NOERR; +} + +/** + * This function is run on the IO tasks to enddef a netCDF file. + * + * @param ios pointer to the iosystem_desc_t. + * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code + * from netCDF base function. + * @internal + * @author Ed Hartnett */ int change_def_file_handler(iosystem_desc_t *ios, int msg) { @@ -1317,25 +1422,26 @@ int change_def_file_handler(iosystem_desc_t *ios, int msg) return PIO_NOERR; } -/** This function is run on the IO tasks to define a netCDF +/** + * This function is run on the IO tasks to define a netCDF * variable. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int def_var_handler(iosystem_desc_t *ios) { int ncid; int namelen; char name[PIO_MAX_NAME + 1]; - int mpierr; - int ret; int varid; nc_type xtype; int ndims; int *dimids; + int mpierr; LOG((1, "def_var_handler comproot = %d", ios->comproot)); assert(ios); @@ -1363,11 +1469,7 @@ int def_var_handler(iosystem_desc_t *ios) "name = %s ncid = %d", namelen, name, ncid)); /* Call the function. */ - if ((ret = PIOc_def_var(ncid, name, xtype, ndims, dimids, &varid))) - { - free(dimids); - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - } + PIOc_def_var(ncid, name, xtype, ndims, dimids, &varid); /* Free resources. */ free(dimids); @@ -1392,7 +1494,6 @@ int def_var_chunking_handler(iosystem_desc_t *ios) char chunksizes_present; PIO_Offset chunksizes[NC_MAX_DIMS], *chunksizesp = NULL; int mpierr; - int ret; assert(ios); LOG((1, "def_var_chunking_handler comproot = %d", ios->comproot)); @@ -1420,8 +1521,7 @@ int def_var_chunking_handler(iosystem_desc_t *ios) chunksizesp = chunksizes; /* Call the function. 
*/ - if ((ret = PIOc_def_var_chunking(ncid, varid, storage, chunksizesp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_def_var_chunking(ncid, varid, storage, chunksizesp); LOG((1, "def_var_chunking_handler succeeded!")); return PIO_NOERR; @@ -1496,7 +1596,6 @@ int def_var_endian_handler(iosystem_desc_t *ios) int varid; int endian; int mpierr; - int ret; assert(ios); LOG((1, "def_var_endian_handler comproot = %d", ios->comproot)); @@ -1513,8 +1612,7 @@ int def_var_endian_handler(iosystem_desc_t *ios) ncid, varid, endian)); /* Call the function. */ - if ((ret = PIOc_def_var_endian(ncid, varid, endian))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_def_var_endian(ncid, varid, endian); LOG((1, "def_var_chunking_handler succeeded!")); return PIO_NOERR; @@ -1535,7 +1633,6 @@ int def_var_deflate_handler(iosystem_desc_t *ios) int deflate; int deflate_level; int mpierr; - int ret; assert(ios); LOG((1, "def_var_deflate_handler comproot = %d", ios->comproot)); @@ -1556,8 +1653,7 @@ int def_var_deflate_handler(iosystem_desc_t *ios) "deflate = %d deflate_level = %d", ncid, varid, shuffle, deflate, deflate_level)); /* Call the function. */ - if ((ret = PIOc_def_var_deflate(ncid, varid, shuffle, deflate, deflate_level))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_def_var_deflate(ncid, varid, shuffle, deflate, deflate_level); LOG((1, "def_var_deflate_handler succeeded!")); return PIO_NOERR; @@ -1578,7 +1674,6 @@ int set_var_chunk_cache_handler(iosystem_desc_t *ios) PIO_Offset nelems; float preemption; int mpierr = MPI_SUCCESS; /* Return code from MPI function codes. */ - int ret; /* Return code. */ assert(ios); LOG((1, "set_var_chunk_cache_handler comproot = %d", ios->comproot)); @@ -1599,29 +1694,29 @@ int set_var_chunk_cache_handler(iosystem_desc_t *ios) "nelems = %d preemption = %g", ncid, varid, size, nelems, preemption)); /* Call the function. 
*/ - if ((ret = PIOc_set_var_chunk_cache(ncid, varid, size, nelems, preemption))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_set_var_chunk_cache(ncid, varid, size, nelems, preemption); LOG((1, "def_var_chunk_cache_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to define a netCDF +/** + * This function is run on the IO tasks to define a netCDF * dimension. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int def_dim_handler(iosystem_desc_t *ios) { int ncid; int len, namelen; char name[PIO_MAX_NAME + 1]; - int mpierr; - int ret; int dimid; + int mpierr; LOG((1, "def_dim_handler comproot = %d", ios->comproot)); assert(ios); @@ -1640,29 +1735,29 @@ int def_dim_handler(iosystem_desc_t *ios) "name = %s len = %d ncid = %d", namelen, name, len, ncid)); /* Call the function. */ - if ((ret = PIOc_def_dim(ncid, name, len, &dimid))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_def_dim(ncid, name, len, &dimid); LOG((1, "def_dim_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to rename a netCDF +/** + * This function is run on the IO tasks to rename a netCDF * dimension. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int rename_dim_handler(iosystem_desc_t *ios) { int ncid; int namelen; char name[PIO_MAX_NAME + 1]; - int mpierr; - int ret; int dimid; + int mpierr; LOG((1, "rename_dim_handler")); assert(ios); @@ -1681,29 +1776,29 @@ int rename_dim_handler(iosystem_desc_t *ios) "name = %s ncid = %d dimid = %d", namelen, name, ncid, dimid)); /* Call the function. 
*/ - if ((ret = PIOc_rename_dim(ncid, dimid, name))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_rename_dim(ncid, dimid, name); LOG((1, "rename_dim_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to rename a netCDF +/** + * This function is run on the IO tasks to rename a netCDF * dimension. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int rename_var_handler(iosystem_desc_t *ios) { int ncid; int namelen; char name[PIO_MAX_NAME + 1]; - int mpierr; - int ret; int varid; + int mpierr; LOG((1, "rename_var_handler")); assert(ios); @@ -1722,20 +1817,21 @@ int rename_var_handler(iosystem_desc_t *ios) "name = %s ncid = %d varid = %d", namelen, name, ncid, varid)); /* Call the function. */ - if ((ret = PIOc_rename_var(ncid, varid, name))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_rename_var(ncid, varid, name); LOG((1, "rename_var_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to rename a netCDF +/** + * This function is run on the IO tasks to rename a netCDF * attribute. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int rename_att_handler(iosystem_desc_t *ios) { @@ -1744,7 +1840,6 @@ int rename_att_handler(iosystem_desc_t *ios) int namelen, newnamelen; char name[PIO_MAX_NAME + 1], newname[PIO_MAX_NAME + 1]; int mpierr; - int ret; LOG((1, "rename_att_handler")); assert(ios); @@ -1767,20 +1862,21 @@ int rename_att_handler(iosystem_desc_t *ios) "newnamelen = %d newname = %s", namelen, name, ncid, varid, newnamelen, newname)); /* Call the function. 
*/ - if ((ret = PIOc_rename_att(ncid, varid, name, newname))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_rename_att(ncid, varid, name, newname); LOG((1, "rename_att_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to delete a netCDF +/** + * This function is run on the IO tasks to delete a netCDF * attribute. * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int delete_att_handler(iosystem_desc_t *ios) { @@ -1789,7 +1885,6 @@ int delete_att_handler(iosystem_desc_t *ios) int namelen; char name[PIO_MAX_NAME + 1]; int mpierr; - int ret; LOG((1, "delete_att_handler")); assert(ios); @@ -1808,20 +1903,21 @@ int delete_att_handler(iosystem_desc_t *ios) namelen, name, ncid, varid)); /* Call the function. */ - if ((ret = PIOc_del_att(ncid, varid, name))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_del_att(ncid, varid, name); LOG((1, "delete_att_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to open a netCDF file. +/** + * This function is run on the IO tasks to open a netCDF file. * * * @param ios pointer to the iosystem_desc_t. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int open_file_handler(iosystem_desc_t *ios) { @@ -1855,24 +1951,25 @@ int open_file_handler(iosystem_desc_t *ios) /* Call the open file function. Errors are handling within * function, so return code can be ignored. */ - PIOc_openfile(ios->iosysid, &ncid, &iotype, filename, mode); + PIOc_openfile_retry(ios->iosysid, &ncid, &iotype, filename, mode, 0); return PIO_NOERR; } -/** This function is run on the IO tasks to delete a netCDF file. +/** + * This function is run on the IO tasks to delete a netCDF file. * * @param ios pointer to the iosystem_desc_t data. 
* * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int delete_file_handler(iosystem_desc_t *ios) { int len; int mpierr; - int ret; LOG((1, "delete_file_handler comproot = %d", ios->comproot)); assert(ios); @@ -1891,14 +1988,13 @@ int delete_file_handler(iosystem_desc_t *ios) len, filename)); /* Call the delete file function. */ - if ((ret = PIOc_deletefile(ios->iosysid, filename))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_deletefile(ios->iosysid, filename); LOG((1, "delete_file_handler succeeded!")); return PIO_NOERR; } -/** +/** * This function is run on the IO tasks to initialize a decomposition. * * @param ios pointer to the iosystem_desc_t data. @@ -1984,27 +2080,124 @@ int initdecomp_dof_handler(iosystem_desc_t *ios) /* Call the function. */ ret = PIOc_InitDecomp(iosysid, pio_type, ndims, dims, maplen, compmap, &ioid, rearrangerp, iostartp, iocountp); - + LOG((1, "PIOc_InitDecomp returned %d", ret)); return PIO_NOERR; } -/** This function is run on the IO tasks to... - * NOTE: not yet implemented +/** + * This function is run on the IO tasks to do darray writes. * * @param ios pointer to the iosystem_desc_t data. * * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ -int writedarray_handler(iosystem_desc_t *ios) +int write_darray_multi_handler(iosystem_desc_t *ios) { + int ncid; + file_desc_t *file; /* Pointer to file information. */ + int nvars; + int ioid; + io_desc_t *iodesc; /* The IO description. */ + char frame_present; + int *framep = NULL; + int *frame; + PIO_Offset arraylen; + void *array; + char fillvalue_present; + void *fillvaluep = NULL; + void *fillvalue; + int flushtodisk; + int mpierr; + int ret; + + LOG((1, "write_darray_multi_handler")); assert(ios); + + /* Get the parameters for this function that the the comp master + * task is broadcasting. 
*/ + if ((mpierr = MPI_Bcast(&ncid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&nvars, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + int varids[nvars]; + if ((mpierr = MPI_Bcast(varids, nvars, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&ioid, 1, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + + /* Get decomposition information. */ + if (!(iodesc = pio_get_iodesc_from_id(ioid))) + return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + + if ((mpierr = MPI_Bcast(&arraylen, 1, MPI_OFFSET, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (!(array = malloc(arraylen * iodesc->piotype_size))) + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(array, arraylen * iodesc->piotype_size, MPI_CHAR, 0, + ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&frame_present, 1, MPI_CHAR, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (frame_present) + { + if (!(frame = malloc(nvars * sizeof(int)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(frame, nvars, MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } + if ((mpierr = MPI_Bcast(&fillvalue_present, 1, MPI_CHAR, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if (fillvalue_present) + { + if (!(fillvalue = malloc(nvars * iodesc->piotype_size))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(fillvalue, nvars * iodesc->piotype_size, MPI_CHAR, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } + if ((mpierr = MPI_Bcast(&flushtodisk, 1, 
MPI_INT, 0, ios->intercomm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((1, "write_darray_multi_handler ncid = %d nvars = %d ioid = %d arraylen = %d " + "frame_present = %d fillvalue_present flushtodisk = %d", ncid, nvars, + ioid, arraylen, frame_present, fillvalue_present, flushtodisk)); + + /* Get file info based on ncid. */ + if ((ret = pio_get_file(ncid, &file))) + return pio_err(NULL, NULL, ret, __FILE__, __LINE__); + + /* Get decomposition information. */ + if (!(iodesc = pio_get_iodesc_from_id(ioid))) + return pio_err(ios, file, PIO_EBADID, __FILE__, __LINE__); + + /* Was a frame array provided? */ + if (frame_present) + framep = frame; + + /* Was a fillvalue array provided? */ + if (fillvalue_present) + fillvaluep = fillvalue; + + /* Call the function from IO tasks. Errors are handled within + * function. */ + PIOc_write_darray_multi(ncid, varids, ioid, nvars, arraylen, array, framep, + fillvaluep, flushtodisk); + + /* Free resources. */ + if (frame_present) + free(frame); + if (fillvalue_present) + free(fillvalue); + free(array); + + LOG((1, "write_darray_multi_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to... +/** + * This function is run on the IO tasks to... * NOTE: not yet implemented * * @param ios pointer to the iosystem_desc_t data. @@ -2012,6 +2205,7 @@ int writedarray_handler(iosystem_desc_t *ios) * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int readdarray_handler(iosystem_desc_t *ios) { @@ -2019,14 +2213,15 @@ int readdarray_handler(iosystem_desc_t *ios) return PIO_NOERR; } -/** This function is run on the IO tasks to set the error handler. - * NOTE: not yet implemented +/** + * This function is run on the IO tasks to set the error handler. * * @param ios pointer to the iosystem_desc_t data. * * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
* @internal + * @author Ed Hartnett */ int seterrorhandling_handler(iosystem_desc_t *ios) { @@ -2035,7 +2230,6 @@ int seterrorhandling_handler(iosystem_desc_t *ios) int old_method; int *old_methodp = NULL; int mpierr; - int ret; LOG((1, "seterrorhandling_handler comproot = %d", ios->comproot)); assert(ios); @@ -2054,8 +2248,7 @@ int seterrorhandling_handler(iosystem_desc_t *ios) old_methodp = &old_method; /* Call the function. */ - if ((ret = PIOc_set_iosystem_error_handling(ios->iosysid, method, old_methodp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_set_iosystem_error_handling(ios->iosysid, method, old_methodp); LOG((1, "seterrorhandling_handler succeeded!")); return PIO_NOERR; @@ -2068,6 +2261,7 @@ int seterrorhandling_handler(iosystem_desc_t *ios) * @param ios pointer to the iosystem_desc_t data. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. + * @author Ed Hartnett */ int set_chunk_cache_handler(iosystem_desc_t *ios) { @@ -2077,7 +2271,6 @@ int set_chunk_cache_handler(iosystem_desc_t *ios) PIO_Offset nelems; float preemption; int mpierr = MPI_SUCCESS; /* Return code from MPI function codes. */ - int ret; /* Return code. */ LOG((1, "set_chunk_cache_handler called")); assert(ios); @@ -2098,8 +2291,7 @@ int set_chunk_cache_handler(iosystem_desc_t *ios) "nelems = %d preemption = %g", iosysid, iotype, size, nelems, preemption)); /* Call the function. */ - if ((ret = PIOc_set_chunk_cache(iosysid, iotype, size, nelems, preemption))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_set_chunk_cache(iosysid, iotype, size, nelems, preemption); LOG((1, "set_chunk_cache_handler succeeded!")); return PIO_NOERR; @@ -2112,6 +2304,7 @@ int set_chunk_cache_handler(iosystem_desc_t *ios) * @param ios pointer to the iosystem_desc_t data. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. 
+ * @author Ed Hartnett */ int get_chunk_cache_handler(iosystem_desc_t *ios) { @@ -2122,7 +2315,6 @@ int get_chunk_cache_handler(iosystem_desc_t *ios) PIO_Offset nelems, *nelemsp; float preemption, *preemptionp; int mpierr = MPI_SUCCESS; /* Return code from MPI function codes. */ - int ret; /* Return code. */ LOG((1, "get_chunk_cache_handler called")); assert(ios); @@ -2152,8 +2344,7 @@ int get_chunk_cache_handler(iosystem_desc_t *ios) preemptionp = &preemption; /* Call the function. */ - if ((ret = PIOc_get_chunk_cache(iosysid, iotype, sizep, nelemsp, preemptionp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_get_chunk_cache(iosysid, iotype, sizep, nelemsp, preemptionp); LOG((1, "get_chunk_cache_handler succeeded!")); return PIO_NOERR; @@ -2166,6 +2357,7 @@ int get_chunk_cache_handler(iosystem_desc_t *ios) * @param ios pointer to the iosystem_desc_t data. * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. + * @author Ed Hartnett */ int get_var_chunk_cache_handler(iosystem_desc_t *ios) { @@ -2176,7 +2368,6 @@ int get_var_chunk_cache_handler(iosystem_desc_t *ios) PIO_Offset nelems, *nelemsp; float preemption, *preemptionp; int mpierr = MPI_SUCCESS; /* Return code from MPI function codes. */ - int ret; /* Return code. */ LOG((1, "get_var_chunk_cache_handler called")); assert(ios); @@ -2206,18 +2397,19 @@ int get_var_chunk_cache_handler(iosystem_desc_t *ios) preemptionp = &preemption; /* Call the function. */ - if ((ret = PIOc_get_var_chunk_cache(ncid, varid, sizep, nelemsp, preemptionp))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_get_var_chunk_cache(ncid, varid, sizep, nelemsp, preemptionp); LOG((1, "get_var_chunk_cache_handler succeeded!")); return PIO_NOERR; } -/** This function is run on the IO tasks to free the decomp hanlder. +/** + * This function is run on the IO tasks to free the decomp hanlder. * * @param ios pointer to the iosystem_desc_t data. 
* @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. + * @author Ed Hartnett */ int freedecomp_handler(iosystem_desc_t *ios) { @@ -2239,24 +2431,25 @@ int freedecomp_handler(iosystem_desc_t *ios) /* Call the function. */ ret = PIOc_freedecomp(iosysid, ioid); - + LOG((1, "PIOc_freedecomp returned %d", ret)); return PIO_NOERR; } -/** Handle the finalize call. +/** + * Handle the finalize call. * * @param ios pointer to the iosystem info * @param index * @returns 0 for success, PIO_EIO for MPI Bcast errors, or error code * from netCDF base function. * @internal + * @author Ed Hartnett */ int finalize_handler(iosystem_desc_t *ios, int index) { int iosysid; int mpierr; - int ret; LOG((1, "finalize_handler called index = %d", index)); assert(ios); @@ -2268,10 +2461,7 @@ int finalize_handler(iosystem_desc_t *ios, int index) LOG((1, "finalize_handler got parameter iosysid = %d", iosysid)); /* Call the function. */ - LOG((2, "finalize_handler calling PIOc_finalize for iosysid = %d", - iosysid)); - if ((ret = PIOc_finalize(iosysid))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + PIOc_finalize(iosysid); LOG((1, "finalize_handler succeeded!")); return PIO_NOERR; @@ -2286,6 +2476,7 @@ int finalize_handler(iosystem_desc_t *ios, int index) * @param iosys pointer to pointer to iosystem info * @param io_comm MPI communicator for IO * @returns 0 for success, error code otherwise. 
+ * @author Ed Hartnett */ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, MPI_Comm io_comm) @@ -2295,9 +2486,10 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, MPI_Request req[component_count]; MPI_Status status; int index; + int open_components = component_count; + int finalize = 0; int mpierr; int ret = PIO_NOERR; - int open_components = component_count; LOG((1, "pio_msg_handler2 called")); assert(iosys); @@ -2317,8 +2509,8 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, } } - /* If the message is not -1, keep processing messages. */ - while (msg != -1) + /* Keep processing messages until loop is broken. */ + while (1) { LOG((3, "pio_msg_handler2 at top of loop")); @@ -2330,10 +2522,12 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, LOG((1, "about to call MPI_Waitany req[0] = %d MPI_REQUEST_NULL = %d", req[0], MPI_REQUEST_NULL)); for (int c = 0; c < component_count; c++) - LOG((2, "req[%d] = %d", c, req[c])); + LOG((3, "req[%d] = %d", c, req[c])); if ((mpierr = MPI_Waitany(component_count, req, &index, &status))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); LOG((3, "Waitany returned index = %d req[%d] = %d", index, index, req[index])); + for (int c = 0; c < component_count; c++) + LOG((3, "req[%d] = %d", c, req[c])); } /* Broadcast the index of the computational component that @@ -2347,8 +2541,8 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, my_iosys = iosys[index]; /* Broadcast the msg value to the rest of the IO tasks. 
*/ - LOG((3, "about to call msg MPI_Bcast my_iosys->io_comm = %d", my_iosys->io_comm)); - if ((mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, my_iosys->io_comm))) + LOG((3, "about to call msg MPI_Bcast io_comm = %d", io_comm)); + if ((mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, io_comm))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); LOG((1, "pio_msg_handler2 msg MPI_Bcast complete msg = %d", msg)); @@ -2356,92 +2550,92 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, switch (msg) { case PIO_MSG_INQ_TYPE: - inq_type_handler(my_iosys); + ret = inq_type_handler(my_iosys); break; case PIO_MSG_INQ_FORMAT: - inq_format_handler(my_iosys); + ret = inq_format_handler(my_iosys); break; case PIO_MSG_CREATE_FILE: - create_file_handler(my_iosys); - LOG((2, "returned from create_file_handler")); + ret = create_file_handler(my_iosys); break; case PIO_MSG_SYNC: - sync_file_handler(my_iosys); + ret = sync_file_handler(my_iosys); break; case PIO_MSG_ENDDEF: case PIO_MSG_REDEF: - LOG((2, "calling change_def_file_handler")); - change_def_file_handler(my_iosys, msg); - LOG((2, "returned from change_def_file_handler")); + ret = change_def_file_handler(my_iosys, msg); break; case PIO_MSG_OPEN_FILE: - open_file_handler(my_iosys); + ret = open_file_handler(my_iosys); break; case PIO_MSG_CLOSE_FILE: - close_file_handler(my_iosys); + ret = close_file_handler(my_iosys); break; case PIO_MSG_DELETE_FILE: - delete_file_handler(my_iosys); + ret = delete_file_handler(my_iosys); break; case PIO_MSG_RENAME_DIM: - rename_dim_handler(my_iosys); + ret = rename_dim_handler(my_iosys); break; case PIO_MSG_RENAME_VAR: - rename_var_handler(my_iosys); + ret = rename_var_handler(my_iosys); break; case PIO_MSG_RENAME_ATT: - rename_att_handler(my_iosys); + ret = rename_att_handler(my_iosys); break; case PIO_MSG_DEL_ATT: - delete_att_handler(my_iosys); + ret = delete_att_handler(my_iosys); break; case PIO_MSG_DEF_DIM: - def_dim_handler(my_iosys); + ret = def_dim_handler(my_iosys); 
break; case PIO_MSG_DEF_VAR: - def_var_handler(my_iosys); + ret = def_var_handler(my_iosys); break; case PIO_MSG_DEF_VAR_CHUNKING: - def_var_chunking_handler(my_iosys); + ret = def_var_chunking_handler(my_iosys); break; case PIO_MSG_DEF_VAR_FILL: - def_var_fill_handler(my_iosys); + ret = def_var_fill_handler(my_iosys); break; case PIO_MSG_DEF_VAR_ENDIAN: - def_var_endian_handler(my_iosys); + ret = def_var_endian_handler(my_iosys); break; case PIO_MSG_DEF_VAR_DEFLATE: - def_var_deflate_handler(my_iosys); + ret = def_var_deflate_handler(my_iosys); break; case PIO_MSG_INQ_VAR_ENDIAN: - inq_var_endian_handler(my_iosys); + ret = inq_var_endian_handler(my_iosys); break; case PIO_MSG_SET_VAR_CHUNK_CACHE: - set_var_chunk_cache_handler(my_iosys); + ret = set_var_chunk_cache_handler(my_iosys); break; case PIO_MSG_GET_VAR_CHUNK_CACHE: - get_var_chunk_cache_handler(my_iosys); + ret = get_var_chunk_cache_handler(my_iosys); break; case PIO_MSG_INQ: - inq_handler(my_iosys); + ret = inq_handler(my_iosys); + break; + case PIO_MSG_INQ_UNLIMDIMS: + ret = inq_unlimdims_handler(my_iosys); break; case PIO_MSG_INQ_DIM: - inq_dim_handler(my_iosys, msg); + ret = inq_dim_handler(my_iosys, msg); break; case PIO_MSG_INQ_DIMID: - inq_dimid_handler(my_iosys); + ret = inq_dimid_handler(my_iosys); break; case PIO_MSG_INQ_VAR: - inq_var_handler(my_iosys); + ret = inq_var_handler(my_iosys); break; case PIO_MSG_INQ_VAR_CHUNKING: - inq_var_chunking_handler(my_iosys); + ret = inq_var_chunking_handler(my_iosys); break; case PIO_MSG_INQ_VAR_FILL: - inq_var_fill_handler(my_iosys); + ret = inq_var_fill_handler(my_iosys); break; case PIO_MSG_INQ_VAR_DEFLATE: - inq_var_deflate_handler(my_iosys); + ret = inq_var_deflate_handler(my_iosys); break; case PIO_MSG_GET_ATT: ret = att_get_handler(my_iosys); @@ -2450,62 +2644,70 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, ret = att_put_handler(my_iosys); break; case PIO_MSG_INQ_VARID: - inq_varid_handler(my_iosys); + ret = 
inq_varid_handler(my_iosys); break; case PIO_MSG_INQ_ATT: - inq_att_handler(my_iosys); + ret = inq_att_handler(my_iosys); break; case PIO_MSG_INQ_ATTNAME: - inq_attname_handler(my_iosys); + ret = inq_attname_handler(my_iosys); break; case PIO_MSG_INQ_ATTID: - inq_attid_handler(my_iosys); + ret = inq_attid_handler(my_iosys); break; case PIO_MSG_GET_VARS: - get_vars_handler(my_iosys); + ret = get_vars_handler(my_iosys); break; case PIO_MSG_PUT_VARS: - put_vars_handler(my_iosys); + ret = put_vars_handler(my_iosys); break; case PIO_MSG_INITDECOMP_DOF: - initdecomp_dof_handler(my_iosys); + ret = initdecomp_dof_handler(my_iosys); break; - case PIO_MSG_WRITEDARRAY: - writedarray_handler(my_iosys); + case PIO_MSG_WRITEDARRAYMULTI: + ret = write_darray_multi_handler(my_iosys); + break; + case PIO_MSG_SETFRAME: + ret = setframe_handler(my_iosys); + break; + case PIO_MSG_ADVANCEFRAME: + ret = advanceframe_handler(my_iosys); break; case PIO_MSG_READDARRAY: - readdarray_handler(my_iosys); + ret = readdarray_handler(my_iosys); break; case PIO_MSG_SETERRORHANDLING: - seterrorhandling_handler(my_iosys); + ret = seterrorhandling_handler(my_iosys); break; case PIO_MSG_SET_CHUNK_CACHE: - set_chunk_cache_handler(my_iosys); + ret = set_chunk_cache_handler(my_iosys); break; case PIO_MSG_GET_CHUNK_CACHE: - get_chunk_cache_handler(my_iosys); + ret = get_chunk_cache_handler(my_iosys); break; case PIO_MSG_FREEDECOMP: - freedecomp_handler(my_iosys); + ret = freedecomp_handler(my_iosys); break; case PIO_MSG_SET_FILL: - set_fill_handler(my_iosys); + ret = set_fill_handler(my_iosys); break; case PIO_MSG_EXIT: - finalize_handler(my_iosys, index); - msg = -1; + finalize++; + ret = finalize_handler(my_iosys, index); break; default: LOG((0, "unknown message received %d", msg)); return PIO_EINVAL; } - /* If an error was returned by the handler, do nothing! */ - LOG((3, "pio_msg_handler2 checking error ret = %d", ret)); + /* If an error was returned by the handler, exit. 
*/ + LOG((3, "pio_msg_handler2 ret %d msg %d index %d io_rank %d", ret, msg, index, io_rank)); + if (ret) + return pio_err(my_iosys, NULL, ret, __FILE__, __LINE__); /* Listen for another msg from the component whose message we * just handled. */ - if (!io_rank && msg != -1) + if (!io_rank && !finalize) { my_iosys = iosys[index]; LOG((3, "pio_msg_handler2 about to Irecv index = %d comproot = %d union_comm = %d", @@ -2520,9 +2722,13 @@ int pio_msg_handler2(int io_rank, int component_count, iosystem_desc_t **iosys, msg, open_components)); /* If there are no more open components, exit. */ - if (msg == -1) + if (finalize) + { if (--open_components) - msg = PIO_MSG_EXIT; + finalize = 0; + else + break; + } } LOG((3, "returning from pio_msg_handler2")); diff --git a/src/externals/pio2/src/clib/pio_nc.c b/src/externals/pio2/src/clib/pio_nc.c index 00932bdfd64..a258dc1abb4 100644 --- a/src/externals/pio2/src/clib/pio_nc.c +++ b/src/externals/pio2/src/clib/pio_nc.c @@ -33,6 +33,7 @@ * * @return PIO_NOERR for success, error code otherwise. See * PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq(int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp) { @@ -159,6 +160,7 @@ int PIOc_inq(int ncid, int *ndimsp, int *nvarsp, int *ngattsp, int *unlimdimidp) * @param ncid the ncid of the open file. * @param ndimsp a pointer that will get the number of dimensions. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_ndims(int ncid, int *ndimsp) { @@ -173,6 +175,7 @@ int PIOc_inq_ndims(int ncid, int *ndimsp) * @param ncid the ncid of the open file. * @param nvarsp a pointer that will get the number of variables. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_nvars(int ncid, int *nvarsp) { @@ -186,6 +189,7 @@ int PIOc_inq_nvars(int ncid, int *nvarsp) * @param ncid the ncid of the open file. 
* @param nattsp a pointer that will get the number of attributes. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_natts(int ncid, int *ngattsp) { @@ -194,12 +198,13 @@ int PIOc_inq_natts(int ncid, int *ngattsp) /** * @ingroup PIO_inq_unlimdim - * Find out the dimension ids of any unlimited dimensions. + * Find out the dimension ids of the unlimited dimension. * * @param ncid the ncid of the open file. - * @param nattsp a pointer that will get an array of unlimited - * dimension IDs. + * @param unlimdimidp a pointer that will the ID of the unlimited + * dimension, or -1 if there is no unlimited dimension. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_unlimdim(int ncid, int *unlimdimidp) { @@ -207,6 +212,144 @@ int PIOc_inq_unlimdim(int ncid, int *unlimdimidp) return PIOc_inq(ncid, NULL, NULL, NULL, unlimdimidp); } +/** + * Find out the dimension ids of all unlimited dimensions. Note that + * only netCDF-4 files can have more than 1 unlimited dimension. + * + * @param ncid the ncid of the open file. + * @param nunlimdimsp a pointer that gets the number of unlimited + * dimensions. Ignored if NULL. + * @param unlimdimidsp a pointer that will get an array of unlimited + * dimension IDs. + * @returns 0 for success, error code otherwise. + * @ingroup PIO_inq_unlimdim + * @author Jim Edwards, Ed Hartnett + */ +int PIOc_inq_unlimdims(int ncid, int *nunlimdimsp, int *unlimdimidsp) +{ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int tmp_nunlimdims; /* The number of unlimited dims. */ + int ierr; /* Return code from function calls. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function calls. */ + + LOG((1, "PIOc_inq_unlimdims ncid = %d", ncid)); + + /* Find the info about this file. 
*/ + if ((ierr = pio_get_file(ncid, &file))) + return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); + ios = file->iosystem; + + /* If async is in use, and this is not an IO task, bcast the parameters. */ + if (ios->async) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_INQ_UNLIMDIMS; /* Message for async notification. */ + char nunlimdimsp_present = nunlimdimsp ? true : false; + char unlimdimidsp_present = unlimdimidsp ? true : false; + + if (ios->compmaster == MPI_ROOT) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&nunlimdimsp_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&unlimdimidsp_present, 1, MPI_CHAR, ios->compmaster, ios->intercomm); + LOG((2, "PIOc_inq_unlimdims ncid = %d nunlimdimsp_present = %d unlimdimidsp_present = %d", + ncid, nunlimdimsp_present, unlimdimidsp_present)); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + return check_mpi(file, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + LOG((2, "file->iotype = %d", file->iotype)); + /* If this is an IO task, then call the netCDF function. */ + if (ios->ioproc) + { + if (file->iotype == PIO_IOTYPE_NETCDF && file->do_io) + { + LOG((2, "netcdf")); + int tmp_unlimdimid; + ierr = nc_inq_unlimdim(file->fh, &tmp_unlimdimid); + LOG((2, "classic tmp_unlimdimid = %d", tmp_unlimdimid)); + tmp_nunlimdims = tmp_unlimdimid >= 0 ? 1 : 0; + if (nunlimdimsp) + *nunlimdimsp = tmp_unlimdimid >= 0 ? 
1 : 0; + if (unlimdimidsp) + *unlimdimidsp = tmp_unlimdimid; + } +#ifdef _PNETCDF + else if (file->iotype == PIO_IOTYPE_PNETCDF) + { + LOG((2, "pnetcdf")); + int tmp_unlimdimid; + ierr = ncmpi_inq_unlimdim(file->fh, &tmp_unlimdimid); + LOG((2, "pnetcdf tmp_unlimdimid = %d", tmp_unlimdimid)); + tmp_nunlimdims = tmp_unlimdimid >= 0 ? 1 : 0; + if (nunlimdimsp) + *nunlimdimsp = tmp_nunlimdims; + if (unlimdimidsp) + *unlimdimidsp = tmp_unlimdimid; + } +#endif /* _PNETCDF */ +#ifdef _NETCDF4 + else if ((file->iotype == PIO_IOTYPE_NETCDF4C || file->iotype == PIO_IOTYPE_NETCDF4P) && + file->do_io) + { + LOG((2, "PIOc_inq calling netcdf-4 nc_inq_unlimdims")); + int *tmp_unlimdimids; + ierr = nc_inq_unlimdims(file->fh, &tmp_nunlimdims, NULL); + if (!ierr) + { + if (nunlimdimsp) + *nunlimdimsp = tmp_nunlimdims; + LOG((3, "tmp_nunlimdims = %d", tmp_nunlimdims)); + if (!(tmp_unlimdimids = malloc(tmp_nunlimdims * sizeof(int)))) + ierr = PIO_ENOMEM; + if (!ierr) + ierr = nc_inq_unlimdims(file->fh, &tmp_nunlimdims, tmp_unlimdimids); + if (unlimdimidsp) + for (int d = 0; d < tmp_nunlimdims; d++) + { + LOG((3, "tmp_unlimdimids[%d] = %d", d, tmp_unlimdimids[d])); + unlimdimidsp[d] = tmp_unlimdimids[d]; + } + free(tmp_unlimdimids); + } + } +#endif /* _NETCDF4 */ + + LOG((2, "PIOc_inq_unlimdims netcdf call returned %d", ierr)); + } + + /* Broadcast and check the return code. */ + if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if (ierr) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Broadcast results to all tasks. Ignore NULL parameters. 
*/ + if ((mpierr = MPI_Bcast(&tmp_nunlimdims, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (nunlimdimsp) + if ((mpierr = MPI_Bcast(nunlimdimsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + if (unlimdimidsp) + if ((mpierr = MPI_Bcast(unlimdimidsp, tmp_nunlimdims, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + return PIO_NOERR; +} + /** * @ingroup PIO_typelen * Learn the name and size of a type. @@ -216,6 +359,7 @@ int PIOc_inq_unlimdim(int ncid, int *unlimdimidp) * @param name pointer that will get the name of the type. * @param sizep pointer that will get the size of the type in bytes. * @returns 0 for success, error code otherwise. + * @author Ed Hartnett */ int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep) { @@ -305,6 +449,7 @@ int PIOc_inq_type(int ncid, nc_type xtype, char *name, PIO_Offset *sizep) * @param ncid the ncid of an open file. * @param formatp a pointer that will get the format. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_format(int ncid, int *formatp) { @@ -384,6 +529,7 @@ int PIOc_inq_format(int ncid, int *formatp) * PIOc_openfile() or PIOc_createfile(). * @param lenp a pointer that will get the number of values * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_dim(int ncid, int dimid, char *name, PIO_Offset *lenp) { @@ -485,6 +631,7 @@ int PIOc_inq_dim(int ncid, int dimid, char *name, PIO_Offset *lenp) * @param name a pointer that gets the name of the dimension. Igorned * if NULL. * @returns 0 for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_dimname(int ncid, int dimid, char *name) { @@ -501,6 +648,7 @@ int PIOc_inq_dimname(int ncid, int dimid, char *name) * @param lenp a pointer that gets the length of the dimension. Igorned * if NULL. * @returns 0 for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_dimlen(int ncid, int dimid, PIO_Offset *lenp) { @@ -520,6 +668,7 @@ int PIOc_inq_dimlen(int ncid, int dimid, PIO_Offset *lenp) * PIOc_openfile() or PIOc_createfile(). * @param idp a pointer that will get the id of the variable or attribute. * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_dimid(int ncid, const char *name, int *idp) { @@ -611,6 +760,7 @@ int PIOc_inq_dimid(int ncid, const char *name, int *idp) * @param xtypep a pointer that will get the type of the attribute. * @param nattsp a pointer that will get the number of attributes * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, int *dimidsp, int *nattsp) @@ -686,6 +836,7 @@ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) { ierr = nc_inq_varndims(file->fh, varid, &ndims); + LOG((3, "nc_inq_varndims called ndims = %d", ndims)); if (!ierr) { char my_name[NC_MAX_NAME + 1]; @@ -738,11 +889,10 @@ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, if (ndimsp) { - if (ios->ioroot) - LOG((2, "PIOc_inq_var about to Bcast ndims = %d ios->ioroot = %d", *ndimsp, ios->ioroot)); + LOG((2, "PIOc_inq_var about to Bcast ndims = %d ios->ioroot = %d ios->my_comm = %d", + *ndimsp, ios->ioroot, ios->my_comm)); if ((mpierr = MPI_Bcast(ndimsp, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); - file->varlist[varid].ndims = *ndimsp; LOG((2, "PIOc_inq_var Bcast ndims = %d", *ndimsp)); } if (dimidsp) @@ -767,6 +917,7 @@ int PIOc_inq_var(int ncid, int varid, char *name, nc_type *xtypep, int *ndimsp, * @param varid the variable ID. * @param name a pointer that will get the variable name. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_varname(int ncid, int varid, char *name) { @@ -782,6 +933,7 @@ int PIOc_inq_varname(int ncid, int varid, char *name) * @param xtypep a pointer that will get the type of the * attribute. Ignored if NULL. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_vartype(int ncid, int varid, nc_type *xtypep) { @@ -797,6 +949,7 @@ int PIOc_inq_vartype(int ncid, int varid, nc_type *xtypep) * @param ndimsp a pointer that will get the number of * dimensions. Ignored if NULL. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_varndims(int ncid, int varid, int *ndimsp) { @@ -812,6 +965,7 @@ int PIOc_inq_varndims(int ncid, int varid, int *ndimsp) * @param dimidsp a pointer that will get an array of dimids. Ignored * if NULL. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_vardimid(int ncid, int varid, int *dimidsp) { @@ -827,6 +981,7 @@ int PIOc_inq_vardimid(int ncid, int varid, int *dimidsp) * @param nattsp a pointer that will get the number of attriburtes. Ignored * if NULL. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_varnatts(int ncid, int varid, int *nattsp) { @@ -847,6 +1002,7 @@ int PIOc_inq_varnatts(int ncid, int varid, int *nattsp) * @param varid the variable ID. * @param varidp a pointer that will get the variable id * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_varid(int ncid, const char *name, int *varidp) { @@ -933,6 +1089,7 @@ int PIOc_inq_varid(int ncid, const char *name, int *varidp) * @param xtypep a pointer that will get the type of the attribute. * @param lenp a pointer that will get the number of values * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_att(int ncid, int varid, const char *name, nc_type *xtypep, PIO_Offset *lenp) @@ -1027,6 +1184,7 @@ int PIOc_inq_att(int ncid, int varid, const char *name, nc_type *xtypep, * @param lenp a pointer that gets the lenght of the attribute * array. Ignored if NULL. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_attlen(int ncid, int varid, const char *name, PIO_Offset *lenp) { @@ -1043,6 +1201,7 @@ int PIOc_inq_attlen(int ncid, int varid, const char *name, PIO_Offset *lenp) * @param xtypep a pointer that gets the type of the * attribute. 
Ignored if NULL. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_atttype(int ncid, int varid, const char *name, nc_type *xtypep) { @@ -1063,6 +1222,7 @@ int PIOc_inq_atttype(int ncid, int varid, const char *name, nc_type *xtypep) * @param varid the variable ID. * @param attnum the attribute ID. * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_attname(int ncid, int varid, int attnum, char *name) { @@ -1154,6 +1314,7 @@ int PIOc_inq_attname(int ncid, int varid, int attnum, char *name) * @param varid the variable ID. * @param idp a pointer that will get the id of the variable or attribute. * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_attid(int ncid, int varid, const char *name, int *idp) { @@ -1243,6 +1404,7 @@ int PIOc_inq_attid(int ncid, int varid, const char *name, int *idp) * @param ncid the ncid of the open file, obtained from * PIOc_openfile() or PIOc_createfile(). * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_rename_dim(int ncid, int dimid, const char *name) { @@ -1328,6 +1490,7 @@ int PIOc_rename_dim(int ncid, int dimid, const char *name) * PIOc_openfile() or PIOc_createfile(). * @param varid the variable ID. * @return PIO_NOERR for success, error code otherwise. See PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_rename_var(int ncid, int varid, const char *name) { @@ -1414,6 +1577,7 @@ int PIOc_rename_var(int ncid, int varid, const char *name) * @param varid the variable ID. * @return PIO_NOERR for success, error code otherwise. 
See * PIOc_Set_File_Error_Handling + * @author Jim Edwards, Ed Hartnett */ int PIOc_rename_att(int ncid, int varid, const char *name, const char *newname) @@ -1505,6 +1669,7 @@ int PIOc_rename_att(int ncid, int varid, const char *name, * @param varid the variable ID. * @param name of the attribute to delete. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_del_att(int ncid, int varid, const char *name) { @@ -1587,6 +1752,7 @@ int PIOc_del_att(int ncid, int varid, const char *name) * @param old_modep a pointer to an int that gets the old setting. * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_set_fill + * @author Jim Edwards, Ed Hartnett */ int PIOc_set_fill(int ncid, int fillmode, int *old_modep) { @@ -1610,6 +1776,7 @@ int PIOc_set_fill(int ncid, int fillmode, int *old_modep) int msg = PIO_MSG_SET_FILL; int old_modep_present = old_modep ? 1 : 0; + LOG((3, "PIOc_set_fill about to send msg %d", msg)); if (ios->compmaster == MPI_ROOT) mpierr = MPI_Send(&msg, 1,MPI_INT, ios->ioroot, 1, ios->union_comm); @@ -1675,6 +1842,7 @@ int PIOc_set_fill(int ncid, int fillmode, int *old_modep) * @param ncid the ncid of the open file, obtained from * PIOc_openfile() or PIOc_createfile(). * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_enddef(int ncid) { @@ -1693,6 +1861,7 @@ int PIOc_enddef(int ncid) * @param ncid the ncid of the open file, obtained from * PIOc_openfile() or PIOc_createfile(). * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_redef(int ncid) { @@ -1712,6 +1881,7 @@ int PIOc_redef(int ncid) * PIOc_openfile() or PIOc_createfile(). * @param idp a pointer that will get the id of the variable or attribute. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_def_dim(int ncid, const char *name, PIO_Offset len, int *idp) { @@ -1802,27 +1972,93 @@ int PIOc_def_dim(int ncid, const char *name, PIO_Offset len, int *idp) * @param varidp a pointer that will get the variable id * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_def_var + * @author Jim Edwards, Ed Hartnett */ int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims, const int *dimidsp, int *varidp) { - iosystem_desc_t *ios; /* Pointer to io system information. */ - file_desc_t *file; /* Pointer to file information. */ - int ierr; /* Return code from function calls. */ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int invalid_unlim_dim = 0; /* True invalid dims are used. */ + int varid; /* The varid of the created var. */ + int rec_var = 0; /* Non-zero if this var uses unlimited dim. */ + PIO_Offset pio_type_size; /* Size of pio type in bytes. */ + MPI_Datatype mpi_type; /* The correspoding MPI type. */ + int mpi_type_size; /* Size of mpi type. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ + int ierr; /* Return code from function calls. */ /* Get the file information. */ if ((ierr = pio_get_file(ncid, &file))) return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); ios = file->iosystem; - /* User must provide name and storage for varid. */ - if (!name || !varidp || strlen(name) > NC_MAX_NAME) + /* User must provide name. */ + if (!name || strlen(name) > NC_MAX_NAME) return pio_err(ios, file, PIO_EINVAL, __FILE__, __LINE__); LOG((1, "PIOc_def_var ncid = %d name = %s xtype = %d ndims = %d", ncid, name, xtype, ndims)); + /* Run this on all tasks if async is not in use, but only on + * non-IO tasks if async is in use. Learn whether each dimension + * is unlimited. */ + if (!ios->async || !ios->ioproc) + { + int nunlimdims; + + /* Get size of type. 
*/ + if ((ierr = PIOc_inq_type(ncid, xtype, NULL, &pio_type_size))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Get the MPI type corresponding with the PIO type. */ + if ((ierr = find_mpi_type(xtype, &mpi_type, NULL))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + + /* Get the size of the MPI type. */ + if ((mpierr = MPI_Type_size(mpi_type, &mpi_type_size))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + + /* How many unlimited dims are present in the file? */ + if ((ierr = PIOc_inq_unlimdims(ncid, &nunlimdims, NULL))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + if (nunlimdims) + { + int unlimdimids[nunlimdims]; + + /* Find the IDs of the unlimited dimension(s). */ + if ((ierr = PIOc_inq_unlimdims(ncid, NULL, unlimdimids))) + return check_netcdf(file, ierr, __FILE__, __LINE__); + + /* Check each dimid for this variable to see it it is an + * unlimited dimension. */ + for (int d = 0; d < ndims; d++) + { + int unlim_found = 0; + + /* Check against each unlimited dimid. */ + for (int ud = 0; ud < nunlimdims; ud++) + { + if (dimidsp[d] == unlimdimids[ud]) + { + unlim_found++; + break; + } + } + + /* Only first dim may be unlimited, for PIO. */ + if (unlim_found) + { + if (d == 0) + rec_var++; + else + invalid_unlim_dim++; + } + } + } + } + /* If using async, and not an IO task, then send parameters. */ if (ios->async) { @@ -1853,26 +2089,43 @@ int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims, check_mpi(file, mpierr2, __FILE__, __LINE__); if (mpierr) return check_mpi(file, mpierr, __FILE__, __LINE__); + + /* Broadcast values currently only known on computation tasks to IO tasks. 
*/ + if ((mpierr = MPI_Bcast(&rec_var, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&invalid_unlim_dim, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&pio_type_size, 1, MPI_OFFSET, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&mpi_type, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(&mpi_type_size, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); } + /* Check that only one unlimited dim is specified, and that it is + * first. */ + if (invalid_unlim_dim) + return PIO_EINVAL; + /* If this is an IO task, then call the netCDF function. */ if (ios->ioproc) { #ifdef _PNETCDF if (file->iotype == PIO_IOTYPE_PNETCDF) - ierr = ncmpi_def_var(file->fh, name, xtype, ndims, dimidsp, varidp); + ierr = ncmpi_def_var(file->fh, name, xtype, ndims, dimidsp, &varid); #endif /* _PNETCDF */ if (file->iotype != PIO_IOTYPE_PNETCDF && file->do_io) - ierr = nc_def_var(file->fh, name, xtype, ndims, dimidsp, varidp); + ierr = nc_def_var(file->fh, name, xtype, ndims, dimidsp, &varid); #ifdef _NETCDF4 /* For netCDF-4 serial files, turn on compression for this variable. */ if (!ierr && file->iotype == PIO_IOTYPE_NETCDF4C) - ierr = nc_def_var_deflate(file->fh, *varidp, 0, 1, 1); + ierr = nc_def_var_deflate(file->fh, varid, 0, 1, 1); /* For netCDF-4 parallel files, set parallel access to collective. */ if (!ierr && file->iotype == PIO_IOTYPE_NETCDF4P) - ierr = nc_var_par_access(file->fh, *varidp, NC_COLLECTIVE); + ierr = nc_var_par_access(file->fh, varid, NC_COLLECTIVE); #endif /* _NETCDF4 */ } @@ -1883,9 +2136,16 @@ int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims, return check_netcdf(file, ierr, __FILE__, __LINE__); /* Broadcast results. 
*/ + if ((mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->ioroot, ios->my_comm))) + check_mpi(file, mpierr, __FILE__, __LINE__); if (varidp) - if ((mpierr = MPI_Bcast(varidp, 1, MPI_INT, ios->ioroot, ios->my_comm))) - check_mpi(file, mpierr, __FILE__, __LINE__); + *varidp = varid; + + /* Add to the list of var_desc_t structs for this file. */ + if ((ierr = add_to_varlist(varid, rec_var, xtype, (int)pio_type_size, mpi_type, + mpi_type_size, &file->varlist))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + file->nvars++; return PIO_NOERR; } @@ -1915,6 +2175,7 @@ int PIOc_def_var(int ncid, const char *name, nc_type xtype, int ndims, * @param fill_value pointer to the fill value to be used if fill_mode is set to NC_FILL. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_def_var + * @author Jim Edwards, Ed Hartnett */ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_valuep) { @@ -2002,7 +2263,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value else if (file->iotype == PIO_IOTYPE_NETCDF) { LOG((2, "defining fill value attribute for netCDF classic file")); - if (file->do_io) + if (file->do_io) ierr = nc_put_att(file->fh, varid, _FillValue, xtype, 1, fill_valuep); } else @@ -2042,6 +2303,7 @@ int PIOc_def_var_fill(int ncid, int varid, int fill_mode, const void *fill_value * this variable. Ignored if NULL. * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_inq_var_fill + * @author Jim Edwards, Ed Hartnett */ int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep) { @@ -2128,9 +2390,18 @@ int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep) /* Get the file-level fill mode. 
*/ if (no_fill) { - ierr = nc_set_fill(file->fh, NC_NOFILL, no_fill); - if (!ierr) - ierr = nc_set_fill(file->fh, *no_fill, NULL); + if (file->writable) + { + ierr = nc_set_fill(file->fh, NC_NOFILL, no_fill); + if (!ierr) + ierr = nc_set_fill(file->fh, *no_fill, NULL); + } + else + { + /* pnetcdf and netCDF-4 return PIO_FILL for read-only + * files. */ + *no_fill = PIO_FILL; + } } if (!ierr && fill_valuep) @@ -2212,6 +2483,7 @@ int PIOc_inq_var_fill(int ncid, int varid, int *no_fill, void *fill_valuep) * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_get_att + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att(int ncid, int varid, const char *name, void *ip) { @@ -2254,6 +2526,7 @@ int PIOc_get_att(int ncid, int varid, const char *name, void *ip) * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const void *op) @@ -2274,6 +2547,7 @@ int PIOc_put_att(int ncid, int varid, const char *name, nc_type xtype, * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_double(int ncid, int varid, const char *name, double *ip) { @@ -2293,6 +2567,7 @@ int PIOc_get_att_double(int ncid, int varid, const char *name, double *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_uchar(int ncid, int varid, const char *name, unsigned char *ip) { @@ -2312,6 +2587,7 @@ int PIOc_get_att_uchar(int ncid, int varid, const char *name, unsigned char *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_ushort(int ncid, int varid, const char *name, unsigned short *ip) { @@ -2331,6 +2607,7 @@ int PIOc_get_att_ushort(int ncid, int varid, const char *name, unsigned short *i * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_get_att + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_uint(int ncid, int varid, const char *name, unsigned int *ip) { @@ -2350,6 +2627,7 @@ int PIOc_get_att_uint(int ncid, int varid, const char *name, unsigned int *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_long(int ncid, int varid, const char *name, long *ip) { @@ -2371,6 +2649,7 @@ int PIOc_get_att_long(int ncid, int varid, const char *name, long *ip) * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. * @ingroup PIO_get_att + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_text(int ncid, int varid, const char *name, char *ip) { @@ -2390,6 +2669,7 @@ int PIOc_get_att_text(int ncid, int varid, const char *name, char *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_schar(int ncid, int varid, const char *name, signed char *ip) { @@ -2409,6 +2689,7 @@ int PIOc_get_att_schar(int ncid, int varid, const char *name, signed char *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_ulonglong(int ncid, int varid, const char *name, unsigned long long *ip) { @@ -2428,6 +2709,7 @@ int PIOc_get_att_ulonglong(int ncid, int varid, const char *name, unsigned long * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_short(int ncid, int varid, const char *name, short *ip) { @@ -2447,6 +2729,7 @@ int PIOc_get_att_short(int ncid, int varid, const char *name, short *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_int(int ncid, int varid, const char *name, int *ip) { @@ -2466,6 +2749,7 @@ int PIOc_get_att_int(int ncid, int varid, const char *name, int *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_longlong(int ncid, int varid, const char *name, long long *ip) { @@ -2485,6 +2769,7 @@ int PIOc_get_att_longlong(int ncid, int varid, const char *name, long long *ip) * @param name the name of the attribute to get * @param ip a pointer that will get the attribute value. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_get_att_float(int ncid, int varid, const char *name, float *ip) { @@ -2506,6 +2791,7 @@ int PIOc_get_att_float(int ncid, int varid, const char *name, float *ip) * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_schar(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const signed char *op) @@ -2528,6 +2814,7 @@ int PIOc_put_att_schar(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_long(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long *op) @@ -2550,6 +2837,7 @@ int PIOc_put_att_long(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_int(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const int *op) @@ -2572,6 +2860,7 @@ int PIOc_put_att_int(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_uchar(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned char *op) @@ -2594,6 +2883,7 @@ int PIOc_put_att_uchar(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. 
+ * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_longlong(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const long long *op) @@ -2616,6 +2906,7 @@ int PIOc_put_att_longlong(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_uint(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned int *op) @@ -2638,6 +2929,7 @@ int PIOc_put_att_uint(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_float(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const float *op) @@ -2660,6 +2952,7 @@ int PIOc_put_att_float(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_ulonglong(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned long long *op) @@ -2682,6 +2975,7 @@ int PIOc_put_att_ulonglong(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_ushort(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const unsigned short *op) @@ -2704,6 +2998,7 @@ int PIOc_put_att_ushort(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. 
* @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_text(int ncid, int varid, const char *name, PIO_Offset len, const char *op) @@ -2726,6 +3021,7 @@ int PIOc_put_att_text(int ncid, int varid, const char *name, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_short(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const short *op) @@ -2748,6 +3044,7 @@ int PIOc_put_att_short(int ncid, int varid, const char *name, nc_type xtype, * @param len the length of the attribute array. * @param op a pointer with the attribute data. * @return PIO_NOERR for success, error code otherwise. + * @author Jim Edwards, Ed Hartnett */ int PIOc_put_att_double(int ncid, int varid, const char *name, nc_type xtype, PIO_Offset len, const double *op) diff --git a/src/externals/pio2/src/clib/pio_nc4.c b/src/externals/pio2/src/clib/pio_nc4.c index 0b1cdd0e6b7..086bbf699f8 100644 --- a/src/externals/pio2/src/clib/pio_nc4.c +++ b/src/externals/pio2/src/clib/pio_nc4.c @@ -29,6 +29,7 @@ * compressed. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_def_var + * @author Ed Hartnett */ int PIOc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, int deflate_level) @@ -118,6 +119,7 @@ int PIOc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, * if NULL. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_inq_var + * @author Ed Hartnett */ int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep, int *deflatep, int *deflate_levelp) @@ -227,6 +229,7 @@ int PIOc_inq_var_deflate(int ncid, int varid, int *shufflep, int *deflatep, * @param chunksizep an array of chunksizes. Must have a chunksize for * every variable dimension. * @return PIO_NOERR for success, otherwise an error code. 
+ * @author Ed Hartnett */ int PIOc_def_var_chunking(int ncid, int varid, int storage, const PIO_Offset *chunksizesp) { @@ -348,6 +351,7 @@ int PIOc_def_var_chunking(int ncid, int varid, int storage, const PIO_Offset *ch * dimensions. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_inq_var + * @author Ed Hartnett */ int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunksizesp) { @@ -477,6 +481,7 @@ int PIOc_inq_var_chunking(int ncid, int varid, int *storagep, PIO_Offset *chunks * every variable dimension. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_def_var + * @author Ed Hartnett */ int PIOc_def_var_endian(int ncid, int varid, int endian) { @@ -552,6 +557,7 @@ int PIOc_def_var_endian(int ncid, int varid, int endian) * endianness. Ignored if NULL. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_inq_var + * @author Ed Hartnett */ int PIOc_inq_var_endian(int ncid, int varid, int *endianp) { @@ -643,6 +649,7 @@ int PIOc_inq_var_endian(int ncid, int varid, int *endianp) * @param preemption preemption setting for file cache. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_def_var + * @author Ed Hartnett */ int PIOc_set_chunk_cache(int iosysid, int iotype, PIO_Offset size, PIO_Offset nelems, float preemption) @@ -742,6 +749,7 @@ int PIOc_set_chunk_cache(int iosysid, int iotype, PIO_Offset size, PIO_Offset ne * @param preemptionp gets the preemption setting for file cache. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_def_var + * @author Ed Hartnett */ int PIOc_get_chunk_cache(int iosysid, int iotype, PIO_Offset *sizep, PIO_Offset *nelemsp, float *preemptionp) @@ -860,6 +868,7 @@ int PIOc_get_chunk_cache(int iosysid, int iotype, PIO_Offset *sizep, PIO_Offset * every variable dimension. * @return PIO_NOERR for success, otherwise an error code. 
* @ingroup PIO_def_var + * @author Ed Hartnett */ int PIOc_set_var_chunk_cache(int ncid, int varid, PIO_Offset size, PIO_Offset nelems, float preemption) @@ -945,6 +954,7 @@ int PIOc_set_var_chunk_cache(int ncid, int varid, PIO_Offset size, PIO_Offset ne * @param preemptionp will get the cache preemption value. Ignored if NULL. * @return PIO_NOERR for success, otherwise an error code. * @ingroup PIO_inq_var + * @author Ed Hartnett */ int PIOc_get_var_chunk_cache(int ncid, int varid, PIO_Offset *sizep, PIO_Offset *nelemsp, float *preemptionp) diff --git a/src/externals/pio2/src/clib/pio_put_nc.c b/src/externals/pio2/src/clib/pio_put_nc.c index 7c74144da3b..9abd69be7c8 100644 --- a/src/externals/pio2/src/clib/pio_put_nc.c +++ b/src/externals/pio2/src/clib/pio_put_nc.c @@ -30,6 +30,7 @@ * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const char *op) @@ -56,6 +57,7 @@ int PIOc_put_vars_text(int ncid, int varid, const PIO_Offset *start, const PIO_O * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_uchar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, @@ -83,6 +85,7 @@ int PIOc_put_vars_uchar(int ncid, int varid, const PIO_Offset *start, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_schar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const signed char *op) @@ -110,6 +113,7 @@ int PIOc_put_vars_schar(int ncid, int varid, const PIO_Offset *start, const PIO_ * used. * @param op pointer to the data to be written. 
* @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_ushort(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const unsigned short *op) @@ -136,6 +140,7 @@ int PIOc_put_vars_ushort(int ncid, int varid, const PIO_Offset *start, const PIO * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_short(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const short *op) @@ -163,6 +168,7 @@ int PIOc_put_vars_short(int ncid, int varid, const PIO_Offset *start, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_uint(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const unsigned int *op) @@ -189,6 +195,7 @@ int PIOc_put_vars_uint(int ncid, int varid, const PIO_Offset *start, const PIO_O * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const int *op) @@ -215,6 +222,7 @@ int PIOc_put_vars_int(int ncid, int varid, const PIO_Offset *start, const PIO_Of * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_long(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const long *op) @@ -241,6 +249,7 @@ int PIOc_put_vars_long(int ncid, int varid, const PIO_Offset *start, const PIO_O * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_vars_float(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const float *op) @@ -268,6 +277,7 @@ int PIOc_put_vars_float(int ncid, int varid, const PIO_Offset *start, const PIO_ * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_longlong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const long long *op) @@ -295,6 +305,7 @@ int PIOc_put_vars_longlong(int ncid, int varid, const PIO_Offset *start, const P * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_double(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const double *op) @@ -322,6 +333,7 @@ int PIOc_put_vars_double(int ncid, int varid, const PIO_Offset *start, const PIO * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars_ulonglong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const unsigned long long *op) @@ -342,6 +354,7 @@ int PIOc_put_vars_ulonglong(int ncid, int varid, const PIO_Offset *start, const * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_text(int ncid, int varid, const PIO_Offset *index, const char *op) { @@ -361,6 +374,7 @@ int PIOc_put_var1_text(int ncid, int varid, const PIO_Offset *index, const char * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_var1_uchar(int ncid, int varid, const PIO_Offset *index, const unsigned char *op) @@ -381,6 +395,7 @@ int PIOc_put_var1_uchar(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_schar(int ncid, int varid, const PIO_Offset *index, const signed char *op) @@ -401,6 +416,7 @@ int PIOc_put_var1_schar(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_ushort(int ncid, int varid, const PIO_Offset *index, const unsigned short *op) @@ -421,6 +437,7 @@ int PIOc_put_var1_ushort(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_short(int ncid, int varid, const PIO_Offset *index, const short *op) @@ -441,6 +458,7 @@ int PIOc_put_var1_short(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_uint(int ncid, int varid, const PIO_Offset *index, const unsigned int *op) @@ -461,6 +479,7 @@ int PIOc_put_var1_uint(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_int(int ncid, int varid, const PIO_Offset *index, const int *op) { @@ -480,6 +499,7 @@ int PIOc_put_var1_int(int ncid, int varid, const PIO_Offset *index, const int *o * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_var1_float(int ncid, int varid, const PIO_Offset *index, const float *op) { @@ -499,6 +519,7 @@ int PIOc_put_var1_float(int ncid, int varid, const PIO_Offset *index, const floa * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_long(int ncid, int varid, const PIO_Offset *index, const long *op) { @@ -518,6 +539,7 @@ int PIOc_put_var1_long(int ncid, int varid, const PIO_Offset *index, const long * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_double(int ncid, int varid, const PIO_Offset *index, const double *op) @@ -538,6 +560,7 @@ int PIOc_put_var1_double(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_ulonglong(int ncid, int varid, const PIO_Offset *index, const unsigned long long *op) @@ -558,6 +581,7 @@ int PIOc_put_var1_ulonglong(int ncid, int varid, const PIO_Offset *index, * used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1_longlong(int ncid, int varid, const PIO_Offset *index, const long long *op) @@ -581,6 +605,7 @@ int PIOc_put_var1_longlong(int ncid, int varid, const PIO_Offset *index, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_text(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const char *op) @@ -604,6 +629,7 @@ int PIOc_put_vara_text(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_vara_uchar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const unsigned char *op) @@ -627,6 +653,7 @@ int PIOc_put_vara_uchar(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_schar(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const signed char *op) @@ -650,6 +677,7 @@ int PIOc_put_vara_schar(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_ushort(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const unsigned short *op) @@ -673,6 +701,7 @@ int PIOc_put_vara_ushort(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_short(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const short *op) @@ -696,6 +725,7 @@ int PIOc_put_vara_short(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_uint(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const unsigned int *op) @@ -719,6 +749,7 @@ int PIOc_put_vara_uint(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_vara_int(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const int *op) @@ -742,6 +773,7 @@ int PIOc_put_vara_int(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_long(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const long *op) @@ -765,6 +797,7 @@ int PIOc_put_vara_long(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_float(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const float *op) @@ -788,6 +821,7 @@ int PIOc_put_vara_float(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_ulonglong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const unsigned long long *op) @@ -811,6 +845,7 @@ int PIOc_put_vara_ulonglong(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara_longlong(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const long long *op) @@ -834,6 +869,7 @@ int PIOc_put_vara_longlong(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_vara_double(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const double *op) @@ -857,6 +893,7 @@ int PIOc_put_vara_double(int ncid, int varid, const PIO_Offset *start, * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_text(int ncid, int varid, const char *op) { @@ -879,6 +916,7 @@ int PIOc_put_var_text(int ncid, int varid, const char *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_uchar(int ncid, int varid, const unsigned char *op) { @@ -901,6 +939,7 @@ int PIOc_put_var_uchar(int ncid, int varid, const unsigned char *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_schar(int ncid, int varid, const signed char *op) { @@ -923,6 +962,7 @@ int PIOc_put_var_schar(int ncid, int varid, const signed char *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_ushort(int ncid, int varid, const unsigned short *op) { @@ -945,6 +985,7 @@ int PIOc_put_var_ushort(int ncid, int varid, const unsigned short *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_short(int ncid, int varid, const short *op) { @@ -967,6 +1008,7 @@ int PIOc_put_var_short(int ncid, int varid, const short *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_var_uint(int ncid, int varid, const unsigned int *op) { @@ -989,6 +1031,7 @@ int PIOc_put_var_uint(int ncid, int varid, const unsigned int *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_int(int ncid, int varid, const int *op) { @@ -1011,6 +1054,7 @@ int PIOc_put_var_int(int ncid, int varid, const int *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_long(int ncid, int varid, const long *op) { @@ -1033,6 +1077,7 @@ int PIOc_put_var_long(int ncid, int varid, const long *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_float(int ncid, int varid, const float *op) { @@ -1055,6 +1100,7 @@ int PIOc_put_var_float(int ncid, int varid, const float *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_ulonglong(int ncid, int varid, const unsigned long long *op) { @@ -1077,6 +1123,7 @@ int PIOc_put_var_ulonglong(int ncid, int varid, const unsigned long long *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var_longlong(int ncid, int varid, const long long *op) { @@ -1099,6 +1146,7 @@ int PIOc_put_var_longlong(int ncid, int varid, const long long *op) * the variable will be used. * @param op pointer to the data to be written. * @return PIO_NOERR on success, error code otherwise. 
+ * @author Ed Hartnett */ int PIOc_put_var_double(int ncid, int varid, const double *op) { @@ -1115,6 +1163,7 @@ int PIOc_put_var_double(int ncid, int varid, const double *op) * @param varid the variable ID number * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var(int ncid, int varid, const void *op) { @@ -1134,6 +1183,7 @@ int PIOc_put_var(int ncid, int varid, const void *op) * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_var1(int ncid, int varid, const PIO_Offset *index, const void *op) { @@ -1156,6 +1206,7 @@ int PIOc_put_var1(int ncid, int varid, const PIO_Offset *index, const void *op) * the variable will be used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vara(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const void *op) @@ -1182,6 +1233,7 @@ int PIOc_put_vara(int ncid, int varid, const PIO_Offset *start, const PIO_Offset * used. * @param buf pointer that will get the data. * @return PIO_NOERR on success, error code otherwise. + * @author Ed Hartnett */ int PIOc_put_vars(int ncid, int varid, const PIO_Offset *start, const PIO_Offset *count, const PIO_Offset *stride, const void *op) diff --git a/src/externals/pio2/src/clib/pio_rearrange.c b/src/externals/pio2/src/clib/pio_rearrange.c index 74b27d45b06..8ee13f411e6 100644 --- a/src/externals/pio2/src/clib/pio_rearrange.c +++ b/src/externals/pio2/src/clib/pio_rearrange.c @@ -4,8 +4,8 @@ * @author Jim Edwards */ #include -#include #include +#include /** * Convert a 1-D index into a coordinate value in an arbitrary @@ -21,6 +21,7 @@ * of data. * @param dim_list array of length ndims that will get the dimensions * corresponding to this index. 
+ * @author Jim Edwards */ void idx_to_dim_list(int ndims, const int *gdimlen, PIO_Offset idx, PIO_Offset *dim_list) @@ -66,6 +67,7 @@ void idx_to_dim_list(int ndims, const int *gdimlen, PIO_Offset idx, * @param max_size array of size dim + 1 that contains the maximum * sizes along that dimension. * @param count array of size dim + 1 that gets the new counts. + * @author Jim Edwards */ void expand_region(int dim, const int *gdimlen, int maplen, const PIO_Offset *map, int region_size, int region_stride, const int *max_size, @@ -142,6 +144,7 @@ void expand_region(int dim, const int *gdimlen, int maplen, const PIO_Offset *ma * @param count array (length ndims) that will get counts of found * region. * @returns length of the region found. + * @author Jim Edwards */ PIO_Offset find_region(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map, PIO_Offset *start, PIO_Offset *count) @@ -188,6 +191,7 @@ PIO_Offset find_region(int ndims, const int *gdimlen, int maplen, const PIO_Offs * @param lcoord pointer to an offset. * @param count array of counts. * @returns the local array index. + * @author Jim Edwards */ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset *count) { @@ -214,6 +218,7 @@ PIO_Offset coord_to_lindex(int ndims, const PIO_Offset *lcoord, const PIO_Offset * @param io_comm the IO communicator * @param iodesc a pointer to the io_desc_t struct. * @returns 0 for success, error code otherwise. + * @author Jim Edwards */ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc) { @@ -252,7 +257,7 @@ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc) * Create the derived MPI datatypes used for comp2io and io2comp * transfers. Used in define_iodesc_datatypes(). * - * @param basetype The MPI type of data (MPI_INT, etc.). + * @param mpitype The MPI type of data (MPI_INT, etc.). * @param msgcnt This is the number of MPI types that are created. 
* @param mindex An array (length numinds) of indexes into the data * array from the comp map. Will be NULL when count is zero. @@ -263,8 +268,9 @@ int compute_maxIObuffersize(MPI_Comm io_comm, io_desc_t *iodesc) * @param mtype pointer to an array (length msgcnt) which gets the * created datatypes. Will be NULL when iodesc->nrecvs == 0. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ -int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, +int create_mpi_datatypes(MPI_Datatype mpitype, int msgcnt, const PIO_Offset *mindex, const int *mcount, int *mfrom, MPI_Datatype *mtype) { @@ -278,9 +284,9 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, PIO_Offset bsizeT[msgcnt]; - LOG((1, "create_mpi_datatypes basetype = %d msgcnt = %d", basetype, msgcnt)); - LOG((2, "MPI_BYTE = %d MPI_CHAR = %d MPI_SHORT = %d MPI_INT = %d MPI_DOUBLE = %d", - MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_DOUBLE)); + LOG((1, "create_mpi_datatypes mpitype = %d msgcnt = %d", mpitype, msgcnt)); + LOG((2, "MPI_BYTE = %d MPI_CHAR = %d MPI_SHORT = %d MPI_INT = %d MPI_FLOAT = %d MPI_DOUBLE = %d", + MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_FLOAT, MPI_DOUBLE)); /* How many indicies in the array? */ for (int j = 0; j < msgcnt; j++) @@ -300,6 +306,8 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, int pos = 0; int ii = 0; + /* Determine the blocksize. This is done differently for the + * rearrangers. (If mfrom is NULL, this is the box rearranger.) */ if (mfrom == NULL) { LOG((3, "mfrom is NULL")); @@ -331,15 +339,19 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, { int len = mcount[i] / blocksize; int displace[len]; + LOG((3, "blocksize = %d i = %d mcount[%d] = %d len = %d", blocksize, i, i, + mcount[i], len)); if (blocksize == 1) { if (!mfrom) { + /* Box rearranger. */ for (int j = 0; j < len; j++) displace[j] = (int)(lindex[pos + j]); } else { + /* Subset rearranger. 
*/ int k = 0; for (int j = 0; j < numinds; j++) if (mfrom[j] == i) @@ -362,10 +374,10 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, #endif /* PIO_ENABLE_LOGGING */ LOG((3, "calling MPI_Type_create_indexed_block len = %d blocksize = %d " - "basetype = %d", len, blocksize, basetype)); + "mpitype = %d", len, blocksize, mpitype)); /* Create an indexed datatype with constant-sized blocks. */ if ((mpierr = MPI_Type_create_indexed_block(len, blocksize, displace, - basetype, &mtype[i]))) + mpitype, &mtype[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); if (mtype[i] == PIO_DATATYPE_NULL) @@ -406,13 +418,15 @@ int create_mpi_datatypes(MPI_Datatype basetype, int msgcnt, * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) { int ret; /* Return value. */ pioassert(ios && iodesc, "invalid input", __FILE__, __LINE__); - LOG((1, "define_iodesc_datatypes ios->ioproc = %d", ios->ioproc)); + LOG((1, "define_iodesc_datatypes ios->ioproc = %d iodesc->rtype is %sNULL, iodesc->nrecvs", + ios->ioproc, iodesc->rtype ? "not " : "", iodesc->nrecvs)); /* Set up the to transfer data to and from the IO tasks. */ if (ios->ioproc) @@ -437,7 +451,7 @@ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) int *mfrom = iodesc->rearranger == PIO_REARR_SUBSET ? iodesc->rfrom : NULL; /* Create the MPI datatypes. */ - if ((ret = create_mpi_datatypes(iodesc->basetype, iodesc->nrecvs, iodesc->rindex, + if ((ret = create_mpi_datatypes(iodesc->mpitype, iodesc->nrecvs, iodesc->rindex, iodesc->rcount, mfrom, iodesc->rtype))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); } @@ -447,31 +461,34 @@ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) /* Define the datatypes for the computation components if they * don't exist. 
(These will be the send side in a write * operation.) */ - if (!iodesc->stype) + if (ios->compproc) { - int ntypes; + if (!iodesc->stype) + { + int ntypes; - /* Subset rearranger gets one type; box rearranger gets one - * type per IO task. */ - ntypes = iodesc->rearranger == PIO_REARR_SUBSET ? 1 : ios->num_iotasks; + /* Subset rearranger gets one type; box rearranger gets one + * type per IO task. */ + ntypes = iodesc->rearranger == PIO_REARR_SUBSET ? 1 : ios->num_iotasks; - /* Allocate memory for array of MPI types for the computation tasks. */ - if (!(iodesc->stype = malloc(ntypes * sizeof(MPI_Datatype)))) - return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - LOG((3, "allocated memory for computation MPI types ntypes = %d", ntypes)); + /* Allocate memory for array of MPI types for the computation tasks. */ + if (!(iodesc->stype = malloc(ntypes * sizeof(MPI_Datatype)))) + return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + LOG((3, "allocated memory for computation MPI types ntypes = %d", ntypes)); - /* Initialize send types to NULL. */ - for (int i = 0; i < ntypes; i++) - iodesc->stype[i] = PIO_DATATYPE_NULL; + /* Initialize send types to NULL. */ + for (int i = 0; i < ntypes; i++) + iodesc->stype[i] = PIO_DATATYPE_NULL; - /* Remember how many types we created for the send side. */ - iodesc->num_stypes = ntypes; + /* Remember how many types we created for the send side. */ + iodesc->num_stypes = ntypes; - /* Create the MPI data types. */ - LOG((3, "about to call create_mpi_datatypes for computation MPI types")); - if ((ret = create_mpi_datatypes(iodesc->basetype, ntypes, iodesc->sindex, - iodesc->scount, NULL, iodesc->stype))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + /* Create the MPI data types. 
*/ + LOG((3, "about to call create_mpi_datatypes for computation MPI types")); + if ((ret = create_mpi_datatypes(iodesc->mpitype, ntypes, iodesc->sindex, + iodesc->scount, NULL, iodesc->stype))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + } } LOG((3, "done with define_iodesc_datatypes()")); @@ -496,7 +513,7 @@ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) * task. . *

  • Allocates and inits iodesc->sindex arrays (length iodesc->ndof) * which holds indecies for computation tasks. - *
  • On IO tasks, allocates and init iodesc->rindex (length + *
  • On IO tasks, allocates and inits iodesc->rindex (length * totalrecv) with indices of the data to be sent/received from this * io task to each compute task. *
  • Uses pio_swapm() to send list of indicies on each compute task @@ -508,6 +525,7 @@ int define_iodesc_datatypes(iosystem_desc_t *ios, io_desc_t *iodesc) * @param dest_ioproc an array (length maplen) of IO task numbers. * @param dest_ioindex an array (length maplen) of IO indicies. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, const int *dest_ioproc, const PIO_Offset *dest_ioindex) @@ -520,7 +538,8 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, pioassert(ios && iodesc && dest_ioproc && dest_ioindex && iodesc->rearranger == PIO_REARR_BOX && ios->num_uniontasks > 0, "invalid input", __FILE__, __LINE__); - LOG((1, "compute_counts ios->num_uniontasks = %d", ios->num_uniontasks)); + LOG((1, "compute_counts ios->num_uniontasks = %d ios->compproc %d ios->ioproc %d", + ios->num_uniontasks, ios->compproc, ios->ioproc)); /* Arrays for swapm all to all gather calls. */ MPI_Datatype sr_types[ios->num_uniontasks]; @@ -539,9 +558,10 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, /* iodesc->scount is the number of data elements sent to each IO * task from the current compute task. dest_ioindex[i] may be * -1. */ - for (int i = 0; i < iodesc->ndof; i++) - if (dest_ioindex[i] >= 0) - (iodesc->scount[dest_ioproc[i]])++; + if (ios->compproc) + for (int i = 0; i < iodesc->ndof; i++) + if (dest_ioindex[i] >= 0) + (iodesc->scount[dest_ioproc[i]])++; /* Initialize arrays used in swapm call. */ for (int i = 0; i < ios->num_uniontasks; i++) @@ -571,7 +591,7 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, send_counts[ios->ioranks[i]], ios->ioranks[i], send_displs[ios->ioranks[i]])); } } - + /* IO tasks need to know how many data elements they will receive * from each compute task. Allocate space for that, and set up * swapm call. 
*/ @@ -582,11 +602,15 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, if (!(recv_buf = calloc(ios->num_comptasks, sizeof(int)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - /* Initialize arrays that keep track of ???. */ + /* Initialize arrays that keep track of counts and + * displacements for the all-to-all gather. */ for (int i = 0; i < ios->num_comptasks; i++) { recv_counts[ios->compranks[i]] = 1; recv_displs[ios->compranks[i]] = i * sizeof(int); + LOG((3, "recv_counts[%d] = %d recv_displs[%d] = %d", ios->compranks[i], + recv_counts[ios->compranks[i]], ios->compranks[i], + recv_displs[ios->compranks[i]])); } } @@ -625,7 +649,7 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, if (recv_buf[i] != 0) { iodesc->rcount[nrecvs] = recv_buf[i]; - iodesc->rfrom[nrecvs] = i; + iodesc->rfrom[nrecvs] = ios->compranks[i]; nrecvs++; } } @@ -641,7 +665,7 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, if (iodesc->sindex == NULL && iodesc->ndof > 0) if (!(iodesc->sindex = malloc(iodesc->ndof * sizeof(PIO_Offset)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); - LOG((2, "iodesc->ndof = %d", iodesc->ndof)); + LOG((2, "iodesc->ndof = %d ios->num_iotasks = %d", iodesc->ndof, ios->num_iotasks)); int tempcount[ios->num_iotasks]; int spos[ios->num_iotasks]; @@ -662,6 +686,8 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, int iorank; int ioindex; + LOG((3, "dest_ioproc[%d] = %d dest_ioindex[%d] = %d", i, dest_ioproc[i], i, + dest_ioindex[i])); iorank = dest_ioproc[i]; ioindex = dest_ioindex[i]; if (iorank > -1) @@ -754,6 +780,7 @@ int compute_counts(iosystem_desc_t *ios, io_desc_t *iodesc, * @param rbuf receive buffer. May be NULL. * @param nvars number of variables. * @returns 0 on success, error code otherwise. 
+ * @author Jim Edwards */ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf, int nvars) @@ -809,16 +836,16 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, recvtypes[i] = PIO_DATATYPE_NULL; sendtypes[i] = PIO_DATATYPE_NULL; } - LOG((3, "ntasks = %d iodesc->basetype_size = %d niotasks = %d", ntasks, - iodesc->basetype_size, niotasks)); + LOG((3, "ntasks = %d iodesc->mpitype_size = %d niotasks = %d", ntasks, + iodesc->mpitype_size, niotasks)); /* If it has not already been done, define the MPI data types that * will be used for this io_desc_t. */ if ((ret = define_iodesc_datatypes(ios, iodesc))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); - /* If this io proc will exchange data with compute tasks create a - * MPI DataType for that exchange. */ + /* If this io proc, we need to exchange data with compute + * tasks. Create a MPI DataType for that exchange. */ LOG((2, "ios->ioproc %d iodesc->nrecvs = %d", ios->ioproc, iodesc->nrecvs)); if (ios->ioproc && iodesc->nrecvs > 0) { @@ -827,7 +854,7 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, if (iodesc->rtype[i] != PIO_DATATYPE_NULL) { LOG((3, "iodesc->rtype[%d] = %d iodesc->rearranger = %d", i, iodesc->rtype[i], - iodesc->rearranger)); + iodesc->rearranger)); if (iodesc->rearranger == PIO_REARR_SUBSET) { LOG((3, "exchanging data for subset rearranger")); @@ -838,11 +865,11 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, * is 1, the stride here is the length of the * collected array (llen). 
*/ #if PIO_USE_MPISERIAL - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->mpitype_size, iodesc->rtype[i], &recvtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #else - if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->mpitype_size, iodesc->rtype[i], &recvtypes[i]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #endif /* PIO_USE_MPISERIAL */ @@ -853,17 +880,17 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, } else { - LOG((3, "exchanging data for box rearranger")); - LOG((3, "i = %d iodesc->rfrom[i] = %d recvcounts[iodesc->rfrom[i]] = %d", i, - iodesc->rfrom[i], recvcounts[iodesc->rfrom[i]])); recvcounts[iodesc->rfrom[i]] = 1; + LOG((3, "exchanging data for box rearranger i = %d iodesc->rfrom[i] = %d " + "recvcounts[iodesc->rfrom[i]] = %d", i, iodesc->rfrom[i], + recvcounts[iodesc->rfrom[i]])); #if PIO_USE_MPISERIAL - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->mpitype_size, iodesc->rtype[i], &recvtypes[iodesc->rfrom[i]]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #else - if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->basetype_size, + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->llen * iodesc->mpitype_size, iodesc->rtype[i], &recvtypes[iodesc->rfrom[i]]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #endif /* PIO_USE_MPISERIAL */ @@ -884,6 +911,7 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, for (int i = 0; i < niotasks; i++) { int io_comprank = ios->ioranks[i]; + LOG((3, "ios->ioranks[%d] = %d", i, ios->ioranks[i])); if (iodesc->rearranger == 
PIO_REARR_SUBSET) io_comprank = 0; @@ -893,11 +921,11 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, LOG((3, "io task %d creating sendtypes[%d]", i, io_comprank)); sendcounts[io_comprank] = 1; #if PIO_USE_MPISERIAL - if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->basetype_size, + if ((mpierr = MPI_Type_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->mpitype_size, iodesc->stype[i], &sendtypes[io_comprank]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #else - if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->basetype_size, + if ((mpierr = MPI_Type_create_hvector(nvars, 1, (MPI_Aint)iodesc->ndof * iodesc->mpitype_size, iodesc->stype[i], &sendtypes[io_comprank]))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); #endif /* PIO_USE_MPISERIAL */ @@ -948,6 +976,7 @@ int rearrange_comp2io(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, * @param sbuf send buffer. * @param rbuf receive buffer. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, void *rbuf) @@ -1075,6 +1104,7 @@ int rearrange_io2comp(iosystem_desc_t *ios, io_desc_t *iodesc, void *sbuf, * entire var (for non-record vars). * @param compmap only used for the box communicator. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gdimlen, const PIO_Offset *compmap) @@ -1120,9 +1150,9 @@ int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gdimlen, /** * The box rearranger computes a mapping between IO tasks and compute * tasks such that the data on IO tasks can be written with a single - * call to the underlying netCDF library. This may involve an all to - * all rearrangement in the mapping, but should minimize data movement - * in lower level libraries. + * call to the underlying netCDF library. 
This may involve an + * all-to-all rearrangement in the mapping, but should minimize data + * movement in lower level libraries. * * On each compute task the application program passes a compmap array * of length ndof. This array describes the arrangement of data in @@ -1137,12 +1167,11 @@ int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gdimlen, *
      *
    • For IO tasks, determines llen. *
    • Determine whether fill values will be needed. - *
    • Do an allgether of llen values into array iomaplen. + *
    • Do an allgather of llen values into array iomaplen. *
    • For each IO task, send starts/counts to all compute tasks. *
    • Find dest_ioindex and dest_ioproc for each element in the map. *
    • Call compute_counts(). *
    • On IO tasks, compute the max IO buffer size. - *
    • Call compute_maxaggregate_bytes(). *
    * * @param ios pointer to the iosystem_desc_t struct. @@ -1156,6 +1185,7 @@ int determine_fill(iosystem_desc_t *ios, io_desc_t *iodesc, const int *gdimlen, * @param iodesc a pointer to the io_desc_t struct, which must be * allocated before this function is called. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *compmap, const int *gdimlen, int ndims, io_desc_t *iodesc) @@ -1370,12 +1400,6 @@ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *com LOG((3, "iodesc->maxiobuflen = %d", iodesc->maxiobuflen)); } - /* Using maxiobuflen compute the maximum number of bytes that the - * io task buffer can handle. */ - if ((ret = compute_maxaggregate_bytes(ios, iodesc))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - LOG((3, "iodesc->maxbytes = %d", iodesc->maxbytes)); - return PIO_NOERR; } @@ -1386,6 +1410,7 @@ int box_rearrange_create(iosystem_desc_t *ios, int maplen, const PIO_Offset *com * @param a pointer to an offset. * @param b pointer to another offset. * @returns 0 if offsets are the same or either pointer is NULL. + * @author Jim Edwards */ int compare_offsets(const void *a, const void *b) { @@ -1413,6 +1438,7 @@ int compare_offsets(const void *a, const void *b) * @param maxregions * @param firstregion pointer to the first region. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int get_regions(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map, int *maxregions, io_region *firstregion) @@ -1499,6 +1525,7 @@ int get_regions(int ndims, const int *gdimlen, int maplen, const PIO_Offset *map * @param ios pointer to the iosystem_desc_t struct. * @param iodesc a pointer to the io_desc_t struct. * @returns 0 on success, error code otherwise. 
+ * @author Jim Edwards */ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) { @@ -1567,7 +1594,6 @@ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) *
  • On IO tasks, call get_regions() and distribute the max * maxregions to all tasks in IO communicator. *
  • On IO tasks, call compute_maxIObuffersize(). - *
  • Call compute_maxaggregate_bytes(). * * * @param ios pointer to the iosystem_desc_t struct. @@ -1580,6 +1606,7 @@ int default_subset_partition(iosystem_desc_t *ios, io_desc_t *iodesc) * @param ndims the number of dimensions. * @param iodesc a pointer to the io_desc_t struct. * @returns 0 on success, error code otherwise. + * @author Jim Edwards */ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compmap, const int *gdimlen, int ndims, io_desc_t *iodesc) @@ -1606,7 +1633,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma * of that subset_comm */ /* TODO: introduce a mechanism for users to define partitions */ if ((ret = default_subset_partition(ios, iodesc))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + return pio_err(ios, NULL, ret, __FILE__, __LINE__); iodesc->rearranger = PIO_REARR_SUBSET; /* Get size of this subset communicator and rank of this task in it. */ @@ -1973,11 +2000,6 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma iodesc->nrecvs = ntasks; } - /* Using maxiobuflen compute the maximum number of vars of this type that the io - task buffer can handle. */ - if ((ret = compute_maxaggregate_bytes(ios, iodesc))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); - return PIO_NOERR; } @@ -1987,6 +2009,7 @@ int subset_rearrange_create(iosystem_desc_t *ios, int maplen, PIO_Offset *compma * @param ios pointer to the iosystem description struct. * @param iodesc pointer to the IO description struct. * @returns 0 on success, error code otherwise. 
+ * @author Jim Edwards */ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) { @@ -2000,7 +2023,7 @@ void performance_tune_rearranger(iosystem_desc_t *ios, io_desc_t *iodesc) assert(iodesc); - if ((mpierr = MPI_Type_size(iodesc->basetype, &tsize))) + if ((mpierr = MPI_Type_size(iodesc->mpitype, &tsize))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); cbuf = NULL; ibuf = NULL; diff --git a/src/externals/pio2/src/clib/pio_spmd.c b/src/externals/pio2/src/clib/pio_spmd.c index c42fd60eff9..da2eef333b0 100644 --- a/src/externals/pio2/src/clib/pio_spmd.c +++ b/src/externals/pio2/src/clib/pio_spmd.c @@ -18,6 +18,7 @@ * @param i input number * @returns the smallest power of 2 greater than * or equal to i. + * @author Jim Edwards */ int ceil2(int i) { @@ -36,6 +37,7 @@ int ceil2(int i) * @param p integer between 0 and np - 1. * @param k integer between 0 and np - 1. * @returns (p + 1) ^ k else -1. + * @author Jim Edwards */ int pair(int np, int p, int k) { @@ -69,6 +71,7 @@ int pair(int np, int p, int k) * @param comm MPI communicator for the MPI_Alltoallw call. * @param fc pointer to the struct that provided flow control options. * @returns 0 for success, error code otherwise. + * @author Jim Edwards */ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes, @@ -308,7 +311,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty else if (fc->isend) { if ((mpierr = MPI_Isend(ptr, sendcounts[p], sendtypes[p], p, tag, comm, - sndids + istep))) + sndids + istep))) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } else @@ -390,105 +393,106 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty * @param comm communicator. * @param flow_cntl if non-zero, flow control will be used. * @returns 0 for success, error code otherwise. 
+ * @author Jim Edwards */ -int pio_fc_gatherv(const void *sendbuf, int sendcnt, MPI_Datatype sendtype, - void *recvbuf, const int *recvcnts, const int *displs, - MPI_Datatype recvtype, int root, MPI_Comm comm, int flow_cntl) -{ - bool fc_gather; - int gather_block_size; - int mytask, nprocs; - int mtag; - MPI_Status status; - int hs; - int dsize; - int mpierr; /* Return code from MPI functions. */ - - if (flow_cntl > 0) - { - fc_gather = true; - gather_block_size = min(flow_cntl, MAX_GATHER_BLOCK_SIZE); - } - else - { - fc_gather = false; - } - - if (fc_gather) - { - if ((mpierr = MPI_Comm_rank(comm, &mytask))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Comm_size(comm, &nprocs))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - mtag = 2 * nprocs; - hs = 1; - - if (mytask == root) - { - int preposts = min(nprocs-1, gather_block_size); - int head = 0; - int count = 0; - int tail = 0; - MPI_Request rcvid[gather_block_size]; - - if ((mpierr = MPI_Type_size(recvtype, &dsize))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - - for (int p = 0; p < nprocs; p++) - { - if (p != root) - { - if (recvcnts[p] > 0) - { - count++; - if (count > preposts) - { - if ((mpierr = MPI_Wait(rcvid + tail, &status))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - tail = (tail + 1) % preposts; - } - - void *ptr = (void *)((char *)recvbuf + dsize * displs[p]); - - if ((mpierr = MPI_Irecv(ptr, recvcnts[p], recvtype, p, mtag, comm, rcvid + head))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - head = (head + 1) % preposts; - if ((mpierr = MPI_Send(&hs, 1, MPI_INT, p, mtag, comm))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - } - } - } - - /* copy local data */ - if ((mpierr = MPI_Type_size(sendtype, &dsize))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Sendrecv(sendbuf, sendcnt, sendtype, mytask, 102, recvbuf, recvcnts[mytask], - recvtype, mytask, 102, comm, &status))) - return 
check_mpi(NULL, mpierr, __FILE__, __LINE__); - - count = min(count, preposts); - if (count > 0) - if ((mpierr = MPI_Waitall(count, rcvid, MPI_STATUSES_IGNORE))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - } - else - { - if (sendcnt > 0) - { - if ((mpierr = MPI_Recv(&hs, 1, MPI_INT, root, mtag, comm, &status))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - if ((mpierr = MPI_Send(sendbuf, sendcnt, sendtype, root, mtag, comm))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - } - } - } - else - { - if ((mpierr = MPI_Gatherv(sendbuf, sendcnt, sendtype, recvbuf, recvcnts, - displs, recvtype, root, comm))) - return check_mpi(NULL, mpierr, __FILE__, __LINE__); - } - - return PIO_NOERR; -} +/* int pio_fc_gatherv(const void *sendbuf, int sendcnt, MPI_Datatype sendtype, */ +/* void *recvbuf, const int *recvcnts, const int *displs, */ +/* MPI_Datatype recvtype, int root, MPI_Comm comm, int flow_cntl) */ +/* { */ +/* bool fc_gather; */ +/* int gather_block_size; */ +/* int mytask, nprocs; */ +/* int mtag; */ +/* MPI_Status status; */ +/* int hs; */ +/* int dsize; */ +/* int mpierr; /\* Return code from MPI functions. 
*\/ */ + +/* if (flow_cntl > 0) */ +/* { */ +/* fc_gather = true; */ +/* gather_block_size = min(flow_cntl, MAX_GATHER_BLOCK_SIZE); */ +/* } */ +/* else */ +/* { */ +/* fc_gather = false; */ +/* } */ + +/* if (fc_gather) */ +/* { */ +/* if ((mpierr = MPI_Comm_rank(comm, &mytask))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* if ((mpierr = MPI_Comm_size(comm, &nprocs))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ + +/* mtag = 2 * nprocs; */ +/* hs = 1; */ + +/* if (mytask == root) */ +/* { */ +/* int preposts = min(nprocs-1, gather_block_size); */ +/* int head = 0; */ +/* int count = 0; */ +/* int tail = 0; */ +/* MPI_Request rcvid[gather_block_size]; */ + +/* if ((mpierr = MPI_Type_size(recvtype, &dsize))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ + +/* for (int p = 0; p < nprocs; p++) */ +/* { */ +/* if (p != root) */ +/* { */ +/* if (recvcnts[p] > 0) */ +/* { */ +/* count++; */ +/* if (count > preposts) */ +/* { */ +/* if ((mpierr = MPI_Wait(rcvid + tail, &status))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* tail = (tail + 1) % preposts; */ +/* } */ + +/* void *ptr = (void *)((char *)recvbuf + dsize * displs[p]); */ + +/* if ((mpierr = MPI_Irecv(ptr, recvcnts[p], recvtype, p, mtag, comm, rcvid + head))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* head = (head + 1) % preposts; */ +/* if ((mpierr = MPI_Send(&hs, 1, MPI_INT, p, mtag, comm))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* } */ +/* } */ +/* } */ + +/* /\* copy local data *\/ */ +/* if ((mpierr = MPI_Type_size(sendtype, &dsize))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* if ((mpierr = MPI_Sendrecv(sendbuf, sendcnt, sendtype, mytask, 102, recvbuf, recvcnts[mytask], */ +/* recvtype, mytask, 102, comm, &status))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ + +/* count = min(count, preposts); */ +/* if (count > 0) */ +/* if ((mpierr = 
MPI_Waitall(count, rcvid, MPI_STATUSES_IGNORE))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* } */ +/* else */ +/* { */ +/* if (sendcnt > 0) */ +/* { */ +/* if ((mpierr = MPI_Recv(&hs, 1, MPI_INT, root, mtag, comm, &status))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* if ((mpierr = MPI_Send(sendbuf, sendcnt, sendtype, root, mtag, comm))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* } */ +/* } */ +/* } */ +/* else */ +/* { */ +/* if ((mpierr = MPI_Gatherv(sendbuf, sendcnt, sendtype, recvbuf, recvcnts, */ +/* displs, recvtype, root, comm))) */ +/* return check_mpi(NULL, mpierr, __FILE__, __LINE__); */ +/* } */ + +/* return PIO_NOERR; */ +/* } */ diff --git a/src/externals/pio2/src/clib/pio_varm.c b/src/externals/pio2/src/clib/pio_varm.c index f02cb196c91..b37edc50cc5 100644 --- a/src/externals/pio2/src/clib/pio_varm.c +++ b/src/externals/pio2/src/clib/pio_varm.c @@ -48,7 +48,8 @@ int PIOc_put_varm (int ncid, int varid, const PIO_Offset start[], const PIO_Offs break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -117,7 +118,8 @@ int PIOc_put_varm_uchar (int ncid, int varid, const PIO_Offset start[], const PI break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -186,7 +188,8 @@ int PIOc_put_varm_short (int ncid, int varid, const PIO_Offset start[], const PI break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, 
__LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -256,7 +259,8 @@ int PIOc_put_varm_text (int ncid, int varid, const PIO_Offset start[], const PIO break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -327,7 +331,8 @@ int PIOc_put_varm_ushort (int ncid, int varid, const PIO_Offset start[], const P break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -398,7 +403,8 @@ int PIOc_put_varm_ulonglong (int ncid, int varid, const PIO_Offset start[], cons break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -468,7 +474,8 @@ int PIOc_put_varm_int (int ncid, int varid, const PIO_Offset start[], const PIO_ break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -539,7 +546,8 @@ int PIOc_put_varm_float (int ncid, int varid, const PIO_Offset start[], const PI break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ 
vdesc->request = realloc(vdesc->request, @@ -609,7 +617,8 @@ int PIOc_put_varm_long (int ncid, int varid, const PIO_Offset start[], const PIO break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -681,7 +690,8 @@ int PIOc_put_varm_uint (int ncid, int varid, const PIO_Offset start[], const PIO break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -752,7 +762,8 @@ int PIOc_put_varm_double (int ncid, int varid, const PIO_Offset start[], const P break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -822,7 +833,8 @@ int PIOc_put_varm_schar (int ncid, int varid, const PIO_Offset start[], const PI break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, @@ -891,7 +903,8 @@ int PIOc_put_varm_longlong (int ncid, int varid, const PIO_Offset start[], const break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - vdesc = file->varlist + varid; + if ((ierr = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); if (vdesc->nreqs%PIO_REQUEST_ALLOC_CHUNK == 0 ){ vdesc->request = realloc(vdesc->request, diff --git 
a/src/externals/pio2/src/clib/pioc.c b/src/externals/pio2/src/clib/pioc.c index 0dc4fed89fd..c49a811a1ee 100644 --- a/src/externals/pio2/src/clib/pioc.c +++ b/src/externals/pio2/src/clib/pioc.c @@ -18,6 +18,9 @@ int default_error_handler = PIO_INTERNAL_ERROR; * used (see pio_sc.c). */ extern int blocksize; +/* Used when assiging decomposition IDs. */ +int pio_next_ioid = 512; + /** * Check to see if PIO has been initialized. * @@ -25,6 +28,7 @@ extern int blocksize; * @param active pointer that gets true if IO system is active, false * otherwise. * @returns 0 on success, error code otherwise + * @author Jim Edwards */ int PIOc_iosystem_is_active(int iosysid, bool *active) { @@ -49,6 +53,7 @@ int PIOc_iosystem_is_active(int iosysid, bool *active) * * @param ncid the ncid of an open file * @returns 1 if file is open, 0 otherwise. + * @author Jim Edwards */ int PIOc_File_is_Open(int ncid) { @@ -75,6 +80,7 @@ int PIOc_File_is_Open(int ncid) * @param method the error handling method * @returns old error handler * @ingroup PIO_error_method + * @author Jim Edwards */ int PIOc_Set_File_Error_Handling(int ncid, int method) { @@ -105,21 +111,53 @@ int PIOc_Set_File_Error_Handling(int ncid, int method) * @param ncid the ncid of the open file * @param varid the variable ID * @returns 0 on success, error code otherwise + * @author Jim Edwards, Ed Hartnett */ int PIOc_advanceframe(int ncid, int varid) { - file_desc_t *file; + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + var_desc_t *vdesc; /* Info about the var. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ret; + LOG((1, "PIOc_advanceframe ncid = %d varid = %d")); + /* Get the file info. */ if ((ret = pio_get_file(ncid, &file))) return pio_err(NULL, NULL, ret, __FILE__, __LINE__); + ios = file->iosystem; + + /* Get info about variable. 
*/ + if ((ret = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ret, __FILE__, __LINE__); - /* Check inputs. */ - if (varid < 0 || varid >= PIO_MAX_VARS) - return pio_err(NULL, file, PIO_EINVAL, __FILE__, __LINE__); + /* If using async, and not an IO task, then send parameters. */ + if (ios->async) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_ADVANCEFRAME; - file->varlist[varid].record++; + if (ios->compmaster == MPI_ROOT) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); + + if (!mpierr) + mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi2(ios, NULL, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } + + /* Increment the record number. */ + /* file->varlist[varid].record++; */ + vdesc->record++; return PIO_NOERR; } @@ -133,21 +171,57 @@ int PIOc_advanceframe(int ncid, int varid) * first record, 1 for the second * @return PIO_NOERR for no error, or error code. * @ingroup PIO_setframe + * @author Jim Edwards, Ed Hartnett */ int PIOc_setframe(int ncid, int varid, int frame) { - file_desc_t *file; + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + var_desc_t *vdesc; /* Info about the var. */ + int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ret; + LOG((1, "PIOc_setframe ncid = %d varid = %d frame = %d", ncid, + varid, frame)); + /* Get file info. */ if ((ret = pio_get_file(ncid, &file))) return pio_err(NULL, NULL, ret, __FILE__, __LINE__); + ios = file->iosystem; + + /* Get info about variable. 
*/ + if ((ret = get_var_desc(varid, &file->varlist, &vdesc))) + return pio_err(ios, file, ret, __FILE__, __LINE__); + + /* If using async, and not an IO task, then send parameters. */ + if (ios->async) + { + if (!ios->ioproc) + { + int msg = PIO_MSG_SETFRAME; - /* Check inputs. */ - if (varid < 0 || varid >= PIO_MAX_VARS) - return pio_err(NULL, file, PIO_EINVAL, __FILE__, __LINE__); + if (ios->compmaster == MPI_ROOT) + mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); - file->varlist[varid].record = frame; + if (!mpierr) + mpierr = MPI_Bcast(&ncid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&varid, 1, MPI_INT, ios->compmaster, ios->intercomm); + if (!mpierr) + mpierr = MPI_Bcast(&frame, 1, MPI_INT, ios->compmaster, ios->intercomm); + } + + /* Handle MPI errors. */ + if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) + check_mpi2(ios, NULL, mpierr2, __FILE__, __LINE__); + if (mpierr) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + } + + /* Set the record dimension value for this variable. This will be + * used by the write_darray functions. */ + /* file->varlist[varid].record = frame; */ + vdesc->record = frame; return PIO_NOERR; } @@ -159,6 +233,7 @@ int PIOc_setframe(int ncid, int varid, int frame) * @param numiotasks a pointer taht gets the number of IO * tasks. Ignored if NULL. * @returns 0 on success, error code otherwise + * @author Ed Hartnett */ int PIOc_get_numiotasks(int iosysid, int *numiotasks) { @@ -178,6 +253,7 @@ int PIOc_get_numiotasks(int iosysid, int *numiotasks) * * @param ioid IO descrption ID. * @returns the size of the array. 
+ * @author Jim Edwards */ int PIOc_get_local_array_size(int ioid) { @@ -199,6 +275,7 @@ int PIOc_get_local_array_size(int ioid) * @param method the error handling method * @returns old error handler * @ingroup PIO_error_method + * @author Jim Edwards */ int PIOc_Set_IOSystem_Error_Handling(int iosysid, int method) { @@ -228,6 +305,7 @@ int PIOc_Set_IOSystem_Error_Handling(int iosysid, int method) * if NULL. * @returns 0 for success, error code otherwise. * @ingroup PIO_error_method + * @author Jim Edwards, Ed Hartnett */ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) { @@ -313,7 +391,8 @@ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) * @param compmap a 1 based array of offsets into the array record on * file. A 0 in this array indicates a value which should not be * transfered. - * @param ioidp pointer that will get the io description ID. + * @param ioidp pointer that will get the io description ID. Ignored + * if NULL. * @param rearranger pointer to the rearranger to be used for this * decomp or NULL to use the default. * @param iostart An array of start values for block cyclic @@ -326,6 +405,7 @@ int PIOc_set_iosystem_error_handling(int iosysid, int method, int *old_method) * iostarts are generated. * @returns 0 on success, error code otherwise * @ingroup PIO_initdecomp + * @author Jim Edwards, Ed Hartnett */ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, const PIO_Offset *compmap, int *ioidp, const int *rearranger, @@ -406,6 +486,7 @@ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, in /* Allocate space for the iodesc info. This also allocates the * first region and copies the rearranger opts into this * iodesc. 
*/ + LOG((2, "allocating iodesc pio_type %d ndims %d", pio_type, ndims)); if ((ierr = malloc_iodesc(ios, pio_type, ndims, &iodesc))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); @@ -462,8 +543,8 @@ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, in /* Compute start and count values for each io task. */ LOG((2, "about to call CalcStartandCount pio_type = %d ndims = %d", pio_type, ndims)); if ((ierr = CalcStartandCount(pio_type, ndims, gdimlen, ios->num_iotasks, - ios->io_rank, iodesc->firstregion->start, - iodesc->firstregion->count, &iodesc->num_aiotasks))) + ios->io_rank, iodesc->firstregion->start, + iodesc->firstregion->count, &iodesc->num_aiotasks))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); } @@ -487,8 +568,23 @@ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, in return pio_err(ios, NULL, ierr, __FILE__, __LINE__); } + /* Broadcast next ioid to all tasks from io root.*/ + if (ios->async) + { + LOG((3, "createfile bcasting pio_next_ioid %d", pio_next_ioid)); + if ((mpierr = MPI_Bcast(&pio_next_ioid, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + LOG((3, "createfile bcast pio_next_ioid %d", pio_next_ioid)); + } + + /* Set the decomposition ID. */ + iodesc->ioid = pio_next_ioid++; + if (ioidp) + *ioidp = iodesc->ioid; + /* Add this IO description to the list. */ - *ioidp = pio_add_to_iodesc_list(iodesc); + if ((ierr = pio_add_to_iodesc_list(iodesc))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); #if PIO_ENABLE_LOGGING /* Log results. */ @@ -499,7 +595,7 @@ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, in iodesc->maxiobuflen)); for (int j = 0; j < iodesc->llen; j++) LOG((3, "rindex[%d] = %lld", j, iodesc->rindex[j])); -#endif /* PIO_ENABLE_LOGGING */ +#endif /* PIO_ENABLE_LOGGING */ /* This function only does something if pre-processor macro * PERFTUNE is set. 
*/ @@ -533,6 +629,7 @@ int PIOc_InitDecomp(int iosysid, int pio_type, int ndims, const int *gdimlen, in * decompositions. If NULL ??? * @returns 0 on success, error code otherwise * @ingroup PIO_initdecomp + * @author Jim Edwards, Ed Hartnett */ int PIOc_init_decomp(int iosysid, int pio_type, int ndims, const int *gdimlen, int maplen, const PIO_Offset *compmap, int *ioidp, int rearranger, @@ -575,6 +672,7 @@ int PIOc_init_decomp(int iosysid, int pio_type, int ndims, const int *gdimlen, i * @param pointer that gets the IO ID. * @returns 0 for success, error code otherwise * @ingroup PIO_initdecomp + * @author Jim Edwards */ int PIOc_InitDecomp_bc(int iosysid, int pio_type, int ndims, const int *gdimlen, const long int *start, const long int *count, int *ioidp) @@ -665,8 +763,7 @@ int PIOc_InitDecomp_bc(int iosysid, int pio_type, int ndims, const int *gdimlen, * * * When complete, there are three MPI communicators (ios->comp_comm, - * ios->union_comm, and ios->io_comm), and two MPI groups - * (ios->compgroup and ios->iogroup) that must be freed by MPI. + * ios->union_comm, and ios->io_comm) that must be freed by MPI. * * @param comp_comm the MPI_Comm of the compute tasks. * @param num_iotasks the number of io tasks to use. @@ -678,12 +775,15 @@ int PIOc_InitDecomp_bc(int iosysid, int pio_type, int ndims, const int *gdimlen, * @param iosysidp index of the defined system descriptor. * @return 0 on success, otherwise a PIO error code. * @ingroup PIO_init + * @author Jim Edwards, Ed Hartnett */ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int base, int rearr, int *iosysidp) { iosystem_desc_t *ios; int ustride; + MPI_Group compgroup; /* Contains tasks involved in computation. */ + MPI_Group iogroup; /* Contains the processors involved in I/O. */ int num_comptasks; /* The size of the comp_comm. */ int mpierr; /* Return value for MPI calls. */ int ret; /* Return code for function calls. 
*/ @@ -719,7 +819,7 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas /* Initialize the rearranger options. */ ios->rearr_opts.comm_type = PIO_REARR_COMM_COLL; ios->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; - + /* Copy the computation communicator into union_comm. */ if ((mpierr = MPI_Comm_dup(comp_comm, &ios->union_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); @@ -772,18 +872,25 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas ios->iomaster = MPI_ROOT; /* Create a group for the computation tasks. */ - if ((mpierr = MPI_Comm_group(ios->comp_comm, &ios->compgroup))) + if ((mpierr = MPI_Comm_group(ios->comp_comm, &compgroup))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); /* Create a group for the IO tasks. */ - if ((mpierr = MPI_Group_incl(ios->compgroup, ios->num_iotasks, ios->ioranks, - &ios->iogroup))) - return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Group_incl(compgroup, ios->num_iotasks, ios->ioranks, + &iogroup))) + return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); /* Create an MPI communicator for the IO tasks. */ - if ((mpierr = MPI_Comm_create(ios->comp_comm, ios->iogroup, &ios->io_comm))) + if ((mpierr = MPI_Comm_create(ios->comp_comm, iogroup, &ios->io_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); + /* Free the MPI groups. */ + if (compgroup != MPI_GROUP_NULL) + MPI_Group_free(&compgroup); + + if (iogroup != MPI_GROUP_NULL) + MPI_Group_free(&iogroup); + /* For the tasks that are doing IO, get their rank within the IO * communicator. If they are not doing IO, set their io_rank to * -1. 
*/ @@ -822,6 +929,7 @@ int PIOc_Init_Intracomm(MPI_Comm comp_comm, int num_iotasks, int stride, int bas * @param rearr_opts the rearranger options * @param iosysidp a pointer that gets the IO system ID * @returns 0 for success, error code otherwise + * @author Jim Edwards */ int PIOc_Init_Intracomm_from_F90(int f90_comp_comm, const int num_iotasks, const int stride, @@ -860,6 +968,7 @@ int PIOc_Init_Intracomm_from_F90(int f90_comp_comm, * @param hint the hint for MPI * @param hintval the value of the hint * @returns 0 for success, or PIO_BADID if iosysid can't be found. + * @author Jim Edwards, Ed Hartnett */ int PIOc_set_hint(int iosysid, const char *hint, const char *hintval) { @@ -896,10 +1005,12 @@ int PIOc_set_hint(int iosysid, const char *hint, const char *hintval) * @param iosysid: the io system ID provided by PIOc_Init_Intracomm(). * @returns 0 for success or non-zero for error. * @ingroup PIO_finalize + * @author Jim Edwards, Ed Hartnett */ int PIOc_finalize(int iosysid) { iosystem_desc_t *ios; + int niosysid; /* The number of currently open IO systems. */ int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function codes. */ int ierr = PIO_NOERR; @@ -951,8 +1062,7 @@ int PIOc_finalize(int iosysid) free(ios->compranks); LOG((3, "Freed compranks.")); - /* Free the buffer pool. */ - int niosysid; + /* Learn the number of open IO systems. */ if ((ierr = pio_num_iosystem(&niosysid))) return pio_err(ios, NULL, ierr, __FILE__, __LINE__); LOG((2, "%d iosystems are still open.", niosysid)); @@ -964,13 +1074,6 @@ int PIOc_finalize(int iosysid) LOG((2, "Freed buffer pool.")); } - /* Free the MPI groups. */ - if (ios->compgroup != MPI_GROUP_NULL) - MPI_Group_free(&ios->compgroup); - - if (ios->iogroup != MPI_GROUP_NULL) - MPI_Group_free(&(ios->iogroup)); - /* Free the MPI communicators. my_comm is just a copy (but not an * MPI copy), so does not have to have an MPI_Comm_free() * call. 
comp_comm and io_comm are MPI duplicates of the comms @@ -995,7 +1098,11 @@ int PIOc_finalize(int iosysid) if ((ierr = pio_delete_iosystem_from_list(iosysid))) return pio_err(NULL, NULL, ierr, __FILE__, __LINE__); - pio_finalize_logging(); + if (niosysid == 1) + { + LOG((1, "about to finalize logging")); + pio_finalize_logging(); + } LOG((2, "PIOc_finalize completed successfully")); return PIO_NOERR; @@ -1008,6 +1115,7 @@ int PIOc_finalize(int iosysid) * @param ioproc a pointer that gets 1 if task is an IO task, 0 * otherwise. Ignored if NULL. * @returns 0 for success, or PIO_BADID if iosysid can't be found. + * @author Jim Edwards */ int PIOc_iam_iotask(int iosysid, bool *ioproc) { @@ -1030,6 +1138,7 @@ int PIOc_iam_iotask(int iosysid, bool *ioproc) * @param iorank a pointer that gets the io rank, or -1 if task is not * in the IO communicator. Ignored if NULL. * @returns 0 for success, or PIO_BADID if iosysid can't be found. + * @author Jim Edwards */ int PIOc_iotask_rank(int iosysid, int *iorank) { @@ -1049,6 +1158,7 @@ int PIOc_iotask_rank(int iosysid, int *iorank) * * @param iotype the io type to check * @returns 1 if iotype is in build, 0 if not. + * @author Jim Edwards */ int PIOc_iotype_available(int iotype) { @@ -1139,14 +1249,16 @@ int PIOc_iotype_available(int iotype) * caller.) * * @param rearranger the default rearranger to use for decompositions - * in this IO system. Must be either PIO_REARR_BOX or - * PIO_REARR_SUBSET. + * in this IO system. Only PIO_REARR_BOX is supported for + * async. Support for PIO_REARR_SUBSET will be provided in a future + * version. * * @param iosysidp pointer to array of length component_count that * gets the iosysid for each component. * * @return PIO_NOERR on success, error code otherwise. 
* @ingroup PIO_init + * @author Ed Hartnett */ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, int component_count, int *num_procs_per_comp, int **proc_list, @@ -1154,71 +1266,29 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, int *iosysidp) { int my_rank; /* Rank of this task. */ - int **my_proc_list; /* Array of arrays of procs for comp components. */ - int *my_io_proc_list; /* List of processors in IO component. */ + int *my_proc_list[component_count]; /* Array of arrays of procs for comp components. */ + int my_io_proc_list[num_io_procs]; /* List of processors in IO component. */ int mpierr; /* Return code from MPI functions. */ int ret; /* Return code. */ - /* Check input parameters. */ + /* Check input parameters. Only allow box rearranger for now. */ if (num_io_procs < 1 || component_count < 1 || !num_procs_per_comp || !iosysidp || - (rearranger != PIO_REARR_BOX && rearranger != PIO_REARR_SUBSET)) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); - - /* Temporarily limit to one computational component. */ - if (component_count > 1) + (rearranger != PIO_REARR_BOX)) return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Turn on the logging system for PIO. */ pio_init_logging(); - LOG((1, "PIOc_Init_Async num_io_procs = %d component_count = %d", num_io_procs, + LOG((1, "PIOc_init_async num_io_procs = %d component_count = %d", num_io_procs, component_count)); - /* If the user did not supply a list of process numbers to use for - * IO, create it. 
*/ - if (!io_proc_list) - { - LOG((3, "calculating processors for IO component")); - if (!(my_io_proc_list = malloc(num_io_procs * sizeof(int)))) - return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); - for (int p = 0; p < num_io_procs; p++) - { - my_io_proc_list[p] = p; - LOG((3, "my_io_proc_list[%d] = %d", p, my_io_proc_list[p])); - } - } - else - my_io_proc_list = io_proc_list; - - /* If the user did not provide a list of processes for each - * component, create one. */ - if (!proc_list) - { - int last_proc = num_io_procs; - - /* Allocate space for array of arrays. */ - if (!(my_proc_list = malloc((component_count) * sizeof(int *)))) - return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - /* Fill the array of arrays. */ - for (int cmp = 0; cmp < component_count; cmp++) - { - LOG((3, "calculating processors for component %d num_procs_per_comp[cmp] = %d", cmp, num_procs_per_comp[cmp])); - - /* Allocate space for each array. */ - if (!(my_proc_list[cmp] = malloc(num_procs_per_comp[cmp] * sizeof(int)))) - return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + /* Determine which tasks to use for IO. */ + for (int p = 0; p < num_io_procs; p++) + my_io_proc_list[p] = io_proc_list ? io_proc_list[p] : p; - int proc; - for (proc = last_proc; proc < num_procs_per_comp[cmp] + last_proc; proc++) - { - my_proc_list[cmp][proc - last_proc] = proc; - LOG((3, "my_proc_list[%d][%d] = %d", cmp, proc - last_proc, proc)); - } - last_proc = proc; - } - } - else - my_proc_list = proc_list; + /* Determine which tasks to use for each computational component. */ + if ((ret = determine_procs(num_io_procs, component_count, num_procs_per_comp, + proc_list, my_proc_list))) + return pio_err(NULL, NULL, ret, __FILE__, __LINE__); /* Get rank of this task in world. 
*/ if ((ret = MPI_Comm_rank(world, &my_rank))) @@ -1242,7 +1312,7 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, MPI_Group world_group; if ((ret = MPI_Comm_group(world, &world_group))) return check_mpi(NULL, ret, __FILE__, __LINE__); - LOG((3, "world group created\n")); + LOG((3, "world group created")); /* We will create a group for the IO component. */ MPI_Group io_group; @@ -1315,14 +1385,12 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, my_iosys->num_comptasks = num_procs_per_comp[cmp]; my_iosys->num_iotasks = num_io_procs; my_iosys->num_uniontasks = my_iosys->num_comptasks + my_iosys->num_iotasks; - my_iosys->compgroup = MPI_GROUP_NULL; - my_iosys->iogroup = MPI_GROUP_NULL; my_iosys->default_rearranger = rearranger; /* Initialize the rearranger options. */ my_iosys->rearr_opts.comm_type = PIO_REARR_COMM_COLL; my_iosys->rearr_opts.fcd = PIO_REARR_COMM_FC_2D_DISABLE; - + /* The rank of the computation leader in the union comm. */ my_iosys->comproot = num_io_procs; LOG((3, "my_iosys->comproot = %d", my_iosys->comproot)); @@ -1353,21 +1421,20 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* Add proc numbers from computation component. */ for (int p = 0; p < num_procs_per_comp[cmp]; p++) + { proc_list_union[p + num_io_procs] = my_proc_list[cmp][p]; + LOG((3, "p %d num_io_procs %d proc_list_union[p + num_io_procs] %d ", + p, num_io_procs, proc_list_union[p + num_io_procs])); + } /* Allocate space for computation task ranks. */ if (!(my_iosys->compranks = calloc(my_iosys->num_comptasks, sizeof(int)))) return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); - - /* Remember computation task ranks. */ - for (int p = 0; p < num_procs_per_comp[cmp]; p++) - my_iosys->compranks[p] = my_proc_list[cmp][p]; - /* Create the union group. 
*/ - if ((ret = MPI_Group_incl(world_group, nprocs_union, proc_list_union, &union_group[cmp]))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - LOG((3, "created union MPI_group - union_group[%d] = %d with %d procs", cmp, - union_group[cmp], nprocs_union)); + /* Remember computation task ranks. We need the ranks within + * the union_comm. */ + for (int p = 0; p < num_procs_per_comp[cmp]; p++) + my_iosys->compranks[p] = num_io_procs + p; /* Remember whether this process is in the IO component. */ my_iosys->ioproc = in_io; @@ -1385,6 +1452,12 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, LOG((3, "pidx = %d num_procs_per_comp[%d] = %d in_cmp = %d", pidx, cmp, num_procs_per_comp[cmp], in_cmp)); + /* Create the union group. */ + if ((ret = MPI_Group_incl(world_group, nprocs_union, proc_list_union, &union_group[cmp]))) + return check_mpi(NULL, ret, __FILE__, __LINE__); + LOG((3, "created union MPI_group - union_group[%d] = %d with %d procs", cmp, + union_group[cmp], nprocs_union)); + /* Create an intracomm for this component. Only processes in * the component need to participate in the intracomm create * call. */ @@ -1437,17 +1510,17 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* All the processes in this component, and the IO component, * are part of the union_comm. */ + LOG((3, "before creating union_comm my_iosys->io_comm = %d group = %d", my_iosys->io_comm, union_group[cmp])); + if ((ret = MPI_Comm_create(world, union_group[cmp], &my_iosys->union_comm))) + return check_mpi(NULL, ret, __FILE__, __LINE__); + LOG((3, "created union comm for cmp %d my_iosys->union_comm %d", cmp, my_iosys->union_comm)); + if (in_io || in_cmp) { - LOG((3, "my_iosys->io_comm = %d group = %d", my_iosys->io_comm, union_group[cmp])); - /* Create a group for the union of the IO component - * and one of the computation components. 
*/ - if ((ret = MPI_Comm_create(world, union_group[cmp], &my_iosys->union_comm))) - return check_mpi(NULL, ret, __FILE__, __LINE__); - if ((ret = MPI_Comm_rank(my_iosys->union_comm, &my_iosys->union_rank))) return check_mpi(NULL, ret, __FILE__, __LINE__); - + LOG((3, "my_iosys->union_rank %d", my_iosys->union_rank)); + /* Set my_comm to union_comm for async. */ my_iosys->my_comm = my_iosys->union_comm; LOG((3, "intracomm created for union cmp = %d union_rank = %d union_comm = %d", @@ -1460,7 +1533,7 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, LOG((3, "about to create intercomm for IO component to cmp = %d " "my_iosys->io_comm = %d", cmp, my_iosys->io_comm)); if ((ret = MPI_Intercomm_create(my_iosys->io_comm, 0, my_iosys->union_comm, - my_proc_list[cmp][0], 0, &my_iosys->intercomm))) + my_iosys->num_iotasks, cmp, &my_iosys->intercomm))) return check_mpi(NULL, ret, __FILE__, __LINE__); } else @@ -1469,7 +1542,7 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, LOG((3, "about to create intercomm for cmp = %d my_iosys->comp_comm = %d", cmp, my_iosys->comp_comm)); if ((ret = MPI_Intercomm_create(my_iosys->comp_comm, 0, my_iosys->union_comm, - my_io_proc_list[0], 0, &my_iosys->intercomm))) + 0, cmp, &my_iosys->intercomm))) return check_mpi(NULL, ret, __FILE__, __LINE__); } LOG((3, "intercomm created for cmp = %d", cmp)); @@ -1477,7 +1550,7 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, /* Add this id to the list of PIO iosystem ids. */ iosysidp[cmp] = pio_add_to_iosystem_list(my_iosys); - LOG((2, "new iosys ID added to iosystem_list iosysid = %d", iosysidp[cmp])); + LOG((2, "new iosys ID added to iosystem_list iosysidp[%d] = %d", cmp, iosysidp[cmp])); } /* next computational component */ /* Now call the function from which the IO tasks will not return @@ -1493,19 +1566,13 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, } /* Free resources if needed. 
*/ - if (!io_proc_list) - free(my_io_proc_list); - if (in_io) if ((mpierr = MPI_Comm_free(&io_comm))) return check_mpi(NULL, ret, __FILE__, __LINE__); - if (!proc_list) - { - for (int cmp = 0; cmp < component_count; cmp++) - free(my_proc_list[cmp]); - free(my_proc_list); - } + /* Free the arrays of processor numbers. */ + for (int cmp = 0; cmp < component_count; cmp++) + free(my_proc_list[cmp]); /* Free MPI groups. */ if ((ret = MPI_Group_free(&io_group))) @@ -1532,6 +1599,7 @@ int PIOc_init_async(MPI_Comm world, int num_io_procs, int *io_proc_list, * @param newblocksize the new blocksize. * @returns 0 for success. * @ingroup PIO_set_blocksize + * @author Jim Edwards */ int PIOc_set_blocksize(int newblocksize) { diff --git a/src/externals/pio2/src/clib/pioc_sc.c b/src/externals/pio2/src/clib/pioc_sc.c index 98e3c6aa11a..1efae6c8c3e 100644 --- a/src/externals/pio2/src/clib/pioc_sc.c +++ b/src/externals/pio2/src/clib/pioc_sc.c @@ -23,6 +23,7 @@ int blocksize = DEFAULT_BLOCKSIZE; * @param a * @param b * @returns greates common divisor. + * @author Jim Edwards */ int gcd(int a, int b ) { @@ -38,6 +39,7 @@ int gcd(int a, int b ) * @param a * @param b * @returns greates common divisor. + * @author Jim Edwards */ long long lgcd(long long a, long long b) { @@ -46,39 +48,13 @@ long long lgcd(long long a, long long b) return lgcd(b % a, a); } -/** - * Return the gcd of elements in an int array. - * - * @param nain length of the array - * @param ain an array of length nain - * @returns greatest common divisor. - */ -int gcd_array(int nain, int *ain) -{ - int i; - int bsize = 1; - - for (i = 0; i < nain; i++) - if (ain[i] <= 1) - return bsize; - - bsize = ain[0]; - i = 1; - while (i < nain && bsize > 1) - { - bsize = gcd(bsize, ain[i]); - i++; - } - - return bsize; -} - /** * Return the greatest common devisor of array ain as int_64. * * @param nain number of elements in ain. * @param ain array of length nain. * @returns GCD of elements in ain. 
+ * @author Jim Edwards */ long long lgcd_array(int nain, long long *ain) { @@ -109,6 +85,7 @@ long long lgcd_array(int nain, long long *ain) * @param rank IO rank of this task. * @param start pointer to PIO_Offset that will get the start value. * @param count pointer to PIO_Offset that will get the count value. + * @author Jim Edwards */ void compute_one_dim(int gdim, int ioprocs, int rank, PIO_Offset *start, PIO_Offset *count) @@ -154,6 +131,7 @@ void compute_one_dim(int gdim, int ioprocs, int rank, PIO_Offset *start, * @param arrlen * @param arr_in * @returns the size of the block + * @author Jim Edwards */ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in) { @@ -247,7 +225,7 @@ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in) if (arr_in[0] > 0) bsize = lgcd(bsize, arr_in[0]); } - + return bsize; } @@ -265,11 +243,12 @@ PIO_Offset GCDblocksize(int arrlen, const PIO_Offset *arr_in) * @param count array of length ndims with data count values. * @param num_aiotasks the number of IO tasks used(?) * @returns 0 for success, error code otherwise. + * @author Jim Edwards */ int CalcStartandCount(int pio_type, int ndims, const int *gdims, int num_io_procs, int myiorank, PIO_Offset *start, PIO_Offset *count, int *num_aiotasks) { - int minbytes; + int minbytes; int maxbytes; int minblocksize; /* Like minbytes, but in data elements. */ int basesize; /* Size in bytes of base data type. */ @@ -435,6 +414,6 @@ int CalcStartandCount(int pio_type, int ndims, const int *gdims, int num_io_proc /* Return the number of IO procs used to the caller. 
*/ *num_aiotasks = use_io_procs; - + return PIO_NOERR; } diff --git a/src/externals/pio2/src/clib/pioc_support.c b/src/externals/pio2/src/clib/pioc_support.c index fba1ad9070a..5f97a9fad1a 100644 --- a/src/externals/pio2/src/clib/pioc_support.c +++ b/src/externals/pio2/src/clib/pioc_support.c @@ -108,7 +108,7 @@ int PIOc_set_log_level(int level) #if NETCDF_C_LOGGING_ENABLED int ret; - + /* If netcdf logging is available turn it on starting at level = 4. */ if (level > NC_LEVEL_DIFF) if ((ret = nc_set_log_level(level - NC_LEVEL_DIFF))) @@ -132,7 +132,7 @@ void pio_init_logging(void) { /* Create a filename with the rank in it. */ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); - sprintf(log_filename, "pio_log_%d.txt", my_rank); + sprintf(log_filename, "pio_log_%d.log", my_rank); /* Open a file for this rank to log messages. */ LOG_FILE = fopen(log_filename, "w"); @@ -406,7 +406,6 @@ int check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status, const char *fname, int line) { int eh = default_error_handler; /* Error handler that will be used. */ - char errmsg[PIO_MAX_NAME + 1]; /* Error message. */ /* User must provide this. */ pioassert(fname, "code file name must be provided", __FILE__, __LINE__); @@ -426,16 +425,13 @@ int check_netcdf2(iosystem_desc_t *ios, file_desc_t *file, int status, "invalid error handler", __FILE__, __LINE__); LOG((2, "check_netcdf2 chose error handler = %d", eh)); - /* Get an error message. */ - if (eh != PIO_BCAST_ERROR && !PIOc_strerror(status, errmsg)) - { - fprintf(stderr, "%s\n", errmsg); - LOG((1, "check_netcdf2 errmsg = %s", errmsg)); - } - /* Decide what to do based on the error handler. */ if (eh == PIO_INTERNAL_ERROR) + { + char errmsg[PIO_MAX_NAME + 1]; /* Error message. */ + PIOc_strerror(status, errmsg); piodie(errmsg, fname, line); /* Die! 
*/ + } else if (eh == PIO_BCAST_ERROR) { if (ios) @@ -534,7 +530,7 @@ int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **regionp) pioassert(ndims >= 0 && regionp, "invalid input", __FILE__, __LINE__); LOG((1, "alloc_region2 ndims = %d sizeof(io_region) = %d", ndims, sizeof(io_region))); - + /* Allocate memory for the io_region struct. */ if (!(region = calloc(1, sizeof(io_region)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); @@ -549,7 +545,7 @@ int alloc_region2(iosystem_desc_t *ios, int ndims, io_region **regionp) /* Return pointer to new region to caller. */ *regionp = region; - + return PIO_NOERR; } @@ -650,6 +646,7 @@ int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, io_desc_t **iodesc) { MPI_Datatype mpi_type; + PIO_Offset type_size; int mpierr; int ret; @@ -663,15 +660,23 @@ int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, if ((ret = find_mpi_type(piotype, &mpi_type, NULL))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); + /* What is the size of the pio type? */ + if ((ret = pioc_pnetcdf_inq_type(0, piotype, NULL, &type_size))) + return pio_err(ios, NULL, ret, __FILE__, __LINE__); + /* Allocate space for the io_desc_t struct. */ if (!(*iodesc = calloc(1, sizeof(io_desc_t)))) return pio_err(ios, NULL, PIO_ENOMEM, __FILE__, __LINE__); + /* Remember the pio type and its size. */ + (*iodesc)->piotype = piotype; + (*iodesc)->piotype_size = type_size; + /* Remember the MPI type. */ - (*iodesc)->basetype = mpi_type; + (*iodesc)->mpitype = mpi_type; /* Get the size of the type. */ - if ((mpierr = MPI_Type_size((*iodesc)->basetype, &(*iodesc)->basetype_size))) + if ((mpierr = MPI_Type_size((*iodesc)->mpitype, &(*iodesc)->mpitype_size))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); /* Initialize some values in the struct. */ @@ -681,7 +686,7 @@ int malloc_iodesc(iosystem_desc_t *ios, int piotype, int ndims, /* Allocate space for, and initialize, the first region. 
*/ if ((ret = alloc_region2(ios, ndims, &((*iodesc)->firstregion)))) - return pio_err(ios, NULL, ret, __FILE__, __LINE__); + return pio_err(ios, NULL, ret, __FILE__, __LINE__); /* Set the swap memory settings to defaults for this IO system. */ (*iodesc)->rearr_opts = ios->rearr_opts; @@ -717,6 +722,7 @@ void free_region_list(io_region *top) * @param iosysid the IO system ID. * @param ioid the ID of the decomposition map to free. * @returns 0 for success, error code otherwise. + * @author Jim Edwards */ int PIOc_freedecomp(int iosysid, int ioid) { @@ -724,6 +730,8 @@ int PIOc_freedecomp(int iosysid, int ioid) io_desc_t *iodesc; int mpierr = MPI_SUCCESS, mpierr2; /* Return code from MPI function calls. */ + LOG((1, "PIOc_freedecomp iosysid = %d ioid = %d", iosysid, ioid)); + if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); @@ -748,18 +756,22 @@ int PIOc_freedecomp(int iosysid, int ioid) } /* Handle MPI errors. */ + LOG((3, "handline error mpierr %d ios->comproot %d", mpierr, ios->comproot)); if ((mpierr2 = MPI_Bcast(&mpierr, 1, MPI_INT, ios->comproot, ios->my_comm))) return check_mpi(NULL, mpierr2, __FILE__, __LINE__); + LOG((3, "handline error mpierr2 %d", mpierr2)); if (mpierr) return check_mpi(NULL, mpierr, __FILE__, __LINE__); } + LOG((3, "freeing map, dimlen")); /* Free the map. */ free(iodesc->map); /* Free the dimlens. 
*/ free(iodesc->dimlen); + LOG((3, "freeing rfrom, rtype")); if (iodesc->rfrom) free(iodesc->rfrom); @@ -773,6 +785,7 @@ int PIOc_freedecomp(int iosysid, int ioid) free(iodesc->rtype); } + LOG((3, "freeing stype, scount")); if (iodesc->stype) { for (int i = 0; i < iodesc->num_stypes; i++) @@ -796,6 +809,7 @@ int PIOc_freedecomp(int iosysid, int ioid) if (iodesc->rindex) free(iodesc->rindex); + LOG((3, "freeing regions")); if (iodesc->firstregion) free_region_list(iodesc->firstregion); @@ -1000,7 +1014,7 @@ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - /* Find the max maxplen. */ + /* Find the max maplen. */ if ((mpierr = MPI_Allreduce(&iodesc->maplen, &max_maplen, 1, MPI_INT, MPI_MAX, ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); @@ -1019,12 +1033,12 @@ int PIOc_write_nc_decomp(int iosysid, const char *filename, int cmode, int ioid, my_map[e] = e < iodesc->maplen ? iodesc->map[e] - 1 : NC_FILL_INT; LOG((3, "my_map[%d] = %d", e, my_map[e])); } - + /* Gather my_map from all computation tasks and fill the full_map array. */ if ((mpierr = MPI_Allgather(&my_map, max_maplen, MPI_INT, full_map, max_maplen, MPI_INT, ios->comp_comm))) return check_mpi2(ios, NULL, mpierr, __FILE__, __LINE__); - + for (int p = 0; p < ios->num_comptasks; p++) for (int e = 0; e < max_maplen; e++) LOG((3, "full_map[%d][%d] = %d", p, e, full_map[p][e])); @@ -1220,7 +1234,7 @@ int pioc_write_nc_decomp_int(iosystem_desc_t *ios, const char *filename, int cmo /* Write an attribute with the stack trace. This can be helpful * for debugging. 
*/ - #define MAX_BACKTRACE 10 +#define MAX_BACKTRACE 10 void *bt[MAX_BACKTRACE]; size_t bt_size; char **bt_strings; @@ -1244,7 +1258,6 @@ int pioc_write_nc_decomp_int(iosystem_desc_t *ios, const char *filename, int cmo strcat(full_bt, "\n"); } free(bt_strings); - printf("full_bt = %s", full_bt); /* Write the stack trace as an attribute. */ if ((ret = PIOc_put_att_text(ncid, NC_GLOBAL, DECOMP_BACKTRACE_ATT_NAME, @@ -1517,8 +1530,10 @@ int pioc_read_nc_decomp_int(int iosysid, const char *filename, int *ndims, int * } /* Close the netCDF decomp file. */ + LOG((2, "pioc_read_nc_decomp_int about to close file ncid = %d", ncid)); if ((ret = PIOc_closefile(ncid))) return pio_err(ios, NULL, ret, __FILE__, __LINE__); + LOG((2, "pioc_read_nc_decomp_int closed file")); return PIO_NOERR; } @@ -1672,7 +1687,7 @@ int PIOc_writemap_from_f90(const char *file, int ndims, const int *gdims, * parameters are read on comp task 0 and ignored elsewhere. * * @param iosysid A defined pio system ID, obtained from - * PIOc_InitIntercomm() or PIOc_InitAsync(). + * PIOc_Init_Intercomm() or PIOc_InitAsync(). * @param ncidp A pointer that gets the ncid of the newly created * file. * @param iotype A pointer to a pio output format. Must be one of @@ -1703,7 +1718,7 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena if (!iotype_is_valid(*iotype)) return pio_err(ios, NULL, PIO_EINVAL, __FILE__, __LINE__); - LOG((1, "PIOc_createfile iosysid = %d iotype = %d filename = %s mode = %d", + LOG((1, "PIOc_createfile_int iosysid = %d iotype = %d filename = %s mode = %d", iosysid, *iotype, filename, mode)); /* Allocate space for the file info. 
*/ @@ -1715,14 +1730,7 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena file->iosystem = ios; file->iotype = *iotype; file->buffer.ioid = -1; - for (int i = 0; i < PIO_MAX_VARS; i++) - { - file->varlist[i].record = -1; - file->varlist[i].ndims = -1; - file->varlist[i].iobuf = NULL; - file->varlist[i].fillbuf = NULL; - } - file->mode = mode; + file->writable = 1; /* Set to true if this task should participate in IO (only true for * one task with netcdf serial files. */ @@ -1736,12 +1744,13 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena * parameters. */ if (ios->async) { - int msg = PIO_MSG_CREATE_FILE; - size_t len = strlen(filename); - if (!ios->ioproc) { + int msg = PIO_MSG_CREATE_FILE; + size_t len = strlen(filename); + /* Send the message to the message handler. */ + LOG((3, "msg %d ios->union_comm %d MPI_COMM_NULL %d", msg, ios->union_comm, MPI_COMM_NULL)); if (ios->compmaster == MPI_ROOT) mpierr = MPI_Send(&msg, 1, MPI_INT, ios->ioroot, 1, ios->union_comm); @@ -1753,9 +1762,9 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena if (!mpierr) mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) - mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->compmaster, ios->intercomm); + mpierr = MPI_Bcast(&mode, 1, MPI_INT, ios->compmaster, ios->intercomm); LOG((2, "len = %d filename = %s iotype = %d mode = %d", len, filename, - file->iotype, file->mode)); + file->iotype, mode)); } /* Handle MPI errors. 
*/ @@ -1773,26 +1782,26 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena { #ifdef _NETCDF4 case PIO_IOTYPE_NETCDF4P: - file->mode = file->mode | NC_MPIIO | NC_NETCDF4; + mode = mode | NC_MPIIO | NC_NETCDF4; LOG((2, "Calling nc_create_par io_comm = %d mode = %d fh = %d", - ios->io_comm, file->mode, file->fh)); - ierr = nc_create_par(filename, file->mode, ios->io_comm, ios->info, &file->fh); + ios->io_comm, mode, file->fh)); + ierr = nc_create_par(filename, mode, ios->io_comm, ios->info, &file->fh); LOG((2, "nc_create_par returned %d file->fh = %d", ierr, file->fh)); break; case PIO_IOTYPE_NETCDF4C: - file->mode = file->mode | NC_NETCDF4; + mode = mode | NC_NETCDF4; #endif case PIO_IOTYPE_NETCDF: if (!ios->io_rank) { - LOG((2, "Calling nc_create mode = %d", file->mode)); - ierr = nc_create(filename, file->mode, &file->fh); + LOG((2, "Calling nc_create mode = %d", mode)); + ierr = nc_create(filename, mode, &file->fh); } break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - LOG((2, "Calling ncmpi_create mode = %d", file->mode)); - ierr = ncmpi_create(ios->io_comm, filename, file->mode, ios->info, &file->fh); + LOG((2, "Calling ncmpi_create mode = %d", mode)); + ierr = ncmpi_create(ios->io_comm, filename, mode, ios->info, &file->fh); if (!ierr) ierr = ncmpi_buffer_attach(file->fh, pio_buffer_size_limit); break; @@ -1811,17 +1820,23 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena return check_netcdf2(ios, NULL, ierr, __FILE__, __LINE__); } - /* Broadcast mode to all tasks. */ - if ((mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->ioroot, ios->union_comm))) + /* Broadcast writablility to all tasks. */ + if ((mpierr = MPI_Bcast(&file->writable, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); - /* This flag is implied by netcdf create functions but we need - to know if its set. 
*/ - file->mode = file->mode | PIO_WRITE; + /* Broadcast next ncid to all tasks from io root, necessary + * because files may be opened on mutilple iosystems, causing the + * underlying library to reuse ncids. Hilarious confusion + * ensues. */ + if (ios->async) + { + LOG((3, "createfile bcasting pio_next_ncid %d", pio_next_ncid)); + if ((mpierr = MPI_Bcast(&pio_next_ncid, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + LOG((3, "createfile bcast pio_next_ncid %d", pio_next_ncid)); + } - /* Assign the PIO ncid, necessary because files may be opened - * on mutilple iosystems, causing the underlying library to - * reuse ncids. Hilarious confusion ensues. */ + /* Assign the PIO ncid. */ file->pio_ncid = pio_next_ncid++; LOG((2, "file->fh = %d file->pio_ncid = %d", file->fh, file->pio_ncid)); @@ -1838,6 +1853,263 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena return ierr; } +/** + * Check that a file meets PIO requirements for use of unlimited + * dimensions. This function is only called on netCDF-4 files. If the + * file is found to violate PIO requirements it is closed. + * + * @param ncid the file->fh for this file (the real netCDF ncid, not + * the pio_ncid). + * @returns 0 if file is OK, error code otherwise. + * @author Ed Hartnett + */ +int check_unlim_use(int ncid) +{ +#ifdef _NETCDF4 + int nunlimdims; /* Number of unlimited dims in file. */ + int nvars; /* Number of vars in file. */ + int ierr; /* Return code. */ + + /* Are there 2 or more unlimited dims in this file? */ + if ((ierr = nc_inq_unlimdims(ncid, &nunlimdims, NULL))) + return ierr; + if (nunlimdims < 2) + return PIO_NOERR; + + /* How many vars in file? */ + if ((ierr = nc_inq_nvars(ncid, &nvars))) + return ierr; + + /* Check each var. 
*/ + for (int v = 0; v < nvars && !ierr; v++) + { + int nvardims; + if ((ierr = nc_inq_varndims(ncid, v, &nvardims))) + return ierr; + int vardimid[nvardims]; + if ((ierr = nc_inq_vardimid(ncid, v, vardimid))) + return ierr; + + /* Check all var dimensions, except the first. If we find + * unlimited, that's a problem. */ + for (int vd = 1; vd < nvardims; vd++) + { + size_t dimlen; + if ((ierr = nc_inq_dimlen(ncid, vardimid[vd], &dimlen))) + return ierr; + if (dimlen == NC_UNLIMITED) + { + nc_close(ncid); + return PIO_EINVAL; + } + } + } +#endif /* _NETCDF4 */ + + return PIO_NOERR; +} + +/** + * Internal function used when opening an existing file. This function + * is called by PIOc_openfile_retry(). It learns some things about the + * metadata in that file. The results end up in the file_desc_t. + * + * @param file pointer to the file_desc_t for this file. + * @param ncid the ncid assigned to the file when opened. + * @param iotype the iotype used to open the file. + * @param nvars a pointer that gets the number of vars in the file. + + * @param rec_var gets an array (length nvars) of rec_var values for + * each var in the file. This array must be freed by caller. + * @param pio_type gets an array (length nvars) of pio_type values for + * each var in the file. This array must be freed by caller. + * @param pio_type_size gets an array (length nvars) of the size of + * the PIO type for each var in the file. This array must be freed by + * caller. + * @param mpi_type gets an array (length nvars) of MPI type values for + * each var in the file. This array must be freed by caller. + * @param mpi_type_size gets an array (length nvars) of the size of + * the MPI type for each var in the file. This array must be freed by + * caller. + * + * @return 0 for success, error code otherwise. 
+ * @ingroup PIO_openfile + * @author Ed Hartnett + */ +int inq_file_metadata(file_desc_t *file, int ncid, int iotype, int *nvars, int **rec_var, + int **pio_type, int **pio_type_size, int **mpi_type, int **mpi_type_size) +{ + int nunlimdims; /* The number of unlimited dimensions. */ + int unlimdimid; + int *unlimdimids; + int mpierr; + int ret; + + if (iotype == PIO_IOTYPE_PNETCDF) + { +#ifdef _PNETCDF + if ((ret = ncmpi_inq_nvars(ncid, nvars))) + return pio_err(NULL, file, PIO_ENOMEM, __FILE__, __LINE__); +#endif /* _PNETCDF */ + } + else + { + if ((ret = nc_inq_nvars(ncid, nvars))) + return pio_err(NULL, file, PIO_ENOMEM, __FILE__, __LINE__); + } + + if (*nvars) + { + if (!(*rec_var = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*pio_type = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*pio_type_size = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*mpi_type = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + if (!(*mpi_type_size = malloc(*nvars * sizeof(int)))) + return PIO_ENOMEM; + } + + /* How many unlimited dims for this file? */ + if (iotype == PIO_IOTYPE_PNETCDF) + { +#ifdef _PNETCDF + if ((ret = ncmpi_inq_unlimdim(ncid, &unlimdimid))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + nunlimdims = unlimdimid == -1 ? 0 : 1; +#endif /* _PNETCDF */ + } + else if (iotype == PIO_IOTYPE_NETCDF) + { + if ((ret = nc_inq_unlimdim(ncid, &unlimdimid))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + nunlimdims = unlimdimid == -1 ? 0 : 1; + } + else + { +#ifdef _NETCDF4 + if ((ret = nc_inq_unlimdims(ncid, &nunlimdims, NULL))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); +#endif /* _NETCDF4 */ + } + + /* Learn the unlimited dimension ID(s), if there are any. 
*/ + if (nunlimdims) + { + if (!(unlimdimids = malloc(nunlimdims * sizeof(int)))) + return pio_err(NULL, file, PIO_ENOMEM, __FILE__, __LINE__); + if (iotype == PIO_IOTYPE_PNETCDF || iotype == PIO_IOTYPE_NETCDF) + { + unlimdimids[0] = unlimdimid; + } + else + { +#ifdef _NETCDF4 + if ((ret = nc_inq_unlimdims(ncid, NULL, unlimdimids))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); +#endif /* _NETCDF4 */ + } + } + + /* Learn about each variable in the file. */ + for (int v = 0; v < *nvars; v++) + { + int var_ndims; /* Number of dims for this var. */ + nc_type my_type; + + /* Find type of the var and number of dims in this var. Also + * learn about type. */ + if (iotype == PIO_IOTYPE_PNETCDF) + { + PIO_Offset type_size; + +#ifdef _PNETCDF + if ((ret = ncmpi_inq_var(ncid, v, NULL, &my_type, &var_ndims, NULL, NULL))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + (*pio_type)[v] = (int)my_type; + if ((ret = pioc_pnetcdf_inq_type(ncid, (*pio_type)[v], NULL, &type_size))) + return check_netcdf(file, ret, __FILE__, __LINE__); + (*pio_type_size)[v] = type_size; +#endif /* _PNETCDF */ + } + else + { + size_t type_size; + + if ((ret = nc_inq_var(ncid, v, NULL, &my_type, &var_ndims, NULL, NULL))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + (*pio_type)[v] = (int)my_type; + if ((ret = nc_inq_type(ncid, (*pio_type)[v], NULL, &type_size))) + return check_netcdf(file, ret, __FILE__, __LINE__); + (*pio_type_size)[v] = type_size; + } + + /* Get the MPI type corresponding with the PIO type. */ + if ((ret = find_mpi_type((*pio_type)[v], &(*mpi_type)[v], NULL))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + + /* Get the size of the MPI type. */ + if ((mpierr = MPI_Type_size((*mpi_type)[v], &(*mpi_type_size)[v]))) + return check_mpi2(NULL, file, mpierr, __FILE__, __LINE__); + + /* What are the dimids associated with this var? 
*/ + if (var_ndims) + { + int var_dimids[var_ndims]; + if (iotype == PIO_IOTYPE_PNETCDF) + { +#ifdef _PNETCDF + if ((ret = ncmpi_inq_vardimid(ncid, v, var_dimids))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); +#endif /* _PNETCDF */ + } + else + { + if ((ret = nc_inq_vardimid(ncid, v, var_dimids))) + return pio_err(NULL, file, ret, __FILE__, __LINE__); + } + + /* Check against each variable dimid agains each unlimited + * dimid. */ + for (int d = 0; d < var_ndims; d++) + { + int unlim_found = 0; + + /* Check against each unlimited dimid. */ + for (int ud = 0; ud < nunlimdims; ud++) + { + if (var_dimids[d] == unlimdimids[ud]) + { + unlim_found++; + break; + } + } + + /* Only first dim may be unlimited, for PIO. */ + if (unlim_found) + { + if (d == 0) + (*rec_var)[v] = 1; + else + return pio_err(NULL, file, PIO_EINVAL, __FILE__, __LINE__); + + } + else + (*rec_var)[v] = 0; + + } + } + } /* next var */ + + /* Free resources. */ + if (nunlimdims) + free(unlimdimids); + + return PIO_NOERR; +} + /** * Open an existing file using PIO library. This is an internal * function. Depending on the value of the retry parameter, a failed @@ -1861,15 +2133,22 @@ int PIOc_createfile_int(int iosysid, int *ncidp, int *iotype, const char *filena * * @return 0 for success, error code otherwise. * @ingroup PIO_openfile + * @author Jim Edwards, Ed Hartnett */ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filename, int mode, int retry) { - iosystem_desc_t *ios; /** Pointer to io system information. */ - file_desc_t *file; /** Pointer to file information. */ - int imode; /** internal mode val for netcdf4 file open */ + iosystem_desc_t *ios; /* Pointer to io system information. */ + file_desc_t *file; /* Pointer to file information. */ + int imode; /* Internal mode val for netcdf4 file open. 
*/ + int nvars = 0; + int *rec_var = NULL; + int *pio_type = NULL; + int *pio_type_size = NULL; + int *mpi_type = NULL; + int *mpi_type_size = NULL; int mpierr = MPI_SUCCESS, mpierr2; /** Return code from MPI function codes. */ - int ierr = PIO_NOERR; /** Return code from function calls. */ + int ierr = PIO_NOERR; /* Return code from function calls. */ /* Get the IO system info from the iosysid. */ if (!(ios = pio_get_iosystem_from_id(iosysid))) @@ -1892,13 +2171,7 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena file->fh = -1; file->iotype = *iotype; file->iosystem = ios; - file->mode = mode; - - for (int i = 0; i < PIO_MAX_VARS; i++) - { - file->varlist[i].record = -1; - file->varlist[i].ndims = -1; - } + file->writable = (mode & PIO_WRITE) ? 1 : 0; /* Set to true if this task should participate in IO (only true * for one task with netcdf serial files. */ @@ -1926,7 +2199,7 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena if (!mpierr) mpierr = MPI_Bcast(&file->iotype, 1, MPI_INT, ios->compmaster, ios->intercomm); if (!mpierr) - mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->compmaster, ios->intercomm); + mpierr = MPI_Bcast(&mode, 1, MPI_INT, ios->compmaster, ios->intercomm); } /* Handle MPI errors. */ @@ -1945,46 +2218,64 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena case PIO_IOTYPE_NETCDF4P: #ifdef _MPISERIAL - ierr = nc_open(filename, file->mode, &file->fh); + ierr = nc_open(filename, mode, &file->fh); #else - imode = file->mode | NC_MPIIO; - ierr = nc_open_par(filename, imode, ios->io_comm, ios->info, &file->fh); - if (ierr == PIO_NOERR) - file->mode = imode; + imode = mode | NC_MPIIO; + if ((ierr = nc_open_par(filename, imode, ios->io_comm, ios->info, &file->fh))) + break; + + /* Check the vars for valid use of unlim dims. 
*/ + if ((ierr = check_unlim_use(file->fh))) + break; + + if ((ierr = inq_file_metadata(file, file->fh, PIO_IOTYPE_NETCDF4P, &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, &mpi_type_size))) + break; LOG((2, "PIOc_openfile_retry:nc_open_par filename = %s mode = %d imode = %d ierr = %d", - filename, file->mode, imode, ierr)); + filename, mode, imode, ierr)); #endif break; case PIO_IOTYPE_NETCDF4C: if (ios->io_rank == 0) { - imode = file->mode | NC_NETCDF4; - ierr = nc_open(filename, imode, &file->fh); - if (ierr == PIO_NOERR) - file->mode = imode; + if ((ierr = nc_open(filename, mode, &file->fh))) + break; + /* Check the vars for valid use of unlim dims. */ + if ((ierr = check_unlim_use(file->fh))) + break; + ierr = inq_file_metadata(file, file->fh, PIO_IOTYPE_NETCDF4C, &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, &mpi_type_size); } break; - -#endif +#endif /* _NETCDF4 */ case PIO_IOTYPE_NETCDF: if (ios->io_rank == 0) - ierr = nc_open(filename, file->mode, &file->fh); + { + if ((ierr = nc_open(filename, mode, &file->fh))) + break; + ierr = inq_file_metadata(file, file->fh, PIO_IOTYPE_NETCDF, &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, &mpi_type_size); + } break; #ifdef _PNETCDF case PIO_IOTYPE_PNETCDF: - ierr = ncmpi_open(ios->io_comm, filename, file->mode, ios->info, &file->fh); + ierr = ncmpi_open(ios->io_comm, filename, mode, ios->info, &file->fh); // This should only be done with a file opened to append - if (ierr == PIO_NOERR && (file->mode & PIO_WRITE)) + if (ierr == PIO_NOERR && (mode & PIO_WRITE)) { if (ios->iomaster == MPI_ROOT) LOG((2, "%d Setting IO buffer %ld", __LINE__, pio_buffer_size_limit)); ierr = ncmpi_buffer_attach(file->fh, pio_buffer_size_limit); } LOG((2, "ncmpi_open(%s) : fd = %d", filename, file->fh)); + + if (!ierr) + ierr = inq_file_metadata(file, file->fh, PIO_IOTYPE_PNETCDF, &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, &mpi_type_size); break; #endif @@ -2011,20 +2302,27 @@ int 
PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena /* open netcdf file serially on main task */ if (ios->io_rank == 0) - ierr = nc_open(filename, file->mode, &file->fh); + { + if ((ierr = nc_open(filename, mode, &file->fh))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + if ((ierr = inq_file_metadata(file, file->fh, PIO_IOTYPE_NETCDF, &nvars, &rec_var, &pio_type, + &pio_type_size, &mpi_type, &mpi_type_size))) + return pio_err(ios, file, ierr, __FILE__, __LINE__); + } else file->do_io = 0; } - LOG((2, "retry nc_open(%s) : fd = %d, iotype = %d, do_io = %d, ierr = %d", filename, file->fh, file->iotype, file->do_io, ierr)); + LOG((2, "retry nc_open(%s) : fd = %d, iotype = %d, do_io = %d, ierr = %d", + filename, file->fh, file->iotype, file->do_io, ierr)); } } /* Broadcast and check the return code. */ - LOG((2, "Bcasting error code ierr = %d ios->ioroot = %d ios->my_comm = %d", ierr, ios->ioroot, - ios->my_comm)); + LOG((2, "Bcasting error code ierr %d ios->ioroot %d ios->my_comm %d", + ierr, ios->ioroot, ios->my_comm)); if ((mpierr = MPI_Bcast(&ierr, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); - LOG((2, "Bcast error code ierr = %d", ierr)); + LOG((2, "Bcast openfile_retry error code ierr = %d", ierr)); /* If there was an error, free allocated memory and deal with the error. */ if (ierr) @@ -2032,12 +2330,50 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena free(file); return check_netcdf2(ios, NULL, ierr, __FILE__, __LINE__); } - LOG((2, "error code Bcast complete ierr = %d ios->my_comm = %d", ierr, ios->my_comm)); - /* Broadcast results to all tasks. Ignore NULL parameters. */ - if ((mpierr = MPI_Bcast(&file->mode, 1, MPI_INT, ios->ioroot, ios->my_comm))) + /* Broadcast writability to all tasks. 
*/ + if ((mpierr = MPI_Bcast(&file->writable, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + + /* Broadcast some values to all tasks from io root. */ + if (ios->async) + { + LOG((3, "open bcasting pio_next_ncid %d ios->ioroot %d", pio_next_ncid, ios->ioroot)); + if ((mpierr = MPI_Bcast(&pio_next_ncid, 1, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + + if ((mpierr = MPI_Bcast(&nvars, 1, MPI_INT, ios->ioroot, ios->my_comm))) return check_mpi(file, mpierr, __FILE__, __LINE__); + /* Non io tasks need to allocate to store info about variables. */ + if (nvars && !rec_var) + { + if (!(rec_var = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(pio_type = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(pio_type_size = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(mpi_type = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + if (!(mpi_type_size = malloc(nvars * sizeof(int)))) + return pio_err(ios, file, PIO_ENOMEM, __FILE__, __LINE__); + } + if (nvars) + { + if ((mpierr = MPI_Bcast(rec_var, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(pio_type, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(pio_type_size, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(mpi_type, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + if ((mpierr = MPI_Bcast(mpi_type_size, nvars, MPI_INT, ios->ioroot, ios->my_comm))) + return check_mpi(file, mpierr, __FILE__, __LINE__); + } + /* Create the ncid that the user will see. 
This is necessary * because otherwise ncids will be reused if files are opened * on multiple iosystems. */ @@ -2049,6 +2385,28 @@ int PIOc_openfile_retry(int iosysid, int *ncidp, int *iotype, const char *filena /* Add this file to the list of currently open files. */ pio_add_to_file_list(file); + /* Add info about the variables to the file_desc_t struct. */ + for (int v = 0; v < nvars; v++) + if ((ierr = add_to_varlist(v, rec_var[v], pio_type[v], pio_type_size[v], mpi_type[v], + mpi_type_size[v], &file->varlist))) + return pio_err(ios, NULL, ierr, __FILE__, __LINE__); + file->nvars = nvars; + + /* Free resources. */ + if (nvars) + { + if (rec_var) + free(rec_var); + if (pio_type) + free(pio_type); + if (pio_type_size) + free(pio_type_size); + if (mpi_type) + free(mpi_type); + if (mpi_type_size) + free(mpi_type_size); + } + LOG((2, "Opened file %s file->pio_ncid = %d file->fh = %d ierr = %d", filename, file->pio_ncid, file->fh, ierr)); @@ -2267,7 +2625,7 @@ int PIOc_set_rearr_opts(int iosysid, int comm_type, int fcd, bool enable_hs_c2i, (fcd < 0 || fcd > PIO_REARR_COMM_FC_2D_DISABLE) || (max_pend_req_c2i != PIO_REARR_COMM_UNLIMITED_PEND_REQ && max_pend_req_c2i < 0) || (max_pend_req_i2c != PIO_REARR_COMM_UNLIMITED_PEND_REQ && max_pend_req_i2c < 0)) - return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); + return pio_err(NULL, NULL, PIO_EINVAL, __FILE__, __LINE__); /* Get the IO system info. */ if (!(ios = pio_get_iosystem_from_id(iosysid))) @@ -2278,3 +2636,71 @@ int PIOc_set_rearr_opts(int iosysid, int comm_type, int fcd, bool enable_hs_c2i, return PIO_NOERR; } + +/** + * This function determines which processes are assigned to the + * different computation components. This function is called by + * PIOc_init_async(). + * + * The user may have passed a specification of tasks as array + * proc_list, or it may be calculated by assigning processors starting + * at the first one after the IO component, and assigning them in + * order to each computation component. 
+ * + * Note that memory is allocated for my_proc_list. This must be freed + * by the caller. + * + * @param num_io_proc the number of IO processes. + * @param component_count the number of computational components. + * @param num_procs_per_comp array (length component_count) which + * contains the number of processes to assign to each computation + * component. + * @param proc_list array (length component count) of arrays (length + * num_procs_per_comp_array[cmp]) which contain the list of processes + * for each computation component. May be NULL. + * @param array (length component count) of arrays (length + * num_procs_per_comp_array[cmp]) which will get the list of processes + * for each computation component. + * @returns 0 for success, error code otherwise + * @author Ed Hartnett + */ +int determine_procs(int num_io_procs, int component_count, int *num_procs_per_comp, + int **proc_list, int **my_proc_list) +{ + /* If the user did not provide a list of processes for each + * component, create one. */ + if (!proc_list) + { + int last_proc = num_io_procs; + + /* Fill the array of arrays. */ + for (int cmp = 0; cmp < component_count; cmp++) + { + LOG((3, "calculating processors for component %d num_procs_per_comp[cmp] = %d", + cmp, num_procs_per_comp[cmp])); + + /* Allocate space for each array. */ + if (!(my_proc_list[cmp] = malloc(num_procs_per_comp[cmp] * sizeof(int)))) + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + + int proc; + for (proc = last_proc; proc < num_procs_per_comp[cmp] + last_proc; proc++) + { + my_proc_list[cmp][proc - last_proc] = proc; + LOG((3, "my_proc_list[%d][%d] = %d", cmp, proc - last_proc, proc)); + } + last_proc = proc; + } + } + else + { + for (int cmp = 0; cmp < component_count; cmp++) + { + /* Allocate space for each array. 
*/ + if (!(my_proc_list[cmp] = malloc(num_procs_per_comp[cmp] * sizeof(int)))) + return pio_err(NULL, NULL, PIO_ENOMEM, __FILE__, __LINE__); + memcpy(my_proc_list[cmp], proc_list[cmp], num_procs_per_comp[cmp] * sizeof(int)); + } + } + return PIO_NOERR; +} diff --git a/src/externals/pio2/src/flib/piolib_mod.F90 b/src/externals/pio2/src/flib/piolib_mod.F90 index e704dab4508..e6bb1a07d0c 100644 --- a/src/externals/pio2/src/flib/piolib_mod.F90 +++ b/src/externals/pio2/src/flib/piolib_mod.F90 @@ -198,9 +198,9 @@ module piolib_mod !! @verbinclude errorhandle !< interface PIO_seterrorhandling - module procedure seterrorhandlingf - module procedure seterrorhandlingi - module procedure seterrorhandlingg + module procedure seterrorhandlingfile + module procedure seterrorhandlingiosystem + module procedure seterrorhandlingiosysid end interface !> @@ -378,70 +378,55 @@ end subroutine setdebuglevel !! @param method : !! @copydoc PIO_error_method !< - subroutine seterrorhandlingf(file, method, oldmethod) + subroutine seterrorhandlingfile(file, method, oldmethod) type(file_desc_t), intent(inout) :: file integer, intent(in) :: method integer, intent(out), optional :: oldmethod - call seterrorhandlingi(file%iosystem, method, oldmethod) - end subroutine seterrorhandlingf + call seterrorhandlingiosysid(file%iosystem%iosysid, method, oldmethod) + end subroutine seterrorhandlingfile !> !! @ingroup PIO_seterrorhandling !! @public -!! @brief set the pio error handling method for the iosystem +!! @brief set the pio error handling method for a pio system !! @param iosystem : a defined pio system descriptor, see PIO_types !! @param method : !! 
@copydoc PIO_error_method !< - subroutine seterrorhandlingi(ios, method, oldmethod) - type(iosystem_desc_t), intent(inout) :: ios + subroutine seterrorhandlingiosystem(iosystem, method, oldmethod) + type(iosystem_desc_t), intent(inout) :: iosystem integer, intent(in) :: method integer, intent(out), optional :: oldmethod - - interface - integer(c_int) function PIOc_Set_IOSystem_Error_Handling(ios, method) & - bind(C,name="PIOc_Set_IOSystem_Error_Handling") - use iso_c_binding - integer(c_int), value :: ios - integer(c_int), value :: method - end function PIOc_Set_IOSystem_Error_Handling - end interface - integer(c_int) :: loldmethod - - loldmethod = PIOc_Set_IOSystem_Error_Handling(ios%iosysid, method) - if(present(oldmethod)) oldmethod = loldmethod - - - end subroutine seterrorhandlingi + call seterrorhandlingiosysid(iosystem%iosysid, method, oldmethod) + end subroutine seterrorhandlingiosystem !> !! @ingroup PIO_seterrorhandling !! @public -!! @brief set the pio error handling method for the iosystem -!! @param iosystem : a defined pio system descriptor, see PIO_types +!! @brief set the pio error handling method for a pio system or globally +!! @param iosysid : a pio system ID (pass PIO_DEFAULT to change the global default error handling) !! @param method : !! 
@copydoc PIO_error_method !< - subroutine seterrorhandlingg(global, method, oldmethod) - integer, intent(in) :: global + subroutine seterrorhandlingiosysid(iosysid, method, oldmethod) + integer, intent(in) :: iosysid integer, intent(in) :: method integer, intent(out), optional :: oldmethod interface - integer(c_int) function PIOc_Set_IOSystem_Error_Handling(global, method) & + integer(c_int) function PIOc_Set_IOSystem_Error_Handling(iosysid, method) & bind(C,name="PIOc_Set_IOSystem_Error_Handling") use iso_c_binding - integer(c_int), value :: global + integer(c_int), value :: iosysid integer(c_int), value :: method end function PIOc_Set_IOSystem_Error_Handling end interface integer(c_int) :: loldmethod - loldmethod = PIOc_Set_IOSystem_Error_Handling(global, method) + loldmethod = PIOc_Set_IOSystem_Error_Handling(iosysid, method) if(present(oldmethod)) oldmethod = loldmethod - - end subroutine seterrorhandlingg + end subroutine seterrorhandlingiosysid !> diff --git a/src/externals/pio2/src/gptl/COPYING b/src/externals/pio2/src/gptl/COPYING index 324ce86b24c..94a9ed024d3 100644 --- a/src/externals/pio2/src/gptl/COPYING +++ b/src/externals/pio2/src/gptl/COPYING @@ -1,17 +1,674 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Software”), to deal -in the Software for any noncommercial purposes without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following -conditions: The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. Any -commercial use (including sale) of the software, and derivative development -towards commercial use, requires written permission of the copyright -holder. 
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO -EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES -OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/externals/pio2/tests/Makefile.am b/src/externals/pio2/tests/Makefile.am new file mode 100644 index 00000000000..b2c4a95e64d --- /dev/null +++ b/src/externals/pio2/tests/Makefile.am @@ -0,0 +1 @@ +SUBDIRS = cunit diff --git a/src/externals/pio2/tests/cunit/CMakeLists.txt b/src/externals/pio2/tests/cunit/CMakeLists.txt index 418340b7916..21553eb6645 100644 --- a/src/externals/pio2/tests/cunit/CMakeLists.txt +++ b/src/externals/pio2/tests/cunit/CMakeLists.txt @@ -1,6 +1,7 @@ include (LibMPI) include_directories("${CMAKE_SOURCE_DIR}/tests/cunit") +include_directories("${CMAKE_BINARY_DIR}") # Compiler-specific compiler options if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU") @@ -25,6 +26,8 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0") # Don't run these tests if we are using MPI SERIAL. if (NOT PIO_USE_MPISERIAL) + add_executable (test_async_mpi EXCLUDE_FROM_ALL test_async_mpi.c) + add_dependencies (tests test_async_mpi) add_executable (test_intercomm2 EXCLUDE_FROM_ALL test_intercomm2.c test_common.c) target_link_libraries (test_intercomm2 pioc) add_dependencies (tests test_intercomm2) @@ -65,22 +68,42 @@ if (NOT PIO_USE_MPISERIAL) target_link_libraries (test_pioc_fill pioc) add_executable (test_darray EXCLUDE_FROM_ALL test_darray.c test_common.c) target_link_libraries (test_darray pioc) + add_executable (test_darray_frame EXCLUDE_FROM_ALL test_darray_frame.c test_common.c) + target_link_libraries (test_darray_frame pioc) add_executable (test_darray_multi EXCLUDE_FROM_ALL test_darray_multi.c test_common.c) target_link_libraries (test_darray_multi pioc) add_executable (test_darray_multivar EXCLUDE_FROM_ALL test_darray_multivar.c test_common.c) target_link_libraries (test_darray_multivar pioc) add_executable (test_darray_multivar2 EXCLUDE_FROM_ALL test_darray_multivar2.c test_common.c) target_link_libraries (test_darray_multivar2 pioc) + add_executable (test_darray_multivar3 EXCLUDE_FROM_ALL test_darray_multivar3.c test_common.c) + target_link_libraries 
(test_darray_multivar3 pioc) add_executable (test_darray_1d EXCLUDE_FROM_ALL test_darray_1d.c test_common.c) target_link_libraries (test_darray_1d pioc) add_executable (test_darray_3d EXCLUDE_FROM_ALL test_darray_3d.c test_common.c) - target_link_libraries (test_darray_3d pioc) + target_link_libraries (test_darray_3d pioc) add_executable (test_decomp_uneven EXCLUDE_FROM_ALL test_decomp_uneven.c test_common.c) target_link_libraries (test_decomp_uneven pioc) add_executable (test_decomps EXCLUDE_FROM_ALL test_decomps.c test_common.c) target_link_libraries (test_decomps pioc) add_executable (test_rearr EXCLUDE_FROM_ALL test_rearr.c test_common.c) target_link_libraries (test_rearr pioc) + if (PIO_USE_MALLOC) + add_executable (test_darray_async_simple EXCLUDE_FROM_ALL test_darray_async_simple.c test_common.c) + target_link_libraries (test_darray_async_simple pioc) + add_executable (test_darray_async EXCLUDE_FROM_ALL test_darray_async.c test_common.c) + target_link_libraries (test_darray_async pioc) + add_executable (test_darray_async_many EXCLUDE_FROM_ALL test_darray_async_many.c test_common.c) + target_link_libraries (test_darray_async_many pioc) + add_executable (test_darray_2sync EXCLUDE_FROM_ALL test_darray_2sync.c test_common.c) + target_link_libraries (test_darray_2sync pioc) + add_executable (test_async_multicomp EXCLUDE_FROM_ALL test_async_multicomp.c test_common.c) + target_link_libraries (test_async_multicomp pioc) + add_executable (test_async_multi2 EXCLUDE_FROM_ALL test_async_multi2.c test_common.c) + target_link_libraries (test_async_multi2 pioc) + add_executable (test_async_manyproc EXCLUDE_FROM_ALL test_async_manyproc.c test_common.c) + target_link_libraries (test_async_manyproc pioc) + endif () endif () add_executable (test_spmd EXCLUDE_FROM_ALL test_spmd.c test_common.c) target_link_libraries (test_spmd pioc) @@ -91,19 +114,30 @@ add_dependencies (tests test_pioc_unlim) add_dependencies (tests test_pioc_putget) add_dependencies (tests test_pioc_fill) 
add_dependencies (tests test_darray) +add_dependencies (tests test_darray_frame) add_dependencies (tests test_darray_multi) add_dependencies (tests test_darray_multivar) add_dependencies (tests test_darray_multivar2) +add_dependencies (tests test_darray_multivar3) add_dependencies (tests test_darray_1d) add_dependencies (tests test_darray_3d) add_dependencies (tests test_decomp_uneven) add_dependencies (tests test_decomps) +if(PIO_USE_MALLOC) + add_dependencies (tests test_darray_async_simple) + add_dependencies (tests test_darray_async) + add_dependencies (tests test_darray_async_many) + add_dependencies (tests test_darray_2sync) + add_dependencies (tests test_async_multicomp) + add_dependencies (tests test_async_multi2) + add_dependencies (tests test_async_manyproc) +endif () # Test Timeout in seconds. if (PIO_VALGRIND_CHECK) - set (DEFAULT_TEST_TIMEOUT 240) + set (DEFAULT_TEST_TIMEOUT 480) else () - set (DEFAULT_TEST_TIMEOUT 120) + set (DEFAULT_TEST_TIMEOUT 240) endif () # All tests need a certain number of tasks, but they should be able to @@ -112,11 +146,16 @@ endif () set (AT_LEAST_TWO_TASKS 3) set (AT_LEAST_THREE_TASKS 4) set (AT_LEAST_FOUR_TASKS 5) +set (AT_LEAST_EIGHT_TASKS 9) if (PIO_USE_MPISERIAL) add_test(NAME test_pioc COMMAND test_pioc) else () + add_mpi_test(test_async_mpi + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_async_mpi + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_spmd EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_spmd NUMPROCS ${AT_LEAST_FOUR_TASKS} @@ -185,6 +224,10 @@ else () EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_frame + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_frame + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_darray_multi EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multi NUMPROCS ${AT_LEAST_FOUR_TASKS} @@ -197,6 +240,10 @@ else () 
EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multivar2 NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_multivar3 + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_multivar3 + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) add_mpi_test(test_darray_1d EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_1d NUMPROCS ${AT_LEAST_FOUR_TASKS} @@ -205,6 +252,36 @@ else () EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_3d NUMPROCS ${AT_LEAST_FOUR_TASKS} TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + if(PIO_USE_MALLOC) + add_mpi_test(test_darray_2sync + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_2sync + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_async_simple + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_async_simple + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_async + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_async + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_darray_async_many + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_darray_async_many + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_async_multicomp + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_async_multicomp + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_async_multi2 + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_async_multi2 + NUMPROCS ${AT_LEAST_FOUR_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + add_mpi_test(test_async_manyproc + EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_async_manyproc + NUMPROCS ${AT_LEAST_EIGHT_TASKS} + TIMEOUT ${DEFAULT_TEST_TIMEOUT}) + endif () add_mpi_test(test_decomp_uneven EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/test_decomp_uneven NUMPROCS ${AT_LEAST_FOUR_TASKS} diff --git a/src/externals/pio2/tests/cunit/Makefile.am b/src/externals/pio2/tests/cunit/Makefile.am new file mode 100644 
index 00000000000..16934faafeb --- /dev/null +++ b/src/externals/pio2/tests/cunit/Makefile.am @@ -0,0 +1,74 @@ +## This is the automake file for building the C tests for the PIO +## library. +# Ed Hartnett 8/17/17 + +# Link to our assembled library. +AM_LDFLAGS = ${top_builddir}/src/clib/libpio.la +AM_CPPFLAGS = -I$(top_srcdir)/src/clib + +# The tests that will be run. +PIO_TESTS = test_async_mpi test_spmd test_intercomm2 \ +test_async_simple test_async_3proc test_async_4proc \ +test_iosystem2_simple test_iosystem2_simple2 test_iosystem2 \ +test_iosystem3_simple test_iosystem3_simple2 test_iosystem3 test_pioc \ +test_pioc_unlim test_pioc_putget test_pioc_fill test_darray \ +test_darray_multi test_darray_multivar test_darray_multivar2 \ +test_darray_multivar3 test_darray_1d test_darray_3d \ +test_decomp_uneven test_decomps test_rearr test_darray_async_simple \ +test_darray_async test_darray_async_many test_darray_2sync \ +test_async_multicomp test_async_multi2 test_async_manyproc + +# Source code for each test. 
+test_async_mpi_SOURCES = test_async_mpi.c +test_intercomm2_SOURCES = test_intercomm2.c test_common.c pio_tests.h +test_async_simple_SOURCES = test_async_simple.c test_common.c pio_tests.h +test_async_3proc_SOURCES = test_async_3proc.c test_common.c pio_tests.h +test_async_4proc_SOURCES = test_async_4proc.c test_common.c pio_tests.h +test_iosystem2_simple_SOURCES = test_iosystem2_simple.c test_common.c pio_tests.h +test_iosystem2_simple2_SOURCES = test_iosystem2_simple2.c test_common.c pio_tests.h +test_iosystem2_SOURCES = test_iosystem2.c test_common.c pio_tests.h +test_iosystem3_simple_SOURCES = test_iosystem3_simple.c test_common.c pio_tests.h +test_iosystem3_simple2_SOURCES = test_iosystem3_simple2.c test_common.c pio_tests.h +test_iosystem3_SOURCES = test_iosystem3.c test_common.c pio_tests.h +test_pioc_SOURCES = test_pioc.c test_common.c test_shared.c pio_tests.h +test_pioc_unlim_SOURCES = test_pioc_unlim.c test_common.c test_shared.c pio_tests.h +test_pioc_putget_SOURCES = test_pioc_putget.c test_common.c test_shared.c pio_tests.h +test_pioc_fill_SOURCES = test_pioc_fill.c test_common.c test_shared.c pio_tests.h +test_darray_SOURCES = test_darray.c test_common.c pio_tests.h +test_darray_multi_SOURCES = test_darray_multi.c test_common.c pio_tests.h +test_darray_multivar_SOURCES = test_darray_multivar.c test_common.c pio_tests.h +test_darray_multivar2_SOURCES = test_darray_multivar2.c test_common.c pio_tests.h +test_darray_multivar3_SOURCES = test_darray_multivar3.c test_common.c pio_tests.h +test_darray_1d_SOURCES = test_darray_1d.c test_common.c pio_tests.h +test_darray_3d_SOURCES = test_darray_3d.c test_common.c pio_tests.h +test_decomp_uneven_SOURCES = test_decomp_uneven.c test_common.c pio_tests.h +test_decomps_SOURCES = test_decomps.c test_common.c pio_tests.h +test_rearr_SOURCES = test_rearr.c test_common.c pio_tests.h +test_darray_async_simple_SOURCES = test_darray_async_simple.c test_common.c pio_tests.h +test_darray_async_SOURCES = 
test_darray_async.c test_common.c pio_tests.h +test_darray_async_many_SOURCES = test_darray_async_many.c test_common.c pio_tests.h +test_darray_2sync_SOURCES = test_darray_2sync.c test_common.c pio_tests.h +test_spmd_SOURCES = test_spmd.c test_common.c pio_tests.h +test_intercomm2_SOURCES = test_async_simple.c test_common.c pio_tests.h +test_async_3proc_SOURCES = test_async_3proc.c test_common.c pio_tests.h +test_async_multicomp_SOURCES = test_async_multicomp.c test_common.c pio_tests.h +test_async_multi2_SOURCES = test_async_multi2.c test_common.c pio_tests.h +test_async_manyproc_SOURCES = test_async_manyproc.c test_common.c pio_tests.h + +# Build the tests for the tests target. +tests: ${PIO_TESTS} + +# Build the tests for make check. +check_PROGRAMS = $(PIO_TESTS) + +# Tests will run from a bash script. +TESTS = run_tests.sh + +# Bash script needs all tests built. +run_tests.sh : tests + +# Distribute the test script. +EXTRA_DIST = run_tests.sh + +# Clean up files produced during testing. +CLEANFILES = *.nc *.log diff --git a/src/externals/pio2/tests/cunit/pio_tests.h b/src/externals/pio2/tests/cunit/pio_tests.h index 5a601ea6df1..d1d0cae9894 100644 --- a/src/externals/pio2/tests/cunit/pio_tests.h +++ b/src/externals/pio2/tests/cunit/pio_tests.h @@ -19,6 +19,7 @@ /** The number of possible output netCDF output flavors available to * the ParallelIO library. */ #define NUM_FLAVORS 4 +#define NUM_IOTYPES 4 /** Number of netCDF types. */ #define NUM_NETCDF_TYPES 12 @@ -46,6 +47,19 @@ /** The meaning of life, the universe, and everything. */ #define TEST_VAL_42 42 +/* Dimension lengths used in some C tests. */ +#define DIM_LEN2 2 +#define DIM_LEN3 3 + +/* Number of dims in test file. */ +#define NDIM2 2 +#define NDIM3 3 +#ifdef _NETCDF4 +#define NUM_PIO_TYPES_TO_TEST 11 +#else +#define NUM_PIO_TYPES_TO_TEST 6 +#endif /* _NETCDF4 */ + /** Handle MPI errors. This should only be used with MPI library * function calls. 
*/ #define MPIERR(e) do { \ @@ -73,7 +87,6 @@ char err_buffer[MPI_MAX_ERROR_STRING]; int resultlen; /* Function prototypes. */ -int pio_test_init(int argc, char **argv, int *my_rank, int *ntasks, int target_ntasks, MPI_Comm *test_comm); int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, int min_ntasks, int max_ntasks, int log_level, MPI_Comm *test_comm); int create_nc_sample(int sample, int iosysid, int format, char *filename, int my_rank, int *ncid); @@ -84,6 +97,15 @@ int create_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int int check_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *ncid); int create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *ncid); int check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *ncid); +int create_nc_sample_3(int iosysid, int iotype, int my_rank, int my_comp_idx, + char *filename, char *test_name, int verbose, int use_darray, + int ioid); +int check_nc_sample_3(int iosysid, int iotype, int my_rank, int my_comp_idx, + const char *filename, int verbose, int use_darray, int ioid); +int create_nc_sample_4(int iosysid, int iotype, int my_rank, int my_comp_idx, + char *filename, char *test_name, int verbose, int num_types); +int check_nc_sample_4(int iosysid, int iotype, int my_rank, int my_comp_idx, + const char *filename, int verbose, int num_types); int get_iotypes(int *num_flavors, int *flavors); int get_iotype_name(int iotype, char *name); int pio_test_finalize(MPI_Comm *test_comm); diff --git a/src/externals/pio2/tests/cunit/run_tests.sh b/src/externals/pio2/tests/cunit/run_tests.sh new file mode 100755 index 00000000000..8b1b8698be9 --- /dev/null +++ b/src/externals/pio2/tests/cunit/run_tests.sh @@ -0,0 +1,37 @@ +# Stop execution of script if error is returned. +set -e + +# Stop loop if ctrl-c is pressed. 
+trap exit SIGINT SIGTERM + +printf 'running PIO tests...\n' + +PIO_TESTS='test_async_mpi test_spmd test_rearr test_intercomm2 test_async_simple '\ +'test_async_3proc test_async_4proc test_iosystem2_simple test_iosystem2_simple2 '\ +'test_iosystem2 test_iosystem3_simple test_iosystem3_simple2 test_iosystem3 test_pioc '\ +'test_pioc_unlim test_pioc_putget test_pioc_fill test_darray test_darray_multi '\ +'test_darray_multivar test_darray_multivar2 test_darray_multivar3 test_darray_1d '\ +'test_darray_3d test_decomp_uneven test_decomps test_darray_async_simple '\ +'test_darray_async test_darray_async_many test_darray_2sync test_async_multicomp ' + +for TEST in $PIO_TESTS +do + success=false + echo "running ${TEST}" + mpiexec -n 4 ./${TEST} && success=true || break +done + +PIO_TESTS_8='test_async_multi2' + +for TEST in $PIO_TESTS_8 +do + success=false + echo "running ${TEST}" + mpiexec -n 8 ./${TEST} && success=true || break +done + +# Did we succeed? +if test x$success = xtrue; then + exit 0 +fi +exit 1 diff --git a/src/externals/pio2/tests/cunit/test_async_3proc.c b/src/externals/pio2/tests/cunit/test_async_3proc.c index 4a2f8435fe2..20c42d1ce83 100644 --- a/src/externals/pio2/tests/cunit/test_async_3proc.c +++ b/src/externals/pio2/tests/cunit/test_async_3proc.c @@ -5,12 +5,9 @@ * other 24 for computation. The netCDF sample files are created and * checked. * - * To run with valgrind, use this command: - *
    mpiexec -n 4 valgrind -v --leak-check=full --suppressions=../../../tests/unit/valsupp_test.supp
    - * --error-exitcode=99 --track-origins=yes ./test_async_8io_24comp
    - * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -45,8 +42,8 @@ int main(int argc, char **argv) int num_io_procs[NUM_COMBOS] = {2, 1}; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -67,9 +64,6 @@ int main(int argc, char **argv) num_procs[combo], NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); - for (int c = 0; c < COMPONENT_COUNT; c++) - printf("%d iosysid[%d] = %d\n", my_rank, c, iosysid[c]); - /* All the netCDF calls are only executed on the computation * tasks. The IO tasks have not returned from PIOc_Init_Intercomm, * and when the do, they should go straight to finalize. */ @@ -90,7 +84,6 @@ int main(int argc, char **argv) sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, iotype_name, sample, my_comp_idx); /* Create sample file. */ - printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename); if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL))) ERR(ret); @@ -101,24 +94,19 @@ int main(int argc, char **argv) } /* next netcdf flavor */ /* Finalize the IO system. Only call this from the computation tasks. */ - printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME); for (int c = 0; c < COMPONENT_COUNT; c++) { if ((ret = PIOc_finalize(iosysid[c]))) ERR(ret); - printf("%d %s PIOc_finalize completed for iosysid = %d\n", my_rank, TEST_NAME, - iosysid[c]); } } /* endif comp_task */ /* Wait for everyone to catch up. */ - printf("%d %s waiting for all processes!\n", my_rank, TEST_NAME); MPI_Barrier(test_comm); } /* next combo */ } /* endif my_rank < TARGET_NTASKS */ /* Finalize test. 
*/ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_async_4proc.c b/src/externals/pio2/tests/cunit/test_async_4proc.c index db0197ab9be..5eb23472db4 100644 --- a/src/externals/pio2/tests/cunit/test_async_4proc.c +++ b/src/externals/pio2/tests/cunit/test_async_4proc.c @@ -3,8 +3,9 @@ * * This very simple test runs on 4 ranks. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -39,9 +40,10 @@ int main(int argc, char **argv) int num_io_procs[NUM_COMBOS] = {3, 2, 1}; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); - + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do * nothing. */ if (my_rank < TARGET_NTASKS) @@ -60,9 +62,6 @@ int main(int argc, char **argv) num_procs2[combo], NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); - for (int c = 0; c < COMPONENT_COUNT; c++) - printf("%d iosysid[%d] = %d\n", my_rank, c, iosysid[c]); - /* All the netCDF calls are only executed on the computation * tasks. The IO tasks have not returned from PIOc_Init_Intercomm, * and when the do, they should go straight to finalize. */ @@ -83,7 +82,6 @@ int main(int argc, char **argv) sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, iotype_name, sample, my_comp_idx); /* Create sample file. */ - printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename); if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL))) ERR(ret); @@ -94,24 +92,17 @@ int main(int argc, char **argv) } /* next netcdf flavor */ /* Finalize the IO system. Only call this from the computation tasks. 
*/ - printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME); for (int c = 0; c < COMPONENT_COUNT; c++) - { if ((ret = PIOc_finalize(iosysid[c]))) ERR(ret); - printf("%d %s PIOc_finalize completed for iosysid = %d\n", my_rank, TEST_NAME, - iosysid[c]); - } } /* endif comp_task */ /* Wait for everyone to catch up. */ - printf("%d %s waiting for all processes!\n", my_rank, TEST_NAME); MPI_Barrier(test_comm); } /* next combo */ }/* my_rank < TARGET_NTASKS */ /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_async_manyproc.c b/src/externals/pio2/tests/cunit/test_async_manyproc.c new file mode 100644 index 00000000000..7a203a1d37d --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_async_manyproc.c @@ -0,0 +1,114 @@ +/* + * This tests async with multiple computation components. This test + * uses more processors than test_async_multicomp.c. In this test, the + * IO component has 3 processors, and the computational components + * each have 2 processors, so the test uses 7 total. + * + * @author Ed Hartnett + * @date 9/13/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 7 + +/* The name of this test. */ +#define TEST_NAME "test_async_manyproc" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 3 + +/* Number of tasks in each computation component. */ +#define NUM_COMP_PROCS 2 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 2 + +/* Number of dims in test file. */ +#define NDIM2 2 + +/* Run simple async test. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */ + int num_iotypes; /* Number of PIO netCDF iotypes in this build. 
*/ + int iotype[NUM_IOTYPES]; /* iotypes for the supported netCDF IO iotypes. */ + int num_procs[COMPONENT_COUNT] = {NUM_COMP_PROCS, NUM_COMP_PROCS}; /* Num procs for IO and computation. */ + int io_proc_list[NUM_IO_PROCS]; + int comp_proc_list1[NUM_COMP_PROCS] = {NUM_IO_PROCS, NUM_IO_PROCS + 1}; + int comp_proc_list2[NUM_COMP_PROCS] = {NUM_IO_PROCS + 2, NUM_IO_PROCS + 3}; + int *proc_list[COMPONENT_COUNT] = {comp_proc_list1, comp_proc_list2}; + MPI_Comm test_comm; + int verbose = 0; + int ret; /* Return code. */ + + /* Initialize our list of IO tasks. */ + for (int p = 0; p < NUM_IO_PROCS; p++) + io_proc_list[p] = p; + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) + ERR(ERR_INIT); + + /* Is the current process a computation task? */ + int comp_task = my_rank < NUM_IO_PROCS ? 0 : 1; + + /* Only do something on TARGET_NTASKS tasks. */ + if (my_rank < TARGET_NTASKS) + { + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_iotypes, iotype))) + ERR(ret); + + /* Initialize the IO system. The IO task will not return from + * this call, but instead will go into a loop, listening for + * messages. */ + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, io_proc_list, COMPONENT_COUNT, + num_procs, (int **)proc_list, NULL, NULL, PIO_REARR_BOX, iosysid))) + ERR(ERR_INIT); + if (verbose) + for (int c = 0; c < COMPONENT_COUNT; c++) + printf("my_rank %d cmp %d iosysid[%d] %d\n", my_rank, c, c, iosysid[c]); + + /* All the netCDF calls are only executed on the computation + * tasks. */ + if (comp_task) + { + for (int i = 0; i < num_iotypes; i++) + { + /* char filename[NC_MAX_NAME + 1]; /\* Test filename. *\/ */ + /* /\* Ranks 0, 1, 2 are IO. 3, 4 are the first */ + /* * computation component. 5, 6 are the second. *\/ */ + /* int my_comp_idx = my_rank < NUM_IO_PROCS + NUM_COMP_PROCS ? 0 : 1; /\* Index in iosysid array. *\/ */ + + /* /\* Create sample file. 
*\/ */ + /* if ((ret = create_nc_sample_3(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, */ + /* filename, TEST_NAME, verbose, 0, 0))) */ + /* ERR(ret); */ + + /* /\* Check the file for correctness. *\/ */ + /* if ((ret = check_nc_sample_3(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, */ + /* filename, verbose, 0, 0))) */ + /* ERR(ret); */ + } /* next netcdf iotype */ + + /* Finalize the IO system. Only call this from the computation tasks. */ + for (int c = 0; c < COMPONENT_COUNT; c++) + if ((ret = PIOc_finalize(iosysid[c]))) + ERR(ret); + } /* endif comp_task */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize test. */ + if ((ret = pio_test_finalize(&test_comm))) + return ERR_AWFUL; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_async_mpi.c b/src/externals/pio2/tests/cunit/test_async_mpi.c new file mode 100644 index 00000000000..430a3b820a7 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_async_mpi.c @@ -0,0 +1,457 @@ +/* + * This program tests some MPI functionality that is used in PIO. This + * runs on three processors, and does the same MPI commands that are + * done when async mode is used, with 1 IO task, and two computation + * compoments, each of one task. + * + * Note that this test does not contain includes to pio headers, it is + * pure MPI code. + * + * @author Ed Hartnett + * @date 8/28/16 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 3 + +/* The name of this test. */ +#define TEST_NAME "test_async_mpi" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 2 + +#define ERR_AWFUL 1111 +#define ERR_WRONG 1112 + +#define MSG_EXIT 42 + +/* Handle MPI errors. This should only be used with MPI library + * function calls. 
*/ +#define MPIERR(e) do { \ + MPI_Error_string(e, err_buffer, &resultlen); \ + fprintf(stderr, "MPI error, line %d, file %s: %s\n", __LINE__, __FILE__, err_buffer); \ + MPI_Finalize(); \ + return ERR_AWFUL; \ + } while (0) + +/* Handle non-MPI errors by finalizing the MPI library and exiting + * with an exit code. */ +#define ERR(e) do { \ + fprintf(stderr, "%d Error %d in %s, line %d\n", my_rank, e, __FILE__, __LINE__); \ + MPI_Finalize(); \ + return e; \ + } while (0) + +/* Global err buffer for MPI. When there is an MPI error, this buffer + * is used to store the error message that is associated with the MPI + * error. */ +char err_buffer[MPI_MAX_ERROR_STRING]; + +/* This is the length of the most recent MPI error message, stored + * int the global error string. */ +int resultlen; + +/* Put together a communicator with the correct number of tasks for + * this test (3). + */ +int get_test_comm(int my_rank, int ntasks, int min_ntasks, int max_ntasks, MPI_Comm *comm) +{ + int ret; + + /* Check that a valid number of processors was specified. */ + if (ntasks < min_ntasks) + { + fprintf(stderr, "ERROR: Number of processors must be at least %d for this test!\n", + min_ntasks); + return ERR_AWFUL; + } + else if (ntasks > max_ntasks) + { + /* If more tasks are available than we need for this test, + * create a communicator with exactly the number of tasks we + * need. */ + int color, key; + if (my_rank < max_ntasks) + { + color = 0; + key = my_rank; + } + else + { + color = 1; + key = my_rank - max_ntasks; + } + if ((ret = MPI_Comm_split(MPI_COMM_WORLD, color, key, comm))) + MPIERR(ret); + } + else + { + if ((ret = MPI_Comm_dup(MPI_COMM_WORLD, comm))) + MPIERR(ret); + } + return 0; +} + +/* + * This function is called by the IO task. This function will not + * return, unless there is an error. + * + * @param verbose non-zero to turn on printf statements. + * @param my_rank rank of this task. + * @param io_rank rank of the IO processor in union_comm. 
+ * @param component_count number of computation components + * @param union_comm array (length component_count) of union_comm + * communicators. + * @param comproot array (length component_count) of ints. The rank of + * the computation leader in the union comm. + * @param comp_comm array (length component_count) of computaion + * component communicators. + * @param io_comm MPI communicator for IO + * @returns 0 for success, error code otherwise. + * @author Ed Hartnett + */ +int msg_handler(int verbose, int my_rank, int io_rank, int component_count, MPI_Comm *union_comm, + MPI_Comm *comp_comm, int *comproot, MPI_Comm io_comm) +{ + int msg = 0; + MPI_Request req[component_count]; + MPI_Status status; + int index; + int open_components = component_count; + int mpierr; + + /* Have IO comm rank 0 (the ioroot) register to receive + * (non-blocking) for a message from each of the comproots. */ + if (!io_rank) + { + for (int cmp = 0; cmp < component_count; cmp++) + { + if (verbose) + printf("my_rank %d cmp %d about to call MPI_Irecv comproot[cmp] %d union_comm[cmp] %d\n", + my_rank, cmp, comproot[cmp], union_comm[cmp]); + if ((mpierr = MPI_Irecv(&msg, 1, MPI_INT, comproot[cmp], MPI_ANY_TAG, + union_comm[cmp], &req[cmp]))) + MPIERR(mpierr); + if (verbose) + printf("my_rank %d MPI_Irecv req[%d] = %d\n", my_rank, cmp, req[cmp]); + } + } + + /* If the message is not -1, keep processing messages. */ + while (msg != -1) + { + if (verbose) + printf("my_rank %d msg_handler at top of loop\n", my_rank); + + /* Wait until any one of the requests are complete. Once it + * returns, the Waitany function automatically sets the + * appropriate member of the req array to MPI_REQUEST_NULL. 
*/ + if (!io_rank) + { + if (verbose) + { + printf("my_rank %d about to call MPI_Waitany req[0] = %d MPI_REQUEST_NULL = %d\n", + my_rank, req[0], MPI_REQUEST_NULL); + for (int c = 0; c < component_count; c++) + printf("my_rank %d req[%d] = %d\n", my_rank, c, req[c]); + } + if ((mpierr = MPI_Waitany(component_count, req, &index, &status))) + MPIERR(mpierr); + if (verbose) + printf("my_rank %d Waitany returned index = %d req[%d] = %d\n", my_rank, index, index, req[index]); + } + + /* Broadcast the index and msg value to the rest of the IO tasks. */ + if (verbose) + printf("my_rank %d about to MPI_Bcast io_comm %d index %d msg %d\n", my_rank, io_comm, + index, msg); + if ((mpierr = MPI_Bcast(&index, 1, MPI_INT, 0, io_comm))) + MPIERR(mpierr); + if ((mpierr = MPI_Bcast(&msg, 1, MPI_INT, 0, io_comm))) + MPIERR(mpierr); + if (verbose) + printf("my_rank %d MPI_Bcast io_comm %d index %d msg %d\n", my_rank, io_comm, + index, msg); + + /* Handle the message. This code is run on all IO tasks. */ + switch (msg) + { + case MSG_EXIT: + if (verbose) + printf("exit message received\n"); + msg = -1; + break; + default: + return ERR_WRONG; + } + + /* Listen for another msg from the component whose message we + * just handled. */ + if (!io_rank && msg != -1) + { + if (verbose) + printf("my_rank %d msg_handler about to Irecv index = %d comproot = %d union_comm = %d\n", + my_rank, index, comproot[index], union_comm[index]); + if ((mpierr = MPI_Irecv(&msg, 1, MPI_INT, comproot[index], MPI_ANY_TAG, union_comm[index], + &req[index]))) + MPIERR(mpierr); + if (verbose) + printf("my_rank %d msg_handler called MPI_Irecv req[%d] = %d\n", my_rank, index, req[index]); + } + + if (verbose) + printf("my_rank %d msg_handler done msg = %d open_components = %d\n", my_rank, msg, open_components); + + /* If there are no more open components, exit. 
*/ + if (msg == -1) + { + --open_components; + if (verbose) + printf("open_components %d\n", open_components); + if (open_components) + msg = MSG_EXIT; + else + return 0; + } + } + + return 0; +} + +/* Run simple async test. */ +int main(int argc, char **argv) +{ + int my_rank = 0; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + MPI_Comm test_comm; /* Communicator for tasks running tests. */ + int mpierr; /* Return code from MPI functions. */ + int verbose = 0; /* Non-zero to turn on printf statements. */ + int ret; /* Return code from function calls. */ + + /* Initialize MPI. */ + if ((ret = MPI_Init(&argc, &argv))) + MPIERR(ret); + + /* Learn my rank and the total number of processors. */ + if ((ret = MPI_Comm_rank(MPI_COMM_WORLD, &my_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(MPI_COMM_WORLD, &ntasks))) + MPIERR(ret); + + /* Get test_comm. */ + if ((ret = get_test_comm(my_rank, ntasks, TARGET_NTASKS, TARGET_NTASKS, &test_comm))) + ERR(ret); + + /* Ignore all but 3 tasks. */ + if (my_rank < TARGET_NTASKS) + { + MPI_Group world_group; + MPI_Comm io_comm; + MPI_Group io_group; + int my_io_proc_list[1] = {0}; /* List of processors in IO component. */ + int num_io_procs = 1; + int num_procs_per_comp[COMPONENT_COUNT] = {1, 1}; + int in_io = my_rank ? 0 : 1; /* Non-zero if this task is in IO. */ + int io_rank = -1; /* Rank of current process in IO comm. */ + int comp_rank = -1; + int iomaster; /* MPI_ROOT on master IO task, MPI_PROC_NULL otherwise. */ + MPI_Group group[COMPONENT_COUNT]; /* Group with comp tasks. */ + MPI_Group union_group[COMPONENT_COUNT]; /* Group with IO and comp tasks. */ + int my_proc_list[COMPONENT_COUNT][1] = {{1}, {2}}; /* Tasks for computation components. */ + MPI_Comm comp_comm[COMPONENT_COUNT]; + MPI_Comm union_comm[COMPONENT_COUNT]; + MPI_Comm intercomm[COMPONENT_COUNT]; + int in_cmp[COMPONENT_COUNT] = {0, 0}; /* Is this process in this computation component? 
*/ + + /* Create group for world. */ + if ((ret = MPI_Comm_group(test_comm, &world_group))) + MPIERR(ret); + + if (verbose) + printf("MPI_GROUP_NULL %d MPI_COMM_NULL %d\n", MPI_GROUP_NULL, MPI_COMM_NULL); + + /* There is one shared IO comm. Create it. */ + if ((ret = MPI_Group_incl(world_group, num_io_procs, my_io_proc_list, &io_group))) + MPIERR(ret); + if ((ret = MPI_Comm_create(test_comm, io_group, &io_comm))) + MPIERR(ret); + MPI_Group_free(&io_group); + if (verbose) + printf("my_rank %d created io comm io_comm = %d\n", my_rank, io_comm); + + /* For processes in the IO component, get their rank within the IO + * communicator. */ + if (in_io) + { + if ((ret = MPI_Comm_rank(io_comm, &io_rank))) + MPIERR(ret); + iomaster = !io_rank ? MPI_ROOT : MPI_PROC_NULL; + } + if (verbose) + printf("my_rank %d in_io %d io_rank %d IO %s\n", my_rank, in_io, + io_rank, iomaster == MPI_ROOT ? "MASTER" : "SERVANT"); + + /* For each computation component. */ + for (int cmp = 0; cmp < COMPONENT_COUNT; cmp++) + { + /* How many processors in the union comm? */ + int nprocs_union = num_io_procs + num_procs_per_comp[cmp]; + + /* This will hold proc numbers from both computation and IO + * components. */ + int proc_list_union[nprocs_union]; + + /* All the processes in this component, and the IO component, + * are part of the union_comm. */ + int union_rank = -1; + int pidx; + MPI_Comm io_comm2; + + /* Create a group for this component. */ + if ((ret = MPI_Group_incl(world_group, 1, my_proc_list[cmp], &group[cmp]))) + MPIERR(ret); + if (verbose) + printf("my_rank %d created component MPI group - group[%d] = %d\n", my_rank, cmp, group[cmp]); + + /* Add proc numbers from IO. */ + proc_list_union[0] = 0; + + /* Add proc numbers for this computation component. */ + for (int p = 0; p < num_procs_per_comp[cmp]; p++) + proc_list_union[p + num_io_procs] = my_proc_list[cmp][p]; + + /* Determine if current task is in this computation component. 
*/ + for (pidx = 0; pidx < num_procs_per_comp[cmp]; pidx++) + if (my_rank == my_proc_list[cmp][pidx]) + break; + in_cmp[cmp] = (pidx == num_procs_per_comp[cmp]) ? 0 : 1; + if (verbose) + printf("my_rank %d pidx = %d num_procs_per_comp[%d] = %d in_cmp[cmp] = %d\n", + my_rank, pidx, cmp, num_procs_per_comp[cmp], in_cmp[cmp]); + + /* Create an intracomm for this component. */ + if ((ret = MPI_Comm_create(test_comm, group[cmp], &comp_comm[cmp]))) + MPIERR(ret); + MPI_Group_free(&group[cmp]); + + if (in_cmp[cmp]) + { + /* Get the rank in this comp comm. */ + if ((ret = MPI_Comm_rank(comp_comm[cmp], &comp_rank))) + MPIERR(ret); + } + if (verbose) + printf("my_rank %d intracomm created for cmp = %d comp_comm[cmp] = %d comp_rank = %d\n", + my_rank, cmp, comp_comm[cmp], comp_rank); + + /* If this is the IO component, make a copy of the IO comm for + * each computational component. */ + if (in_io) + { + if ((ret = MPI_Comm_dup(io_comm, &io_comm2))) + MPIERR(ret); + if (verbose) + printf("my_rank %d dup of io_comm = %d io_rank = %d\n", my_rank, io_comm, io_rank); + } + + /* Create a group for the union of the IO component + * and one of the computation components. */ + if ((ret = MPI_Group_incl(world_group, nprocs_union, proc_list_union, &union_group[cmp]))) + MPIERR(ret); + if ((ret = MPI_Comm_create(test_comm, union_group[cmp], &union_comm[cmp]))) + MPIERR(ret); + MPI_Group_free(&union_group[cmp]); + if (verbose) + printf("my_rank %d created union - union_group[%d] %d with %d procs union_comm[%d] %d\n", + my_rank, cmp, union_group[cmp], nprocs_union, cmp, union_comm[cmp]); + + + if (in_io || in_cmp[cmp]) + { + if ((ret = MPI_Comm_rank(union_comm[cmp], &union_rank))) + MPIERR(ret); + if (verbose) + printf("my_rank %d union_rank %d\n", my_rank, union_rank); + + if (in_io) + { + /* Create the intercomm from IO to computation component. 
*/ + if ((ret = MPI_Intercomm_create(io_comm, 0, union_comm[cmp], + 1, cmp, &intercomm[cmp]))) + MPIERR(ret); + } + else if (in_cmp[cmp]) + { + /* Create the intercomm from computation component to IO component. */ + if ((ret = MPI_Intercomm_create(comp_comm[cmp], 0, union_comm[cmp], + 0, cmp, &intercomm[cmp]))) + MPIERR(ret); + } + if (verbose) + printf("my_rank %d intercomm created for cmp = %d\n", my_rank, cmp); + } /* in_io || in_cmp */ + + /* Free resources. */ + if (in_io) + MPI_Comm_free(&io_comm2); + } /* next computation component. */ + + /* Now launch IO message processing on the IO task. */ + int comproot[COMPONENT_COUNT] = {1, 1}; + if (in_io) + if ((ret = msg_handler(verbose, my_rank, 0, COMPONENT_COUNT, union_comm, comp_comm, + comproot, io_comm))) + ERR(ret); + + /* Send exit messages. */ + if (!in_io) + { + for (int cmp = 0; cmp < COMPONENT_COUNT; cmp++) + { + + int msg = MSG_EXIT; + int ioroot = 0; + + if (in_cmp[cmp]) + { + if (verbose) + printf("my_rank %d sending exit message on union_comm %d\n", my_rank, union_comm[cmp]); + if ((mpierr = MPI_Send(&msg, 1, MPI_INT, ioroot, 1, union_comm[cmp]))) + MPIERR(mpierr); + } + } + } + + /* Free MPI resources. */ + if (verbose) + printf("my_rank %d freeing resources\n", my_rank); + for (int cmp = 0; cmp < COMPONENT_COUNT; cmp++) + { + if (comp_comm[cmp] != MPI_COMM_NULL) + MPI_Comm_free(&comp_comm[cmp]); + if (union_comm[cmp] != MPI_COMM_NULL) + MPI_Comm_free(&union_comm[cmp]); + if (in_io || in_cmp[cmp]) + MPI_Comm_free(&intercomm[cmp]); + } + MPI_Group_free(&world_group); + if (io_comm != MPI_COMM_NULL) + MPI_Comm_free(&io_comm); + } + + /* Free the MPI communicator for this test. */ + MPI_Comm_free(&test_comm); + + /* Finalize MPI. 
*/ + MPI_Finalize(); + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_async_multi2.c b/src/externals/pio2/tests/cunit/test_async_multi2.c new file mode 100644 index 00000000000..ebf538565c1 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_async_multi2.c @@ -0,0 +1,120 @@ +/* + * This tests async with multiple computation components. This is a + * more comprehensive test than test_async_multicomp.c. + * + * @author Ed Hartnett + * @date 9/12/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 3 + +/* The name of this test. */ +#define TEST_NAME "test_async_multi2" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of tasks in each computation component. */ +#define NUM_COMP_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 2 + +/* These are in test_common.c. */ +extern int *pio_type; + +/* Run simple async test. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */ + int num_iotypes; /* Number of PIO netCDF iotypes in this build. */ + int iotype[NUM_IOTYPES]; /* iotypes for the supported netCDF IO iotypes. */ + int num_procs[COMPONENT_COUNT] = {1, 1}; /* Num procs for IO and computation. */ + int io_proc_list[NUM_IO_PROCS] = {0}; + int comp_proc_list1[NUM_COMP_PROCS] = {1}; + int comp_proc_list2[NUM_COMP_PROCS] = {2}; + int *proc_list[COMPONENT_COUNT] = {comp_proc_list1, comp_proc_list2}; + MPI_Comm test_comm; + int verbose = 1; + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) + ERR(ERR_INIT); + + /* Is the current process a computation task? 
*/ + int comp_task = my_rank < NUM_IO_PROCS ? 0 : 1; + + /* Only do something on TARGET_NTASKS tasks. */ + if (my_rank < TARGET_NTASKS) + { + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_iotypes, iotype))) + ERR(ret); + + /* Initialize the IO system. The IO task will not return from + * this call, but instead will go into a loop, listening for + * messages. */ + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, io_proc_list, COMPONENT_COUNT, + num_procs, (int **)proc_list, NULL, NULL, PIO_REARR_BOX, iosysid))) + ERR(ERR_INIT); + if (verbose) + for (int c = 0; c < COMPONENT_COUNT; c++) + printf("my_rank %d cmp %d iosysid[%d] %d\n", my_rank, c, c, iosysid[c]); + + /* All the netCDF calls are only executed on the computation + * tasks. */ + if (comp_task) + { + for (int i = 0; i < num_iotypes; i++) + { + char filename[NC_MAX_NAME + 1]; /* Test filename. */ + int my_comp_idx = my_rank - 1; /* Index in iosysid array. */ + int num_types = (iotype[i] == PIO_IOTYPE_NETCDF4C || + iotype[i] == PIO_IOTYPE_NETCDF4P) ? NUM_NETCDF_TYPES - 1 : NUM_CLASSIC_TYPES; + + /* Create a decomposition. */ + /* int dim_len_2d[NDIM2] = {DIM_LEN2, DIM_LEN3}; */ + /* int ioid[num_types]; */ + /* for (int t = 0; t < num_types; t++) */ + /* if ((ret = create_decomposition_2d(NUM_COMP_PROCS, my_rank, iosysid[my_comp_idx], dim_len_2d, &ioid[t], pio_type[t]))) */ + /* ERR(ret); */ + + /* Create sample file. */ + if ((ret = create_nc_sample_4(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, + filename, TEST_NAME, verbose, num_types))) + ERR(ret); + + /* Check the file for correctness. */ + if ((ret = check_nc_sample_4(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, + filename, verbose, num_types))) + ERR(ret); + + /* Free the decompositions. */ + /* for (int t = 0; t < num_types; t++) */ + /* if ((ret = PIOc_freedecomp(iosysid[my_comp_idx], ioid[t]))) */ + /* ERR(ret); */ + } /* next netcdf iotype */ + + /* Finalize the IO system. Only call this from the computation tasks. 
*/ + for (int c = 0; c < COMPONENT_COUNT; c++) + if ((ret = PIOc_finalize(iosysid[c]))) + ERR(ret); + } /* endif comp_task */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize test. */ + if ((ret = pio_test_finalize(&test_comm))) + return ERR_AWFUL; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_async_multicomp.c b/src/externals/pio2/tests/cunit/test_async_multicomp.c new file mode 100644 index 00000000000..9822106fdd8 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_async_multicomp.c @@ -0,0 +1,133 @@ +/* + * This tests async with multiple computation components. + * + * @author Ed Hartnett + * @date 8/25/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 3 + +/* The name of this test. */ +#define TEST_NAME "test_async_multicomp" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of tasks in each computation component. */ +#define NUM_COMP_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 2 + +/* Number of dims in test file. */ +#define NDIM2 2 + +/* Number of vars in test file. */ +#define NVAR2 2 + +/* These are in test_common.c. */ +extern int *pio_type; + +/* Run simple async test. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int iosysid[COMPONENT_COUNT]; /* The ID for the parallel I/O system. */ + int num_iotypes; /* Number of PIO netCDF iotypes in this build. */ + int iotype[NUM_IOTYPES]; /* iotypes for the supported netCDF IO iotypes. */ + int num_procs[COMPONENT_COUNT] = {1, 1}; /* Num procs for IO and computation. 
*/ + int io_proc_list[NUM_IO_PROCS] = {0}; + int comp_proc_list1[NUM_COMP_PROCS] = {1}; + int comp_proc_list2[NUM_COMP_PROCS] = {2}; + int *proc_list[COMPONENT_COUNT] = {comp_proc_list1, comp_proc_list2}; + MPI_Comm test_comm; + int verbose = 0; + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) + ERR(ERR_INIT); + + /* Is the current process a computation task? */ + int comp_task = my_rank < NUM_IO_PROCS ? 0 : 1; + + /* Only do something on TARGET_NTASKS tasks. */ + if (my_rank < TARGET_NTASKS) + { + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_iotypes, iotype))) + ERR(ret); + + /* This should fail. */ + if (PIOc_init_async(test_comm, NUM_IO_PROCS, io_proc_list, COMPONENT_COUNT, + num_procs, (int **)proc_list, NULL, NULL, PIO_REARR_SUBSET, iosysid) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* Initialize the IO system. The IO task will not return from + * this call, but instead will go into a loop, listening for + * messages. */ + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, io_proc_list, COMPONENT_COUNT, + num_procs, (int **)proc_list, NULL, NULL, PIO_REARR_BOX, iosysid))) + ERR(ERR_INIT); + if (verbose) + for (int c = 0; c < COMPONENT_COUNT; c++) + printf("my_rank %d cmp %d iosysid[%d] %d\n", my_rank, c, c, iosysid[c]); + + /* All the netCDF calls are only executed on the computation + * tasks. */ + if (comp_task) + { + for (int i = 0; i < num_iotypes; i++) + { + char filename[NC_MAX_NAME + 1]; /* Test filename. */ + int my_comp_idx = my_rank - 1; /* Index in iosysid array. */ + int dim_len_2d[NDIM2] = {DIM_LEN2, DIM_LEN3}; + int ioid = 0; + + if ((ret = create_decomposition_2d(NUM_COMP_PROCS, my_rank, iosysid[my_comp_idx], dim_len_2d, + &ioid, PIO_SHORT))) + ERR(ret); + + /* Test with and without darrays. */ + for (int use_darray = 0; use_darray < 2; use_darray++) + { + + /* Create sample file. 
*/ + if ((ret = create_nc_sample_3(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, + filename, TEST_NAME, verbose, use_darray, ioid))) + ERR(ret); + + /* Check the file for correctness. */ + if ((ret = check_nc_sample_3(iosysid[my_comp_idx], iotype[i], my_rank, my_comp_idx, + filename, verbose, 0, ioid))) + ERR(ret); + } /* next use_darray */ + + /* Free the decomposition. */ + if ((ret = PIOc_freedecomp(iosysid[my_comp_idx], ioid))) + ERR(ret); + + } /* next netcdf iotype */ + + /* Finalize the IO system. Only call this from the computation tasks. */ + for (int c = 0; c < COMPONENT_COUNT; c++) + if ((ret = PIOc_finalize(iosysid[c]))) + ERR(ret); + } /* endif comp_task */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize test. */ + if ((ret = pio_test_finalize(&test_comm))) + return ERR_AWFUL; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_async_simple.c b/src/externals/pio2/tests/cunit/test_async_simple.c index 4b4836bcb02..aa197713a30 100644 --- a/src/externals/pio2/tests/cunit/test_async_simple.c +++ b/src/externals/pio2/tests/cunit/test_async_simple.c @@ -9,8 +9,9 @@ *
    mpiexec -n 4 valgrind -v --leak-check=full --suppressions=../../../tests/unit/valsupp_test.supp
      * --error-exitcode=99 --track-origins=yes ./test_async_simple
    * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -44,8 +45,8 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Only do something on TARGET_NTASKS tasks. */ @@ -97,7 +98,6 @@ int main(int argc, char **argv) sprintf(filename, "%s_%s_%d_%d.nc", TEST_NAME, iotype_name, sample, my_comp_idx); /* Create sample file. */ - printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename); if ((ret = create_nc_sample(sample, iosysid[my_comp_idx], flavor[flv], filename, my_rank, NULL))) ERR(ret); @@ -108,19 +108,13 @@ int main(int argc, char **argv) } /* next netcdf flavor */ /* Finalize the IO system. Only call this from the computation tasks. */ - printf("%d %s Freeing PIO resources\n", my_rank, TEST_NAME); for (int c = 0; c < COMPONENT_COUNT; c++) - { if ((ret = PIOc_finalize(iosysid[c]))) ERR(ret); - printf("%d %s PIOc_finalize completed for iosysid = %d\n", my_rank, TEST_NAME, - iosysid[c]); - } } /* endif comp_task */ } /* endif my_rank < TARGET_NTASKS */ /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_common.c b/src/externals/pio2/tests/cunit/test_common.c index e49e093e5f0..6086a4a19b0 100644 --- a/src/externals/pio2/tests/cunit/test_common.c +++ b/src/externals/pio2/tests/cunit/test_common.c @@ -3,6 +3,7 @@ * * Ed Hartnett */ +#include #include #include #include @@ -45,8 +46,83 @@ /* The value of the global attribute in the sample 2 output file. */ #define ATT_VALUE_S2 42 -/* For when we need 2D. */ -#define NDIM2 2 +/* Attribute name. */ +#define GLOBAL_ATT_NAME "global_att" + +/* The names of the variables created in test file. 
*/ +#define SCALAR_VAR_NAME "scalar_var" +#define TWOD_VAR_NAME "twod_var" + +/* Used to create dimension names. */ +#define DIM_NAME "dim" + +/* Number of vars in test file. */ +#define NVAR 3 + +/* The names of the variables created in test file. */ +#define SCALAR_VAR_NAME "scalar_var" +#define THREED_VAR_NAME "threed_var" + +/* Dimension lengths. */ +#define DIM_X_LEN 2 +#define DIM_Y_LEN 3 + +/* Length of all attributes. */ +#define ATT_LEN 3 + +#ifdef _NETCDF4 +int pio_type[NUM_PIO_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +int pio_type[NUM_PIO_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ + +/* Attribute test data. */ +signed char byte_att_data[ATT_LEN] = {NC_MAX_BYTE, NC_MIN_BYTE, NC_MAX_BYTE}; +char char_att_data[ATT_LEN] = {NC_MAX_CHAR, 0, NC_MAX_CHAR}; +short short_att_data[ATT_LEN] = {NC_MAX_SHORT, NC_MIN_SHORT, NC_MAX_SHORT}; +int int_att_data[ATT_LEN] = {NC_MAX_INT, NC_MIN_INT, NC_MAX_INT}; +float float_att_data[ATT_LEN] = {NC_MAX_FLOAT, NC_MIN_FLOAT, NC_MAX_FLOAT}; +double double_att_data[ATT_LEN] = {NC_MAX_DOUBLE, NC_MIN_DOUBLE, NC_MAX_DOUBLE}; +#ifdef _NETCDF4 +unsigned char ubyte_att_data[ATT_LEN] = {NC_MAX_UBYTE, 0, NC_MAX_UBYTE}; +unsigned short ushort_att_data[ATT_LEN] = {NC_MAX_USHORT, 0, NC_MAX_USHORT}; +unsigned int uint_att_data[ATT_LEN] = {NC_MAX_UINT, 0, NC_MAX_UINT}; +long long int64_att_data[ATT_LEN] = {NC_MAX_INT64, NC_MIN_INT64, NC_MAX_INT64}; +unsigned long long uint64_att_data[ATT_LEN] = {NC_MAX_UINT64, 0, NC_MAX_UINT64}; +#endif /* _NETCDF4 */ + +/* Scalar variable test data. 
*/ +signed char byte_scalar_data = NC_MAX_BYTE; +char char_scalar_data = NC_MAX_CHAR; +short short_scalar_data = NC_MAX_SHORT; +int int_scalar_data = NC_MAX_INT; +float float_scalar_data = NC_MAX_FLOAT; +double double_scalar_data = NC_MAX_DOUBLE; +#ifdef _NETCDF4 +unsigned char ubyte_scalar_data = NC_MAX_UBYTE; +unsigned short ushort_scalar_data = NC_MAX_USHORT; +unsigned int uint_scalar_data = NC_MAX_UINT; +long long int64_scalar_data = NC_MAX_INT64; +unsigned long long uint64_scalar_data = NC_MAX_UINT64; +#endif /* _NETCDF4 */ + +/* Pointers to the data. */ +#ifdef _NETCDF4 +void *att_data[NUM_PIO_TYPES_TO_TEST] = {byte_att_data, char_att_data, short_att_data, + int_att_data, float_att_data, double_att_data, + ubyte_att_data, ushort_att_data, uint_att_data, + int64_att_data, uint64_att_data}; +void *scalar_data[NUM_PIO_TYPES_TO_TEST] = {&byte_scalar_data, &char_scalar_data, &short_scalar_data, + &int_scalar_data, &float_scalar_data, &double_scalar_data, + &ubyte_scalar_data, &ushort_scalar_data, &uint_scalar_data, + &int64_scalar_data, &uint64_scalar_data}; +#else +void *att_data[NUM_PIO_TYPES_TO_TEST] = {byte_att_data, char_att_data, short_att_data, + int_att_data, float_att_data, double_att_data}; +void *scalar_data[NUM_PIO_TYPES_TO_TEST] = {&byte_scalar_data, &char_scalar_data, &short_scalar_data, + &int_scalar_data, &float_scalar_data, &double_scalar_data}; +#endif /* _NETCDF4 */ /* How many flavors of netCDF are available? */ int @@ -103,25 +179,6 @@ int get_iotype_name(int iotype, char *name) return PIO_NOERR; } -/* Initalize the test system. - * - * @param argc argument count from main(). - * @param argv argument array from main(). - * @param my_rank pointer that gets this tasks rank. - * @param ntasks pointer that gets the number of tasks in WORLD - * communicator. - * @param target_ntasks the number of tasks this test needs to run. 
- * @param comm a pointer to an MPI communicator that will be created - * for this test and contain target_ntasks tasks from WORLD. - * @returns 0 for success, error code otherwise. -*/ -int pio_test_init(int argc, char **argv, int *my_rank, int *ntasks, - int target_ntasks, MPI_Comm *comm) -{ - return pio_test_init2(argc,argv, my_rank, ntasks, target_ntasks, - target_ntasks, 3, comm); -} - /* Initalize the test system. * * @param argc argument count from main(). @@ -134,7 +191,7 @@ int pio_test_init(int argc, char **argv, int *my_rank, int *ntasks, * @param comm a pointer to an MPI communicator that will be created * for this test and contain target_ntasks tasks from WORLD. * @returns 0 for success, error code otherwise. -*/ + */ int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, int min_ntasks, int max_ntasks, int log_level, MPI_Comm *comm) { @@ -155,7 +212,6 @@ int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, MPIERR(ret); if ((ret = MPI_Comm_size(MPI_COMM_WORLD, ntasks))) MPIERR(ret); - printf("%d has %d tasks\n", *my_rank, *ntasks); /* Check that a valid number of processors was specified. */ if (*ntasks < min_ntasks) @@ -166,36 +222,32 @@ int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, } else if (*ntasks > max_ntasks) { - /* If more tasks are available than we need for this test, - * create a communicator with exactly the number of tasks we - * need. */ + /* If more tasks are available than we need for this test, + * create a communicator with exactly the number of tasks we + * need. 
*/ int color, key; if (*my_rank < max_ntasks) { color = 0; - key = *my_rank; + key = *my_rank; } else { color = 1; key = *my_rank - max_ntasks; } - printf("%d splitting comm for test color = %d key = %d\n", *my_rank, color, key); if ((ret = MPI_Comm_split(MPI_COMM_WORLD, color, key, comm))) MPIERR(ret); } else { - printf("%d using whole comm for test\n", *my_rank); if ((ret = MPI_Comm_dup(MPI_COMM_WORLD, comm))) MPIERR(ret); } /* Turn on logging. */ - printf("%d setting log level to %d\n", *my_rank, log_level); if ((ret = PIOc_set_log_level(log_level))) return ret; - printf("%d done setting log level\n", *my_rank); /* Change error handling so we can test inval parameters. */ if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -205,10 +257,10 @@ int pio_test_init2(int argc, char **argv, int *my_rank, int *ntasks, } /* Finalize a PIO C test. -* -* @param test_comm pointer to the test communicator. -* @returns 0 for success, error code otherwise. -*/ + * + * @param test_comm pointer to the test communicator. + * @returns 0 for success, error code otherwise. + */ int pio_test_finalize(MPI_Comm *test_comm) { int ret = PIO_NOERR; /* Return value. */ @@ -327,7 +379,6 @@ create_nc_sample_0(int iosysid, int format, char *filename, int my_rank, int *nc /* Create the file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &format, filename, NC_CLOBBER))) return ret; - printf("%d file created ncid = %d\n", my_rank, ncid); /* End define mode. */ if ((ret = PIOc_enddef(ncid))) @@ -346,10 +397,8 @@ create_nc_sample_0(int iosysid, int format, char *filename, int my_rank, int *nc *ncidp = ncid; else { - printf("%d closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; - printf("%d closed file ncid = %d\n", my_rank, ncid); } return PIO_NOERR; @@ -365,9 +414,8 @@ check_nc_sample_0(int iosysid, int format, char *filename, int my_rank, int *nci int ret; /* Re-open the file to check it. 
*/ - printf("%d opening file %s format %d\n", my_rank, filename, format); - if ((ret = PIOc_openfile(iosysid, &ncid, &format, filename, - NC_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &format, filename, + NC_NOWRITE))) return ret; /* Find the number of dimensions, variables, and global attributes.*/ @@ -399,7 +447,6 @@ check_nc_sample_0(int iosysid, int format, char *filename, int my_rank, int *nci *ncidp = ncid; else { - printf("%d closing file (again) ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; } @@ -433,30 +480,24 @@ create_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nc /* Create the file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &format, filename, NC_CLOBBER))) return ret; - printf("%d file created ncid = %d\n", my_rank, ncid); /* /\* End define mode, then re-enter it. *\/ */ if ((ret = PIOc_enddef(ncid))) return ret; - printf("%d calling redef\n", my_rank); if ((ret = PIOc_redef(ncid))) return ret; /* Define a dimension. */ - printf("%d defining dimension %s\n", my_rank, DIM_NAME_S1); if ((ret = PIOc_def_dim(ncid, DIM_NAME_S1, DIM_LEN_S1, &dimid))) return ret; /* Define a 1-D variable. */ - printf("%d defining variable %s\n", my_rank, VAR_NAME_S1); if ((ret = PIOc_def_var(ncid, VAR_NAME_S1, NC_INT, NDIM_S1, &dimid, &varid))) return ret; /* End define mode. */ - printf("%d ending define mode ncid = %d\n", my_rank, ncid); if ((ret = PIOc_enddef(ncid))) return ret; - printf("%d define mode ended ncid = %d\n", my_rank, ncid); /* Write some data. For the PIOc_put/get functions, all data must * be on compmaster before the function is called. Only @@ -464,7 +505,6 @@ create_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nc * other computation tasks are ignored. 
*/ for (int i = 0; i < DIM_LEN_S1; i++) data[i] = i; - printf("%d writing data\n", my_rank); start[0] = 0; count[0] = DIM_LEN_S1; if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data))) @@ -483,10 +523,8 @@ create_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nc *ncidp = ncid; else { - printf("%d closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; - printf("%d closed file ncid = %d\n", my_rank, ncid); } return PIO_NOERR; @@ -507,9 +545,8 @@ check_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nci int varndims, vardimids, varnatts; /* Re-open the file to check it. */ - printf("%d opening file %s format %d\n", my_rank, filename, format); - if ((ret = PIOc_openfile(iosysid, &ncid, &format, filename, - NC_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &format, filename, + NC_NOWRITE))) return ret; /* Try to read the data. */ @@ -519,7 +556,6 @@ check_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nci return ret; for (int i = 0; i < DIM_LEN_S1; i++) { - printf("%d read data_in[%d] = %d\n", my_rank, i, data_in[i]); if (data_in[i] != i) return ERR_AWFUL; } @@ -570,7 +606,6 @@ check_nc_sample_1(int iosysid, int format, char *filename, int my_rank, int *nci *ncidp = ncid; else { - printf("%d closing file (again) ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; } @@ -589,10 +624,8 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc int ret; /* Create a netCDF file with one dimension and one variable. */ - printf("%d creating file %s\n", my_rank, filename); if ((ret = PIOc_createfile(iosysid, &ncid, &format, filename, NC_CLOBBER))) return ret; - printf("%d file created ncid = %d\n", my_rank, ncid); /* End define mode, then re-enter it. 
*/ if ((ret = PIOc_enddef(ncid))) @@ -602,7 +635,6 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc /* Define a dimension. */ char dimname2[NC_MAX_NAME + 1]; - printf("%d defining dimension %s\n", my_rank, DIM_NAME_S2); if ((ret = PIOc_def_dim(ncid, FIRST_DIM_NAME_S2, DIM_LEN_S2, &dimid))) return ret; if ((ret = PIOc_inq_dimname(ncid, 0, dimname2))) @@ -614,7 +646,6 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc /* Define a 1-D variable. */ char varname2[NC_MAX_NAME + 1]; - printf("%d defining variable %s\n", my_rank, VAR_NAME_S2); if ((ret = PIOc_def_var(ncid, FIRST_VAR_NAME_S2, NC_INT, NDIM_S2, &dimid, &varid))) return ret; if ((ret = PIOc_inq_varname(ncid, 0, varname2))) @@ -625,7 +656,6 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc return ret; /* Add a global attribute. */ - printf("%d writing attributes %s\n", my_rank, ATT_NAME_S2); int att_data = ATT_VALUE_S2; short short_att_data = ATT_VALUE_S2; float float_att_data = ATT_VALUE_S2; @@ -661,10 +691,8 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc return ret; /* End define mode. */ - printf("%d ending define mode ncid = %d\n", my_rank, ncid); if ((ret = PIOc_enddef(ncid))) return ret; - printf("%d define mode ended ncid = %d\n", my_rank, ncid); /* Write some data. For the PIOc_put/get functions, all data must * be on compmaster before the function is called. Only @@ -672,8 +700,6 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc * other computation tasks are ignored. 
*/ for (int i = 0; i < DIM_LEN_S2; i++) data[i] = i; - printf("%d writing data\n", my_rank); - printf("%d writing data\n", my_rank); start[0] = 0; count[0] = DIM_LEN_S2; if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data))) @@ -684,10 +710,8 @@ create_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nc *ncidp = ncid; else { - printf("%d closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; - printf("%d closed file ncid = %d\n", my_rank, ncid); } return PIO_NOERR; @@ -729,8 +753,7 @@ check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nci int data_in[DIM_LEN_S2]; /* Re-open the file to check it. */ - printf("%d opening file %s format %d\n", my_rank, filename, format); - if ((ret = PIOc_openfile(iosysid, &ncid, &format, filename, NC_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &format, filename, NC_NOWRITE))) return ERR_CHECK; /* Try to read the data. */ @@ -738,7 +761,6 @@ check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nci return ERR_CHECK; for (int i = 0; i < DIM_LEN_S2; i++) { - printf("%d read data_in[%d] = %d\n", my_rank, i, data_in[i]); if (data_in[i] != i) return ERR_AWFUL; } @@ -846,7 +868,6 @@ check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nci return ERR_WRONG; if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME_S2, &att_data))) return ERR_CHECK; - printf("%d att_data = %d\n", my_rank, att_data); if (att_data != ATT_VALUE_S2) return ERR_WRONG; if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, SHORT_ATT_NAME_S2, &atttype, &attlen))) @@ -871,7 +892,6 @@ check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nci *ncidp = ncid; else { - printf("%d closing file (again) ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ERR_CHECK; } @@ -880,7 +900,7 @@ check_nc_sample_2(int iosysid, int format, char *filename, int my_rank, int *nci } /* Create the 
decomposition to divide the 3-dimensional sample data - * between the 4 tasks. For the purposes of decomposition we are only + * between tasks. For the purposes of decomposition we are only * concerned with 2 dimensions - we ignore the unlimited dimension. * * @param ntasks the number of available tasks @@ -911,12 +931,10 @@ int create_decomposition_2d(int ntasks, int my_rank, int iosysid, int *dim_len_2 compdof[i] = my_rank * elements_per_pe + i + 1; /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_InitDecomp(iosysid, pio_type, NDIM2, dim_len_2d, elements_per_pe, compdof, ioid, NULL, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); /* Free the mapping. */ free(compdof); @@ -924,3 +942,487 @@ int create_decomposition_2d(int ntasks, int my_rank, int iosysid, int *dim_len_2 return 0; } +/* + * This creates a test netCDF file in the specified format. This file + * is simple, with a global attribute, 2 dimensions, a scalar var, and + * a 2D var. + * + * @param iosysid identifies the IO system. + * @param iotype the iotype to be used to create the file. + * @param my_rank rank of this task in world (for debugging messages + * only). + * @param my_comp_idx the index of the computational component + * creating the file. + * @param filename pointer to buffer that will get filename. Must be + * PIO_MAX_NAME + 1 in size. + * @param test_name name of the test program. + * @param verbose non-zero to turn on printf statements. + * @param use_darray if non-zero, use darray functions to write data, + * otherwise use PIOc_put_var(). + * @param ioid the decomposition ID to use if darrays are used to + * write data. + * @returns 0 for success, error code otherwise. 
+ */ +int create_nc_sample_3(int iosysid, int iotype, int my_rank, int my_comp_idx, + char *filename, char *test_name, int verbose, int use_darray, + int ioid) +{ + char iotype_name[NC_MAX_NAME + 1]; + int ncid; + signed char my_char_comp_idx = my_comp_idx; + int varid[NVAR]; + char att_name[PIO_MAX_NAME + 1]; + char var_name[PIO_MAX_NAME + 1]; + char dim_name[PIO_MAX_NAME + 1]; + int dimid[NDIM3]; + int dim_len[NDIM3] = {NC_UNLIMITED, DIM_X_LEN, DIM_Y_LEN}; + short data_2d[DIM_X_LEN * DIM_Y_LEN]; + int ret; + + /* Learn name of IOTYPE. */ + if ((ret = get_iotype_name(iotype, iotype_name))) + ERR(ret); + + /* Create a filename. */ + sprintf(filename, "%s_%s_cmp_%d_darray_%d.nc", test_name, iotype_name, my_comp_idx, + use_darray); + if (verbose) + printf("my_rank %d creating test file %s for iosysid %d\n", my_rank, filename, iosysid); + + /* Create the file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, filename, NC_CLOBBER))) + ERR(ret); + + /* Create a global attribute. */ + sprintf(att_name, "%s_%d", GLOBAL_ATT_NAME, my_comp_idx); + if ((ret = PIOc_put_att_schar(ncid, PIO_GLOBAL, att_name, PIO_BYTE, 1, &my_char_comp_idx))) + ERR(ret); + + /* Define a scalar variable. */ + sprintf(var_name, "%s_%d", SCALAR_VAR_NAME, my_comp_idx); + if ((ret = PIOc_def_var(ncid, var_name, PIO_INT, 0, NULL, &varid[0]))) + ERR(ret); + + /* Define dimensions. */ + for (int d = 0; d < NDIM3; d++) + { + sprintf(dim_name, "%s_%d_cmp_%d", DIM_NAME, d, my_comp_idx); + if ((ret = PIOc_def_dim(ncid, dim_name, dim_len[d], &dimid[d]))) + ERR(ret); + } + + /* Define a 2D variable. */ + sprintf(var_name, "%s_%d", TWOD_VAR_NAME, my_comp_idx); + if ((ret = PIOc_def_var(ncid, var_name, PIO_SHORT, NDIM2, &dimid[1], &varid[1]))) + ERR(ret); + + /* Define a 3D variable. */ + sprintf(var_name, "%s_%d", THREED_VAR_NAME, my_comp_idx); + if ((ret = PIOc_def_var(ncid, var_name, PIO_SHORT, NDIM3, dimid, &varid[2]))) + ERR(ret); + + /* End define mode. 
*/ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write the scalar variable. */ + if ((ret = PIOc_put_var_int(ncid, 0, &my_comp_idx))) + ERR(ret); + + /* Create some 2D data. */ + for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) + data_2d[i] = my_comp_idx + i; + + /* Write the 2-D variable with put_var(). */ + if ((ret = PIOc_put_var_short(ncid, 1, data_2d))) + ERR(ret); + + /* Write the 3D data. */ + if (use_darray) + { + /* Write the records of data with PIOc_write_darray(). */ + if ((ret = PIOc_setframe(ncid, varid[2], 0))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[2], ioid, DIM_X_LEN * DIM_Y_LEN, data_2d, NULL))) + ERR(ret); + if ((ret = PIOc_setframe(ncid, varid[2], 1))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[2], ioid, DIM_X_LEN * DIM_Y_LEN, data_2d, NULL))) + ERR(ret); + } + else + { + PIO_Offset start[NDIM3] = {0, 0, 0}; + PIO_Offset count[NDIM3] = {1, DIM_X_LEN, DIM_Y_LEN}; + + /* Write a record of the 3-D variable with put_vara(). */ + if ((ret = PIOc_put_vara_short(ncid, varid[2], start, count, data_2d))) + ERR(ret); + + /* Write another record of the 3-D variable with put_vara(). */ + start[0] = 1; + if ((ret = PIOc_put_vara_short(ncid, varid[2], start, count, data_2d))) + ERR(ret); + } + + /* Close the file if ncidp was not provided. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return PIO_NOERR; +} + +/* + * Check the file produced by create_nc_sample_3() for + * correctness. This checks all the metadata and data in the test + * file. + * + * @param iosysid identifies the IO system. + * @param iotype the iotype to be used to create the file. + * @param my_rank rank of this task in world (for debugging messages + * only). + * @param my_comp_idx the index of the computational component + * creating the file. + * @param filename the name of the file to check. + * @param test_name name of the test program. + * @param verbose non-zero to turn on printf statements. 
+ * @param use_darray if non-zero, use darray functions to write data, + * otherwise use PIOc_put_var(). + * @param ioid the decomposition ID to use if darrays are used to + * write data. + * @returns 0 for success, error code otherwise. + */ +int check_nc_sample_3(int iosysid, int iotype, int my_rank, int my_comp_idx, + const char *filename, int verbose, int use_darray, int ioid) +{ + int ncid; + int nvars; + int ndims; + int ngatts; + int unlimdimid; + PIO_Offset att_len; + char att_name[PIO_MAX_NAME + 1]; + char var_name[PIO_MAX_NAME + 1]; + char var_name_expected[PIO_MAX_NAME + 1]; + int dimid[NDIM2]; + int xtype; + int natts; + int comp_idx_in; + short data_2d[DIM_X_LEN * DIM_Y_LEN]; + signed char att_data; + int ret; + + /* Open the test file. */ + if ((ret = PIOc_openfile2(iosysid, &ncid, &iotype, filename, PIO_NOWRITE))) + ERR(ret); + + /* Check file metadata. */ + if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) + ERR(ret); + if (ndims != NDIM3 || nvars != NVAR || ngatts != 1 || unlimdimid != 0) + ERR(ERR_WRONG); + + /* Check the global attribute. */ + sprintf(att_name, "%s_%d", GLOBAL_ATT_NAME, my_comp_idx); + if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, att_name, &xtype, &att_len))) + ERR(ret); + if (xtype != PIO_BYTE || att_len != 1) + ERR(ERR_WRONG); + if ((ret = PIOc_get_att_schar(ncid, PIO_GLOBAL, att_name, &att_data))) + ERR(ret); + if (att_data != my_comp_idx) + ERR(ERR_WRONG); + + /* Check the scalar variable metadata. */ + if ((ret = PIOc_inq_var(ncid, 0, var_name, &xtype, &ndims, NULL, &natts))) + ERR(ret); + sprintf(var_name_expected, "%s_%d", SCALAR_VAR_NAME, my_comp_idx); + if (strcmp(var_name, var_name_expected) || xtype != PIO_INT || ndims != 0 || natts != 0) + ERR(ERR_WRONG); + + /* Check the scalar variable data. */ + if ((ret = PIOc_get_var_int(ncid, 0, &comp_idx_in))) + ERR(ret); + if (comp_idx_in != my_comp_idx) + ERR(ERR_WRONG); + + /* Check the 2D variable metadata. 
*/ + if ((ret = PIOc_inq_var(ncid, 1, var_name, &xtype, &ndims, dimid, &natts))) + ERR(ret); + sprintf(var_name_expected, "%s_%d", TWOD_VAR_NAME, my_comp_idx); + if (strcmp(var_name, var_name_expected) || xtype != PIO_SHORT || ndims != 2 || natts != 0) + ERR(ERR_WRONG); + + /* Read the 2-D variable. */ + if ((ret = PIOc_get_var_short(ncid, 1, data_2d))) + ERR(ret); + + /* Check 2D data for correctness. */ + for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) + if (data_2d[i] != my_comp_idx + i) + ERR(ERR_WRONG); + + /* Check the 3-D variable. */ + if (use_darray) + { + /* Read the record of data with PIOc_read_darray(). */ + if ((ret = PIOc_setframe(ncid, 2, 0))) + ERR(ret); + if ((ret = PIOc_read_darray(ncid, 2, ioid, DIM_X_LEN * DIM_Y_LEN, data_2d))) + ERR(ret); + if ((ret = PIOc_setframe(ncid, 2, 1))) + ERR(ret); + if ((ret = PIOc_read_darray(ncid, 2, ioid, DIM_X_LEN * DIM_Y_LEN, data_2d))) + ERR(ret); + } + else + { + PIO_Offset start[NDIM3] = {0, 0, 0}; + PIO_Offset count[NDIM3] = {1, DIM_X_LEN, DIM_Y_LEN}; + + /* Read a record of the 3-D variable with get_vara(). */ + if ((ret = PIOc_get_vara_short(ncid, 2, start, count, data_2d))) + ERR(ret); + for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) + if (data_2d[i] != my_comp_idx + i) + ERR(ERR_WRONG); + + /* Read another record of the 3-D variable with get_vara(). */ + start[0] = 1; + if ((ret = PIOc_get_vara_short(ncid, 2, start, count, data_2d))) + ERR(ret); + for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) + if (data_2d[i] != my_comp_idx + i) + ERR(ERR_WRONG); + } + + /* Close the test file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + return 0; +} + +/* + * This creates a test netCDF file in the specified format. This file + * is more complex. It has a global attribute of every type, 3 + * dimensions, including unlimited dimension, a scalar var of each + * type, and a 3D var of each type. + * + * @param iosysid identifies the IO system. + * @param iotype the iotype to be used to create the file. 
+ * @param my_rank rank of this task in world (for debugging messages + * only). + * @param my_comp_idx the index of the computational component + * creating the file. + * @param filename pointer to buffer that will get filename. Must be + * PIO_MAX_NAME + 1 in size. + * @param test_name name of the test program. + * @param varbose non-zero to turn on printf statements. + * @returns 0 for success, error code otherwise. + */ +int create_nc_sample_4(int iosysid, int iotype, int my_rank, int my_comp_idx, + char *filename, char *test_name, int verbose, int num_types) +{ + char iotype_name[NC_MAX_NAME + 1]; + int ncid; + int scalar_varid[num_types]; + int varid[num_types]; + char att_name[PIO_MAX_NAME + 1]; + char var_name[PIO_MAX_NAME + 1]; + char dim_name[PIO_MAX_NAME + 1]; + int dimid[NDIM3]; + int dim_len[NDIM3] = {PIO_UNLIMITED, DIM_X_LEN, DIM_Y_LEN}; + /* short data_2d[DIM_X_LEN * DIM_Y_LEN]; */ + int ret; + + /* Learn name of IOTYPE. */ + if ((ret = get_iotype_name(iotype, iotype_name))) + ERR(ret); + + /* Create a filename. */ + sprintf(filename, "%s_%s_cmp_%d.nc", test_name, iotype_name, my_comp_idx); + if (verbose) + printf("my_rank %d creating test file %s for iosysid %d\n", my_rank, filename, iosysid); + + /* Create the file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, filename, NC_CLOBBER))) + ERR(ret); + + /* Create a global attributes of all types. */ + for (int t = 0; t < num_types; t++) + { + sprintf(att_name, "%s_cmp_%d_type_%d", GLOBAL_ATT_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_put_att(ncid, PIO_GLOBAL, att_name, pio_type[t], ATT_LEN, att_data[t]))) + ERR(ret); + } + + /* Define a scalar variable of each type. */ + for (int t = 0; t < num_types; t++) + { + sprintf(var_name, "%s_cmp_%d_type_%d", SCALAR_VAR_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_def_var(ncid, var_name, pio_type[t], 0, NULL, &scalar_varid[t]))) + ERR(ret); + } + + /* Define dimensions. 
*/ + for (int d = 0; d < NDIM3; d++) + { + sprintf(dim_name, "%s_%d_cmp_%d", DIM_NAME, d, my_comp_idx); + if ((ret = PIOc_def_dim(ncid, dim_name, dim_len[d], &dimid[d]))) + ERR(ret); + } + + /* Define a 3D variable for each type. */ + for (int t = 0; t < num_types; t++) + { + sprintf(var_name, "%s_cmp_%d_type_%d", THREED_VAR_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_def_var(ncid, var_name, pio_type[t], NDIM3, dimid, &varid[t]))) + ERR(ret); + } + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write the scalar variables. */ + for (int t = 0; t < num_types; t++) + if ((ret = PIOc_put_var(ncid, scalar_varid[t], scalar_data[t]))) + ERR(ret); + + /* Write the 3-D variables. */ + /* for (int t = 0; t < num_types; t++) */ + /* { */ + /* for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) */ + /* data_2d[i] = my_comp_idx + i; */ + /* if ((ret = PIOc_put_var_short(ncid, 1, data_2d))) */ + /* ERR(ret); */ + /* } */ + + /* Close the file if ncidp was not provided. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return PIO_NOERR; +} + +/* Check a test file for correctness. */ +int check_nc_sample_4(int iosysid, int iotype, int my_rank, int my_comp_idx, + const char *filename, int verbose, int num_types) +{ + int ncid; + int nvars; + int ndims; + int ngatts; + int unlimdimid; + /* PIO_Offset att_len; */ + /* char att_name[PIO_MAX_NAME + 1]; */ + char var_name[PIO_MAX_NAME + 1]; + /* int dimid[NDIM2]; */ + int xtype; + int natts; + /* int comp_idx_in; */ + /* short data_2d[DIM_X_LEN * DIM_Y_LEN]; */ + int ret; + + /* Open the test file. */ + if ((ret = PIOc_openfile2(iosysid, &ncid, &iotype, filename, PIO_NOWRITE))) + ERR(ret); + + /* Check file metadata. */ + if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) + ERR(ret); + if (ndims != NDIM3 || nvars != num_types * 2 || ngatts != num_types || unlimdimid != 0) + ERR(ERR_WRONG); + + /* Check the global attributes. 
*/ + for (int t = 0; t < num_types; t++) + { + PIO_Offset type_size; + PIO_Offset att_len_in; + void *att_data_in; + char att_name[PIO_MAX_NAME + 1]; + + sprintf(att_name, "%s_cmp_%d_type_%d", GLOBAL_ATT_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, att_name, &xtype, &att_len_in))) + ERR(ret); + if (xtype != pio_type[t] || att_len_in != ATT_LEN) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_type(ncid, xtype, NULL, &type_size))) + ERR(ret); + if (!(att_data_in = malloc(type_size * ATT_LEN))) + ERR(ERR_AWFUL); + if (verbose) + printf("my_rank %d t %d pio_type[t] %d type_size %lld\n", my_rank, t, pio_type[t], + type_size); + if ((ret = PIOc_get_att(ncid, PIO_GLOBAL, att_name, att_data_in))) + ERR(ret); + if (memcmp(att_data_in, att_data[t], type_size * ATT_LEN)) + ERR(ERR_WRONG); + free(att_data_in); + } + + /* Check the scalar variables. */ + for (int t = 0; t < num_types; t++) + { + int vid; + PIO_Offset type_size; + void *scalar_data_in; + + sprintf(var_name, "%s_cmp_%d_type_%d", SCALAR_VAR_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_inq_varid(ncid, var_name, &vid))) + ERR(ret); + if ((ret = PIOc_inq_var(ncid, vid, var_name, &xtype, &ndims, NULL, &natts))) + ERR(ret); + if (xtype != pio_type[t] || ndims != 0 || natts != 0) + ERR(ERR_WRONG); + + /* Check the data. */ + if ((ret = PIOc_inq_type(ncid, xtype, NULL, &type_size))) + ERR(ret); + if (!(scalar_data_in = malloc(type_size))) + ERR(ERR_AWFUL); + + if ((ret = PIOc_get_var(ncid, vid, scalar_data_in))) + ERR(ret); + /* if (comp_idx_in != my_comp_idx) */ + /* ERR(ERR_WRONG); */ + free(scalar_data_in); + } + + /* Check the 3D variables. 
*/ + for (int t = 0; t < num_types; t++) + { + int vid; + /* PIO_Offset type_size; */ + /* void *threed_data_in; */ + int var_dimids[NDIM3]; + + sprintf(var_name, "%s_cmp_%d_type_%d", THREED_VAR_NAME, my_comp_idx, pio_type[t]); + if ((ret = PIOc_inq_varid(ncid, var_name, &vid))) + ERR(ret); + if ((ret = PIOc_inq_var(ncid, vid, var_name, &xtype, &ndims, var_dimids, &natts))) + ERR(ret); + if (xtype != pio_type[t] || ndims != NDIM3 || natts != 0) + ERR(ERR_WRONG); + + /* if ((ret = PIOc_inq_var(ncid, 1, var_name, &xtype, &ndims, dimid, &natts))) */ + /* ERR(ret); */ + /* sprintf(var_name_expected, "%s_%d", THREED_VAR_NAME, my_comp_idx); */ + /* if (strcmp(var_name, var_name_expected) || xtype != PIO_SHORT || ndims != 2 || natts != 0) */ + /* ERR(ERR_WRONG); */ + + /* /\* Read the 2-D variable. *\/ */ + /* if ((ret = PIOc_get_var_short(ncid, 1, data_2d))) */ + /* ERR(ret); */ + + /* /\* Check 2D data for correctness. *\/ */ + /* for (int i = 0; i < DIM_X_LEN * DIM_Y_LEN; i++) */ + /* if (data_2d[i] != my_comp_idx + i) */ + /* ERR(ERR_WRONG); */ + } + + /* Close the test file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray.c b/src/externals/pio2/tests/cunit/test_darray.c index e53e29bff4e..aaca7d88d61 100644 --- a/src/externals/pio2/tests/cunit/test_darray.c +++ b/src/externals/pio2/tests/cunit/test_darray.c @@ -1,8 +1,10 @@ /* * Tests for PIO distributed arrays. * - * Ed Hartnett, 2/16/17 + * @author Ed Hartnett + * @date 2/16/17 */ +#include #include #include #include @@ -36,8 +38,9 @@ /* The number of timesteps of data to write. */ #define NUM_TIMESTEPS 2 -/* The name of the variable in the netCDF output files. */ -#define VAR_NAME "foo" +/* The names of variables in the netCDF output files. */ +#define VAR_NAME "Billy-Bob" +#define VAR_NAME2 "Sally-Sue" /* Test cases relating to PIOc_write_darray_multi(). 
*/ #define NUM_TEST_CASES_WRT_MULTI 3 @@ -52,7 +55,7 @@ char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; /* Length of the dimensions in the sample data. */ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; -/** +/** * Test the darray functionality. Create a netCDF file with 3 * dimensions and 1 PIO_INT variable, and use darray to write some * data. @@ -64,7 +67,7 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; * @param my_rank rank of this task. * @param pio_type the type of the data. * @returns 0 for success, error code otherwise. -*/ + */ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int pio_type) { @@ -73,6 +76,8 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank int ncid; /* The ncid of the netCDF file. */ int ncid2; /* The ncid of the re-opened netCDF file. */ int varid; /* The ID of the netCDF varable. */ + int varid2; /* The ID of a varable of different type. */ + int wrong_varid = TEST_VAL_42; /* A wrong ID. */ int ret; /* Return code. */ PIO_Offset arraylen = 4; void *fillvalue; @@ -98,7 +103,7 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank /* Use PIO to create the example file in each of the four * available ways. */ - for (int fmt = 0; fmt < num_flavors; fmt++) + for (int fmt = 0; fmt < num_flavors; fmt++) { /* Add a couple of extra tests for the @@ -135,13 +140,10 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank } /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, - flavor[fmt], pio_type); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); /* Define netCDF dimensions and variable. 
*/ - printf("%d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); @@ -150,6 +152,11 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if ((ret = PIOc_def_var(ncid, VAR_NAME, pio_type, NDIM, dimids, &varid))) ERR(ret); + /* Define a variable with a different type. */ + int other_type = pio_type == PIO_INT ? PIO_FLOAT : PIO_INT; + if ((ret = PIOc_def_var(ncid, VAR_NAME2, other_type, NDIM, dimids, &varid2))) + ERR(ret); + /* End define mode. */ if ((ret = PIOc_enddef(ncid))) ERR(ret); @@ -169,6 +176,10 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank ERR(ERR_WRONG); if (PIOc_write_darray(ncid, varid, ioid, arraylen - 1, test_data, fillvalue) != PIO_EINVAL) ERR(ERR_WRONG); + if (PIOc_write_darray(ncid, TEST_VAL_42, ioid, arraylen, test_data, fillvalue) != PIO_ENOTVAR) + ERR(ERR_WRONG); + if (PIOc_write_darray(ncid, varid2, ioid, arraylen, test_data, fillvalue) != PIO_EINVAL) + ERR(ERR_WRONG); /* Write the data. */ if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, fillvalue))) @@ -191,13 +202,13 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if (PIOc_write_darray_multi(ncid, &varid, ioid, -1, arraylen, test_data, &frame, fillvalue, flushtodisk) != PIO_EINVAL) ERR(ERR_WRONG); - /* if (PIOc_write_darray_multi(ncid, &varid, ioid, 1, arraylen, test_data, NULL, */ - /* fillvalue, flushtodisk) != PIO_EINVAL) */ - /* ERR(ERR_WRONG); */ if (PIOc_write_darray_multi(ncid, &varid_big, ioid, 1, arraylen, test_data, &frame, - fillvalue, flushtodisk) != PIO_EINVAL) + fillvalue, flushtodisk) != PIO_ENOTVAR) + ERR(ERR_WRONG); + if (PIOc_write_darray_multi(ncid, &wrong_varid, ioid, 1, arraylen, test_data, &frame, + fillvalue, flushtodisk) != PIO_ENOTVAR) ERR(ERR_WRONG); - + /* Write the data with the _multi function. 
*/ if ((ret = PIOc_write_darray_multi(ncid, &varid, ioid, 1, arraylen, test_data, &frame, fillvalue, flushtodisk))) @@ -219,7 +230,11 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if (PIOc_read_darray(ncid2, varid, ioid + TEST_VAL_42, arraylen, test_data_in) != PIO_EBADID) ERR(ERR_WRONG); - + + /* Set the record number. */ + if ((ret = PIOc_setframe(ncid2, varid, 0))) + ERR(ret); + /* Read the data. */ if ((ret = PIOc_read_darray(ncid2, varid, ioid, arraylen, test_data_in))) ERR(ret); @@ -258,9 +273,8 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank fillvalue, flushtodisk) != PIO_EPERM) ERR(ERR_WRONG); } - + /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid2))) ERR(ret); } /* next fillvalue test case */ @@ -271,14 +285,14 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank } /** - * Run all the tests. + * Run all the tests. * * @param iosysid the IO system ID. * @param num_flavors number of available iotypes in the build. * @param flavor pointer to array of the available iotypes. * @param my_rank rank of this task. * @param test_comm the communicator the test is running on. - * @returns 0 for success, error code otherwise. + * @returns 0 for success, error code otherwise. */ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm) @@ -291,7 +305,7 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, int ret; /* Return code. */ for (int t = 0; t < NUM_TYPES_TO_TEST; t++) - { + { /* This will be our file name for writing out decompositions. */ sprintf(filename, "%s_decomp_rank_%d_flavor_%d_type_%d.nc", TEST_NAME, my_rank, *flavor, pio_type[t]); @@ -299,12 +313,12 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, /* Decompose the data over the tasks. 
*/ if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, pio_type[t]))) - return ret; + return ret; /* Run a simple darray test. */ if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, pio_type[t]))) return ret; - + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -327,7 +341,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - MIN_NTASKS, 3, &test_comm))) + MIN_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -340,33 +354,30 @@ int main(int argc, char **argv) int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ int ret; /* Return code. */ - + /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) { - /* Initialize the PIO IO system. This specifies how - * many and which processors are involved in I/O. */ - if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, - ioproc_start, rearranger[r], &iosysid))) - return ret; - - /* Run tests. */ - printf("%d Running tests...\n", my_rank); - if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) - return ret; - - /* Finalize PIO system. */ - if ((ret = PIOc_finalize(iosysid))) - return ret; + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + + /* Run tests. */ + if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) + return ret; + + /* Finalize PIO system. 
*/ + if ((ret = PIOc_finalize(iosysid))) + return ret; } /* next rearranger */ } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_1d.c b/src/externals/pio2/tests/cunit/test_darray_1d.c index c5c6ac85012..189d2e3e9bb 100644 --- a/src/externals/pio2/tests/cunit/test_darray_1d.c +++ b/src/externals/pio2/tests/cunit/test_darray_1d.c @@ -2,8 +2,10 @@ * Tests for PIO distributed arrays. This test uses 1 dimension, * everything very simple. ;-) * - * Ed Hartnett, 2/27/17 + * @author Ed Hartnett + * @date 2/27/17 */ +#include #include #include #include @@ -76,8 +78,6 @@ int create_decomposition_1d(int ntasks, int my_rank, int iosysid, int pio_type, compdof, ioid, NULL, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - return 0; } @@ -92,7 +92,7 @@ int create_decomposition_1d(int ntasks, int my_rank, int iosysid, int pio_type, * @param my_rank rank of this task. * @param test_comm the MPI communicator running the test. * @returns 0 for success, error code otherwise. -*/ + */ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm) { @@ -108,10 +108,10 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * void *expected_in; PIO_Offset type_size; /* Size of the data type. */ /* My rank as each type. 
*/ - signed char my_byte_rank = my_rank; - char my_char_rank = my_rank; - short my_short_rank = my_rank; - float my_float_rank = my_rank; + signed char my_byte_rank = my_rank; + char my_char_rank = my_rank; + short my_short_rank = my_rank; + float my_float_rank = my_rank; double my_double_rank = my_rank; #ifdef _NETCDF4 unsigned char my_ubyte_rank = my_rank; @@ -148,11 +148,10 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * continue; /* NetCDF-4 types only work with netCDF-4 formats. */ - printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P) continue; - + for (int with_fillvalue = 0; with_fillvalue < NUM_FILLVALUE_PRESENT_TESTS; with_fillvalue++) { /* Create the filename. */ @@ -160,8 +159,6 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * pio_type, with_fillvalue); /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d...\n", my_rank, filename, - flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); @@ -199,7 +196,7 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * long long int64_test_data[2] = {my_rank, my_rank}; unsigned long long uint64_test_data[2] = {my_rank, my_rank}; #endif /* _NETCDF4 */ - + switch (pio_type) { case PIO_BYTE: @@ -362,7 +359,6 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * free(bufr); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); } /* with_fillvalue */ @@ -382,7 +378,7 @@ int test_darray_fill(int iosysid, int ioid, int pio_type, int num_flavors, int * * @param my_rank rank of this task. * @param test_comm the MPI communicator running the test. * @returns 0 for success, error code otherwise. 
-*/ + */ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm) { @@ -399,10 +395,10 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, PIO_Offset type_size; /* Size of the data type. */ /* My rank as each type. */ - signed char my_byte_rank = my_rank; - char my_char_rank = my_rank; - short my_short_rank = my_rank; - float my_float_rank = my_rank; + signed char my_byte_rank = my_rank; + char my_char_rank = my_rank; + short my_short_rank = my_rank; + float my_float_rank = my_rank; double my_double_rank = my_rank; #ifdef _NETCDF4 unsigned char my_ubyte_rank = my_rank; @@ -438,18 +434,15 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, continue; /* NetCDF-4 types only work with netCDF-4 formats. */ - printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P) continue; - + /* Create the filename. */ sprintf(filename, "data_%s_iotype_%d_pio_type_%d_unlim.nc", TEST_NAME, flavor[fmt], pio_type); /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d...\n", my_rank, filename, - flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); @@ -586,6 +579,10 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, if (!(test_data_in = malloc(type_size * arraylen))) ERR(PIO_ENOMEM); + /* Set the record number for the unlimited dimension. */ + if ((ret = PIOc_setframe(ncid, varid, 0))) + ERR(ret); + /* Read the data. */ if ((ret = PIOc_read_darray(ncid, varid, ioid, arraylen, test_data_in))) ERR(ret); @@ -667,7 +664,6 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, free(bufr); /* Close the netCDF file. 
*/ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); } /* next iotype */ @@ -687,7 +683,7 @@ int test_darray_fill_unlim(int iosysid, int ioid, int pio_type, int num_flavors, * @param rearranger the rearranger in use. * @param test_comm the MPI communicator for this test. * @returns 0 for success, error code otherwise. -*/ + */ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int pio_type, int rearranger, MPI_Comm test_comm) { @@ -705,12 +701,10 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Create the filename. */ sprintf(filename, "decomp_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); - printf("writing decomp file %s\n", filename); if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) return ret; /* Read the data. */ - printf("reading decomp file %s\n", filename); if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, pio_type, title_in, history_in, &fortran_order_in))) return ret; @@ -775,12 +769,11 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* return ERR_WRONG; */ /* if (iodesc->num_aiotasks != TARGET_NTASKS) */ /* return ERR_WRONG; */ - printf("iodesc->nrecvs = %d iodesc->num_aiotasks = %d\n", iodesc->nrecvs, iodesc->num_aiotasks); if (iodesc->ndof != EXPECTED_MAPLEN) return ERR_WRONG; if (iodesc->rearranger != rearranger || iodesc->maxregions != 1) return ERR_WRONG; - if (!iodesc->needsfill || iodesc->basetype != expected_basetype) + if (!iodesc->needsfill || iodesc->mpitype != expected_basetype) return ERR_WRONG; /* Don't forget to add 1!! */ if (iodesc->map[0] != my_rank + 1 || iodesc->map[1] != 0) @@ -818,7 +811,7 @@ int main(int argc, char **argv) /* Initialize test. 
*/ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - MIN_NTASKS, 3, &test_comm))) + MIN_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -836,7 +829,6 @@ int main(int argc, char **argv) /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) { @@ -882,7 +874,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_2sync.c b/src/externals/pio2/tests/cunit/test_darray_2sync.c new file mode 100644 index 00000000000..a5e27ccc4ab --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_2sync.c @@ -0,0 +1,593 @@ +/* + * This program tests darrays with async and non-async. + * + * @author Ed Hartnett + * @date 7/8/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 1 + +/* The name of this test. */ +#define TEST_NAME "test_darray_2sync" + +#define NUM_IO_PROCS 1 +#define NUM_COMPUTATION_PROCS 3 +#define COMPONENT_COUNT 1 + +#define DIM_NAME "simple_dim" +#define DIM_LEN 6 +#define VAR_NAME "simple_var" +#define NDIM1 1 + +/* Declare and fill and array with all PIO types available. 
*/ +#ifdef _NETCDF4 +#define MAX_NUM_TYPES 11 +int test_type[MAX_NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define MAX_NUM_TYPES 6 +int test_type[MAX_NUM_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ + +#define LEN2 2 + +/* Tests and incorrect fill values are rejected. */ +int darray_fill_test(int iosysid, int my_rank, int num_iotypes, int *iotype, + int async) +{ + /* For each of the available IOtypes... */ + for (int iot = 0; iot < num_iotypes; iot++) + { + /* Test more types for netCDF4 IOTYPES. */ + int num_types_to_test = (iotype[iot] == PIO_IOTYPE_NETCDF4C || + iotype[iot] == PIO_IOTYPE_NETCDF4P) ? MAX_NUM_TYPES : NUM_CLASSIC_TYPES; + + /* Test all available types. */ + for (int t = 0; t < num_types_to_test; t++) + { + int ncid; + int dimid; + int varid; + int ioid; + char filename[PIO_MAX_NAME + 1]; + /* The default fill values. */ + signed char default_fill_byte = PIO_FILL_BYTE; + unsigned char default_fill_char = PIO_FILL_CHAR; + short default_fill_short = PIO_FILL_SHORT; + int default_fill_int = PIO_FILL_INT; + float default_fill_float = PIO_FILL_FLOAT; + double default_fill_double = PIO_FILL_DOUBLE; +#ifdef _NETCDF4 + unsigned char default_fill_ubyte = PIO_FILL_UBYTE; + unsigned short default_fill_ushort = PIO_FILL_USHORT; + unsigned int default_fill_uint = PIO_FILL_UINT; + long long default_fill_int64 = PIO_FILL_INT64; + unsigned long long default_fill_uint64 = PIO_FILL_UINT64; +#endif /* _NETCDF4 */ + + /* Some incorrect fill values. 
*/ + signed char wrong_fill_byte = TEST_VAL_42; + unsigned char wrong_fill_char = TEST_VAL_42; + short wrong_fill_short = TEST_VAL_42; + int wrong_fill_int = TEST_VAL_42; + float wrong_fill_float = TEST_VAL_42; + double wrong_fill_double = TEST_VAL_42; +#ifdef _NETCDF4 + unsigned char wrong_fill_ubyte = TEST_VAL_42; + unsigned short wrong_fill_ushort = TEST_VAL_42; + unsigned int wrong_fill_uint = TEST_VAL_42; + long long wrong_fill_int64 = TEST_VAL_42; + unsigned long long wrong_fill_uint64 = TEST_VAL_42; +#endif /* _NETCDF4 */ + + /* Some sample data. */ + signed char test_data_byte[LEN2] = {my_rank, -my_rank}; + unsigned char test_data_char[LEN2] = {my_rank, my_rank}; + short test_data_short[LEN2] = {my_rank, -my_rank}; + int test_data_int[LEN2] = {my_rank, -my_rank}; + float test_data_float[LEN2] = {my_rank, -my_rank}; + double test_data_double[LEN2] = {my_rank, -my_rank}; +#ifdef _NETCDF4 + unsigned char test_data_ubyte[LEN2] = {my_rank, my_rank}; + unsigned short test_data_ushort[LEN2] = {my_rank, my_rank}; + unsigned int test_data_uint[LEN2] = {my_rank, my_rank}; + long long test_data_int64[LEN2] = {my_rank, -my_rank}; + unsigned long long test_data_uint64[LEN2] = {my_rank, my_rank}; +#endif /* _NETCDF4 */ + void *test_data; + void *default_fillvalue; + void *wrong_fillvalue; + int ret; + + /* For unexplained reasons, pnetcdf code can't handle these types. 
*/ + if (iotype[iot] == PIO_IOTYPE_PNETCDF && + (test_type[t] == PIO_BYTE || test_type[t] == PIO_CHAR)) + continue; + + switch(test_type[t]) + { + case PIO_BYTE: + test_data = test_data_byte; + default_fillvalue = &default_fill_byte; + wrong_fillvalue = &wrong_fill_byte; + break; + case PIO_CHAR: + test_data = test_data_char; + default_fillvalue = &default_fill_char; + wrong_fillvalue = &wrong_fill_char; + break; + case PIO_SHORT: + test_data = test_data_short; + default_fillvalue = &default_fill_short; + wrong_fillvalue = &wrong_fill_short; + break; + case PIO_INT: + test_data = test_data_int; + default_fillvalue = &default_fill_int; + wrong_fillvalue = &wrong_fill_int; + break; + case PIO_FLOAT: + test_data = test_data_float; + default_fillvalue = &default_fill_float; + wrong_fillvalue = &wrong_fill_float; + break; + case PIO_DOUBLE: + test_data = test_data_double; + default_fillvalue = &default_fill_double; + wrong_fillvalue = &wrong_fill_double; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + test_data = test_data_ubyte; + default_fillvalue = &default_fill_ubyte; + wrong_fillvalue = &wrong_fill_ubyte; + break; + case PIO_USHORT: + test_data = test_data_ushort; + default_fillvalue = &default_fill_ushort; + wrong_fillvalue = &wrong_fill_ushort; + break; + case PIO_UINT: + test_data = test_data_uint; + default_fillvalue = &default_fill_uint; + wrong_fillvalue = &wrong_fill_uint; + break; + case PIO_INT64: + test_data = test_data_int64; + default_fillvalue = &default_fill_int64; + wrong_fillvalue = &wrong_fill_int64; + break; + case PIO_UINT64: + test_data = test_data_uint64; + default_fillvalue = &default_fill_uint64; + wrong_fillvalue = &wrong_fill_uint64; + break; +#endif /* _NETCDF4 */ + } + + /* Create test filename. */ + sprintf(filename, "%s_fill_async_%d_iotype_%d_type_%d.nc", TEST_NAME, async, iotype[iot], + test_type[t]); + + /* Create the test file. 
*/ + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype[iot], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define a dimension. */ + if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid))) + ERR(ret); + + /* Define a 1D var. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, test_type[t], NDIM1, &dimid, &varid))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Create the PIO decomposition for this test. */ + int elements_per_pe = LEN2; + PIO_Offset compdof[elements_per_pe]; + int gdimlen = DIM_LEN; + if (my_rank == 0) + { + /* Only non-async code will reach here, for async, task 0 + * does not run this function. */ + compdof[0] = -1; + compdof[1] = -1; + } + else + { + compdof[0] = (my_rank - 1) * elements_per_pe; + compdof[1] = compdof[0] + 1; + } + + /* Initialize the decomposition. Only the subset + * decomposition uses the fill value. */ + if ((ret = PIOc_init_decomp(iosysid, test_type[t], NDIM1, &gdimlen, elements_per_pe, + compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + + /* Set the record number for the unlimited dimension. */ + if ((ret = PIOc_setframe(ncid, varid, 0))) + ERR(ret); + + /* This should not work, because fill value is + * incorrect. (Test turned off until Fortran API/tests are + * fixed.) */ + if (PIOc_write_darray(ncid, varid, ioid, LEN2, test_data, wrong_fillvalue) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* Write the data. There are 3 procs with data, each writes 2 + * values. */ + if ((ret = PIOc_write_darray(ncid, varid, ioid, LEN2, test_data, default_fillvalue))) + ERR(ret); + + /* Close the test file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Free decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Check the file. */ + { + int ncid2; + + /* Reopen the file. */ + if ((ret = PIOc_openfile2(iosysid, &ncid2, &iotype[iot], filename, PIO_NOWRITE))) + ERR(ret); + + /* Read the data. 
*/ + switch(test_type[t]) + { + case PIO_BYTE: + { + signed char data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_schar(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_CHAR: + break; + case PIO_SHORT: + { + short data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_short(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_INT: + { + int data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_int(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_FLOAT: + { + float data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_float(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_DOUBLE: + { + double data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_double(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + { + unsigned char data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_uchar(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_USHORT: + { + unsigned short data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + 
if ((ret = PIOc_get_var_ushort(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_UINT: + { + unsigned int data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_uint(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_INT64: + { + long long data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_longlong(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; + case PIO_UINT64: + { + unsigned long long data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + if ((ret = PIOc_get_var_ulonglong(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + } + break; +#endif /* _NETCDF4 */ + } + + /* Close the test file. */ + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } /* finish checking file */ + } /* next type */ + } /* next iotype */ + + return PIO_NOERR; +} + +/* Tests for darray that can run on both async and non-async + * iosysids. This is a deliberately simple test, to make debugging + * easier. */ +int darray_simple_test(int iosysid, int my_rank, int num_iotypes, int *iotype, + int async) +{ + /* For each of the available IOtypes... */ + for (int iot = 0; iot < num_iotypes; iot++) + { + int ncid; + int dimid; + int varid; + int ioid; + char filename[PIO_MAX_NAME + 1]; + int ret; + + /* Create test filename. */ + sprintf(filename, "%s_simple_async_%d_iotype_%d.nc", TEST_NAME, async, iotype[iot]); + + /* Create the test file. 
*/ + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype[iot], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define a dimension. */ + if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid))) + ERR(ret); + + /* Define a 1D var. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM1, &dimid, &varid))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Create the PIO decomposition for this test. */ + int elements_per_pe = 2; + PIO_Offset compdof[elements_per_pe]; + int gdimlen = DIM_LEN; + if (my_rank == 0) + { + /* Only non-async code will reach here, for async, task 0 + * does not run this function. */ + compdof[0] = -1; + compdof[1] = -1; + } + else + { + compdof[0] = (my_rank - 1) * elements_per_pe; + compdof[1] = compdof[0] + 1; + } + + /* Initialize the decomposition. */ + if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM1, &gdimlen, elements_per_pe, + compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + + /* Set the record number for the unlimited dimension. */ + if ((ret = PIOc_setframe(ncid, varid, 0))) + ERR(ret); + + /* Write the data. There are 3 procs with data, each writes 2 + * values. */ + int arraylen = 2; + int test_data[2] = {my_rank, -my_rank}; + if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, NULL))) + ERR(ret); + + /* Close the test file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Free decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Check the file. */ + { + int ncid2; + int data_in[elements_per_pe * NUM_COMPUTATION_PROCS]; + + /* Reopen the file. */ + if ((ret = PIOc_openfile2(iosysid, &ncid2, &iotype[iot], filename, PIO_NOWRITE))) + ERR(ret); + + /* Read the data. */ + if ((ret = PIOc_get_var_int(ncid2, 0, data_in))) + ERR(ret); + if (my_rank && data_in[0] != 1 && data_in[1] != -1 && data_in[2] != 2 && + data_in[3] != -2 && data_in[4] != 3 && data_in[5] != -3) + ERR(ret); + + /* Close the test file. 
*/ + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } + } + + return PIO_NOERR; +} + +/* This function can be run for both async and non async. It runs all + * the test functions. */ +int run_darray_tests(int iosysid, int my_rank, int num_iotypes, int *iotype, int async) +{ + int ret; + + /* Run the simple darray test. */ + if ((ret = darray_simple_test(iosysid, my_rank, num_iotypes, iotype, async))) + ERR(ret); + + /* Run the darray fill value tests. */ + if ((ret = darray_fill_test(iosysid, my_rank, num_iotypes, iotype, async))) + ERR(ret); + + return PIO_NOERR; +} + +/* Initialize with task 0 as IO task, tasks 1-3 as a + * computation component. */ +int run_async_tests(MPI_Comm test_comm, int my_rank, int num_iotypes, int *iotype) +{ + int iosysid; + int num_computation_procs = NUM_COMPUTATION_PROCS; + MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */ + MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */ + int mpierr; + int ret; + + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + &num_computation_procs, NULL, &io_comm, comp_comm, + PIO_REARR_BOX, &iosysid))) + ERR(ERR_INIT); + + /* This code runs only on computation components. */ + if (my_rank) + { + /* Run the tests. */ + if ((ret = run_darray_tests(iosysid, my_rank, num_iotypes, iotype, 1))) + ERR(ret); + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + /* Free the computation conomponent communicator. */ + if ((mpierr = MPI_Comm_free(comp_comm))) + MPIERR(mpierr); + } + else + { + /* Free the IO communicator. */ + if ((mpierr = MPI_Comm_free(&io_comm))) + MPIERR(mpierr); + } + + return PIO_NOERR; +} + +/* Initialize with task 0 as IO task, tasks 1-3 as a + * computation component. */ +int run_noasync_tests(MPI_Comm test_comm, int my_rank, int num_iotypes, int *iotype) +{ + int iosysid; + int stride = 1; + int base = 1; + int ret; + + /* Initialize PIO system. 
*/ + if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO_PROCS, stride, base, PIO_REARR_BOX, + &iosysid))) + ERR(ret); + + /* Run the tests. */ + if ((ret = run_darray_tests(iosysid, my_rank, num_iotypes, iotype, 0))) + ERR(ret); + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + return PIO_NOERR; +} + +/* Run Tests for darray functions. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int num_iotypes; /* Number of PIO netCDF iotypes in this build. */ + int iotype[NUM_IOTYPES]; /* iotypes for the supported netCDF IO iotypes. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + TARGET_NTASKS, -1, &test_comm))) + ERR(ERR_INIT); + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_iotypes, iotype))) + ERR(ret); + + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do + * nothing. */ + if (my_rank < TARGET_NTASKS) + { + if ((ret = run_async_tests(test_comm, my_rank, num_iotypes, iotype))) + ERR(ret); + + if ((ret = run_noasync_tests(test_comm, my_rank, num_iotypes, iotype))) + ERR(ret); + + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_3d.c b/src/externals/pio2/tests/cunit/test_darray_3d.c index e261c0cec43..e927da7047b 100644 --- a/src/externals/pio2/tests/cunit/test_darray_3d.c +++ b/src/externals/pio2/tests/cunit/test_darray_3d.c @@ -1,8 +1,10 @@ /* * Tests for PIO distributed arrays. 
* - * Ed Hartnett, 2/21/17 + * @author Ed Hartnett + * @date 2/21/17 */ +#include #include #include #include @@ -90,13 +92,10 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid) compdof[i] = my_rank * elements_per_pe + i; /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, dim_len_3d, elements_per_pe, compdof, ioid, 0, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - /* Free the mapping. */ free(compdof); @@ -115,7 +114,7 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid) * @param my_rank rank of this task. * @param provide_fill 1 if fillvalue should be provided to PIOc_write_darray(). * @returns 0 for success, error code otherwise. -*/ + */ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int provide_fill) { @@ -151,8 +150,6 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank sprintf(filename, "data_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d...\n", my_rank, filename, - flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); @@ -161,7 +158,6 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank ERR(ret); /* Define netCDF dimensions and variable. */ - printf("%d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); @@ -198,6 +194,10 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) ERR(ret); + /* Set the value of the record dimension. 
*/ + if ((ret = PIOc_setframe(ncid2, varid, 0))) + ERR(ret); + /* Read the data. */ if ((ret = PIOc_read_darray(ncid2, varid, ioid, arraylen, test_data_in))) ERR(ret); @@ -221,7 +221,6 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank return ERR_WRONG; /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid2))) ERR(ret); } @@ -240,7 +239,7 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank * PIO_REARR_SUBSET). * @param test_comm the MPI communicator for this test. * @returns 0 for success, error code otherwise. -*/ + */ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int rearranger, MPI_Comm test_comm) { @@ -258,12 +257,10 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Create the filename. */ sprintf(filename, "decomp_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); - printf("writing decomp file %s\n", filename); if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) return ret; /* Read the data. */ - printf("reading decomp file %s\n", filename); if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, PIO_INT, title_in, history_in, &fortran_order_in))) return ret; @@ -284,7 +281,7 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, iodesc->ndof != EXPECTED_MAPLEN) return ERR_WRONG; if (iodesc->rearranger != rearranger || iodesc->maxregions != 1 || - iodesc->needsfill || iodesc->basetype != MPI_INT) + iodesc->needsfill || iodesc->mpitype != MPI_INT) return ERR_WRONG; for (int e = 0; e < iodesc->maplen; e++) if (iodesc->map[e] != my_rank * iodesc->maplen + e + 1) @@ -301,8 +298,8 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, { /* I haven't figured out yet what these should be for * the box rearranger. 
*/ - printf("iodesc->nrecv = %d iodesc->num_aiotasks = %d\n", iodesc->nrecvs, - iodesc->num_aiotasks); + /* printf("iodesc->nrecv = %d iodesc->num_aiotasks = %d\n", iodesc->nrecvs, */ + /* iodesc->num_aiotasks); */ } } @@ -371,7 +368,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - MIN_NTASKS, 3, &test_comm))) + MIN_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -389,7 +386,6 @@ int main(int argc, char **argv) /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) { @@ -400,7 +396,6 @@ int main(int argc, char **argv) return ret; /* Run tests. */ - printf("%d Running tests...\n", my_rank); if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, rearranger[r], test_comm))) return ret; @@ -413,7 +408,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_async.c b/src/externals/pio2/tests/cunit/test_darray_async.c new file mode 100644 index 00000000000..9d493fc51eb --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_async.c @@ -0,0 +1,561 @@ +/* + * This program tests darrays with async. + * + * @author Ed Hartnett + * @date 5/4/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 1 + +/* The name of this test. */ +#define TEST_NAME "test_darray_async" + +/* For 1-D use. */ +#define NDIM1 1 + +/* For 2-D use. */ +#define NDIM2 2 + +/* For 3-D use. 
*/ +#define NDIM3 3 + +/* For maplens of 2. */ +#define MAPLEN2 2 + +/* Lengths of non-unlimited dimensions. */ +#define LAT_LEN 2 +#define LON_LEN 3 + +/* Number of vars in test file. */ +#define NVAR 4 + +/* Number of records written for record var. */ +#define NREC 4 + +/* Name of record test var. */ +#define REC_VAR_NAME "surface_temperature" +#define REC_VAR_NAME2 "surface_temperature2" + +/* Name of non-record test var. */ +#define NOREC_VAR_NAME "surface_height" +#define NOREC_VAR_NAME2 "surface_height2" + +char dim_name[NDIM3][PIO_MAX_NAME + 1] = {"unlim", "lat", "lon"}; + +/* Length of the dimension. */ +#define LEN3 3 + +#define NUM_VAR_SETS 2 + +/* Check the file that was created in this test. */ +int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank, + int piotype) +{ + int ncid; + int varid[NVAR] = {0, 1, 2, 3}; + void *data_in; + void *data_in_norec; + PIO_Offset type_size; + int ret; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, data_filename, NC_NOWRITE))) + ERR(ret); + + /* Get the size of the type. */ + if ((ret = PIOc_inq_type(ncid, piotype, NULL, &type_size))) + ERR(ret); + + /* Allocate memory to read data. */ + if (!(data_in = malloc(LAT_LEN * LON_LEN * type_size * NREC))) + ERR(PIO_ENOMEM); + if (!(data_in_norec = malloc(LAT_LEN * LON_LEN * type_size))) + ERR(PIO_ENOMEM); + + /* We have two sets of variables, those with unlimted, and those + * without unlimited dimension. */ + for (int vs = 0; vs < NUM_VAR_SETS; vs++) + { + int rec_varid = vs ? varid[0] : varid[1]; + int norec_varid = vs ? varid[2] : varid[3]; + + /* Read the record data. The values we expect are: 10, 11, 20, 21, 30, + * 31, in each of three records. */ + if ((ret = PIOc_get_var(ncid, rec_varid, data_in))) + ERR(ret); + + /* Read the non-record data. The values we expect are: 10, 11, 20, 21, 30, + * 31. */ + if ((ret = PIOc_get_var(ncid, norec_varid, data_in_norec))) + ERR(ret); + + /* Check the results. 
*/ + for (int r = 0; r < LAT_LEN * LON_LEN * NREC; r++) + { + int tmp_r = r % (LAT_LEN * LON_LEN); + switch (piotype) + { + case PIO_BYTE: + if (((signed char *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_CHAR: + if (((char *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_SHORT: + if (((short *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_INT: + if (((int *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_FLOAT: + if (((float *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_DOUBLE: + if (((double *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_USHORT: + if (((unsigned short *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_UINT: + if (((unsigned int *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_INT64: + if (((long long *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; + case PIO_UINT64: + if (((unsigned long long *)data_in)[r] != (tmp_r/2 + 1) * 10.0 + tmp_r % 2) + ERR(ret); + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + + /* Check the results. 
*/ + for (int r = 0; r < LAT_LEN * LON_LEN; r++) + { + switch (piotype) + { + case PIO_BYTE: + if (((signed char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_CHAR: + if (((char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_SHORT: + if (((short *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_INT: + if (((int *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_FLOAT: + if (((float *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_DOUBLE: + if (((double *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_USHORT: + if (((unsigned short *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_UINT: + if (((unsigned int *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_INT64: + if (((long long *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; + case PIO_UINT64: + if (((unsigned long long *)data_in_norec)[r] != (r/2 + 1) * 20.0 + r%2) + ERR(ret); + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + } /* next var set */ + + /* Free resources. */ + free(data_in); + free(data_in_norec); + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return 0; +} + +/* Run a simple test using darrays with async. 
*/ +int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm, MPI_Comm comp_comm, + int num_flavors, int *flavor, int piotype) +{ + int ioid; + int dim_len[NDIM3] = {NC_UNLIMITED, 2, 3}; + PIO_Offset elements_per_pe = LAT_LEN; + PIO_Offset compdof[LAT_LEN] = {my_rank * 2 - 2, my_rank * 2 - 1}; + char decomp_filename[PIO_MAX_NAME + 1]; + int ret; + + sprintf(decomp_filename, "decomp_rdat_%s_.nc", TEST_NAME); + + /* Create the PIO decomposition for this test. */ + if ((ret = PIOc_init_decomp(iosysid, piotype, NDIM2, &dim_len[1], elements_per_pe, + compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + + /* Write the decomp file (on appropriate tasks). */ + if ((ret = PIOc_write_nc_decomp(iosysid, decomp_filename, 0, ioid, NULL, NULL, 0))) + return ret; + + int fortran_order; + int ioid2; + if ((ret = PIOc_read_nc_decomp(iosysid, decomp_filename, &ioid2, comp_comm, + PIO_INT, NULL, NULL, &fortran_order))) + return ret; + + /* Free the decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid2))) + ERR(ret); + + /* Test each available iotype. 
*/ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + int ncid; + PIO_Offset type_size; + int dimid[NDIM3]; + int varid[NVAR]; + char data_filename[PIO_MAX_NAME + 1]; + void *my_data; + void *my_data_multi; + void *my_data_norec; + signed char my_data_byte[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + char my_data_char[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + short my_data_short[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + int my_data_int[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + float my_data_float[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + double my_data_double[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; +#ifdef _NETCDF4 + unsigned char my_data_ubyte[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + unsigned short my_data_ushort[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + unsigned int my_data_uint[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + long long my_data_int64[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + unsigned long long my_data_uint64[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; +#endif /* _NETCDF4 */ + signed char my_data_byte_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + char my_data_char_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + short my_data_short_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + int my_data_int_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + float my_data_float_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + double my_data_double_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; +#ifdef _NETCDF4 + unsigned char my_data_ubyte_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + unsigned short my_data_ushort_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + unsigned int my_data_uint_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + long long my_data_int64_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; + unsigned long long my_data_uint64_norec[LAT_LEN] = {my_rank * 20, my_rank * 20 + 1}; +#endif /* _NETCDF4 */ + + /* Only netCDF-4 can handle extended types. 
*/ + if (piotype > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P) + continue; + + /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (piotype == PIO_BYTE || piotype == PIO_CHAR)) + continue; + + /* Select the correct data to write, depending on type. */ + switch (piotype) + { + case PIO_BYTE: + my_data = my_data_byte; + my_data_norec = my_data_byte_norec; + break; + case PIO_CHAR: + my_data = my_data_char; + my_data_norec = my_data_char_norec; + break; + case PIO_SHORT: + my_data = my_data_short; + my_data_norec = my_data_short_norec; + break; + case PIO_INT: + my_data = my_data_int; + my_data_norec = my_data_int_norec; + break; + case PIO_FLOAT: + my_data = my_data_float; + my_data_norec = my_data_float_norec; + break; + case PIO_DOUBLE: + my_data = my_data_double; + my_data_norec = my_data_double_norec; + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + my_data = my_data_ubyte; + my_data_norec = my_data_ubyte_norec; + break; + case PIO_USHORT: + my_data = my_data_ushort; + my_data_norec = my_data_ushort_norec; + break; + case PIO_UINT: + my_data = my_data_uint; + my_data_norec = my_data_uint_norec; + break; + case PIO_INT64: + my_data = my_data_int64; + my_data_norec = my_data_int64_norec; + break; + case PIO_UINT64: + my_data = my_data_uint64; + my_data_norec = my_data_uint64_norec; + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + + /* Create sample output file. */ + sprintf(data_filename, "data_%s_iotype_%d_piotype_%d.nc", TEST_NAME, flavor[fmt], + piotype); + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename, + NC_CLOBBER))) + ERR(ret); + + /* Find the size of the type. */ + if ((ret = PIOc_inq_type(ncid, piotype, NULL, &type_size))) + ERR(ret); + + /* Create the data for the darray_multi call by making two + * copies of the data. 
*/ + if (!(my_data_multi = malloc(2 * type_size * elements_per_pe))) + ERR(PIO_ENOMEM); + memcpy(my_data_multi, my_data, type_size * elements_per_pe); + memcpy((char *)my_data_multi + type_size * elements_per_pe, my_data, type_size * elements_per_pe); + + /* Define dimensions. */ + for (int d = 0; d < NDIM3; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) + ERR(ret); + + /* Define variables. */ + if ((ret = PIOc_def_var(ncid, REC_VAR_NAME, piotype, NDIM3, dimid, &varid[0]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, REC_VAR_NAME2, piotype, NDIM3, dimid, &varid[1]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, NOREC_VAR_NAME, piotype, NDIM2, &dimid[1], + &varid[2]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, NOREC_VAR_NAME2, piotype, NDIM2, &dimid[1], + &varid[3]))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Set the record number for the record vars. */ + if ((ret = PIOc_setframe(ncid, varid[0], 0))) + ERR(ret); + if ((ret = PIOc_setframe(ncid, varid[1], 0))) + ERR(ret); + + /* Write some data to the record vars. */ + if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + + /* Write some data to the non-record vars. */ + if ((ret = PIOc_write_darray(ncid, varid[2], ioid, elements_per_pe, my_data_norec, NULL))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[3], ioid, elements_per_pe, my_data_norec, NULL))) + ERR(ret); + + /* Sync the file. */ + if ((ret = PIOc_sync(ncid))) + ERR(ret); + + /* Increment the record number for the record vars. */ + if ((ret = PIOc_advanceframe(ncid, varid[0]))) + ERR(ret); + if ((ret = PIOc_advanceframe(ncid, varid[1]))) + ERR(ret); + + /* Write another record. 
*/ + if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + + /* Sync the file. */ + if ((ret = PIOc_sync(ncid))) + ERR(ret); + + /* Increment the record number for the record var. */ + if ((ret = PIOc_advanceframe(ncid, varid[0]))) + ERR(ret); + if ((ret = PIOc_advanceframe(ncid, varid[1]))) + ERR(ret); + + /* Write a third record. */ + if ((ret = PIOc_write_darray(ncid, varid[0], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[1], ioid, elements_per_pe, my_data, NULL))) + ERR(ret); + + /* Increment the record number for the record var. */ + if ((ret = PIOc_advanceframe(ncid, varid[0]))) + ERR(ret); + if ((ret = PIOc_advanceframe(ncid, varid[1]))) + ERR(ret); + + /* Write a forth record, using darray_multi(). */ + int frame[2] = {3, 3}; + if ((ret = PIOc_write_darray_multi(ncid, varid, ioid, 2, elements_per_pe, my_data_multi, frame, NULL, 0))) + ERR(ret); + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Free resources. */ + free(my_data_multi); + + /* Check the file for correctness. */ + if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank, piotype))) + ERR(ret); + + } /* next iotype */ + + /* Free the decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return 0; +} + +/* Run Tests for pio_spmd.c functions. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. 
*/ +#ifdef _NETCDF4 +#define NUM_TYPES_TO_TEST 11 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define NUM_TYPES_TO_TEST 6 + int test_type[NUM_TYPES_TO_TEST] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE}; +#endif /* _NETCDF4 */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + TARGET_NTASKS, -1, &test_comm))) + ERR(ERR_INIT); + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do + * nothing. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; + + /* Initialize with task 0 as IO task, tasks 1-3 as a + * computation component. */ +#define NUM_IO_PROCS 1 +#define NUM_COMPUTATION_PROCS 3 +#define COMPONENT_COUNT 1 + int num_computation_procs = NUM_COMPUTATION_PROCS; + MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */ + MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */ + int mpierr; + + /* Run the test for each data type. */ + for (int t = 0; t < NUM_TYPES_TO_TEST; t++) + { + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + &num_computation_procs, NULL, &io_comm, comp_comm, + PIO_REARR_BOX, &iosysid))) + ERR(ERR_INIT); + + /* This code runs only on computation components. */ + if (my_rank) + { + /* Run the simple darray async test. */ + if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, comp_comm[0], num_flavors, + flavor, test_type[t]))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + /* Free the computation conomponent communicator. 
*/ + if ((mpierr = MPI_Comm_free(comp_comm))) + MPIERR(mpierr); + } + else + { + /* Free the IO communicator. */ + if ((mpierr = MPI_Comm_free(&io_comm))) + MPIERR(mpierr); + } + } /* next type */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_async_many.c b/src/externals/pio2/tests/cunit/test_darray_async_many.c new file mode 100644 index 00000000000..d993bfaef29 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_async_many.c @@ -0,0 +1,645 @@ +/* + * This program tests darrays with async. This tests uses many types + * of vars and iodesc's, all in the same file. + * + * @author Ed Hartnett + * @date 5/10/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 1 + +/* The name of this test. */ +#define TEST_NAME "test_darray_async_many" + +/* For 1-D use. */ +#define NDIM1 1 + +/* For 2-D use. */ +#define NDIM2 2 + +/* For 3-D use. */ +#define NDIM3 3 + +/* For 4-D use. */ +#define NDIM4 4 + +/* For maplens of 2. */ +#define MAPLEN2 2 + +/* Lengths of non-unlimited dimensions. */ +#define LAT_LEN 2 +#define LON_LEN 3 +#define VERT_LEN 2 + +/* Number of vars in test file. */ +#ifdef _NETCDF4 +#define NTYPE 11 +int my_type[NTYPE] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, + PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64}; +#else +#define NTYPE NUM_CLASSIC_TYPES +int my_type[NTYPE] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, + PIO_DOUBLE}; +#endif /* _NETCDF4 */ + +/* We will have one record, and one non-record var of each type. */ +#define NVAR (NTYPE * 2) + +/* We will also add some 4D vars, for extra fun. 
*/ +#define NUM_4D_VARS 2 + +/* Number of records written for record vars. */ +#define NREC 3 + +/* Names of the dimensions. */ +char dim_name[NDIM4][PIO_MAX_NAME + 1] = {"time", "vert_level", "lat", "lon"}; + +/* Check the file that was created in this test. */ +int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank, + int *rec_varid, int *norec_varid, int num_types, int *varid_4d) +{ + int ncid; + int ret; + + /* These are the values we expect. */ + signed char expected_byte[LAT_LEN * LON_LEN] = {1, 2, 2, 3, 3, 4}; + char expected_char[LAT_LEN * LON_LEN] = {65, 97, 66, 98, 67, 99}; + short expected_short[LAT_LEN * LON_LEN] = {-10, -9, -20, -19, -30, -29}; + int expected_int[LAT_LEN * LON_LEN] = {-NC_MAX_SHORT - 1, NC_MAX_SHORT + 2, -NC_MAX_SHORT - 2, + NC_MAX_SHORT + 3, -NC_MAX_SHORT - 3, NC_MAX_SHORT + 4}; + float expected_float[LAT_LEN * LON_LEN] = {10.5, 11.5, 21, 22, 31.5, 32.5}; + double expected_double[LAT_LEN * LON_LEN] = {NC_MAX_FLOAT + 0.5, NC_MAX_FLOAT + 1.5, NC_MAX_FLOAT + 1.5, + NC_MAX_FLOAT + 2.5, NC_MAX_FLOAT + 2.5, NC_MAX_FLOAT + 3.5}; +#ifdef _NETCDF4 + unsigned char expected_ubyte[LAT_LEN * LON_LEN] = {10, 11, 20, 21, 30, 31}; + unsigned short expected_ushort[LAT_LEN * LON_LEN] = {1000, 1001, 2000, 2001, 3000, 3001}; + unsigned int expected_uint[LAT_LEN * LON_LEN] = {(unsigned short)32777, (unsigned short)32778, (unsigned short)32787, (unsigned short)32788, (unsigned short)32797, (unsigned short)32798}; + long long expected_int64[LAT_LEN * LON_LEN] = {-2147483639LL, -2147483637LL, -2147483629LL, + -2147483627LL, -2147483619LL, -2147483617LL}; + unsigned long long expected_uint64[LAT_LEN * LON_LEN] = {9223372036854775817ULL, 9223372036854775818ULL, + 9223372036854775827ULL, 9223372036854775828ULL, + 9223372036854775837ULL, 9223372036854775838ULL}; +#endif /* _NETCDF4 */ + int expected_int_4d[VERT_LEN * LAT_LEN * LON_LEN] = {1, 0, 2, 1, 2, 1, 3, 2, 3, 2, 4, 3}; + float expected_float_4d[VERT_LEN * LAT_LEN * LON_LEN] = {1, 
0, 2, 1.5, 2, 1, 3, 2.5, 3, 2, 4, 3.5}; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, data_filename, NC_NOWRITE))) + ERR(ret); + + /* Check metadata. */ + int ndims_in, nvars_in, ngatts_in, unlimdimid_in; + if ((ret = PIOc_inq(ncid, &ndims_in, &nvars_in, &ngatts_in, &unlimdimid_in))) + ERR(ret); + if (ndims_in != NDIM4 || nvars_in != num_types * 2 + NUM_4D_VARS || ngatts_in != 0 || unlimdimid_in != 0) + ERR(ERR_WRONG); + + /* Check the vars. */ + for (int t = 0; t < num_types; t++) + { + void *data_in; + void *norec_data_in; + PIO_Offset type_size; + + /* Find size of type. */ + if ((ret = PIOc_inq_type(ncid, my_type[t], NULL, &type_size))) + ERR(ret); + + /* Allocate buffers to hold data. */ + if (!(data_in = malloc(LAT_LEN * LON_LEN * NREC * type_size))) + ERR(PIO_ENOMEM); + if (!(norec_data_in = malloc(LAT_LEN * LON_LEN * type_size))) + ERR(PIO_ENOMEM); + + /* Read record and non-record vars for this type. */ + if ((ret = PIOc_get_var(ncid, rec_varid[t], data_in))) + ERR(ret); + if ((ret = PIOc_get_var(ncid, norec_varid[t], norec_data_in))) + ERR(ret); + + /* Check each value of non-record data. 
*/ + for (int r = 0; r < LAT_LEN * LON_LEN; r++) + { + switch (my_type[t]) + { + case PIO_BYTE: + if (((signed char *)norec_data_in)[r] != expected_byte[r]) + ERR(ERR_WRONG); + break; + case PIO_CHAR: + if (((char *)norec_data_in)[r] != expected_char[r]) + ERR(ERR_WRONG); + break; + case PIO_SHORT: + if (((short *)norec_data_in)[r] != expected_short[r]) + ERR(ERR_WRONG); + break; + case PIO_INT: + if (((int *)norec_data_in)[r] != expected_int[r]) + ERR(ERR_WRONG); + break; + case PIO_FLOAT: + if (((float *)norec_data_in)[r] != expected_float[r]) + ERR(ERR_WRONG); + break; + case PIO_DOUBLE: + if (((double *)norec_data_in)[r] != expected_double[r]) + ERR(ERR_WRONG); + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)norec_data_in)[r] != expected_ubyte[r]) + ERR(ERR_WRONG); + break; + case PIO_USHORT: + if (((unsigned short *)norec_data_in)[r] != expected_ushort[r]) + ERR(ERR_WRONG); + break; + case PIO_UINT: + if (((unsigned int *)norec_data_in)[r] != expected_uint[r]) + ERR(ERR_WRONG); + break; + case PIO_INT64: + if (((long long *)norec_data_in)[r] != expected_int64[r]) + ERR(ERR_WRONG); + break; + case PIO_UINT64: + if (((unsigned long long *)norec_data_in)[r] != expected_uint64[r]) + ERR(ERR_WRONG); + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + + /* Check each value of record data. 
*/ + for (int r = 0; r < LAT_LEN * LON_LEN * NREC; r++) + { + switch (my_type[t]) + { + case PIO_BYTE: + if (((signed char *)data_in)[r] != expected_byte[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_CHAR: + if (((char *)data_in)[r] != expected_char[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_SHORT: + if (((short *)data_in)[r] != expected_short[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_INT: + if (((int *)data_in)[r] != expected_int[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_FLOAT: + if (((float *)data_in)[r] != expected_float[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_DOUBLE: + if (((double *)data_in)[r] != expected_double[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; +#ifdef _NETCDF4 + case PIO_UBYTE: + if (((unsigned char *)data_in)[r] != expected_ubyte[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_USHORT: + if (((unsigned short *)data_in)[r] != expected_ushort[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_UINT: + if (((unsigned int *)data_in)[r] != expected_uint[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_INT64: + if (((long long *)data_in)[r] != expected_int64[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_UINT64: + if (((unsigned long long *)data_in)[r] != expected_uint64[r % (LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; +#endif /* _NETCDF4 */ + default: + ERR(ERR_WRONG); + } + } + + /* Check the 4D vars. */ + for (int v = 0; v < NUM_4D_VARS; v++) + { + void *data_in; + int xtype; + PIO_Offset size; + + /* Get the type of the 4d var. */ + if ((ret = PIOc_inq_vartype(ncid, varid_4d[v], &xtype))) + ERR(ret); + + /* Get the size of this type. */ + if ((ret = PIOc_inq_type(ncid, xtype, NULL, &size))) + ERR(ret); + + /* Allocate memory for data. */ + if (!(data_in = malloc(size * VERT_LEN * LAT_LEN * LON_LEN * NREC))) + ERR(PIO_ENOMEM); + + /* Read the data. 
*/ + if ((ret = PIOc_get_var(ncid, varid_4d[v], data_in))) + ERR(ret); + + /* Check each element of data. */ + for (int r = 0; r < LAT_LEN * LON_LEN * NREC; r++) + { + switch (xtype) + { + case PIO_INT: + if (((int *)data_in)[r] != expected_int_4d[r % (VERT_LEN * LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + case PIO_FLOAT: + if (((float *)data_in)[r] != expected_float_4d[r % (VERT_LEN * LAT_LEN * LON_LEN)]) + ERR(ERR_WRONG); + break; + default: + ERR(ERR_WRONG); + } + } + + /* Release memory. */ + free(data_in); + } + + free(data_in); + free(norec_data_in); + } + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return 0; +} + +/* Run a simple test using darrays with async. */ +int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm, + int num_flavors, int *flavor) +{ + int ioid_byte; + int ioid_char; + int ioid_short; + int ioid_int; + int ioid_float; + int ioid_double; +#ifdef _NETCDF4 + int ioid_ubyte; + int ioid_ushort; + int ioid_uint; + int ioid_int64; + int ioid_uint64; +#endif + int ioid_4d_float; + int ioid_4d_int; + int dim_len[NDIM4] = {NC_UNLIMITED, VERT_LEN, LAT_LEN, LON_LEN}; + int dimids_4d[NDIM4] = {0, 1, 2, 3}; + int dimids_3d[NDIM3] = {0, 2, 3}; + int dimids_2d[NDIM2] = {2, 3}; + PIO_Offset elements_per_pe = LAT_LEN; + PIO_Offset elements_per_pe_3d = VERT_LEN * LAT_LEN; + /* Recall the task 0 does not run this code, so the first my_rank + * is 1. */ + PIO_Offset compdof[LAT_LEN] = {my_rank * 2 - 2, my_rank * 2 - 1}; + PIO_Offset compdof_3d[VERT_LEN * LAT_LEN] = {my_rank * 4 - 4, my_rank * 4 - 3, my_rank * 4 - 2, my_rank * 4 - 1}; + char decomp_filename[PIO_MAX_NAME + 1]; + + /* Test data. 
*/ + signed char my_data_byte[LAT_LEN] = {my_rank, my_rank + 1}; + char my_data_char[LAT_LEN] = {64 + my_rank, 96 + my_rank}; + short my_data_short[LAT_LEN] = {-my_rank * 10, -my_rank * 10 + 1}; + int my_data_int[LAT_LEN] = {-NC_MAX_SHORT - my_rank, NC_MAX_SHORT + my_rank + 1}; + float my_data_float[LAT_LEN] = {my_rank * 10.5, my_rank * 10.5 + 1}; + double my_data_double[LAT_LEN] = {NC_MAX_FLOAT + my_rank + 0.5, NC_MAX_FLOAT + my_rank * 1.5}; +#ifdef _NETCDF4 + unsigned char my_data_ubyte[LAT_LEN] = {my_rank * 10, my_rank * 10 + 1}; + unsigned short my_data_ushort[LAT_LEN] = {my_rank * 1000, my_rank * 1000 + 1}; + unsigned int my_data_uint[LAT_LEN] = {NC_MAX_SHORT + my_rank * 10, NC_MAX_SHORT + my_rank * 10 + 1}; + long long my_data_int64[LAT_LEN] = {NC_MAX_INT + my_rank * 10, -NC_MAX_INT + my_rank * 10}; + unsigned long long my_data_uint64[LAT_LEN] = {NC_MAX_INT64 + my_rank * 10, + NC_MAX_INT64 + my_rank * 10 + 1}; +#endif /* _NETCDF4 */ + int int_4d_data[VERT_LEN * LAT_LEN] = {my_rank, my_rank - 1, my_rank + 1, my_rank}; + float float_4d_data[VERT_LEN * LAT_LEN] = {my_rank, my_rank - 1, my_rank + 1, + my_rank + 0.5}; + +#ifdef _NETCDF4 + void *my_data[NTYPE] = {my_data_byte, my_data_char, my_data_short, my_data_int, my_data_float, my_data_double, + my_data_ubyte, my_data_ushort, my_data_uint, my_data_int64, my_data_uint64}; +#else + void *my_data[NTYPE] = {my_data_byte, my_data_char, my_data_short, my_data_int, my_data_float, my_data_double}; +#endif /* _NETCDF4 */ + int ret; + + sprintf(decomp_filename, "decomp_%s.nc", TEST_NAME); + + /* Create the PIO decompositions for this test. 
*/ + if ((ret = PIOc_init_decomp(iosysid, PIO_BYTE, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_byte, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_CHAR, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_char, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_SHORT, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_short, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_int, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_float, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_DOUBLE, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_double, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + +#ifdef _NETCDF4 + if ((ret = PIOc_init_decomp(iosysid, PIO_UBYTE, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_ubyte, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_USHORT, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_ushort, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_UINT, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_uint, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_INT64, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_int64, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_UINT64, NDIM2, &dim_len[2], elements_per_pe, + compdof, &ioid_uint64, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); +#endif + + if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, &dim_len[1], elements_per_pe_3d, + compdof_3d, &ioid_4d_int, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM3, &dim_len[1], elements_per_pe_3d, + compdof_3d, &ioid_4d_float, PIO_REARR_BOX, NULL, 
NULL))) + ERR(ret); + + /* These are the decompositions associated with each type. */ +#ifdef _NETCDF4 + int var_ioid[NTYPE] = {ioid_byte, ioid_char, ioid_short, ioid_int, ioid_float, + ioid_double, ioid_ubyte, ioid_ushort, ioid_uint, ioid_int64, + ioid_uint64}; +#else + int var_ioid[NTYPE] = {ioid_byte, ioid_char, ioid_short, ioid_int, ioid_float, + ioid_double}; +#endif /* _NETCDF4 */ + int var_ioid_4d[NUM_4D_VARS] = {ioid_4d_int, ioid_4d_float}; + + /* Write the decomp file for the 1-byte ioid. */ + if ((ret = PIOc_write_nc_decomp(iosysid, decomp_filename, 0, ioid_byte, NULL, NULL, 0))) + return ret; + + for (int fmt = 0; fmt < num_flavors; fmt++) + { + int ncid; + int dimid[NDIM4]; + char data_filename[PIO_MAX_NAME + 1]; + int num_types = flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P ? + NUM_NETCDF4_TYPES - 1 : NUM_CLASSIC_TYPES; + int rec_varid[num_types]; + int norec_varid[num_types]; + + /* For now, don't test with pnetcdf since byte and char don't + * work with pnetcdf. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF) + continue; + + /* Create sample output file. */ + sprintf(data_filename, "data_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename, + NC_CLOBBER))) + ERR(ret); + + /* Define dimensions. */ + for (int d = 0; d < NDIM4; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], dim_len[d], &dimid[d]))) + ERR(ret); + + /* Define variables. 
*/ + char var_name[PIO_MAX_NAME + 1]; + char var_norec_name[PIO_MAX_NAME + 1]; + for (int t = 0; t < num_types; t++) + { + + sprintf(var_name, "var_%d", t); + sprintf(var_norec_name, "var_norec_%d", t); + if ((ret = PIOc_def_var(ncid, var_name, my_type[t], NDIM3, dimids_3d, &rec_varid[t]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, var_norec_name, my_type[t], NDIM2, dimids_2d, + &norec_varid[t]))) + ERR(ret); + } + + char var_name_4d[NUM_4D_VARS][PIO_MAX_NAME + 1] = {"var_4d_int", "var_4d_float"}; + int var_type_4d[NUM_4D_VARS] = {PIO_INT, PIO_FLOAT}; + int varid_4d[NUM_4D_VARS]; + void *my_data_4d[NUM_4D_VARS] = {int_4d_data, float_4d_data}; + + /* Define some 4D vars for extra fun. */ + for (int v = 0; v < NUM_4D_VARS; v++) + if ((ret = PIOc_def_var(ncid, var_name_4d[v], var_type_4d[v], NDIM4, dimids_4d, &varid_4d[v]))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write a record and non-record var for each type. */ + for (int t = 0; t < num_types; t++) + { + /* Write record data to the record vars. */ + for (int r = 0; r < NREC; r++) + { + /* Set or advance the record number. */ + if (!r) + { + if ((ret = PIOc_setframe(ncid, rec_varid[t], 0))) + ERR(ret); + } + else + { + if ((ret = PIOc_advanceframe(ncid, rec_varid[t]))) + ERR(ret); + } + + /* Write a record of data. */ + if ((ret = PIOc_write_darray(ncid, rec_varid[t], var_ioid[t], elements_per_pe, + my_data[t], NULL))) + ERR(ret); + + /* Sync the file. */ + if ((ret = PIOc_sync(ncid))) + ERR(ret); + } /* next record. */ + } /* next type */ + + /* Write some data to the non-record vars. */ + for (int t = 0; t < num_types; t++) + { + if ((ret = PIOc_write_darray(ncid, norec_varid[t], var_ioid[t], elements_per_pe, my_data[t], NULL))) + ERR(ret); + } + + /* Write the 4D vars. 
*/ + for (int v = 0; v < NUM_4D_VARS; v++) + { + for (int r = 0; r < NREC; r++) + { + if (!r) + { + if ((ret = PIOc_setframe(ncid, varid_4d[v], 0))) + ERR(ret); + } + else + { + if ((ret = PIOc_advanceframe(ncid, varid_4d[v]))) + ERR(ret); + } + + if ((ret = PIOc_write_darray(ncid, varid_4d[v], var_ioid_4d[v], elements_per_pe_3d, + my_data_4d[v], NULL))) + ERR(ret); + } + } + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Check the file for correctness. */ + if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank, + rec_varid, norec_varid, num_types, varid_4d))) + ERR(ret); + + } /* next iotype */ + + /* Free the decompositions. */ + if ((ret = PIOc_freedecomp(iosysid, ioid_byte))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_char))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_short))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_int))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_float))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_double))) + ERR(ret); +#ifdef _NETCDF4 + if ((ret = PIOc_freedecomp(iosysid, ioid_ubyte))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_ushort))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_uint))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_int64))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_uint64))) + ERR(ret); +#endif /* _NETCDF4 */ + + if ((ret = PIOc_freedecomp(iosysid, ioid_4d_int))) + ERR(ret); + if ((ret = PIOc_freedecomp(iosysid, ioid_4d_float))) + ERR(ret); + return 0; +} + +/* Run Tests for pio_spmd.c functions. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. 
*/ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + TARGET_NTASKS, -1, &test_comm))) + ERR(ERR_INIT); + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do + * nothing. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; + + /* Initialize with task 0 as IO task, tasks 1-3 as a + * computation component. */ +#define NUM_IO_PROCS 1 +#define NUM_COMPUTATION_PROCS 3 +#define COMPONENT_COUNT 1 + int num_computation_procs = NUM_COMPUTATION_PROCS; + MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */ + MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */ + int mpierr; + + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + &num_computation_procs, NULL, &io_comm, comp_comm, + PIO_REARR_BOX, &iosysid))) + ERR(ERR_INIT); + + /* This code runs only on computation components. */ + if (my_rank) + { + /* Run the simple darray async test. */ + if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, num_flavors, flavor))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + /* Free the computation conomponent communicator. */ + if ((mpierr = MPI_Comm_free(comp_comm))) + MPIERR(mpierr); + } + else + { + /* Free the IO communicator. */ + if ((mpierr = MPI_Comm_free(&io_comm))) + MPIERR(mpierr); + } + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. 
*/ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_async_simple.c b/src/externals/pio2/tests/cunit/test_darray_async_simple.c new file mode 100644 index 00000000000..c5a8f0f5d16 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_async_simple.c @@ -0,0 +1,217 @@ +/* + * This program tests a very simple case of using distributed arrays + * with async. + * + * @author Ed Hartnett + * @date 4/26/17 + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 1 + +/* The name of this test. */ +#define TEST_NAME "test_darray_async_simple" + +/* For 1-D use. */ +#define NDIM1 1 + +/* For maplens of 2. */ +#define MAPLEN2 2 + +/* Name of test dim. */ +#define DIM_NAME "Musketeer" + +/* Name of test var. (Don't read anything into it. Sometimes a sword + * is just a sword.)*/ +#define VAR_NAME "Sword_Length" + +/* Number of data elements on each compute task. */ +#define ELEM1 1 + +/* Length of the dimension. */ +#define LEN3 3 + +/* Check the file that was created in this test. */ +int check_darray_file(int iosysid, char *data_filename, int iotype, int my_rank) +{ + int ncid; + int dimid; + int varid; + float data_in[LEN3]; + int ret; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid, &iotype, data_filename, NC_NOWRITE))) + ERR(ret); + + /* Check the metadata. */ + if ((ret = PIOc_inq_varid(ncid, VAR_NAME, &varid))) + ERR(ret); + if ((ret = PIOc_inq_dimid(ncid, DIM_NAME, &dimid))) + ERR(ret); + + /* Check the data. */ + if ((ret = PIOc_get_var(ncid, varid, &data_in))) + ERR(ret); + for (int r = 1; r < TARGET_NTASKS; r++) + if (data_in[r - 1] != r * 10.0) + ERR(ret); + + /* Close the file. 
*/ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + return 0; +} + +/* Run a simple test using darrays with async. */ +int run_darray_async_test(int iosysid, int my_rank, MPI_Comm test_comm, + int num_flavors, int *flavor) +{ + int ioid; + int dim_len = LEN3; + PIO_Offset elements_per_pe = ELEM1; + PIO_Offset compdof[ELEM1] = {my_rank - 1}; + char decomp_filename[PIO_MAX_NAME + 1]; + int ret; + + sprintf(decomp_filename, "decomp_%s_rank_%d.nc", TEST_NAME, my_rank); + + /* Create the PIO decomposition for this test. */ + if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, &dim_len, elements_per_pe, + compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) + ERR(ret); + + /* Write the decomp file (on appropriate tasks). */ + if ((ret = PIOc_write_nc_decomp(iosysid, decomp_filename, 0, ioid, NULL, NULL, 0))) + return ret; + + for (int fmt = 0; fmt < num_flavors; fmt++) + { + int ncid; + int dimid; + int varid; + char data_filename[PIO_MAX_NAME + 1]; + float my_data = my_rank * 10; + + /* Generate a file name. */ + sprintf(data_filename, "data_%s_iotype_%d.nc", TEST_NAME, flavor[fmt]); + + /* Create sample output file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], data_filename, + NC_CLOBBER))) + ERR(ret); + + /* Define dimension. */ + if ((ret = PIOc_def_dim(ncid, DIM_NAME, dim_len, &dimid))) + ERR(ret); + + /* Define variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM1, &dimid, &varid))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write some data. */ + if ((ret = PIOc_write_darray(ncid, varid, ioid, ELEM1, &my_data, NULL))) + ERR(ret); + + /* Close the file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Check the file for correctness. */ + if ((ret = check_darray_file(iosysid, data_filename, PIO_IOTYPE_NETCDF, my_rank))) + ERR(ret); + + } /* next iotype */ + + /* Free the decomposition. 
*/ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + return 0; +} + +/* Run Tests for pio_spmd.c functions. */ +int main(int argc, char **argv) +{ + int my_rank; /* Zero-based rank of processor. */ + int ntasks; /* Number of processors involved in current execution. */ + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + TARGET_NTASKS, -1, &test_comm))) + ERR(ERR_INIT); + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do + * nothing. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; + + /* Initialize with task 0 as IO task, tasks 1-3 as a + * computation component. */ +#define NUM_IO_PROCS 1 +#define NUM_COMPUTATION_PROCS 3 +#define COMPONENT_COUNT 1 + int num_computation_procs = NUM_COMPUTATION_PROCS; + MPI_Comm io_comm; /* Will get a duplicate of IO communicator. */ + MPI_Comm comp_comm[COMPONENT_COUNT]; /* Will get duplicates of computation communicators. */ + int mpierr; + + if ((ret = PIOc_init_async(test_comm, NUM_IO_PROCS, NULL, COMPONENT_COUNT, + &num_computation_procs, NULL, &io_comm, comp_comm, + PIO_REARR_BOX, &iosysid))) + ERR(ERR_INIT); + + /* This code runs only on computation components. */ + if (my_rank) + { + /* Run the simple darray async test. */ + if ((ret = run_darray_async_test(iosysid, my_rank, test_comm, num_flavors, flavor))) + return ret; + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + /* Free the computation conomponent communicator. 
*/ + if ((mpierr = MPI_Comm_free(comp_comm))) + MPIERR(mpierr); + } + else + { + /* Free the IO communicator. */ + if ((mpierr = MPI_Comm_free(&io_comm))) + MPIERR(mpierr); + } + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_frame.c b/src/externals/pio2/tests/cunit/test_darray_frame.c new file mode 100644 index 00000000000..38c7e99e8a4 --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_frame.c @@ -0,0 +1,240 @@ +/* + * Tests for PIO distributed arrays. This test is a C version of some + * fortran tests in pio_decomp_frame_tests.f90. + * + * @author Ed Hartnett + * @date 8/11/17 + */ +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_darray_frame" + +/* The number of dimensions in the example data. In this test, we + * are using three-dimensional data. */ +#define NDIM3 3 + +/* But sometimes we need arrays of the non-record dimensions. */ +#define NDIM2 2 + +/* These are the dimension values used in a typical input file to + * ESMCI/cime. */ +/* #define TIME_LEN 12 */ +/* #define LAT_LEN 94 */ +/* #define LON_LEN 192 */ + +/* Here's a shorter version for a simpler test. */ +#define TIME_LEN_SHORT 3 +#define LAT_LEN_SHORT 2 +#define LON_LEN_SHORT 2 + +/* The number of timesteps of data to write. */ +#define NUM_TIMESTEPS 2 + +/* The names of variable in the netCDF output files. */ +#define VAR_NAME "prc" + +/** + * Test the darray functionality. Create a netCDF file with 3 + * dimensions and 1 PIO_INT variable, and use darray to write some + * data. There are no unlimited dimensions. + * + * @param iosysid the IO system ID. 
+ * @param num_iotypes the number of IOTYPES available in this build. + * @param iotype array of available iotypes. + * @param my_rank rank of this task. + * @param pio_type the type of the data. + * @returns 0 for success, error code otherwise. + */ +int test_frame_simple(int iosysid, int num_iotypes, int *iotype, int my_rank, + int rearranger) +{ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + PIO_Offset *compdof; /* The decomposition mapping. */ + int ioid; + int dim_len_2d[NDIM2] = {LAT_LEN_SHORT, LON_LEN_SHORT}; + int ret; /* Return code. */ + + /* How many elements per task? */ + elements_per_pe = dim_len_2d[0] * dim_len_2d[1] / TARGET_NTASKS; + + /* Allocate space for the decomposition array. */ + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + + /* Describe the decomposition. This is a 1-based array, so add 1! */ + for (int i = 0; i < elements_per_pe; i++) + compdof[i] = my_rank * elements_per_pe + i + 1; + + /* Create the PIO decomposition for this test. */ + printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); + if ((ret = PIOc_InitDecomp(iosysid, PIO_INT, NDIM2, dim_len_2d, elements_per_pe, + compdof, &ioid, NULL, NULL, NULL))) + ERR(ret); + + printf("%d decomposition initialized.\n", my_rank); + + /* Free the mapping. */ + free(compdof); + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_iotypes; fmt++) + { + int ncid; /* The ncid of the netCDF file. */ + int ncid2; /* The ncid of the re-opened netCDF file. */ + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + int dimids[NDIM3]; /* The dimension IDs. */ + int dim_len[NDIM3] = {TIME_LEN_SHORT, LAT_LEN_SHORT, LON_LEN_SHORT}; + char dim_name[NDIM3][PIO_MAX_NAME + 1] = {"time", "lat", "lon"}; + int varid; /* The ID of the netCDF varable. */ + + /* Create the filename. 
*/ + sprintf(filename, "simple_frame_%s_iotype_%d_rearr_%d.nc", TEST_NAME, iotype[fmt], + rearranger); + + /* Create the netCDF output file. */ + printf("rank: %d Creating sample file %s\n", my_rank, filename); + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype[fmt], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + for (int d = 0; d < NDIM3; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + + /* Define a variable. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM3, dimids, &varid))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Write records of data. */ + for (int r = 0; r < TIME_LEN_SHORT; r++) + { + int test_data_int[elements_per_pe]; + + /* Initialize test data. */ + for (int i = 0; i < elements_per_pe; i++) + test_data_int[i] = my_rank + r * 100; + + /* Set the value of the record dimension. */ + if ((ret = PIOc_setframe(ncid, varid, r))) + ERR(ret); + + /* Write the data. */ + if ((ret = PIOc_write_darray(ncid, varid, ioid, elements_per_pe, test_data_int, NULL))) + ERR(ret); + } + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + { + int test_data_int_in[elements_per_pe]; + + /* Reopen the file. */ + if ((ret = PIOc_openfile(iosysid, &ncid2, &iotype[fmt], filename, PIO_NOWRITE))) + ERR(ret); + + for (int r = 0; r < TIME_LEN_SHORT; r++) + { + + /* Set the record number. */ + if ((ret = PIOc_setframe(ncid2, varid, r))) + ERR(ret); + + /* Read the data. */ + if ((ret = PIOc_read_darray(ncid2, varid, ioid, elements_per_pe, test_data_int_in))) + ERR(ret); + + /* Check the results. */ + for (int f = 0; f < elements_per_pe; f++) + if (test_data_int_in[f] != my_rank + r * 100) + return ERR_WRONG; + } + + /* Close the netCDF file. 
*/ + printf("%d Closing the sample data file...\n", my_rank); + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + } + } /* next iotype */ + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + return PIO_NOERR; +} + +/* Run tests for darray functions. */ +int main(int argc, char **argv) +{ +#define NUM_REARRANGERS_TO_TEST 2 + int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; + int my_rank; + int ntasks; + int num_iotypes; /* Number of PIO netCDF iotypes in this build. */ + int iotype[NUM_IOTYPES]; /* iotypes for the supported netCDF IO iotypes. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, + MIN_NTASKS, 3, &test_comm))) + ERR(ERR_INIT); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_iotypes, iotype))) + ERR(ret); + printf("Runnings tests for %d iotypes\n", num_iotypes); + + for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) + { + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + + /* Run tests. */ + printf("%d Running tests...\n", my_rank); + if ((ret = test_frame_simple(iosysid, num_iotypes, iotype, my_rank, rearranger[r]))) + return ret; + + /* Finalize PIO system. 
*/ + if ((ret = PIOc_finalize(iosysid))) + return ret; + } /* next rearranger */ + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + printf("%d %s Finalizing...\n", my_rank, TEST_NAME); + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_darray_multi.c b/src/externals/pio2/tests/cunit/test_darray_multi.c index 206b783e121..31fe313ff94 100644 --- a/src/externals/pio2/tests/cunit/test_darray_multi.c +++ b/src/externals/pio2/tests/cunit/test_darray_multi.c @@ -2,8 +2,10 @@ * Tests for PIO distributed arrays. This program tests the * PIOc_write_darray_multi() function with more than one variable. * - * Ed Hartnett, 3/7/17 + * @author Ed Hartnett + * @date 3/7/17 */ +#include #include #include #include @@ -57,7 +59,7 @@ char var_name[NVAR][PIO_MAX_NAME + 1] = {"Larry", "Curly", "Moe"}; /* Length of the dimensions in the sample data. */ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; -/** +/** * Test the darray functionality. Create a netCDF file with 3 * dimensions and 3 variable, and use PIOc_write_darray_multi() to * write one record of data to all three vars at once. @@ -69,7 +71,7 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; * @param my_rank rank of this task. * @param pio_type the type of the data. * @returns 0 for success, error code otherwise. -*/ + */ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int pio_type) { @@ -81,11 +83,13 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank int ncid; /* The ncid of the netCDF file. */ int ncid2; /* The ncid of the re-opened netCDF file. */ int varid[NVAR]; /* The IDs of the netCDF varables. */ - int ret; /* Return code. */ + int other_varid; /* The IDs of a var of different type. */ + int wrong_varid[NVAR]; /* These will not work. 
*/ PIO_Offset arraylen = 4; /* Amount of data from each task. */ void *fillvalue; /* Pointer to fill value. */ void *test_data; /* Pointer to test data we will write. */ void *test_data_in; /* Pointer to buffer we will read into. */ + int ret; /* Return code. */ /* Default fill value array for each type. */ signed char byte_fill[NVAR] = {NC_FILL_BYTE, NC_FILL_BYTE, NC_FILL_BYTE}; @@ -239,13 +243,10 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank } /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, - flavor[fmt], pio_type); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); /* Define netCDF dimensions and variable. */ - printf("%d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); @@ -255,6 +256,11 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if ((ret = PIOc_def_var(ncid, var_name[v], pio_type, NDIM, dimids, &varid[v]))) ERR(ret); + /* Define a variable of a different type, to test error handling. */ + int other_pio_type = pio_type < 5 ? pio_type + 1 : PIO_INT; + if ((ret = PIOc_def_var(ncid, "OTHER_VAR", other_pio_type, NDIM, dimids, &other_varid))) + ERR(ret); + /* Leave a note. */ if ((ret = PIOc_put_att_text(ncid, NC_GLOBAL, NOTE_NAME, strlen(NOTE), NOTE))) ERR(ret); @@ -273,6 +279,14 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank int frame[NVAR] = {0, 0, 0}; int flushtodisk = test_multi; + /* This will not work, because we mix var types. */ + wrong_varid[0] = varid[0]; + wrong_varid[1] = varid[1]; + wrong_varid[0] = other_varid; + if (PIOc_write_darray_multi(ncid, wrong_varid, ioid, NVAR, arraylen, test_data, frame, + fillvalue, flushtodisk) != PIO_EINVAL) + ERR(ERR_WRONG); + /* Write the data with the _multi function. 
*/ if ((ret = PIOc_write_darray_multi(ncid, varid, ioid, NVAR, arraylen, test_data, frame, fillvalue, flushtodisk))) @@ -290,10 +304,14 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank * sure we get correct data. */ for (int v = 0; v < NVAR; v++) { + /* Set the value of the record dimension. */ + if ((ret = PIOc_setframe(ncid2, varid[v], 0))) + ERR(ret); + /* Read the data. */ if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_in))) ERR(ret); - + /* Check the results. */ for (int f = 0; f < arraylen; f++) { @@ -352,7 +370,6 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank } /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid2))) ERR(ret); } /* next fillvalue test case */ @@ -363,14 +380,14 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank } /** - * Run all the tests. + * Run all the tests. * * @param iosysid the IO system ID. * @param num_flavors number of available iotypes in the build. * @param flavor pointer to array of the available iotypes. * @param my_rank rank of this task. * @param test_comm the communicator the test is running on. - * @returns 0 for success, error code otherwise. + * @returns 0 for success, error code otherwise. */ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm) @@ -389,7 +406,7 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, int ret; /* Return code. */ for (int t = 0; t < NUM_TYPES_TO_TEST; t++) - { + { /* This will be our file name for writing out decompositions. */ sprintf(filename, "%s_decomp_rank_%d_flavor_%d_type_%d.nc", TEST_NAME, my_rank, *flavor, pio_type[t]); @@ -397,12 +414,12 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, /* Decompose the data over the tasks. 
*/ if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, pio_type[t]))) - return ret; + return ret; /* Run a simple darray test. */ if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, pio_type[t]))) return ret; - + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -425,7 +442,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - MIN_NTASKS, 3, &test_comm))) + MIN_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -438,33 +455,30 @@ int main(int argc, char **argv) int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ int ret; /* Return code. */ - + /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) { - /* Initialize the PIO IO system. This specifies how - * many and which processors are involved in I/O. */ - if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, - ioproc_start, rearranger[r], &iosysid))) - return ret; - - /* Run tests. */ - printf("%d Running tests...\n", my_rank); - if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) - return ret; - - /* Finalize PIO system. */ - if ((ret = PIOc_finalize(iosysid))) - return ret; + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, rearranger[r], &iosysid))) + return ret; + + /* Run tests. */ + if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) + return ret; + + /* Finalize PIO system. 
*/ + if ((ret = PIOc_finalize(iosysid))) + return ret; } /* next rearranger */ } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_multivar.c b/src/externals/pio2/tests/cunit/test_darray_multivar.c index 773de9b5f60..0c5ecc616e8 100644 --- a/src/externals/pio2/tests/cunit/test_darray_multivar.c +++ b/src/externals/pio2/tests/cunit/test_darray_multivar.c @@ -3,6 +3,7 @@ * * Ed Hartnett, 2/16/17 */ +#include #include #include #include @@ -54,7 +55,7 @@ char var_name[NUM_VAR][PIO_MAX_NAME + 1] = {"STICKS", "NIX", "HICK", "PIX"}; /* Length of the dimensions in the sample data. */ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; -/** +/** * Test the darray functionality. Create a netCDF file with 3 * dimensions and 4 variables, and use darray to write to one of them. * @@ -70,7 +71,7 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; * @param use_default 1 if default fill values should be used * (ignored if use_fill is 0). * @returns 0 for success, error code otherwise. 
-*/ + */ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int pio_type, MPI_Comm test_comm, int rearranger, int use_fill, int use_default) @@ -143,11 +144,11 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, test_data_float[f] = my_rank * 10 + f + 0.5; test_data_double[f] = my_rank * 100000 + f + 0.5; #ifdef _NETCDF4 - test_data_ubyte[f] = my_rank * 10 + f; - test_data_ushort[f] = my_rank * 10 + f; - test_data_uint[f] = my_rank * 10 + f; - test_data_int64[f] = my_rank * 10 + f; - test_data_uint64[f] = my_rank * 10 + f; + test_data_ubyte[f] = my_rank * 10 + f; + test_data_ushort[f] = my_rank * 10 + f; + test_data_uint[f] = my_rank * 10 + f; + test_data_int64[f] = my_rank * 10 + f; + test_data_uint64[f] = my_rank * 10 + f; #endif /* _NETCDF4 */ } @@ -228,7 +229,7 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, if ((ret = ncmpi_create(test_comm, test_filename, NC_CLOBBER, MPI_INFO_NULL, &ncid))) return ret; if ((ret = ncmpi_set_fill(ncid, NC_FILL, NULL))) - return ret; + return ret; if ((ret = ncmpi_def_dim(ncid, "dim_name", 5, &dimid))) return ret; if ((ret = ncmpi_def_var(ncid, "dim_name", NC_INT, 1, &dimid, &varid))) @@ -244,12 +245,10 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, int datum; MPI_Offset start[1] = {0}; ret = ncmpi_get_var1_int(ncid, varid, start, &datum); - printf("datum ret = %d\n", ret); /* Not sure why this doesn't work. */ /* if ((ret = ncmpi_get_var1_int(ncid, varid, start, &datum))) */ /* return ret; */ - printf("datum = %d\n", datum); if ((ret = ncmpi_close(ncid))) return ret; #endif /* _PNETCDF */ @@ -264,20 +263,17 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, /* BYTE and CHAR don't work with pnetcdf. Don't know why yet. 
*/ if (flavor[fmt] == PIO_IOTYPE_PNETCDF && (pio_type == PIO_BYTE || pio_type == PIO_CHAR)) continue; - + /* NetCDF-4 types only work with netCDF-4 formats. */ - printf("pio_type = %d flavor[fmt] = %d\n", pio_type, flavor[fmt]); if (pio_type > PIO_DOUBLE && flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P) continue; - + /* Create the filename. */ sprintf(filename, "data_%s_iotype_%d_tc_%d_pio_type_%d_use_fill_%d_default_fill_%d.nc", TEST_NAME, flavor[fmt], tc, pio_type, use_fill, use_default); - + /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, - flavor[fmt], pio_type); if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) ERR(ret); @@ -287,7 +283,6 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, ERR(ret); /* Define netCDF dimensions and variable. */ - printf("%d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); @@ -320,7 +315,8 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, /* Write the data. */ for (int v = 0; v < NUM_VAR; v++) { - if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data, fillvalue))) + void *fp = use_fill ? fillvalue : NULL; + if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data, fp))) ERR(ret); /* For the first test case we just write the first variable. */ @@ -333,15 +329,18 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, ERR(ret); /* Reopen the file. */ - if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) ERR(ret); for (int v = 0; v < NUM_VAR; v++) { + if ((ret = PIOc_setframe(ncid2, varid[v], 0))) + ERR(ret); + /* Read the data. 
*/ - if ((ret = PIOc_read_darray(ncid2, varid[0], ioid, arraylen, test_data_in))) + if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_in))) ERR(ret); - + /* Check the results. */ for (int f = 0; f < arraylen; f++) { @@ -407,10 +406,13 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, * should have fill values. */ if (tc == 0 && use_fill && flavor[fmt] != PIO_IOTYPE_PNETCDF) { + if ((ret = PIOc_setframe(ncid2, varid[1], 0))) + ERR(ret); + /* Read the data. */ if ((ret = PIOc_read_darray(ncid2, varid[1], ioid, arraylen, test_data_in))) ERR(ret); - + /* Check the results. */ for (int f = 0; f < arraylen; f++) { @@ -469,17 +471,16 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, } /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid2))) ERR(ret); } } /* next test case */ - + return PIO_NOERR; } /** - * Run all the tests. + * Run all the tests. * * @param iosysid the IO system ID. * @param num_flavors number of available iotypes in the build. @@ -487,7 +488,7 @@ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, * @param my_rank rank of this task. * @param test_comm the communicator the test is running on. * @param rearranger the rearranger in use in this test. - * @returns 0 for success, error code otherwise. + * @returns 0 for success, error code otherwise. */ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm, int rearranger) @@ -506,14 +507,14 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, int ret; /* Return code. */ for (int t = 0; t < NUM_TYPES_TO_TEST; t++) - { + { int use_fill = 0; int use_default = 0; - + /* Decompose the data over the tasks. 
*/ if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, test_type[t]))) - return ret; + return ret; /* Run the different combinations of use_fill and use_default. */ for (int f = 0; f < NUM_FILL_TESTS; f++) @@ -523,13 +524,13 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, use_fill++; if (f == 2) use_default++; - + /* Run the multivar darray tests. */ if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank, test_type[t], test_comm, rearranger, use_fill, use_default))) return ret; } - + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -552,7 +553,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, MIN_NTASKS, - 3, &test_comm))) + -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -565,11 +566,10 @@ int main(int argc, char **argv) int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ int ret; /* Return code. */ - + /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); /* Test for both arrangers. */ for (int r = 0; r < NUM_REARRANGERS; r++) @@ -580,13 +580,12 @@ int main(int argc, char **argv) if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, ioproc_start, rearranger[r], &iosysid))) return ret; - + /* Run tests. */ - printf("%d Running tests...\n", my_rank); if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm, rearranger[r]))) return ret; - + /* Finalize PIO system. */ if ((ret = PIOc_finalize(iosysid))) return ret; @@ -595,7 +594,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. 
*/ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_multivar2.c b/src/externals/pio2/tests/cunit/test_darray_multivar2.c index b25ba890ab7..0637ed9c66d 100644 --- a/src/externals/pio2/tests/cunit/test_darray_multivar2.c +++ b/src/externals/pio2/tests/cunit/test_darray_multivar2.c @@ -3,6 +3,7 @@ * * Ed Hartnett, Jim Edwards, 4/20/17 */ +#include #include #include #include @@ -61,91 +62,98 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; * @param pio_type the type of the data. * @param test_comm the communicator that is running this test. * @returns 0 for success, error code otherwise. -*/ + */ int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int pio_type, MPI_Comm test_comm) { - char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ - int dimids[NDIM]; /* The dimension IDs. */ - int ncid; /* The ncid of the netCDF file. */ - int varid[NUM_VAR]; /* The IDs of the netCDF varables. */ - PIO_Offset arraylen = 4; - int custom_fillvalue_int = -TEST_VAL_42; - int test_data_int[arraylen]; - int ret; /* Return code. */ - - /* Initialize some data. */ - for (int f = 0; f < arraylen; f++) - test_data_int[f] = my_rank * 10 + f; - - /* Use PIO to create the example file in each of the four - * available ways. */ - for (int fmt = 0; fmt < num_flavors; fmt++) - { - /* Create the filename. */ - sprintf(filename, "data_%s_iotype_%d_pio_type_%d.nc", TEST_NAME, flavor[fmt], pio_type); - - /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, - flavor[fmt], pio_type); - if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) - ERR(ret); - - /* Define netCDF dimensions and variable. 
*/ - printf("%d Defining netCDF metadata...\n", my_rank); - for (int d = 0; d < NDIM; d++) - if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) - ERR(ret); - - /* Var 0 does not have a record dim, varid 1 is a record var. */ - if ((ret = PIOc_def_var(ncid, var_name[0], pio_type, NDIM - 1, &dimids[1], &varid[0]))) - ERR(ret); - if ((ret = PIOc_def_var(ncid, var_name[1], pio_type, NDIM, dimids, &varid[1]))) - ERR(ret); - - /* End define mode. */ - if ((ret = PIOc_enddef(ncid))) - ERR(ret); - - /* Set the value of the record dimension for varid 1. */ - if ((ret = PIOc_setframe(ncid, varid[1], 0))) - ERR(ret); - - /* Write the data. */ - for (int v = 0; v < NUM_VAR; v++) - if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data_int, &custom_fillvalue_int))) - ERR(ret); - - /* Close the netCDF file. */ - if ((ret = PIOc_closefile(ncid))) - ERR(ret); - - /* Check the file contents. */ - { - int ncid2; /* The ncid of the re-opened netCDF file. */ - int test_data_int_in[arraylen]; - - /* Reopen the file. */ - if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) - ERR(ret); - - for (int v = 0; v < NUM_VAR; v++) - { - /* Read the data. */ - if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_int_in))) - ERR(ret); - - /* Check the results. */ - for (int f = 0; f < arraylen; f++) - if (test_data_int_in[f] != test_data_int[f]) - return ERR_WRONG; - } /* next var */ - - /* Close the netCDF file. */ - if ((ret = PIOc_closefile(ncid2))) - ERR(ret); - } - } + /* char filename[PIO_MAX_NAME + 1]; /\* Name for the output files. *\/ */ + /* int dimids[NDIM]; /\* The dimension IDs. *\/ */ + /* int ncid; /\* The ncid of the netCDF file. *\/ */ + /* int varid[NUM_VAR]; /\* The IDs of the netCDF varables. *\/ */ + /* PIO_Offset arraylen = 4; */ + /* int custom_fillvalue_int = -TEST_VAL_42; */ + /* int test_data_int[arraylen]; */ + /* int ret; /\* Return code. *\/ */ + + /* /\* Initialize some data. 
*\/ */ + /* for (int f = 0; f < arraylen; f++) */ + /* test_data_int[f] = my_rank * 10 + f; */ + + /* /\* Use PIO to create the example file in each of the four */ + /* * available ways. *\/ */ + /* for (int fmt = 0; fmt < num_flavors; fmt++) */ + /* { */ + /* /\* Create the filename. *\/ */ + /* sprintf(filename, "data_%s_iotype_%d_pio_type_%d.nc", TEST_NAME, flavor[fmt], pio_type); */ + + /* /\* Create the netCDF output file. *\/ */ + /* printf("rank: %d Creating sample file %s with format %d type %d\n", my_rank, filename, */ + /* flavor[fmt], pio_type); */ + /* if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) */ + /* ERR(ret); */ + + /* /\* Define netCDF dimensions and variable. *\/ */ + /* printf("%d Defining netCDF metadata...\n", my_rank); */ + /* for (int d = 0; d < NDIM; d++) */ + /* if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) */ + /* ERR(ret); */ + + /* /\* Var 0 does not have a record dim, varid 1 is a record var. *\/ */ + /* if ((ret = PIOc_def_var(ncid, var_name[0], pio_type, NDIM - 1, &dimids[1], &varid[0]))) */ + /* ERR(ret); */ + /* if ((ret = PIOc_def_var(ncid, var_name[1], pio_type, NDIM, dimids, &varid[1]))) */ + /* ERR(ret); */ + + /* /\* End define mode. *\/ */ + /* if ((ret = PIOc_enddef(ncid))) */ + /* ERR(ret); */ + + /* /\* Write the data. *\/ */ + /* for (int v = 0; v < NUM_VAR; v++) */ + /* { */ + /* if ((ret = PIOc_setframe(ncid, varid[v], 0))) */ + /* ERR(ret); */ + /* if ((ret = PIOc_write_darray(ncid, varid[v], ioid, arraylen, test_data_int, &custom_fillvalue_int))) */ + /* ERR(ret); */ + /* } */ + + /* /\* Close the netCDF file. *\/ */ + /* if ((ret = PIOc_closefile(ncid))) */ + /* ERR(ret); */ + + /* /\* Check the file contents. *\/ */ + /* { */ + /* int ncid2; /\* The ncid of the re-opened netCDF file. *\/ */ + /* int test_data_int_in[arraylen]; */ + + /* /\* Reopen the file. 
*\/ */ + /* if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) */ + /* ERR(ret); */ + + /* for (int v = 0; v < NUM_VAR; v++) */ + /* { */ + /* if ((ret = PIOc_setframe(ncid2, varid[v], 0))) */ + /* ERR(ret); */ + + /* /\* Read the data. *\/ */ + /* if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_int_in))) */ + /* ERR(ret); */ + + /* /\* Check the results. *\/ */ + /* for (int f = 0; f < arraylen; f++) */ + /* if (test_data_int_in[f] != test_data_int[f]) */ + /* { */ + /* printf("my_rank %d test_data_int_in[%d] = %d expected %d\n", my_rank, */ + /* f, test_data_int_in[f], test_data_int[f]); */ + /* return ERR_WRONG; */ + /* } */ + /* } /\* next var *\/ */ + + /* /\* Close the netCDF file. *\/ */ + /* if ((ret = PIOc_closefile(ncid2))) */ + /* ERR(ret); */ + /* } */ + /* } */ return PIO_NOERR; } @@ -182,13 +190,10 @@ int create_decomposition_2d_2(int ntasks, int my_rank, int iosysid, int *dim_len compdof[i] = my_rank * elements_per_pe + i + 1; /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_InitDecomp(iosysid, pio_type, NDIM2, dim_len_2d, elements_per_pe, compdof, ioid, NULL, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - /* Free the mapping. */ free(compdof); @@ -216,12 +221,12 @@ int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank, if ((ret = create_decomposition_2d_2(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, PIO_INT))) return ret; - + /* Run the multivar darray tests. */ if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank, PIO_INT, test_comm))) return ret; - + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); @@ -241,7 +246,7 @@ int main(int argc, char **argv) /* Initialize test. 
*/ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, MIN_NTASKS, - 3, &test_comm))) + -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -258,7 +263,6 @@ int main(int argc, char **argv) /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); /* Initialize the PIO IO system. This specifies how * many and which processors are involved in I/O. */ @@ -267,10 +271,9 @@ int main(int argc, char **argv) return ret; /* Run tests. */ - printf("%d Running tests...\n", my_rank); if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank, test_comm))) return ret; - + /* Finalize PIO system. */ if ((ret = PIOc_finalize(iosysid))) return ret; @@ -278,7 +281,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_darray_multivar3.c b/src/externals/pio2/tests/cunit/test_darray_multivar3.c new file mode 100644 index 00000000000..8ea5681772d --- /dev/null +++ b/src/externals/pio2/tests/cunit/test_darray_multivar3.c @@ -0,0 +1,339 @@ +/* + * Tests for PIO distributed arrays. This test demonstrates problems + * with the fill value that can arrise from mixing types in a + * decomposition. + * + * @author Ed Hartnett + */ +#include +#include +#include +#include + +/* The number of tasks this test should run on. */ +#define TARGET_NTASKS 4 + +/* The minimum number of tasks this test should run on. */ +#define MIN_NTASKS 4 + +/* The name of this test. */ +#define TEST_NAME "test_darray_multivar3" + +/* Number of processors that will do IO. */ +#define NUM_IO_PROCS 1 + +/* Number of computational components to create. */ +#define COMPONENT_COUNT 1 + +/* The number of dimensions in the example data. 
In this test, we + * are using three-dimensional data. */ +#define NDIM 3 + +/* But sometimes we need arrays of the non-record dimensions. */ +#define NDIM2 2 + +/* The length of our sample data along each dimension. */ +#define X_DIM_LEN 4 +#define Y_DIM_LEN 4 + +/* The number of timesteps of data to write. */ +#define NUM_TIMESTEPS 2 + +/* Number of variables in the test file. */ +#define NUM_VAR 3 + +/* Test with and without custom fill value. */ +#define NUM_FV_TESTS 2 + +/* The dimension names. */ +char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; + +/* The var names. */ +char var_name[NUM_VAR][PIO_MAX_NAME + 1] = {"Kirk", "Spock", "McCoy"}; + +/* Length of the dimensions in the sample data. */ +int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; + +/** + * Test the darray functionality. Create a netCDF file with 3 + * dimensions and 2 variables. One of the vars uses the record + * dimension, the other does not. Then use darray to write to them. + * + * @param iosysid the IO system ID. + * @param ioid the ID of the decomposition. + * @param num_flavors the number of IOTYPES available in this build. + * @param flavor array of available iotypes. + * @param my_rank rank of this task. + * @param test_comm the communicator that is running this test. + * @returns 0 for success, error code otherwise. + */ +int test_multivar_darray(int iosysid, int ioid, int num_flavors, int *flavor, + int my_rank, MPI_Comm test_comm) +{ + char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ + int dimids[NDIM]; /* The dimension IDs. */ + int ncid; /* The ncid of the netCDF file. */ + int varid[NUM_VAR]; /* The IDs of the netCDF varables. */ + PIO_Offset arraylen = 3; + int custom_fillvalue_int = -TEST_VAL_42; + float custom_fillvalue_float = -42.5; + int test_data_int[arraylen]; + float test_data_float[arraylen]; + int ret; /* Return code. */ + + /* Initialize some data. 
*/ + for (int f = 0; f < arraylen; f++) + { + test_data_int[f] = my_rank * 10 + f; + test_data_float[f] = my_rank * 10 + f + 0.5; + } + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + for (int use_fv = 0; use_fv < NUM_FV_TESTS; use_fv++) + { + /* Create the filename. */ + sprintf(filename, "data_%s_iotype_%d_use_fv_%d.nc", TEST_NAME, flavor[fmt], use_fv); + + /* Create the netCDF output file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) + ERR(ret); + + /* Define netCDF dimensions and variable. */ + for (int d = 0; d < NDIM; d++) + if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) + ERR(ret); + + /* Var 0 does not have a record dim, varid 1 is a record var. */ + if ((ret = PIOc_def_var(ncid, var_name[0], PIO_INT, NDIM - 1, &dimids[1], &varid[0]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, var_name[1], PIO_INT, NDIM, dimids, &varid[1]))) + ERR(ret); + if ((ret = PIOc_def_var(ncid, var_name[2], PIO_FLOAT, NDIM, dimids, &varid[2]))) + ERR(ret); + + /* Set the custom fill values. */ + if ((ret = PIOc_def_var_fill(ncid, varid[0], 0, &custom_fillvalue_int))) + ERR(ret); + if ((ret = PIOc_def_var_fill(ncid, varid[1], 0, &custom_fillvalue_int))) + ERR(ret); + if ((ret = PIOc_def_var_fill(ncid, varid[2], 0, &custom_fillvalue_float))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Set the value of the record dimension for varid 1 and 2. */ + if ((ret = PIOc_setframe(ncid, varid[1], 0))) + ERR(ret); + if ((ret = PIOc_setframe(ncid, varid[2], 0))) + ERR(ret); + + int *fvp_int = NULL; + float *fvp_float = NULL; + if (use_fv) + { + fvp_int = &custom_fillvalue_int; + fvp_float = &custom_fillvalue_float; + } + + /* Write the data. 
*/ + if ((ret = PIOc_write_darray(ncid, varid[0], ioid, arraylen, test_data_int, + fvp_int))) + ERR(ret); + if ((ret = PIOc_write_darray(ncid, varid[1], ioid, arraylen, test_data_int, + fvp_int))) + ERR(ret); + + /* This should not work, since the type of the var is + * PIO_FLOAT, and the type if the decomposition is + * PIO_INT. */ + if (PIOc_write_darray(ncid, varid[2], ioid, arraylen, test_data_float, + fvp_float) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* This should also fail, because it mixes an int and a + * float. */ + int frame[NUM_VAR] = {0, 0, 0}; + if (PIOc_write_darray_multi(ncid, varid, ioid, NUM_VAR, arraylen * NUM_VAR, test_data_float, + frame, NULL, 0) != PIO_EINVAL) + ERR(ERR_WRONG); + + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Check the file contents. */ + /* { */ + /* int ncid2; /\* The ncid of the re-opened netCDF file. *\/ */ + /* int test_data_int_in[arraylen]; */ + /* /\* float test_data_float_in[arraylen]; *\/ */ + + /* /\* Reopen the file. *\/ */ + /* if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) */ + /* ERR(ret); */ + + /* /\* Read the var data with read_darray(). *\/ */ + /* for (int v = 0; v < NUM_VAR; v++) */ + /* { */ + /* if (v < NUM_VAR - 1) */ + /* { */ + /* if ((ret = PIOc_setframe(ncid2, varid[v], 0))) */ + /* ERR(ret); */ + + /* /\* Read the data. *\/ */ + /* if ((ret = PIOc_read_darray(ncid2, varid[v], ioid, arraylen, test_data_int_in))) */ + /* ERR(ret); */ + + /* /\* Check the results. *\/ */ + /* for (int f = 0; f < arraylen; f++) */ + /* if (test_data_int_in[f] != test_data_int[f]) */ + /* return ERR_WRONG; */ + /* } */ + /* } /\* next var *\/ */ + + /* /\* Now read the fill values. *\/ */ + /* PIO_Offset idx[NDIM] = {0, 0, 3}; */ + /* int file_fv_int; */ + /* float file_fv_float; */ + + /* /\* Check an int fill value. 
*\/ */ + /* if ((ret = PIOc_get_var1_int(ncid2, 1, idx, &file_fv_int))) */ + /* return ret; */ + /* if (use_fv) */ + /* { */ + /* if (file_fv_int != custom_fillvalue_int) */ + /* return ERR_WRONG; */ + /* } */ + + /* /\* Check the float fill value. *\/ */ + /* if ((ret = PIOc_get_var1_float(ncid2, 2, idx, &file_fv_float))) */ + /* return ret; */ + /* /\* if (use_fv) *\/ */ + /* /\* { *\/ */ + /* /\* if (file_fv_float != custom_fillvalue_float) *\/ */ + /* /\* return ERR_WRONG; *\/ */ + /* /\* } *\/ */ + + /* /\* Close the netCDF file. *\/ */ + /* if ((ret = PIOc_closefile(ncid2))) */ + /* ERR(ret); */ + /* } */ + } + } + + return PIO_NOERR; +} + +/* Create the decomposition to divide the 3-dimensional sample data + * between the 4 tasks. For the purposes of decomposition we are only + * concerned with 2 dimensions - we ignore the unlimited dimension. We + * will leave some gaps in the decomposition, to test fill values. + * + * @param ntasks the number of available tasks + * @param my_rank rank of this task. + * @param iosysid the IO system ID. + * @param dim_len_2d an array of length 2 with the dim lengths. + * @param ioid a pointer that gets the ID of this decomposition. + * @param pio_type the data type to use for the decomposition. + * @returns 0 for success, error code otherwise. + **/ +int create_dcomp_gaps(int ntasks, int my_rank, int iosysid, int *dim_len_2d, + int *ioid, int pio_type) +{ + PIO_Offset elements_per_pe; /* Array elements per processing unit. */ + PIO_Offset *compdof; /* The decomposition mapping. */ + int ret; + + /* How many data elements per task? In this example we will end up + * with 3. */ + elements_per_pe = (dim_len_2d[0] * dim_len_2d[1] / ntasks) - 1; + + /* Allocate space for the decomposition array. */ + if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) + return PIO_ENOMEM; + + /* Describe the decomposition. This is a 1-based array, so add 1! 
*/ + for (int i = 0; i < elements_per_pe; i++) + compdof[i] = my_rank * (elements_per_pe + 1) + i + 1; + + /* Create the PIO decomposition for this test. */ + if ((ret = PIOc_InitDecomp(iosysid, pio_type, NDIM2, dim_len_2d, elements_per_pe, + compdof, ioid, NULL, NULL, NULL))) + ERR(ret); + + /* Free the mapping. */ + free(compdof); + + return 0; +} + +/* Run tests for darray functions. */ +int main(int argc, char **argv) +{ + int my_rank; + int ntasks; + int num_flavors; /* Number of PIO netCDF flavors in this build. */ + int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ + MPI_Comm test_comm; /* A communicator for this test. */ + int ioid; + int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN}; + int ret; /* Return code. */ + + /* Initialize test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, MIN_NTASKS, + -1, &test_comm))) + ERR(ERR_INIT); + + if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) + return ret; + + /* Only do something on max_ntasks tasks. */ + if (my_rank < TARGET_NTASKS) + { + int iosysid; /* The ID for the parallel I/O system. */ + int ioproc_stride = 1; /* Stride in the mpi rank between io tasks. */ + int ioproc_start = 0; /* Zero based rank of first processor to be used for I/O. */ + int ret; /* Return code. */ + + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); + + /* Initialize the PIO IO system. This specifies how + * many and which processors are involved in I/O. */ + if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, + ioproc_start, PIO_REARR_SUBSET, &iosysid))) + return ret; + + /* Decompose the data over the tasks. */ + if ((ret = create_dcomp_gaps(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, + &ioid, PIO_INT))) + return ret; + + /* Run the multivar darray tests. 
*/ + if ((ret = test_multivar_darray(iosysid, ioid, num_flavors, flavor, my_rank, + test_comm))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize PIO system. */ + if ((ret = PIOc_finalize(iosysid))) + return ret; + + } /* endif my_rank < TARGET_NTASKS */ + + /* Finalize the MPI library. */ + if ((ret = pio_test_finalize(&test_comm))) + return ret; + + printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); + return 0; +} diff --git a/src/externals/pio2/tests/cunit/test_decomp_uneven.c b/src/externals/pio2/tests/cunit/test_decomp_uneven.c index b6d47004eff..aee53f55263 100644 --- a/src/externals/pio2/tests/cunit/test_decomp_uneven.c +++ b/src/externals/pio2/tests/cunit/test_decomp_uneven.c @@ -2,8 +2,10 @@ * Tests for PIO distributed arrays. This tests cases when arrays do * not distribute evenly over the processors. * - * Ed Hartnett, 3/6/17 + * @author Ed Hartnett + * @date 3/6/17 */ +#include #include #include #include @@ -58,7 +60,6 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *dim_len, /* Distribute the remaining elements. */ if (my_rank < remainder) elements_per_pe++; - printf("%d elements_per_pe = %lld remainder = %lld\n", my_rank, elements_per_pe, remainder); /* Allocate space for the decomposition array. */ if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset)))) @@ -71,17 +72,13 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *dim_len, if (my_rank >= remainder) my_remainder = remainder; compdof[i] = my_rank * elements_per_pe + i + my_remainder; - printf("%d my_remainder = %d compdof[%d] = %lld\n", my_rank, i, my_remainder, compdof[i]); } /* Create the PIO decomposition for this test. 
*/ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_init_decomp(iosysid, pio_type, NDIM3, dim_len, elements_per_pe, compdof, ioid, 0, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - /* Free the mapping. */ free(compdof); @@ -111,7 +108,7 @@ int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *dim_len, * @param pointer to expected map, an array of TARGET_NTASKS * * max_maplen. * @returns 0 for success, error code otherwise. -*/ + */ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, int rearranger, MPI_Comm test_comm, int *dim_len, int *expected_maplen, int pio_type, int fill_maplen, int *expected_map) @@ -138,10 +135,8 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Create history string. */ strncat(history, filename, NC_MAX_NAME - strlen(TEST_DECOMP_HISTORY)); - printf("writing decomp file %s\n", filename); if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, title, history, 0))) return ret; - printf("about to check map with netCDF\n"); /* Open the decomposition file with netCDF. */ int ncid_in; @@ -153,7 +148,6 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int max_maplen; if ((ret = PIOc_get_att_int(ncid_in, NC_GLOBAL, DECOMP_MAX_MAPLEN_ATT_NAME, &max_maplen))) return ret; - printf("max_maplen = %d\n", max_maplen); /* Check dims. 
*/ PIO_Offset ndims_in; @@ -176,7 +170,6 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, return ret; for (int t = 0; t < TARGET_NTASKS; t++) { - printf("%d maplen_in[%d] = %d expected_maplen[%d] = %d\n", my_rank, t, maplen_in[t], t, expected_maplen[t]); if (maplen_in[t] != expected_maplen[t]) return ERR_WRONG; } @@ -188,13 +181,10 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, return ret; if ((ret = PIOc_get_var(ncid_in, map_varid, (int *)&map_in))) return ret; - printf("about to check map\n"); for (int t = 0; t < TARGET_NTASKS; t++) { for (int e = 0; e < max_maplen; e++) { - printf("%d t = %d e = %d map_in[t][e] = %d expected_map[t * max_maplen + e] = %d\n", - my_rank, t, e, map_in[t][e], expected_map[t * max_maplen + e]); if (map_in[t][e] != expected_map[t * max_maplen + e]) return ERR_WRONG; } @@ -205,7 +195,6 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, return ret; /* Read the decomposition file into PIO. */ - printf("reading decomp file %s\n", filename); if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, pio_type, title_in, history_in, &fortran_order_in))) return ret; @@ -230,16 +219,16 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, return ret; /* Check values in iodesc. 
*/ - printf("ioid2 = %d iodesc->ioid = %d iodesc->maplen = %d iodesc->ndims = %d " - "iodesc->ndof = %d iodesc->rearranger = %d iodesc->maxregions = %d " - "iodesc->needsfill = %d iodesc->basetype = %d expected_mpi_type = %d\n", - ioid2, iodesc->ioid, iodesc->maplen, iodesc->ndims, iodesc->ndof, - iodesc->rearranger, iodesc->maxregions, iodesc->needsfill, iodesc->basetype, - expected_mpi_type); + /* printf("ioid2 = %d iodesc->ioid = %d iodesc->maplen = %d iodesc->ndims = %d " */ + /* "iodesc->ndof = %d iodesc->rearranger = %d iodesc->maxregions = %d " */ + /* "iodesc->needsfill = %d iodesc->mpitype = %d expected_mpi_type = %d\n", */ + /* ioid2, iodesc->ioid, iodesc->maplen, iodesc->ndims, iodesc->ndof, */ + /* iodesc->rearranger, iodesc->maxregions, iodesc->needsfill, iodesc->mpitype, */ + /* expected_mpi_type); */ if (strcmp(title, title_in) || strcmp(history, history_in)) return ERR_WRONG; if (iodesc->ioid != ioid2 || iodesc->rearranger != rearranger || - iodesc->basetype != expected_mpi_type) + iodesc->mpitype != expected_mpi_type) return ERR_WRONG; if (iodesc->ndims != NDIM3) return ERR_WRONG; @@ -252,8 +241,8 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Don't forget to add 1! */ for (int e = 0; e < iodesc->maplen; e++) { - printf("%d e = %d max_maplen = %d iodesc->map[e] = %lld expected_map[my_rank * max_maplen + e] = %d\n", - my_rank, e, max_maplen, iodesc->map[e], expected_map[my_rank * max_maplen + e]); + /* printf("%d e = %d max_maplen = %d iodesc->map[e] = %lld expected_map[my_rank * max_maplen + e] = %d\n", */ + /* my_rank, e, max_maplen, iodesc->map[e], expected_map[my_rank * max_maplen + e]); */ if (iodesc->map[e] != expected_map[my_rank * max_maplen + e] + 1) return ERR_WRONG; } @@ -285,7 +274,7 @@ int main(int argc, char **argv) /* Initialize test. 
*/ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - MIN_NTASKS, 3, &test_comm))) + MIN_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) @@ -308,7 +297,7 @@ int main(int argc, char **argv) int map_1x2x3[] = {0, 1, 2, 3, 4, PIO_FILL_INT, 5, PIO_FILL_INT}; #define NUM_DIM_COMBOS_TO_TEST 5 int dim_len[NUM_DIM_COMBOS_TO_TEST][NDIM3] = {{1, 4, 4}, - {2, 4, 4}, + {2, 4, 4}, {3, 4, 4}, {1, 3, 3}, {1, 2, 3}}; @@ -323,12 +312,11 @@ int main(int argc, char **argv) /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++) { int ioid; /* Decomposition ID. */ - + /* Initialize the PIO IO system. This specifies how * many and which processors are involved in I/O. */ if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, ioproc_stride, @@ -344,25 +332,25 @@ int main(int argc, char **argv) int full_maplen = 1; for (int d = 0; d < NDIM3; d++) full_maplen *= dim_len[dc][d]; - + /* Decompose the data over the tasks. */ if ((ret = create_decomposition_3d(TARGET_NTASKS, my_rank, iosysid, dim_len[dc], test_type[t], &ioid))) return ret; - + /* Test decomposition read/write. */ if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, my_rank, rearranger[r], test_comm, dim_len[dc], expected_maplen[dc], test_type[t], full_maplen, expected_map[dc]))) return ret; - + /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) ERR(ret); } } - + /* Finalize PIO system. */ if ((ret = PIOc_finalize(iosysid))) return ret; @@ -371,7 +359,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. 
*/ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_decomps.c b/src/externals/pio2/tests/cunit/test_decomps.c index 96dc1a386e2..473f70eef65 100644 --- a/src/externals/pio2/tests/cunit/test_decomps.c +++ b/src/externals/pio2/tests/cunit/test_decomps.c @@ -1,8 +1,9 @@ /* * Tests for PIO data decompositons. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include #include @@ -27,9 +28,6 @@ #define X_DIM_LEN 4 #define Y_DIM_LEN 4 -/* The number of timesteps of data to write. */ -#define NUM_TIMESTEPS 1 - /* Files of decompositions. */ #define DECOMP_FILE "decomp.txt" #define DECOMP_BC_FILE "decomp.txt" @@ -42,17 +40,18 @@ #define NUM_IO1 1 #define NUM_IO2 2 #define NUM_IO4 4 -#define REARRANGER 2 +#define NUM_REARRANGER 2 /** - * Test some decomposition functions. + * Test some decomposition functions. * * @param iosysid the IO system ID. + * @param use_io if true, fill the iostart/iocounter arrays. * @param my_rank the 0-based rank of this task. * @param test_comm communicator that includes all tasks paticipating in test. * @returns 0 for success, error code otherwise. */ -int test_decomp1(int iosysid, int my_rank, MPI_Comm test_comm) +int test_decomp1(int iosysid, int use_io, int my_rank, MPI_Comm test_comm) { int ioid; /* The decomposition ID. */ PIO_Offset elements_per_pe; /* Array index per processing unit. */ @@ -63,8 +62,10 @@ int test_decomp1(int iosysid, int my_rank, MPI_Comm test_comm) int *gdims; PIO_Offset fmaplen; PIO_Offset *map; + PIO_Offset *iostart = NULL; + PIO_Offset *iocount = NULL; int ret; - + /* Describe the decomposition. This is a 1-based array, so add 1! 
*/ slice_dimlen[0] = X_DIM_LEN; slice_dimlen[1] = Y_DIM_LEN; @@ -94,13 +95,31 @@ int test_decomp1(int iosysid, int my_rank, MPI_Comm test_comm) if (PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, slice_dimlen, (PIO_Offset)elements_per_pe, compdof, NULL, NULL, NULL, NULL) != PIO_EINVAL) return ERR_WRONG; - + + /* Sometimes we will test with these arrays. */ + if (use_io) + { + if (!(iostart = calloc(NDIM2, sizeof(PIO_Offset)))) + return ERR_AWFUL; + if (!(iocount = calloc(NDIM2, sizeof(PIO_Offset)))) + return ERR_AWFUL; + if (my_rank == 0) + for (int i = 0; i < NDIM2; i++) + iocount[i] = 4; + } + /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition...\n", my_rank); if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, 2, slice_dimlen, (PIO_Offset)elements_per_pe, - compdof, &ioid, NULL, NULL, NULL))) + compdof, &ioid, NULL, iostart, iocount))) return ret; + + /* Free resources. */ free(compdof); + if (use_io) + { + free(iostart); + free(iocount); + } /* These should not work. */ if (PIOc_write_decomp(DECOMP_FILE, iosysid + TEST_VAL_42, ioid, test_comm) != PIO_EBADID) @@ -132,17 +151,8 @@ int test_decomp1(int iosysid, int my_rank, MPI_Comm test_comm) if ((ret = PIOc_readmap(DECOMP_FILE, &ndims, (int **)&gdims, &fmaplen, (PIO_Offset **)&map, test_comm))) return ret; - printf("ndims = %d fmaplen = %lld\n", ndims, fmaplen); if (ndims != 2 || fmaplen != 4) return ERR_WRONG; - for (int d = 0; d < ndims; d++) - { - printf("gdims[%d] = %d\n", d, gdims[d]); - } - for (int m = 0; m < fmaplen; m++) - { - printf("map[%d] = %lld\n", m, map[m]); - } free(map); free(gdims); @@ -152,17 +162,16 @@ int test_decomp1(int iosysid, int my_rank, MPI_Comm test_comm) return ERR_WRONG; if (PIOc_freedecomp(iosysid, ioid + TEST_VAL_42) != PIO_EBADID) return ERR_WRONG; - + /* Free the PIO decomposition. */ - printf("%d Freeing PIO decomposition...\n", my_rank); if ((ret = PIOc_freedecomp(iosysid, ioid))) return ret; - + return 0; } /** - * Test PIOc_InitDecomp_bc(). 
+ * Test PIOc_InitDecomp_bc(). * * @param iosysid the IO system ID. * @param my_rank the 0-based rank of this task. @@ -182,7 +191,7 @@ int test_decomp_bc(int iosysid, int my_rank, MPI_Comm test_comm) PIO_Offset *map; int slice_dimlen[NDIM2]; int ret; - + /* Describe the decomposition. This is a 1-based array, so add 1! */ start[0] = my_rank; start[1] = 0; @@ -194,7 +203,6 @@ int test_decomp_bc(int iosysid, int my_rank, MPI_Comm test_comm) /* These should not work. */ if (PIOc_InitDecomp_bc(iosysid + TEST_VAL_42, PIO_FLOAT, 2, slice_dimlen, start, count, &ioid) != PIO_EBADID) return ERR_WRONG; - printf("ret = %d\n", PIOc_InitDecomp_bc(iosysid, PIO_FLOAT, 2, NULL, start, count, &ioid)); if (PIOc_InitDecomp_bc(iosysid, PIO_FLOAT, 2, NULL, start, count, &ioid) != PIO_EINVAL) return ERR_WRONG; if (PIOc_InitDecomp_bc(iosysid, PIO_FLOAT, 2, slice_dimlen, NULL, count, &ioid) != PIO_EINVAL) @@ -207,9 +215,8 @@ int test_decomp_bc(int iosysid, int my_rank, MPI_Comm test_comm) return ERR_WRONG; if (PIOc_InitDecomp_bc(iosysid, PIO_FLOAT, 2, slice_dimlen, start, bad_count, &ioid) != PIO_EINVAL) return ERR_WRONG; - + /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition...\n", my_rank); if ((ret = PIOc_InitDecomp_bc(iosysid, PIO_FLOAT, 2, slice_dimlen, start, count, &ioid))) return ret; @@ -221,30 +228,20 @@ int test_decomp_bc(int iosysid, int my_rank, MPI_Comm test_comm) if ((ret = PIOc_readmap(DECOMP_BC_FILE, &ndims, (int **)&gdims, &fmaplen, (PIO_Offset **)&map, test_comm))) return ret; - printf("ndims = %d fmaplen = %lld\n", ndims, fmaplen); if (ndims != 2 || fmaplen != 4) return ERR_WRONG; - for (int d = 0; d < ndims; d++) - { - printf("gdims[%d] = %d\n", d, gdims[d]); - } - for (int m = 0; m < fmaplen; m++) - { - printf("map[%d] = %lld\n", m, map[m]); - } free(map); free(gdims); - + /* Free the PIO decomposition. 
*/ - printf("%d Freeing PIO decomposition...\n", my_rank); if ((ret = PIOc_freedecomp(iosysid, ioid))) return ret; - + return 0; } -/** +/** * Test the decomp read/write functionality. * * @param iosysid the IO system ID. @@ -254,9 +251,9 @@ int test_decomp_bc(int iosysid, int my_rank, MPI_Comm test_comm) * @param my_rank rank of this task. * @param test_comm the MPI communicator for this test. * @returns 0 for success, error code otherwise. -*/ -int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank, - MPI_Comm test_comm) + */ +int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int rearranger, + int my_rank, MPI_Comm test_comm) { char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */ int ioid2; /* ID for decomposition we will create from file. */ @@ -270,40 +267,40 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Two extra output methods to tests if NetCDF-4 is present. */ num_decomp_file_types = 3; #endif /* _NETCDF4 */ - + for (int decomp_file_type = 0; decomp_file_type < num_decomp_file_types; decomp_file_type++) { int cmode = 0; - + /* Determine the create mode. */ if (decomp_file_type) cmode |= NC_NETCDF4; if (decomp_file_type == 2) cmode |= NC_MPIIO; - + /* Use PIO to create the decomp file in each of the four * available ways. */ - for (int fmt = 0; fmt < num_flavors; fmt++) + for (int fmt = 0; fmt < num_flavors; fmt++) { /* Create the filename. */ - sprintf(filename, "decomp_%s_iotype_%d_deomp_type_%d.nc", TEST_NAME, flavor[fmt], - decomp_file_type); + sprintf(filename, "decomp_%s_iotype_%d_rearr_%d_decomp_type_%d.nc", TEST_NAME, + flavor[fmt], rearranger, decomp_file_type); - printf("writing decomp file %s\n", filename); if ((ret = PIOc_write_nc_decomp(iosysid, filename, cmode, ioid, NULL, NULL, 0))) return ret; - + /* Read the data. 
*/ - printf("reading decomp file %s\n", filename); if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, PIO_INT, title_in, history_in, &fortran_order_in))) return ret; - + /* Check the results. */ { iosystem_desc_t *ios; io_desc_t *iodesc; - + int target_nrecvs; + int target_num_aiotasks; + /* Get the IO system info. */ if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); @@ -311,19 +308,35 @@ int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, /* Get the IO desc, which describes the decomposition. */ if (!(iodesc = pio_get_iodesc_from_id(ioid2))) return pio_err(ios, NULL, PIO_EBADID, __FILE__, __LINE__); + + /* The answers depend on the rearranger. */ + if (rearranger == PIO_REARR_BOX) + { + target_num_aiotasks = 1; + if (my_rank == 0) + target_nrecvs = TARGET_NTASKS; + else + target_nrecvs = 0; + } + else if (rearranger == PIO_REARR_SUBSET) + { + target_num_aiotasks = TARGET_NTASKS; + target_nrecvs = 1; + } + else + return ERR_WRONG; + + /* Check the results. */ if (iodesc->ioid != ioid2 || iodesc->maplen != TARGET_NTASKS || iodesc->ndims != NDIM2 || - iodesc->nrecvs != 1 || iodesc->ndof != TARGET_NTASKS || iodesc->num_aiotasks != TARGET_NTASKS - || iodesc->rearranger != PIO_REARR_SUBSET || iodesc->maxregions != 1 || - iodesc->needsfill || iodesc->basetype != MPI_INT) + iodesc->nrecvs != target_nrecvs || iodesc->ndof != TARGET_NTASKS || iodesc->num_aiotasks != target_num_aiotasks || + iodesc->rearranger != rearranger || iodesc->maxregions != 1 || iodesc->needsfill || iodesc->mpitype != MPI_INT) return ERR_WRONG; for (int e = 0; e < iodesc->maplen; e++) if (iodesc->map[e] != my_rank * iodesc->maplen + e + 1) return ERR_WRONG; if (iodesc->dimlen[0] != X_DIM_LEN || iodesc->dimlen[1] != Y_DIM_LEN) return ERR_WRONG; - printf("%d in my test iodesc->maxiobuflen = %d\n", my_rank, iodesc->maxiobuflen); } - /* Free the PIO decomposition. 
*/ if ((ret = PIOc_freedecomp(iosysid, ioid2))) @@ -339,17 +352,17 @@ int main(int argc, char **argv) int my_rank; /* Zero-based rank of processor. */ int ntasks; /* Number of processors involved in current execution. */ int iosysid; /* The ID for the parallel I/O system. */ - MPI_Group world_group; /* An MPI group of world. */ MPI_Comm test_comm; int num_flavors; /* Number of PIO netCDF flavors in this build. */ int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ int dim_len_2d[NDIM2] = {X_DIM_LEN, Y_DIM_LEN}; int ioid; + int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; int ret; /* Return code. */ /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -360,57 +373,52 @@ int main(int argc, char **argv) if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - /* Initialize PIO system on world. */ - printf("%d about to call Init_Intracomm\n", my_rank); - if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, REARRANGER, &iosysid))) - ERR(ret); - printf("%d done with Init_Intracomm\n", my_rank); - - /* Set the error handler. */ - /*PIOc_Set_IOSystem_Error_Handling(iosysid, PIO_BCAST_ERROR);*/ - printf("%d about to set iosystem error hanlder for world\n", my_rank); - if ((ret = PIOc_set_iosystem_error_handling(iosysid, PIO_BCAST_ERROR, NULL))) - ERR(ret); - printf("%d done setting iosystem error hanlder for world\n", my_rank); - - /* Get MPI_Group of world comm. */ - if ((ret = MPI_Comm_group(test_comm, &world_group))) - ERR(ret); - - /* Test basic decomp stuff. */ - if ((ret = test_decomp1(iosysid, my_rank, test_comm))) - return ret; - - /* Test PIOc_InitDecomp_bc(). 
*/ - if ((ret = test_decomp_bc(iosysid, my_rank, test_comm))) - return ret; - - /* Decompose the data over the tasks. */ - if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, - PIO_INT))) - return ret; - - /* Test decomposition read/write. */ - if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, my_rank, test_comm))) - return ret; - - /* Free the PIO decomposition. */ - if ((ret = PIOc_freedecomp(iosysid, ioid))) - ERR(ret); - - /* Finalize PIO systems. */ - printf("%d pio finalized\n", my_rank); - if ((ret = PIOc_finalize(iosysid))) - ERR(ret); - - /* Free MPI resources used by test. */ - if ((ret = MPI_Group_free(&world_group))) - ERR(ret); + /* Test for each rearranger. */ + /* for (int r = 0; r < NUM_REARRANGERS; r++) */ + for (int r = 1; r < NUM_REARRANGERS; r++) + { + int num_iotests = (rearranger[r] == PIO_REARR_BOX) ? 2 : 1; + for (int io_test = 0; io_test < num_iotests; io_test++) + { + /* Initialize PIO system on world. */ + if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, rearranger[r], &iosysid))) + ERR(ret); + + /* Set the error handler. */ + if ((ret = PIOc_set_iosystem_error_handling(iosysid, PIO_BCAST_ERROR, NULL))) + ERR(ret); + + /* Test basic decomp stuff. */ + if ((ret = test_decomp1(iosysid, io_test, my_rank, test_comm))) + return ret; + + /* Test PIOc_InitDecomp_bc(). */ + if ((ret = test_decomp_bc(iosysid, my_rank, test_comm))) + return ret; + + /* Decompose the data over the tasks. */ + if ((ret = create_decomposition_2d(TARGET_NTASKS, my_rank, iosysid, dim_len_2d, &ioid, + PIO_INT))) + return ret; + + /* Test decomposition read/write. */ + if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, rearranger[r], + my_rank, test_comm))) + return ret; + + /* Free the PIO decomposition. */ + if ((ret = PIOc_freedecomp(iosysid, ioid))) + ERR(ret); + + /* Finalize PIO systems. 
*/ + if ((ret = PIOc_finalize(iosysid))) + ERR(ret); + } /* next io test */ + } /* next rearranger */ } /* my_rank < TARGET_NTASKS */ /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_intercomm2.c b/src/externals/pio2/tests/cunit/test_intercomm2.c index e55c6e1ff2e..6131e0ef85f 100644 --- a/src/externals/pio2/tests/cunit/test_intercomm2.c +++ b/src/externals/pio2/tests/cunit/test_intercomm2.c @@ -6,6 +6,8 @@ *
    mpiexec -n 4 valgrind -v --leak-check=full --suppressions=../../../tests/unit/valsupp_test.supp
      * --error-exitcode=99 --track-origins=yes ./test_intercomm2
    * + * @author Ed Hartnett + * */ #include #include @@ -80,7 +82,6 @@ int check_file(int iosysid, int format, char *filename, int my_rank) too_long_name[PIO_MAX_NAME * 5] = 0; /* Re-open the file to check it. */ - printf("%d test_intercomm2 opening file %s format %d\n", my_rank, filename, format); if ((ret = PIOc_openfile(iosysid, &ncid, &format, filename, NC_NOWRITE))) ERR(ret); @@ -97,7 +98,6 @@ int check_file(int iosysid, int format, char *filename, int my_rank) ERR(ret); for (int i = 0; i < count[0]; i++) { - printf("%d test_intercomm2 read data_in[%d] = %d, start_index = %d\n", my_rank, i, data_in[i], start_index); if (data_in[i] != (i + start_index)) ERR(ERR_AWFUL); } @@ -226,7 +226,6 @@ int check_file(int iosysid, int format, char *filename, int my_rank) ERR(ERR_WRONG); if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME, &att_data))) ERR(ret); - printf("%d test_intercomm2 att_data = %d\n", my_rank, att_data); if (att_data != ATT_VALUE) ERR(ERR_WRONG); if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, SHORT_ATT_NAME, &atttype, &attlen))) @@ -261,7 +260,6 @@ int check_file(int iosysid, int format, char *filename, int my_rank) ERR(ERR_WRONG); /* Close the file. */ - printf("%d test_intercomm2 closing file (again) ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) ERR(ret); @@ -296,25 +294,18 @@ int main(int argc, char **argv) /* Create a name that is too long. */ memset(too_long_name, 74, PIO_MAX_NAME * 5); too_long_name[PIO_MAX_NAME * 5] = 0; - - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, &test_comm))) + + /* Set up test. */ + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Figure out iotypes. 
*/ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - if(my_rank < TARGET_NTASKS) + if (my_rank < TARGET_NTASKS) { - printf("%d: test_intercomm2 ParallelIO Library test_intercomm2 running on %d processors.\n", - my_rank, ntasks); - - /* Initialize the PIO IO system. This specifies how many and which - * processors are involved in I/O. */ - - /* Turn on logging. */ - if ((ret = PIOc_set_log_level(3))) - ERR(ret); /* How many processors will be used for our IO and 2 computation components. */ int num_procs[COMPONENT_COUNT] = {2}; @@ -331,9 +322,6 @@ int main(int argc, char **argv) num_procs, NULL, NULL, NULL, PIO_REARR_BOX, iosysid))) ERR(ERR_AWFUL); - printf("%d: test_intercomm2 ParallelIO Library test_intercomm2 comp task returned.\n", - my_rank); - /* All the netCDF calls are only executed on the computation * tasks. The IO tasks have not returned from PIOc_Init_Intercomm, * and when the do, they should go straight to finalize. */ @@ -349,16 +337,13 @@ int main(int argc, char **argv) sprintf(filename[fmt], "test_intercomm2_%d.nc", flavor[fmt]); /* Create a netCDF file with one dimension and one variable. */ - printf("%d test_intercomm2 creating file %s\n", my_rank, filename[fmt]); if ((ret = PIOc_createfile(iosysid[my_comp_idx], &ncid, &flavor[fmt], filename[fmt], NC_CLOBBER))) ERR(ret); - printf("%d test_intercomm2 file created ncid = %d\n", my_rank, ncid); /* End define mode, then re-enter it. */ if ((ret = PIOc_enddef(ncid))) ERR(ret); - printf("%d test_intercomm2 calling redef\n", my_rank); if ((ret = PIOc_redef(ncid))) ERR(ret); @@ -398,7 +383,6 @@ int main(int argc, char **argv) /* Define a dimension. */ char dimname2[NC_MAX_NAME + 1]; - printf("%d test_intercomm2 defining dimension %s\n", my_rank, DIM_NAME); if ((ret = PIOc_def_dim(ncid, FIRST_DIM_NAME, DIM_LEN, &dimid))) ERR(ret); if ((ret = PIOc_inq_dimname(ncid, 0, dimname2))) @@ -418,7 +402,6 @@ int main(int argc, char **argv) /* Define a 1-D variable. 
*/ char varname2[NC_MAX_NAME + 1]; - printf("%d test_intercomm2 defining variable %s\n", my_rank, VAR_NAME); if ((ret = PIOc_def_var(ncid, FIRST_VAR_NAME, NC_INT, NDIM, &dimid, &varid))) ERR(ret); if ((ret = PIOc_inq_varname(ncid, 0, varname2))) @@ -437,7 +420,6 @@ int main(int argc, char **argv) ERR(ERR_WRONG); /* Add a global attribute. */ - printf("%d test_intercomm2 writing attributes %s\n", my_rank, ATT_NAME); int att_data = ATT_VALUE; short short_att_data = ATT_VALUE; float float_att_data = ATT_VALUE; @@ -505,7 +487,6 @@ int main(int argc, char **argv) ERR(ERR_WRONG); /* End define mode. */ - printf("%d test_intercomm2 ending define mode ncid = %d\n", my_rank, ncid); if ((ret = PIOc_enddef(ncid))) ERR(ret); @@ -516,14 +497,12 @@ int main(int argc, char **argv) * ignored. */ for (int i = 0; i < DIM_LEN; i++) data[i] = i; - printf("%d test_intercomm2 writing data\n", my_rank); start[0] = 0; count[0] = DIM_LEN; if ((ret = PIOc_put_vars_tc(ncid, varid, start, count, NULL, NC_INT, data))) ERR(ret); /* Close the file. */ - printf("%d test_intercomm2 closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) ERR(ret); @@ -541,14 +520,12 @@ int main(int argc, char **argv) } /* next netcdf flavor */ /* Finalize the IO system. Only call this from the computation tasks. */ - printf("%d test_intercomm2 Freeing PIO resources\n", my_rank); if ((ret = PIOc_finalize(iosysid[my_comp_idx]))) ERR(ret); } } /* my_rank < TARGET_NTASKS */ /* Finalize test. 
*/ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_iosystem2.c b/src/externals/pio2/tests/cunit/test_iosystem2.c index ca27acad514..32324e553dd 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem2.c +++ b/src/externals/pio2/tests/cunit/test_iosystem2.c @@ -6,6 +6,7 @@ * * Ed Hartnett */ +#include #include #include @@ -31,7 +32,6 @@ int create_file(MPI_Comm comm, int iosysid, int format, char *filename, /* Create the file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &format, filename, NC_CLOBBER))) return ret; - printf("%d file created ncid = %d\n", my_rank, ncid); /* Use the ncid to set the IO system error handler. This function * is deprecated. */ @@ -41,12 +41,10 @@ int create_file(MPI_Comm comm, int iosysid, int format, char *filename, return ERR_WRONG; /* Define a dimension. */ - printf("%d defining dimension %s\n", my_rank, dimname); if ((ret = PIOc_def_dim(ncid, dimname, PIO_TF_MAX_STR_LEN, &dimid))) return ret; /* Define a 1-D variable. */ - printf("%d defining variable %s\n", my_rank, attname); if ((ret = PIOc_def_var(ncid, attname, NC_CHAR, 1, &dimid, &varid))) return ret; @@ -55,16 +53,12 @@ int create_file(MPI_Comm comm, int iosysid, int format, char *filename, return ret; /* End define mode. */ - printf("%d ending define mode ncid = %d\n", my_rank, ncid); if ((ret = PIOc_enddef(ncid))) return ret; - printf("%d define mode ended ncid = %d\n", my_rank, ncid); /* Close the file. */ - printf("%d closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; - printf("%d closed file ncid = %d\n", my_rank, ncid); return PIO_NOERR; } @@ -79,7 +73,6 @@ int check_file(MPI_Comm comm, int iosysid, int format, int ncid, char *filename, /* Check the file. 
*/ if ((ret = PIOc_inq_dimid(ncid, dimname, &dimid))) return ret; - printf("%d dimid = %d\n", my_rank, dimid); return PIO_NOERR; } @@ -120,8 +113,8 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -137,7 +130,6 @@ int main(int argc, char **argv) int even = my_rank % 2 ? 0 : 1; if ((ret = MPI_Comm_split(test_comm, even, 0, &newcomm))) MPIERR(ret); - printf("%d newcomm = %d even = %d\n", my_rank, newcomm, even); /* Get rank in new communicator and its size. */ int new_rank, new_size; @@ -145,8 +137,6 @@ int main(int argc, char **argv) MPIERR(ret); if ((ret = MPI_Comm_size(newcomm, &new_size))) MPIERR(ret); - printf("%d newcomm = %d new_rank = %d new_size = %d\n", my_rank, newcomm, - new_rank, new_size); /* Initialize PIO system. */ if ((ret = PIOc_Init_Intracomm(newcomm, 2, 1, 0, 1, &iosysid))) @@ -165,7 +155,6 @@ int main(int argc, char **argv) char fname0[] = "pio_iosys_test_file0.nc"; char fname1[] = "pio_iosys_test_file1.nc"; char fname2[] = "pio_iosys_test_file2.nc"; - printf("\n\n%d i = %d\n", my_rank, i); if ((ret = create_file(test_comm, iosysid_world, iotypes[i], fname0, ATTNAME, DIMNAME, my_rank))) @@ -191,7 +180,6 @@ int main(int argc, char **argv) * remaining files. */ int ncid2; char *fname = even ? fname1 : fname2; - printf("\n***\n"); if ((ret = open_and_check_file(newcomm, iosysid, iotypes[i], &ncid2, fname, ATTNAME, DIMNAME, 1, my_rank))) ERR(ret); @@ -216,7 +204,6 @@ int main(int argc, char **argv) } /* my_rank < TARGET_NTASKS */ /* Finalize test. 
*/ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_iosystem2_simple.c b/src/externals/pio2/tests/cunit/test_iosystem2_simple.c index 66ae617372b..db94bf0ed41 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem2_simple.c +++ b/src/externals/pio2/tests/cunit/test_iosystem2_simple.c @@ -4,8 +4,9 @@ * * This is a simplified, C version of the fortran pio_iosystem_tests2.F90. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -26,8 +27,8 @@ #define BASE 0 #define REARRANGER 1 -/* Ten megabytes. */ -#define TEN_MEG 10485760 +/* Used to set PIOc_set_buffer_size_limit(). */ +#define NEW_LIMIT 200000 /* Run test. */ int main(int argc, char **argv) @@ -43,7 +44,8 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -55,10 +57,19 @@ int main(int argc, char **argv) ERR(ret); /* Try setting the buffer size limit. */ - oldlimit = PIOc_set_buffer_size_limit(200000); - if (oldlimit != TEN_MEG) + oldlimit = PIOc_set_buffer_size_limit(NEW_LIMIT); + if (oldlimit != PIO_BUFFER_SIZE) + ERR(ERR_WRONG); + + /* A negative limit will silently do nothing. */ + oldlimit = PIOc_set_buffer_size_limit(-NEW_LIMIT); + if (oldlimit != NEW_LIMIT) + ERR(ERR_WRONG); + + /* Reset the buffer size limit. */ + oldlimit = PIOc_set_buffer_size_limit(PIO_BUFFER_SIZE); + if (oldlimit != NEW_LIMIT) ERR(ERR_WRONG); - oldlimit = PIOc_set_buffer_size_limit(TEN_MEG); /* Figure out iotypes. 
*/ if ((ret = get_iotypes(&num_flavors, flavor))) @@ -74,7 +85,6 @@ int main(int argc, char **argv) int new_size; if ((ret = MPI_Comm_size(newcomm, &new_size))) MPIERR(ret); - printf("%d newcomm = %d even = %d new_size = %d\n", my_rank, newcomm, even, new_size); /* Check that some bad inputs are rejected. */ if (PIOc_Init_Intracomm(newcomm, new_size, STRIDE + 30, BASE, REARRANGER, @@ -107,7 +117,6 @@ int main(int argc, char **argv) ERR(ret); if ((ret = PIOc_get_numiotasks(iosysid, &numiotasks))) ERR(ret); - printf("%d numiotasks = %d\n", my_rank, numiotasks); if (numiotasks != 1) ERR(ERR_WRONG); @@ -118,7 +127,6 @@ int main(int argc, char **argv) ERR(ret); if ((ret = PIOc_iotask_rank(iosysid, &iorank))) ERR(ret); - printf("%d iorank = %d\n", my_rank, iorank); /* Each of two tasks has an iosystem. On both iosystems, the * single task has iorank of zero. */ if (iorank != 0) @@ -188,7 +196,6 @@ int main(int argc, char **argv) char dimname_in[NC_MAX_NAME + 1]; if ((ret = PIOc_inq_dimname(ncid, 0, dimname_in))) return ret; - printf("%d ncid dimname_in = %s should be %s\n", my_rank, dimname_in, dimname[0]); if (strcmp(dimname_in, dimname[0])) return ERR_WRONG; @@ -199,7 +206,6 @@ int main(int argc, char **argv) return ret; if ((ret = PIOc_inq_dimname(ncid2, 0, dimname_in))) return ret; - printf("%d ncid2 dimname_in = %s should be %s\n", my_rank, dimname_in, dimname[1]); if (strcmp(dimname_in, dimname[1])) return ERR_WRONG; } @@ -225,7 +231,6 @@ int main(int argc, char **argv) }/* my_rank < TARGET_NTASKS */ /* Finalize test. 
*/ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_iosystem2_simple2.c b/src/externals/pio2/tests/cunit/test_iosystem2_simple2.c index 018caaedb9d..721a4ef5a76 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem2_simple2.c +++ b/src/externals/pio2/tests/cunit/test_iosystem2_simple2.c @@ -4,8 +4,9 @@ * * This is a simplified, C version of the fortran pio_iosystem_tests2.F90. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -41,23 +42,22 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Only do something on the first TARGET_NTASKS tasks. */ if (my_rank < TARGET_NTASKS) { - /* Figure out iotypes. */ - if ((ret = get_iotypes(&num_flavors, flavor))) - ERR(ret); + /* Figure out iotypes. */ + if ((ret = get_iotypes(&num_flavors, flavor))) + ERR(ret); /* Split world into odd and even. */ MPI_Comm newcomm; int even = my_rank % 2 ? 0 : 1; if ((ret = MPI_Comm_split(test_comm, even, 0, &newcomm))) MPIERR(ret); - printf("%d newcomm = %d even = %d\n", my_rank, newcomm, even); /* Get size of new communicator. */ int new_size; @@ -88,14 +88,13 @@ int main(int argc, char **argv) sprintf(filename[sample], "%s_%s_%d_%d.nc", TEST_NAME, iotype_name, sample, 0); /* Create sample file. */ - printf("%d %s creating file %s\n", my_rank, TEST_NAME, filename[sample]); if ((ret = create_nc_sample(sample, iosysid_world, flavor[flv], filename[sample], - my_rank, NULL))) + my_rank, NULL))) ERR(ret); /* Check the file for correctness. 
*/ if ((ret = check_nc_sample(sample, iosysid_world, flavor[flv], filename[sample], - my_rank, &sample_ncid[sample]))) + my_rank, &sample_ncid[sample]))) ERR(ret); } @@ -132,7 +131,6 @@ int main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_iosystem3.c b/src/externals/pio2/tests/cunit/test_iosystem3.c index 53b0c63b67c..61ca0e24e59 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem3.c +++ b/src/externals/pio2/tests/cunit/test_iosystem3.c @@ -5,8 +5,9 @@ * This is a simplified, C version of the fortran * pio_iosystem_tests3.F90. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -33,7 +34,6 @@ #define NUM_IO1 1 #define NUM_IO2 2 #define NUM_IO4 4 -#define REARRANGER 2 /* This creates a netCDF file in the specified format, with some * sample values. */ @@ -46,15 +46,12 @@ int create_file(MPI_Comm comm, int iosysid, int format, char *filename, /* Create the file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &format, filename, NC_CLOBBER))) return ret; - printf("%d file created ncid = %d\n", my_rank, ncid); /* Define a dimension. */ - printf("%d defining dimension %s\n", my_rank, dimname); if ((ret = PIOc_def_dim(ncid, dimname, PIO_TF_MAX_STR_LEN, &dimid))) return ret; /* Define a 1-D variable. */ - printf("%d defining variable %s\n", my_rank, attname); if ((ret = PIOc_def_var(ncid, attname, NC_CHAR, 1, &dimid, &varid))) return ret; @@ -63,16 +60,12 @@ int create_file(MPI_Comm comm, int iosysid, int format, char *filename, return ret; /* End define mode. */ - printf("%d ending define mode ncid = %d\n", my_rank, ncid); if ((ret = PIOc_enddef(ncid))) return ret; - printf("%d define mode ended ncid = %d\n", my_rank, ncid); /* Close the file. 
*/ - printf("%d closing file ncid = %d\n", my_rank, ncid); if ((ret = PIOc_closefile(ncid))) return ret; - printf("%d closed file ncid = %d\n", my_rank, ncid); return PIO_NOERR; } @@ -104,11 +97,9 @@ int check_file(MPI_Comm comm, int iosysid, int format, int ncid, char *filename, return PIO_ENOMEM; if ((ret = PIOc_get_att(ncid, varid, attname, att_data))) return ret; - printf("%d DONE with get_att!!!\n", my_rank); if (strncmp(att_data, filename, strlen(filename))) return ERR_WRONG; free(att_data); - printf("%d DONE with get_att!!!\n", my_rank); return PIO_NOERR; } @@ -153,12 +144,13 @@ int main(int argc, char **argv) int even_size = 0, overlap_size = 0; /* Size of communicator. */ int num_flavors; /* Number of PIO netCDF flavors in this build. */ int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */ - int ret; /* Return code. */ MPI_Comm test_comm; + int rearranger[NUM_REARRANGERS] = {PIO_REARR_BOX, PIO_REARR_SUBSET}; + int ret; /* Return code. */ /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -169,203 +161,187 @@ int main(int argc, char **argv) if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - /* Initialize PIO system on world. */ - printf("%d about to call Init_Intracomm\n", my_rank); - if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, REARRANGER, &iosysid_world))) - ERR(ret); - printf("%d done with Init_Intracomm\n", my_rank); - - /* Set the error handler. 
*/ - /*PIOc_Set_IOSystem_Error_Handling(iosysid_world, PIO_BCAST_ERROR);*/ - printf("%d about to set iosystem error hanlder for world\n", my_rank); - if ((ret = PIOc_set_iosystem_error_handling(iosysid_world, PIO_BCAST_ERROR, NULL))) - ERR(ret); - printf("%d done setting iosystem error hanlder for world\n", my_rank); - - /* Get MPI_Group of world comm. */ - if ((ret = MPI_Comm_group(test_comm, &world_group))) - ERR(ret); - - /* Create a group with tasks 0 and 2. */ - int even_ranges[EVEN_NUM_RANGES][3] = {{0, 2, 2}}; - if ((ret = MPI_Group_range_incl(world_group, EVEN_NUM_RANGES, even_ranges, &even_group))) - ERR(ret); - - /* Create a communicator from the even_group. */ - if ((ret = MPI_Comm_create(test_comm, even_group, &even_comm))) - ERR(ret); - - /* Learn my rank and the total number of processors in even group. */ - if (even_comm != MPI_COMM_NULL) - { - if ((ret = MPI_Comm_rank(even_comm, &even_rank))) - MPIERR(ret); - if ((ret = MPI_Comm_size(even_comm, &even_size))) - MPIERR(ret); - } - printf("%d even_comm = %d even_rank = %d even_size = %d\n", my_rank, - even_comm, even_rank, even_size); - - /* Create a group with tasks 0, 1, and 3. */ - int overlap_ranges[OVERLAP_NUM_RANGES][3] = {{0, 0, 1}, {1, 3, 2}}; - if ((ret = MPI_Group_range_incl(world_group, OVERLAP_NUM_RANGES, - overlap_ranges, &overlap_group))) - ERR(ret); - - /* Create a communicator from the overlap_group. */ - if ((ret = MPI_Comm_create(test_comm, overlap_group, &overlap_comm))) - ERR(ret); - - /* Learn my rank and the total number of processors in overlap - * group. */ - if (overlap_comm != MPI_COMM_NULL) + /* Test with both rearrangers. 
*/ + for (int r = 0; r < NUM_REARRANGERS; r++) { - if ((ret = MPI_Comm_rank(overlap_comm, &overlap_rank))) - MPIERR(ret); - if ((ret = MPI_Comm_size(overlap_comm, &overlap_size))) - MPIERR(ret); - } - printf("%d overlap_comm = %d overlap_rank = %d overlap_size = %d\n", my_rank, - overlap_comm, overlap_rank, overlap_size); - - /* Initialize PIO system for even. */ - if (even_comm != MPI_COMM_NULL) - { - if ((ret = PIOc_Init_Intracomm(even_comm, NUM_IO1, STRIDE1, BASE1, REARRANGER, &even_iosysid))) - ERR(ret); - - /* These should not work. */ - if (PIOc_set_hint(even_iosysid + TEST_VAL_42, NULL, NULL) != PIO_EBADID) - ERR(ERR_WRONG); - if (PIOc_set_hint(even_iosysid, NULL, NULL) != PIO_EINVAL) - ERR(ERR_WRONG); - - /* Set the hint (which will be ignored). */ - if ((ret = PIOc_set_hint(even_iosysid, "hint", "hint_value"))) + /* Initialize PIO system on world. */ + if ((ret = PIOc_Init_Intracomm(test_comm, NUM_IO4, STRIDE1, BASE0, rearranger[r], + &iosysid_world))) ERR(ret); /* Set the error handler. */ - /*PIOc_Set_IOSystem_Error_Handling(even_iosysid, PIO_BCAST_ERROR);*/ - printf("%d about to set iosystem error hanlder for even\n", my_rank); - if ((ret = PIOc_set_iosystem_error_handling(even_iosysid, PIO_BCAST_ERROR, NULL))) + if ((ret = PIOc_set_iosystem_error_handling(iosysid_world, PIO_BCAST_ERROR, NULL))) ERR(ret); - printf("%d done setting iosystem error hanlder for even\n", my_rank); - } - /* Initialize PIO system for overlap comm. */ - if (overlap_comm != MPI_COMM_NULL) - { - if ((ret = PIOc_Init_Intracomm(overlap_comm, NUM_IO2, STRIDE1, BASE1, REARRANGER, - &overlap_iosysid))) + /* Get MPI_Group of world comm. */ + if ((ret = MPI_Comm_group(test_comm, &world_group))) ERR(ret); - printf("%d about to set iosystem error hanlder for overlap\n", my_rank); - /* Set the error handler. 
*/ - /* if ((ret = PIOc_set_iosystem_error_handling(overlap_iosysid, PIO_BCAST_ERROR))) */ - /* ERR(ret); */ - PIOc_Set_IOSystem_Error_Handling(overlap_iosysid, PIO_BCAST_ERROR); - printf("%d done setting iosystem error hanlder for overlap\n", my_rank); - } - - for (int i = 0; i < num_flavors; i++) - { - char fname0[] = "pio_iosys_test_file0.nc"; - char fname1[] = "pio_iosys_test_file1.nc"; - char fname2[] = "pio_iosys_test_file2.nc"; - printf("\n\n%d i = %d\n", my_rank, i); - - if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname0, ATTNAME, - DIMNAME, my_rank))) + /* Create a group with tasks 0 and 2. */ + int even_ranges[EVEN_NUM_RANGES][3] = {{0, 2, 2}}; + if ((ret = MPI_Group_range_incl(world_group, EVEN_NUM_RANGES, even_ranges, + &even_group))) ERR(ret); - if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname1, ATTNAME, - DIMNAME, my_rank))) + /* Create a communicator from the even_group. */ + if ((ret = MPI_Comm_create(test_comm, even_group, &even_comm))) ERR(ret); - if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname2, ATTNAME, - DIMNAME, my_rank))) + /* Learn my rank and the total number of processors in even group. */ + if (even_comm != MPI_COMM_NULL) + { + if ((ret = MPI_Comm_rank(even_comm, &even_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(even_comm, &even_size))) + MPIERR(ret); + } + + /* Create a group with tasks 0, 1, and 3. */ + int overlap_ranges[OVERLAP_NUM_RANGES][3] = {{0, 0, 1}, {1, 3, 2}}; + if ((ret = MPI_Group_range_incl(world_group, OVERLAP_NUM_RANGES, overlap_ranges, + &overlap_group))) ERR(ret); - /* Now check the first file from WORLD communicator. */ - int ncid; - if ((ret = open_and_check_file(test_comm, iosysid_world, flavor[i], &ncid, fname0, - ATTNAME, DIMNAME, 1, my_rank))) + /* Create a communicator from the overlap_group. */ + if ((ret = MPI_Comm_create(test_comm, overlap_group, &overlap_comm))) ERR(ret); - /* Now have the even communicators check the files. 
*/ - int ncid2; + /* Learn my rank and the total number of processors in overlap + * group. */ + if (overlap_comm != MPI_COMM_NULL) + { + if ((ret = MPI_Comm_rank(overlap_comm, &overlap_rank))) + MPIERR(ret); + if ((ret = MPI_Comm_size(overlap_comm, &overlap_size))) + MPIERR(ret); + } + + /* Initialize PIO system for even. */ if (even_comm != MPI_COMM_NULL) { - printf("\n***\n%d Checking file for even_comm\n", my_rank); - if ((ret = open_and_check_file(even_comm, even_iosysid, flavor[i], &ncid2, fname2, - ATTNAME, DIMNAME, 1, my_rank))) + if ((ret = PIOc_Init_Intracomm(even_comm, NUM_IO1, STRIDE1, BASE1, rearranger[r], + &even_iosysid))) ERR(ret); - if ((ret = check_file(even_comm, even_iosysid, flavor[i], ncid2, fname2, - ATTNAME, DIMNAME, my_rank))) + + /* These should not work. */ + if (PIOc_set_hint(even_iosysid + TEST_VAL_42, NULL, NULL) != PIO_EBADID) + ERR(ERR_WRONG); + if (PIOc_set_hint(even_iosysid, NULL, NULL) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* Set the hint (which will be ignored). */ + if ((ret = PIOc_set_hint(even_iosysid, "hint", "hint_value"))) + ERR(ret); + + /* Set the error handler. */ + /*PIOc_Set_IOSystem_Error_Handling(even_iosysid, PIO_BCAST_ERROR);*/ + if ((ret = PIOc_set_iosystem_error_handling(even_iosysid, PIO_BCAST_ERROR, NULL))) ERR(ret); } - /* Now have the overlap communicators check the files. */ - int ncid3; + /* Initialize PIO system for overlap comm. */ if (overlap_comm != MPI_COMM_NULL) { - printf("\n***%d Checking file for overlap_comm\n", my_rank); - if ((ret = open_and_check_file(overlap_comm, overlap_iosysid, flavor[i], &ncid3, fname1, + if ((ret = PIOc_Init_Intracomm(overlap_comm, NUM_IO2, STRIDE1, BASE1, rearranger[r], + &overlap_iosysid))) + ERR(ret); + + /* Set the error handler. 
*/ + PIOc_Set_IOSystem_Error_Handling(overlap_iosysid, PIO_BCAST_ERROR); + } + + for (int i = 0; i < num_flavors; i++) + { + char fname0[PIO_MAX_NAME + 1]; + char fname1[PIO_MAX_NAME + 1]; + char fname2[PIO_MAX_NAME + 1]; + + sprintf(fname0, "%s_file_0_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]); + if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname0, ATTNAME, + DIMNAME, my_rank))) + ERR(ret); + + sprintf(fname1, "%s_file_1_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]); + if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname1, ATTNAME, + DIMNAME, my_rank))) + ERR(ret); + + sprintf(fname2, "%s_file_2_iotype_%d_rearr_%d.nc", TEST_NAME, flavor[i], rearranger[r]); + if ((ret = create_file(test_comm, iosysid_world, flavor[i], fname2, ATTNAME, + DIMNAME, my_rank))) + ERR(ret); + + /* Now check the first file from WORLD communicator. */ + int ncid; + if ((ret = open_and_check_file(test_comm, iosysid_world, flavor[i], &ncid, fname0, ATTNAME, DIMNAME, 1, my_rank))) ERR(ret); - if ((ret = check_file(overlap_comm, overlap_iosysid, flavor[i], ncid3, fname1, - ATTNAME, DIMNAME, my_rank))) + + /* Now have the even communicators check the files. */ + int ncid2; + if (even_comm != MPI_COMM_NULL) + { + if ((ret = open_and_check_file(even_comm, even_iosysid, flavor[i], &ncid2, + fname2, ATTNAME, DIMNAME, 1, my_rank))) + ERR(ret); + if ((ret = check_file(even_comm, even_iosysid, flavor[i], ncid2, fname2, + ATTNAME, DIMNAME, my_rank))) + ERR(ret); + } + + /* Now have the overlap communicators check the files. */ + int ncid3; + if (overlap_comm != MPI_COMM_NULL) + { + if ((ret = open_and_check_file(overlap_comm, overlap_iosysid, flavor[i], + &ncid3, fname1, ATTNAME, DIMNAME, 1, my_rank))) + ERR(ret); + if ((ret = check_file(overlap_comm, overlap_iosysid, flavor[i], ncid3, fname1, + ATTNAME, DIMNAME, my_rank))) + ERR(ret); + } + + /* Close the still-open files. 
*/ + if (even_comm != MPI_COMM_NULL) + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + if (overlap_comm != MPI_COMM_NULL) + if ((ret = PIOc_closefile(ncid3))) + ERR(ret); + if ((ret = PIOc_closefile(ncid))) ERR(ret); - } - /* Close the still-open files. */ - if ((ret = PIOc_closefile(ncid))) - ERR(ret); + } /* next iotype */ + + /* Finalize PIO systems. */ if (even_comm != MPI_COMM_NULL) - { - if ((ret = PIOc_closefile(ncid2))) + if ((ret = PIOc_finalize(even_iosysid))) ERR(ret); - } if (overlap_comm != MPI_COMM_NULL) { - if ((ret = PIOc_closefile(ncid3))) + if ((ret = PIOc_finalize(overlap_iosysid))) ERR(ret); } - } /* next iotype */ - /* Finalize PIO systems. */ - printf("%d pio finalizing %d\n", my_rank, even_iosysid); - if (even_comm != MPI_COMM_NULL) - if ((ret = PIOc_finalize(even_iosysid))) + if ((ret = PIOc_finalize(iosysid_world))) ERR(ret); - printf("%d pio finalizing %d\n", my_rank, overlap_iosysid); - if (overlap_comm != MPI_COMM_NULL) - { - printf("%d calling PIOc_finalize with iosysid = %d\n", my_rank, overlap_iosysid); - if ((ret = PIOc_finalize(overlap_iosysid))) - ERR(ret); - } - printf("%d pio finalized\n", my_rank); - if ((ret = PIOc_finalize(iosysid_world))) - ERR(ret); - /* Free MPI resources used by test. */ - if ((ret = MPI_Group_free(&overlap_group))) - ERR(ret); - if ((ret = MPI_Group_free(&even_group))) - ERR(ret); - if ((ret = MPI_Group_free(&world_group))) - ERR(ret); - if (overlap_comm != MPI_COMM_NULL) - if ((ret = MPI_Comm_free(&overlap_comm))) + /* Free MPI resources used by test. 
*/ + if ((ret = MPI_Group_free(&overlap_group))) ERR(ret); - if (even_comm != MPI_COMM_NULL) - if ((ret = MPI_Comm_free(&even_comm))) + if ((ret = MPI_Group_free(&even_group))) ERR(ret); - + if ((ret = MPI_Group_free(&world_group))) + ERR(ret); + if (overlap_comm != MPI_COMM_NULL) + if ((ret = MPI_Comm_free(&overlap_comm))) + ERR(ret); + if (even_comm != MPI_COMM_NULL) + if ((ret = MPI_Comm_free(&even_comm))) + ERR(ret); + } /* next rearranger */ } /* my_rank < TARGET_NTASKS */ /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_iosystem3_simple.c b/src/externals/pio2/tests/cunit/test_iosystem3_simple.c index 7ac3ad2ebad..b78f8b420c1 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem3_simple.c +++ b/src/externals/pio2/tests/cunit/test_iosystem3_simple.c @@ -4,8 +4,9 @@ * * This is a simplified, C version of the fortran pio_iosystem_tests2.F90. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -39,19 +40,14 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + 0, &test_comm))) ERR(ERR_INIT); - + /* Test code runs on TARGET_NTASKS tasks. The left over tasks do * nothing. */ if (my_rank < TARGET_NTASKS) { - - /* Turn on logging. */ - if ((ret = PIOc_set_log_level(3))) - return ret; - /* Initialize PIO system on world. */ if ((ret = PIOc_Init_Intracomm(test_comm, 4, 1, 0, 1, &iosysid_world))) ERR(ret); @@ -79,8 +75,6 @@ int main(int argc, char **argv) if ((ret = MPI_Comm_size(overlap_comm, &overlap_size))) MPIERR(ret); } - printf("%d overlap_comm = %d overlap_rank = %d overlap_size = %d\n", my_rank, - overlap_comm, overlap_rank, overlap_size); /* Initialize PIO system for overlap comm. 
*/ if (overlap_comm != MPI_COMM_NULL) @@ -89,17 +83,13 @@ int main(int argc, char **argv) ERR(ret); } - printf("%d pio finalizing %d\n", my_rank, overlap_iosysid); /* Finalize PIO system. */ if (overlap_comm != MPI_COMM_NULL) - { - printf("%d calling PIOc_finalize with iosysid = %d\n", my_rank, overlap_iosysid); if ((ret = PIOc_finalize(overlap_iosysid))) ERR(ret); - } + if ((ret = PIOc_finalize(iosysid_world))) ERR(ret); - printf("%d pio finalized\n", my_rank); /* Free MPI resources used by test. */ if ((ret = MPI_Group_free(&overlap_group))) @@ -109,11 +99,9 @@ int main(int argc, char **argv) if (overlap_comm != MPI_COMM_NULL) if ((ret = MPI_Comm_free(&overlap_comm))) ERR(ret); - printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME); } /* Finalize test. */ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_iosystem3_simple2.c b/src/externals/pio2/tests/cunit/test_iosystem3_simple2.c index 973ef16c7eb..7253412557c 100644 --- a/src/externals/pio2/tests/cunit/test_iosystem3_simple2.c +++ b/src/externals/pio2/tests/cunit/test_iosystem3_simple2.c @@ -5,8 +5,9 @@ * This is a simplified, C version of the fortran * pio_iosystem_tests3.F90. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include @@ -36,8 +37,8 @@ int main(int argc, char **argv) MPI_Comm test_comm; /* Initialize test. */ - if ((ret = pio_test_init(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, - &test_comm))) + if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, TARGET_NTASKS, TARGET_NTASKS, + -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do @@ -85,13 +86,11 @@ int main(int argc, char **argv) } /* next iotype */ /* Finalize PIO systems. */ - printf("%d pio finalizing\n", my_rank); if ((ret = PIOc_finalize(iosysid_world))) ERR(ret); } /* my_rank < TARGET_NTASKS */ /* Finalize test. 
*/ - printf("%d %s finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ERR_AWFUL; diff --git a/src/externals/pio2/tests/cunit/test_pioc.c b/src/externals/pio2/tests/cunit/test_pioc.c index 466ed99f706..c4cd184a185 100644 --- a/src/externals/pio2/tests/cunit/test_pioc.c +++ b/src/externals/pio2/tests/cunit/test_pioc.c @@ -1,8 +1,9 @@ /* * Tests for PIO Functions. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include #include @@ -54,9 +55,6 @@ /* Number of NetCDF classic types. */ #define NUM_CLASSIC_TYPES 6 -/* Number of NetCDF-4 types. */ -#define NUM_NETCDF4_TYPES 12 - /* The dimension names. */ char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; @@ -73,6 +71,17 @@ PIO_Offset chunksize[NDIM] = {2, X_DIM_LEN/2, Y_DIM_LEN/2}; /* Length of the max maplen in decomp testing. */ #define MAX_MAPLEN 1 +signed char custom_fill_byte = -TEST_VAL_42; +unsigned char custom_fill_char = TEST_VAL_42; +short custom_fill_short = -1000; +int custom_fill_int = -100000; +float custom_fill_float = 10.5; +double custom_fill_double = 1000000.5; +unsigned char custom_fill_ubyte = 5; +unsigned short custom_fill_ushort = 1000; +unsigned int custom_fill_uint = 100000; +long long custom_fill_int64 = -100000000; +unsigned long long custom_fill_uint64 = 100000000; /* Create the decomposition to divide the 1-dimensional sample data * between the 4 tasks. @@ -114,13 +123,10 @@ int create_decomposition(int ntasks, int my_rank, int iosysid, int dim1_len, int ERR(ERR_WRONG); /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, dim_len, elements_per_pe, compdof, ioid, 0, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - /* Free the mapping. 
*/ free(compdof); @@ -142,34 +148,41 @@ int check_darray_file(int iosysid, int ntasks, int my_rank, char *filename) assert(filename); /* Open the file. */ - if ((ret = PIOc_open(iosysid, filename, NC_NOWRITE, &ncid))) - return ret; + if ((ret = PIOc_open(iosysid, filename, PIO_NOWRITE, &ncid))) + ERR(ret); /* Check metadata. */ if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) - return ret; + ERR(ret); if (ndims != 1 || nvars != 1 || ngatts != 0 || unlimdimid != -1) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_inq_dim(ncid, 0, dim_name_in, &dim_len_in))) - return ret; + ERR(ret); if (strcmp(dim_name_in, DIM_NAME) || dim_len_in != DIM_LEN) - return ERR_WRONG; + ERR(ERR_WRONG); /* Decompose the data over the tasks. */ if ((ret = create_decomposition(ntasks, my_rank, iosysid, DIM_LEN, &ioid))) - return ret; + ERR(ret); /* Read data. */ if ((ret = PIOc_read_darray(ncid, 0, ioid, arraylen, &data_in))) - return ret; + ERR(ret); + + /* Try to write, but this will fail because file was opened with + * NOWRITE. */ + float fillvalue = 0.0; + float test_data = my_rank * 10; + if (PIOc_write_darray(ncid, 0, ioid, arraylen, &test_data, &fillvalue) != PIO_EPERM) + ERR(ret); /* Check data. */ if (data_in != my_rank * 10) - return ERR_WRONG; + ERR(ERR_WRONG); /* Close the file. */ if ((ret = PIOc_closefile(ncid))) - return ret; + ERR(ret); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -197,13 +210,10 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank sprintf(filename, "%s_flavor_%d_fv_%d.nc", TEST_NAME, flavor[fmt], fv); /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d...\n", my_rank, filename, - flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) ERR(ret); /* Define netCDF dimensions and variable. 
*/ - printf("rank: %d Defining netCDF metadata...\n", my_rank); if ((ret = PIOc_def_dim(ncid, DIM_NAME, (PIO_Offset)dim_len[0], &dimids[0]))) ERR(ret); @@ -215,9 +225,15 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank if ((ret = PIOc_enddef(ncid))) ERR(ret); + /* These should not work, because invalid varids are given. */ + if (PIOc_setframe(ncid, TEST_VAL_42, 0) != PIO_ENOTVAR) + ERR(ERR_WRONG); + if (PIOc_advanceframe(ncid, TEST_VAL_42) != PIO_ENOTVAR) + ERR(ERR_WRONG); + /* Write some data. */ PIO_Offset arraylen = 1; - float fillvalue = 0.0; + float fillvalue = PIO_FILL_FLOAT; float *fillvaluep = fv ? &fillvalue : NULL; float test_data[arraylen]; for (int f = 0; f < arraylen; f++) @@ -226,7 +242,6 @@ int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank ERR(ret); /* Close the netCDF file. */ - printf("rank: %d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); @@ -257,18 +272,15 @@ int check_dim_names(int my_rank, int ncid, MPI_Comm test_comm) { memset(dim_name, 0, sizeof(dim_name)); if ((ret = PIOc_inq_dimname(ncid, d, dim_name))) - return ret; - printf("my_rank %d my_test_rank %d dim %d name %s\n", my_rank, my_test_rank, d, dim_name); + ERR(ret); /* Did other ranks get the same name? 
*/ memset(zero_dim_name, 0, sizeof(zero_dim_name)); if (!my_test_rank) strcpy(zero_dim_name, dim_name); - printf("rank %d dim_name %s zero_dim_name %s\n", my_rank, dim_name, zero_dim_name); if ((ret = MPI_Bcast(&zero_dim_name, strlen(dim_name) + 1, MPI_CHAR, 0, test_comm))) MPIERR(ret); - printf("%d zero_dim_name = %s dim_name = %s\n", my_rank, zero_dim_name, dim_name); if (strcmp(dim_name, zero_dim_name)) return ERR_AWFUL; } @@ -294,8 +306,7 @@ int check_var_name(int my_rank, int ncid, MPI_Comm test_comm) memset(var_name, 0, sizeof(var_name)); if ((ret = PIOc_inq_varname(ncid, 0, var_name))) - return ret; - printf("my_rank %d var name %s\n", my_rank, var_name); + ERR(ret); /* Did other ranks get the same name? */ memset(zero_var_name, 0, sizeof(zero_var_name)); @@ -347,7 +358,7 @@ int check_atts(int my_rank, int ncid, int flavor, MPI_Comm test_comm, int async) memset(att_name, 0, sizeof(att_name)); if ((ret = PIOc_inq_attname(ncid, NC_GLOBAL, 0, att_name))) - return ret; + ERR(ret); /* Did all ranks get the same name? */ memset(zero_att_name, 0, sizeof(zero_att_name)); @@ -361,87 +372,87 @@ int check_atts(int my_rank, int ncid, int flavor, MPI_Comm test_comm, int async) /* These should not work. */ if (PIOc_get_att_int(ncid + TEST_VAL_42, NC_GLOBAL, ATT_NAME, &att_int_value) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_get_att_int(ncid, TEST_VAL_42, ATT_NAME, &att_int_value) != PIO_ENOTVAR) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_get_att_int(ncid, NC_GLOBAL, NULL, &att_int_value) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME, NULL) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* These should not work. */ if (PIOc_inq_att(ncid, NC_GLOBAL, too_long_name, &att_type, &att_len) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); int tmp_attid; if (PIOc_inq_attid(ncid, NC_GLOBAL, too_long_name, &tmp_attid) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Check first att. 
*/ if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, ATT_NAME, &att_type, &att_len))) - return ret; + ERR(ret); if (att_type != PIO_INT || att_len != 1) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME, &att_int_value))) - return ret; + ERR(ret); if (att_int_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Try the typeless get. */ int att_int_value2; if ((ret = PIOc_get_att(ncid, NC_GLOBAL, ATT_NAME, &att_int_value2))) ERR(ret); if (att_int_value2 != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Check second att. */ if ((ret = PIOc_inq_att(ncid, NC_GLOBAL, ATT_NAME2, &att_type, &att_len))) - return ret; + ERR(ret); if (att_type != PIO_FLOAT || att_len != 1) - return ERR_WRONG; + ERR(ERR_WRONG); /* Try converting to every type. */ if ((ret = PIOc_get_att_schar(ncid, NC_GLOBAL, ATT_NAME2, &att_schar_value))) - return ret; + ERR(ret); if (att_schar_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_short(ncid, NC_GLOBAL, ATT_NAME2, &att_short_value))) - return ret; + ERR(ret); if (att_short_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_int(ncid, NC_GLOBAL, ATT_NAME2, &att_int_value))) - return ret; + ERR(ret); if (att_int_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_float(ncid, NC_GLOBAL, ATT_NAME2, &att_float_value))) - return ret; + ERR(ret); if (att_float_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_double(ncid, NC_GLOBAL, ATT_NAME2, &att_double_value))) - return ret; + ERR(ret); if (att_double_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { if ((ret = PIOc_get_att_uchar(ncid, NC_GLOBAL, ATT_NAME2, &att_uchar_value))) - return ret; + ERR(ret); if (att_uchar_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_ushort(ncid, NC_GLOBAL, ATT_NAME2, &att_ushort_value))) - return ret; + 
ERR(ret); if (att_ushort_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_uint(ncid, NC_GLOBAL, ATT_NAME2, &att_uint_value))) - return ret; + ERR(ret); if (att_uint_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_longlong(ncid, NC_GLOBAL, ATT_NAME2, &att_int64_value))) - return ret; + ERR(ret); if (att_int64_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_get_att_ulonglong(ncid, NC_GLOBAL, ATT_NAME2, &att_uint64_value))) - return ret; + ERR(ret); if (att_uint64_value != ATT_VAL) - return ERR_WRONG; + ERR(ERR_WRONG); } return 0; } @@ -468,9 +479,7 @@ int check_error_strings(int my_rank, int num_tries, int *errcode, /* Get the error string for this errcode. */ if ((ret = PIOc_strerror(errcode[try], errstr))) - return ret; - - printf("%d for errcode = %d message = %s\n", my_rank, errcode[try], errstr); + ERR(ret); /* Check that it was as expected. */ if (strncmp(errstr, expected[try], strlen(expected[try]))) @@ -478,8 +487,6 @@ int check_error_strings(int my_rank, int num_tries, int *errcode, printf("%d expected %s got %s\n", my_rank, expected[try], errstr); return ERR_AWFUL; } - if (!my_rank) - printf("%d errcode = %d passed\n", my_rank, errcode[try]); } return PIO_NOERR; @@ -494,32 +501,32 @@ int test_iotypes(int my_rank) { /* This is never present. */ if (PIOc_iotype_available(1000)) - return ERR_WRONG; + ERR(ERR_WRONG); /* NetCDF is always present. */ if (!PIOc_iotype_available(PIO_IOTYPE_NETCDF)) - return ERR_WRONG; + ERR(ERR_WRONG); /* Pnetcdf may or may not be present. */ #ifdef _PNETCDF if (!PIOc_iotype_available(PIO_IOTYPE_PNETCDF)) - return ERR_WRONG; + ERR(ERR_WRONG); #else if (PIOc_iotype_available(PIO_IOTYPE_PNETCDF)) - return ERR_WRONG; + ERR(ERR_WRONG); #endif /* _PNETCDF */ /* NetCDF-4 may or may not be present. 
*/ #ifdef _NETCDF4 if (!PIOc_iotype_available(PIO_IOTYPE_NETCDF4C)) - return ERR_WRONG; + ERR(ERR_WRONG); if (!PIOc_iotype_available(PIO_IOTYPE_NETCDF4P)) - return ERR_WRONG; + ERR(ERR_WRONG); #else if (PIOc_iotype_available(PIO_IOTYPE_NETCDF4C)) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_iotype_available(PIO_IOTYPE_NETCDF4P)) - return ERR_WRONG; + ERR(ERR_WRONG); #endif /* _NETCDF4 */ return PIO_NOERR; @@ -540,30 +547,29 @@ int check_strerror_netcdf(int my_rank) int ret; if ((ret = check_error_strings(my_rank, NUM_NETCDF_TRIES, errcode, expected))) - return ret; + ERR(ret); /* When called with a code of 0, these functions should do nothing * and return 0. */ if (check_mpi(NULL, 0, __FILE__, __LINE__)) - return ERR_WRONG; + ERR(ERR_WRONG); if (check_mpi2(NULL, NULL, 0, __FILE__, __LINE__)) - return ERR_WRONG; + ERR(ERR_WRONG); if (pio_err(NULL, NULL, 0, __FILE__, __LINE__)) - return ERR_WRONG; + ERR(ERR_WRONG); if (check_netcdf(NULL, 0, __FILE__, __LINE__)) - return ERR_WRONG; + ERR(ERR_WRONG); if (check_netcdf2(NULL, NULL, 0, __FILE__, __LINE__)) - return ERR_WRONG; + ERR(ERR_WRONG); /* When called with other error messages, these functions should * return PIO_EIO. */ - if (check_mpi(NULL, MPI_ERR_OTHER, __FILE__, __LINE__) != PIO_EIO) - return ERR_WRONG; - if (check_mpi(NULL, MPI_ERR_UNKNOWN, __FILE__, __LINE__) != PIO_EIO) - return ERR_WRONG; - - if (!my_rank) - printf("check_strerror_netcdf SUCCEEDED!\n"); + /* if (check_mpi(NULL, MPI_ERR_OTHER, __FILE__, __LINE__) != PIO_EIO) */ + /* ERR(ERR_WRONG); */ + /* This returns the correct result, but prints a confusing error + * message during the test run, so I'll leave it commented out. 
*/ + /* if (check_mpi(NULL, MPI_ERR_UNKNOWN, __FILE__, __LINE__) != PIO_EIO) */ + /* ERR(ERR_WRONG); */ return PIO_NOERR; } @@ -584,10 +590,8 @@ int check_strerror_netcdf4(int my_rank) int ret; if ((ret = check_error_strings(my_rank, NUM_NETCDF4_TRIES, errcode, expected))) - return ret; + ERR(ret); - if (!my_rank) - printf("check_strerror_netcdf4 SUCCEEDED!\n"); #endif /* _NETCDF4 */ return PIO_NOERR; @@ -609,10 +613,8 @@ int check_strerror_pnetcdf(int my_rank) int ret; if ((ret = check_error_strings(my_rank, NUM_PNETCDF_TRIES, errcode, expected))) - return ret; + ERR(ret); - if (!my_rank) - printf("check_strerror_pnetcdf SUCCEEDED!\n"); #endif /* _PNETCDF */ return PIO_NOERR; @@ -636,10 +638,7 @@ int check_strerror_pio(int my_rank) int ret; if ((ret = check_error_strings(my_rank, NUM_PIO_TRIES, errcode, expected))) - return ret; - - if (!my_rank) - printf("check_strerror_pio SUCCEEDED!\n"); + ERR(ret); return PIO_NOERR; } @@ -653,21 +652,17 @@ int check_strerror(int my_rank) { int ret; - printf("checking strerror for netCDF-classic error codes...\n"); if ((ret = check_strerror_netcdf(my_rank))) - return ret; + ERR(ret); - printf("checking strerror for netCDF-4 error codes...\n"); if ((ret = check_strerror_netcdf4(my_rank))) - return ret; + ERR(ret); - printf("checking strerror for pnetcdf error codes...\n"); if ((ret = check_strerror_pnetcdf(my_rank))) - return ret; + ERR(ret); - printf("checking strerror for PIO error codes...\n"); if ((ret = check_strerror_pio(my_rank))) - return ret; + ERR(ret); return PIO_NOERR; } @@ -684,48 +679,43 @@ int define_metadata(int ncid, int my_rank, int flavor) memset(too_long_name, 74, PIO_MAX_NAME * 5); too_long_name[PIO_MAX_NAME * 5] = 0; if (PIOc_def_dim(ncid + 1, dim_name[0], (PIO_Offset)dim_len[0], &dimids[0]) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_dim(ncid, NULL, (PIO_Offset)dim_len[0], &dimids[0]) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_dim(ncid, too_long_name, 
(PIO_Offset)dim_len[0], &dimids[0]) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Define dimensions. */ for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) - return ret; + ERR(ret); /* Check invalid parameters. */ if (PIOc_def_var(ncid + 1, VAR_NAME, PIO_INT, NDIM, dimids, &varid) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_var(ncid, NULL, PIO_INT, NDIM, dimids, &varid) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, dimids, NULL) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_def_var(ncid, too_long_name, PIO_INT, NDIM, dimids, NULL) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_def_var(ncid, too_long_name, TEST_VAL_42, NDIM, dimids, &varid) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_def_var(ncid, too_long_name, PIO_LONG_INTERNAL, NDIM, dimids, &varid) != PIO_EINVAL) - return ERR_WRONG; - - /* Define a variable. */ - if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, dimids, &varid))) - return ret; + ERR(ERR_WRONG); + if (PIOc_def_var(ncid, too_long_name, PIO_INT, NDIM, dimids, &varid) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* Define a variable. Test that varidp can be NULL. Since this is + * the first var in the file, the varid will be 0. */ + if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, dimids, NULL))) + ERR(ret); + varid = 0; /* Set the fill mode. */ int fillmode = PIO_NOFILL; int temp_mode; int old_mode; if ((ret = PIOc_set_fill(ncid, fillmode, &old_mode))) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_set_fill(ncid, fillmode, &temp_mode))) - return ERR_WRONG; - printf("%d old_mode = %d temp_mode = %d\n", my_rank, old_mode, temp_mode); + ERR(ERR_WRONG); if (temp_mode != PIO_NOFILL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_set_fill(ncid, old_mode, NULL))) - return ret; + ERR(ret); /* Set the fill value for netCDF-4 files. 
*/ int int_fill = -999; @@ -734,27 +724,27 @@ int define_metadata(int ncid, int my_rank, int flavor) /* These should not work. */ if (PIOc_def_var_fill(ncid + TEST_VAL_42, varid, NC_FILL, &int_fill) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_var_fill(ncid, varid + TEST_VAL_42, NC_FILL, &int_fill) != PIO_ENOTVAR) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_var_fill(ncid, varid, NC_FILL + TEST_VAL_42, &int_fill) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_def_var_fill(ncid, varid, NC_FILL, NULL) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Set the fill value. */ if ((ret = PIOc_def_var_fill(ncid, varid, NC_FILL, &int_fill))) - return ret; + ERR(ret); /* These should not work. */ if (PIOc_inq_var_fill(ncid + TEST_VAL_42, varid, &fill_mode, &int_fill_in) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_inq_var_fill(ncid, varid + TEST_VAL_42, &fill_mode, &int_fill_in) != PIO_ENOTVAR) - return ERR_WRONG; + ERR(ERR_WRONG); /* Check the fill value. */ if ((ret = PIOc_inq_var_fill(ncid, varid, &fill_mode, &int_fill_in))) - return ret; + ERR(ret); if (fill_mode != NC_FILL || int_fill_in != int_fill) ERR(ERR_WRONG); @@ -765,14 +755,14 @@ int define_metadata(int ncid, int my_rank, int flavor) if (flavor != PIO_IOTYPE_PNETCDF) { if ((ret = PIOc_inq_var_fill(ncid, varid, NULL, &int_fill_in))) - return ret; + ERR(ret); if (int_fill_in != int_fill) ERR(ERR_WRONG); if ((ret = PIOc_inq_var_fill(ncid, varid, NULL, NULL))) - return ret; + ERR(ret); } if ((ret = PIOc_inq_var_fill(ncid, varid, &fill_mode, NULL))) - return ret; + ERR(ret); if (fill_mode != NC_FILL) ERR(ERR_WRONG); @@ -791,38 +781,44 @@ int check_metadata(int ncid, int my_rank, int flavor) /* Check how many dims, vars, global atts there are, and the id of * the unlimited dimension. 
*/ if (PIOc_inq(ncid + 1, &ndims, &nvars, &ngatts, &unlimdimid) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_inq(ncid, NULL, NULL, NULL, NULL))) - return ret; + ERR(ret); if ((ret = PIOc_inq(ncid, &ndims, &nvars, &ngatts, &unlimdimid))) - return ret; + ERR(ret); if (ndims != NDIM || nvars != 1 || ngatts != 0 || unlimdimid != 0) return ERR_AWFUL; + int num_unlimdims; + int unlimdimid2; + if ((ret = PIOc_inq_unlimdims(ncid, &num_unlimdims, &unlimdimid2))) + ERR(ret); + if (unlimdimid2 != 0) + return ERR_AWFUL; /* Check the dimensions. */ for (int d = 0; d < NDIM; d++) { if (PIOc_inq_dim(ncid + 1, d, name_in, &len_in) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_inq_dim(ncid, d + 40, name_in, &len_in) != PIO_EBADDIM) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_inq_dim(ncid, d, NULL, NULL))) - return ret; + ERR(ret); if ((ret = PIOc_inq_dim(ncid, d, name_in, &len_in))) - return ret; + ERR(ret); if (len_in != dim_len[d] || strcmp(name_in, dim_name[d])) return ERR_AWFUL; } /* Check the variable. */ if (PIOc_inq_var(ncid + 1, 0, name_in, &xtype_in, &ndims, dimid, &natts) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_inq_var(ncid, 45, name_in, &xtype_in, &ndims, dimid, &natts) != PIO_ENOTVAR) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_inq_var(ncid, 0, name_in, NULL, NULL, NULL, NULL))) - return ret; + ERR(ret); if ((ret = PIOc_inq_var(ncid, 0, name_in, &xtype_in, &ndims, dimid, &natts))) - return ret; + ERR(ret); if (strcmp(name_in, VAR_NAME) || xtype_in != PIO_INT || ndims != NDIM || dimid[0] != 0 || dimid[1] != 1 || dimid[2] != 2 || natts != 1) return ERR_AWFUL; @@ -863,38 +859,33 @@ int test_names(int iosysid, int num_flavors, int *flavor, int my_rank, /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); sprintf(filename, "%s_%s_names.nc", TEST_NAME, iotype_name); /* Create the netCDF output file. 
*/ - printf("rank: %d Creating sample file %s with format %d...\n", - my_rank, filename, flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) - return ret; + ERR(ret); /* Define netCDF dimensions and variable. */ - printf("rank: %d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) { - printf("rank: %d Defining netCDF dimension %s, length %d\n", my_rank, - dim_name[d], dim_len[d]); if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) - return ret; + ERR(ret); } /* These should not work. */ if (PIOc_setframe(ncid + TEST_VAL_42, 0, 0) != PIO_EBADID) - return ERR_WRONG; - if (PIOc_setframe(ncid, -1, 0) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_setframe(ncid, NC_MAX_VARS + 1, 0) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); + if (PIOc_setframe(ncid, -1, 0) != PIO_ENOTVAR) + ERR(ERR_WRONG); + if (PIOc_setframe(ncid, NC_MAX_VARS + 1, 0) != PIO_ENOTVAR) + ERR(ERR_WRONG); if (PIOc_advanceframe(ncid + TEST_VAL_42, 0) != PIO_EBADID) - return ERR_WRONG; - if (PIOc_advanceframe(ncid, -1) != PIO_EINVAL) - return ERR_WRONG; - if (PIOc_advanceframe(ncid, NC_MAX_VARS + 1) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); + if (PIOc_advanceframe(ncid, -1) != PIO_ENOTVAR) + ERR(ERR_WRONG); + if (PIOc_advanceframe(ncid, NC_MAX_VARS + 1) != PIO_ENOTVAR) + ERR(ERR_WRONG); /* Check the dimension names. */ if ((ret = check_dim_names(my_rank, ncid, test_comm))) @@ -940,7 +931,6 @@ int test_names(int iosysid, int num_flavors, int *flavor, int my_rank, ERR(ret); /* Close the netCDF file. */ - printf("rank: %d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); } @@ -974,32 +964,29 @@ int test_files(int iosysid, int num_flavors, int *flavor, int my_rank) /* If this is netCDF-4, add the netCDF4 flag. 
*/ if (flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P) { - printf("%d adding NC_NETCDF4 flag\n", my_rank); mode |= NC_NETCDF4; } /* If this is pnetcdf or netCDF-4 parallel, add the MPIIO flag. */ if (flavor[fmt] == PIO_IOTYPE_PNETCDF || flavor[fmt] == PIO_IOTYPE_NETCDF4P) { - printf("%d adding NC_MPIIO flag\n", my_rank); mode |= NC_MPIIO; } /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); sprintf(filename, "%s_%s.nc", TEST_NAME, iotype_name); /* Testing some invalid parameters. */ if (PIOc_create(iosysid + 1, filename, mode, &ncid) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_create(iosysid, filename, mode, NULL) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_create(iosysid, NULL, mode, &ncid) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Create the netCDF output file. */ - printf("%d Creating sample file %s with format %d...\n", my_rank, filename, flavor[fmt]); if ((ret = PIOc_create(iosysid, filename, mode, &ncid))) ERR(ret); @@ -1015,28 +1002,25 @@ int test_files(int iosysid, int num_flavors, int *flavor, int my_rank) /* End define mode. */ if (PIOc_enddef(ncid + 1) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_enddef(ncid))) ERR(ret); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if (PIOc_closefile(ncid + 1) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_closefile(ncid))) ERR(ret); /* Check some invalid paramters. */ if (PIOc_open(iosysid + 1, filename, mode, &ncid) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_open(iosysid, NULL, mode, &ncid) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_open(iosysid, filename, mode, NULL) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Reopen the test file. 
*/ - printf("%d Re-opening sample file %s with format %d...\n", - my_rank, filename, flavor[fmt]); if ((ret = PIOc_open(iosysid, filename, mode, &ncid))) ERR(ret); @@ -1045,7 +1029,6 @@ int test_files(int iosysid, int num_flavors, int *flavor, int my_rank) ERR(ret); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); @@ -1054,6 +1037,335 @@ int test_files(int iosysid, int num_flavors, int *flavor, int my_rank) return PIO_NOERR; } +/* Test empty file operations. + * + * @param iosysid the iosystem ID that will be used for the test. + * @param num_flavors the number of different IO types that will be tested. + * @param flavor an array of the valid IO types. + * @param my_rank 0-based rank of task. + * @returns 0 for success, error code otherwise. + */ +int test_empty_files(int iosysid, int num_flavors, int *flavor, int my_rank) +{ + int ncid, ncid2; + int ret; /* Return code. */ + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + char filename[PIO_MAX_NAME + 1]; /* Test filename. */ + char iotype_name[PIO_MAX_NAME + 1]; + + /* Create a filename. */ + if ((ret = get_iotype_name(flavor[fmt], iotype_name))) + ERR(ret); + sprintf(filename, "%s_empty_%s.nc", TEST_NAME, iotype_name); + + if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Reopen the test file. */ + if ((ret = PIOc_openfile2(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE))) + ERR(ret); + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid2))) + ERR(ret); + + } + + return PIO_NOERR; +} + +/* Check that the fill values are correctly reported by find_var_fill(). + * + * @param ncid the ID of the open test file. 
+ * @param ntypes the number ot types we are testing. + * @param use_custom_fill true if custom fill values were used. + * @param my_rank rank of this task. + * @return 0 on success. + */ +int check_fillvalues(int ncid, int num_types, int use_custom_fill, int my_rank) +{ + file_desc_t *file; + signed char fill_byte; + unsigned char fill_char; + short fill_short; + int fill_int; + float fill_float; + double fill_double; + unsigned char fill_ubyte; + unsigned short fill_ushort; + unsigned int fill_uint; + long long fill_int64; + unsigned long long fill_uint64; + int ret; + + if (use_custom_fill) + { + fill_byte = custom_fill_byte; + fill_char = custom_fill_char; + fill_short = custom_fill_short; + fill_int = custom_fill_int; + fill_float = custom_fill_float; + fill_double = custom_fill_double; + fill_ubyte = custom_fill_ubyte; + fill_ushort = custom_fill_ushort; + fill_uint = custom_fill_uint; + fill_int64 = custom_fill_int64; + fill_uint64 = custom_fill_uint64; + } + else + { + fill_byte = PIO_FILL_BYTE; + fill_char = PIO_FILL_CHAR; + fill_short = PIO_FILL_SHORT; + fill_int = PIO_FILL_INT; + fill_float = PIO_FILL_FLOAT; + fill_double = PIO_FILL_DOUBLE; + fill_ubyte = PIO_FILL_UBYTE; + fill_ushort = PIO_FILL_USHORT; + fill_uint = PIO_FILL_UINT; + fill_int64 = PIO_FILL_INT64; + fill_uint64 = PIO_FILL_UINT64; + } + + if ((ret = pio_get_file(ncid, &file))) + ERR(ret); + + for (int v = 0; v < num_types; v++) + { + var_desc_t *vdesc; + + /* Get the var info. */ + if ((ret = get_var_desc(v, &file->varlist, &vdesc))) + ERR(ret); + + /* Check the fill value with this internal function. 
*/ + if ((ret = find_var_fillvalue(file, v, vdesc))) + ERR(ret); + + switch (vdesc->pio_type) + { + case PIO_BYTE: + if (*(signed char *)vdesc->fillvalue != fill_byte) + ERR(ERR_WRONG); + break; + case PIO_CHAR: + if (*(unsigned char *)vdesc->fillvalue != fill_char) + ERR(ERR_WRONG); + break; + case PIO_SHORT: + if (*(short *)vdesc->fillvalue != fill_short) + ERR(ERR_WRONG); + break; + case PIO_INT: + if (*(int *)vdesc->fillvalue != fill_int) + ERR(ERR_WRONG); + break; + case PIO_FLOAT: + if (*(float *)vdesc->fillvalue != fill_float) + ERR(ERR_WRONG); + break; + case PIO_DOUBLE: + if (*(double *)vdesc->fillvalue != fill_double) + ERR(ERR_WRONG); + break; + case PIO_UBYTE: + if (*(unsigned char *)vdesc->fillvalue != fill_ubyte) + ERR(ERR_WRONG); + break; + case PIO_USHORT: + if (*(unsigned short *)vdesc->fillvalue != fill_ushort) + ERR(ERR_WRONG); + break; + case PIO_UINT: + if (*(unsigned int *)vdesc->fillvalue != fill_uint) + ERR(ERR_WRONG); + break; + case PIO_INT64: + if (*(long long *)vdesc->fillvalue != fill_int64) + ERR(ERR_WRONG); + break; + case PIO_UINT64: + if (*(unsigned long long *)vdesc->fillvalue != fill_uint64) + ERR(ERR_WRONG); + break; + default: + ERR(ERR_AWFUL); + } + } + + return PIO_NOERR; +} + +/* Test the internal function that determins a var's fillvalue. + * + * @param iosysid the iosystem ID that will be used for the test. + * @param num_flavors the number of different IO types that will be tested. + * @param flavor an array of the valid IO types. + * @param my_rank 0-based rank of task. + * @returns 0 for success, error code otherwise. + */ +int test_find_var_fillvalue(int iosysid, int num_flavors, int *flavor, + int my_rank, int async) +{ +#define NUM_FILL_TESTS 2 + int ncid; + int dimid; + int test_type[NUM_NETCDF4_TYPES - 1] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, + PIO_FLOAT, PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, + PIO_UINT, PIO_INT64, PIO_UINT64}; + int ret; /* Return code. 
*/ + + /* Use PIO to create the example file in each of the four + * available ways. */ + for (int fmt = 0; fmt < num_flavors; fmt++) + { + char filename[PIO_MAX_NAME + 1]; /* Test filename. */ + char iotype_name[PIO_MAX_NAME + 1]; + int num_types = NUM_CLASSIC_TYPES; + + /* Overwrite existing test file. */ + int mode = PIO_CLOBBER; + + /* If this is netCDF-4, add the netCDF4 flag. */ + if (flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P) + { + mode |= NC_NETCDF4; + num_types = NUM_NETCDF4_TYPES - 1; + } + else + num_types = NUM_CLASSIC_TYPES; + + /* If this is pnetcdf or netCDF-4 parallel, add the MPIIO flag. */ + if (flavor[fmt] == PIO_IOTYPE_PNETCDF || flavor[fmt] == PIO_IOTYPE_NETCDF4P) + { + mode |= NC_MPIIO; + } + + /* Get memory for varids. */ + int varid[num_types]; + + /* Create a filename. */ + if ((ret = get_iotype_name(flavor[fmt], iotype_name))) + ERR(ret); + sprintf(filename, "%s_find_var_fillvalue_%s.nc", TEST_NAME, iotype_name); + + /* Test with and without custom fill values. */ + for (int fvt = 0; fvt < NUM_FILL_TESTS; fvt++) + { + /* Create the netCDF output file. */ + if ((ret = PIOc_create(iosysid, filename, mode, &ncid))) + ERR(ret); + + /* Create a dimension. */ + if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid))) + ERR(ret); + + /* Create a var of each type. */ + for (int v = 0; v < num_types; v++) + { + char var_name[PIO_MAX_NAME + 1]; + sprintf(var_name, "var_of_type_%d", test_type[v]); + if ((ret = PIOc_def_var(ncid, var_name, test_type[v], NDIM1, &dimid, &varid[v]))) + ERR(ret); + + /* Use custom fill values for this test? 
*/ + if (fvt) + { + switch(test_type[v]) + { + case PIO_BYTE: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_byte))) + ERR(ret); + break; + case PIO_CHAR: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_char))) + ERR(ret); + break; + case PIO_SHORT: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_short))) + ERR(ret); + break; + case PIO_INT: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_int))) + ERR(ret); + break; + case PIO_FLOAT: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_float))) + ERR(ret); + break; + case PIO_DOUBLE: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_double))) + ERR(ret); + break; + case PIO_UBYTE: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_ubyte))) + ERR(ret); + break; + case PIO_USHORT: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_ushort))) + ERR(ret); + break; + case PIO_UINT: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_uint))) + ERR(ret); + break; + case PIO_INT64: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_int64))) + ERR(ret); + break; + case PIO_UINT64: + if ((ret = PIOc_def_var_fill(ncid, varid[v], NC_FILL, &custom_fill_uint64))) + ERR(ret); + break; + default: + ERR(ERR_AWFUL); + } + } /* endif fvt */ + } + + /* Check the fill values. */ + if ((ret = check_fillvalues(ncid, num_types, fvt, my_rank))) + ERR(ret); + + /* End define mode. */ + if ((ret = PIOc_enddef(ncid))) + ERR(ret); + + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + + /* Reopen the test file. */ + /* if ((ret = PIOc_open(iosysid, filename, NC_WRITE, &ncid))) */ + /* ERR(ret); */ + if ((ret = PIOc_openfile2(iosysid, &ncid, &flavor[fmt], filename, NC_WRITE))) + ERR(ret); + + /* Check the fill values. */ + if ((ret = check_fillvalues(ncid, num_types, fvt, my_rank))) + ERR(ret); + + /* Close the netCDF file. 
*/ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + } /* next fill value test */ + } + + return PIO_NOERR; +} + /* Test the deletion of files. * * @param iosysid the iosystem ID that will be used for the test. @@ -1077,26 +1389,24 @@ int test_deletefile(int iosysid, int num_flavors, int *flavor, int my_rank) /* These should fail. */ if (PIOc_set_iosystem_error_handling(iosysid + TEST_VAL_42, PIO_RETURN_ERROR, &old_method) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_set_iosystem_error_handling(iosysid, PIO_RETURN_ERROR + TEST_VAL_42, &old_method) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Set error handling. */ if ((ret = PIOc_set_iosystem_error_handling(iosysid, PIO_RETURN_ERROR, &old_method))) - return ret; + ERR(ret); if (old_method != PIO_INTERNAL_ERROR && old_method != PIO_RETURN_ERROR) - return ERR_WRONG; + ERR(ERR_WRONG); /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); sprintf(filename, "delete_me_%s_%s.nc", TEST_NAME, iotype_name); - printf("%d testing delete for file %s with format %d...\n", - my_rank, filename, flavor[fmt]); int bad_iotype = TEST_VAL_42; if (PIOc_createfile(iosysid, &ncid, &bad_iotype, filename, PIO_CLOBBER) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) ERR(ret); @@ -1105,7 +1415,6 @@ int test_deletefile(int iosysid, int num_flavors, int *flavor, int my_rank) ERR(ret); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); @@ -1114,13 +1423,12 @@ int test_deletefile(int iosysid, int num_flavors, int *flavor, int my_rank) ERR(ERR_WRONG); /* Now delete the file. */ - printf("%d Deleting %s...\n", my_rank, filename); if ((ret = PIOc_deletefile(iosysid, filename))) ERR(ret); /* Make sure it is gone. Openfile will now return an error * code when I try to open the file. 
*/ - if (!PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE)) + if (!PIOc_openfile2(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE)) ERR(ERR_WRONG); } @@ -1166,12 +1474,9 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); sprintf(filename, "%s_%s.nc", TEST_NAME, iotype_name); - printf("%d Setting chunk cache for file %s with format %d...\n", - my_rank, filename, flavor[fmt]); - /* Try to set the chunk cache. */ chunk_cache_preemption = 0.5; ret = PIOc_set_chunk_cache(iosysid, flavor[fmt], chunk_cache_size, @@ -1203,21 +1508,15 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) } /* Create the netCDF output file. */ - printf("%d Creating sample file %s with format %d...\n", - my_rank, filename, flavor[fmt]); if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) ERR(ret); /* Define netCDF dimensions and variable. 
*/ - printf("%d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) { - printf("%d Defining netCDF dimension %s, length %d\n", my_rank, - dim_name[d], dim_len[d]); if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); } - printf("%d Defining netCDF variable %s, ndims %d\n", my_rank, VAR_NAME, NDIM); if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_FLOAT, NDIM, dimids, &varid))) ERR(ret); @@ -1262,7 +1561,6 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) } else { - printf("my ret = %d\n", PIOc_def_var_chunking(ncid, TEST_VAL_42, NC_CHUNKED, chunksize)); if (PIOc_def_var_chunking(ncid, TEST_VAL_42, NC_CHUNKED, chunksize) != PIO_ENOTNC4) ERR(ERR_AWFUL); if (PIOc_inq_var_chunking(ncid, TEST_VAL_42, &storage, my_chunksize) != PIO_ENOTNC4) @@ -1282,12 +1580,10 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) if (PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, (MPI_Offset *)too_big_chunksize) == PIO_NOERR) ERR(ret); - printf("%d Defining chunksizes\n", my_rank); if ((ret = PIOc_def_var_chunking(ncid, 0, NC_CHUNKED, chunksize))) ERR(ret); /* Setting deflate should not work with parallel iotype. */ - printf("%d Defining deflate\n", my_rank); ret = PIOc_def_var_deflate(ncid, 0, 0, 1, 1); if (flavor[fmt] == PIO_IOTYPE_NETCDF4P) { @@ -1301,14 +1597,12 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) } /* Check that the inq_varname function works. */ - printf("%d Checking varname\n", my_rank); if ((ret = PIOc_inq_varname(ncid, 0, NULL))) ERR(ret); if ((ret = PIOc_inq_varname(ncid, 0, varname_in))) ERR(ret); /* Check that the inq_var_chunking function works. 
*/ - printf("%d Checking chunksizes\n", my_rank); if ((ret = PIOc_inq_var_chunking(ncid, 0, NULL, NULL))) ERR(ret); if ((ret = PIOc_inq_var_chunking(ncid, 0, &storage, my_chunksize))) @@ -1336,19 +1630,16 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) ERR(ERR_AWFUL); /* Check setting the chunk cache for the variable. */ - printf("%d PIOc_set_var_chunk_cache...\n", my_rank); if ((ret = PIOc_set_var_chunk_cache(ncid, 0, VAR_CACHE_SIZE, VAR_CACHE_NELEMS, VAR_CACHE_PREEMPTION))) ERR(ret); /* Check getting the chunk cache values for the variable. */ - printf("%d PIOc_get_var_chunk_cache...\n", my_rank); if ((ret = PIOc_get_var_chunk_cache(ncid, 0, &var_cache_size, &var_cache_nelems, &var_cache_preemption))) ERR(ret); /* Check that we got expected values. */ - printf("%d var_cache_size = %lld\n", my_rank, var_cache_size); if (var_cache_size != VAR_CACHE_SIZE) ERR(ERR_AWFUL); if (var_cache_nelems != VAR_CACHE_NELEMS) @@ -1396,7 +1687,6 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) ERR(ret); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); } @@ -1405,7 +1695,7 @@ int test_nc4(int iosysid, int num_flavors, int *flavor, int my_rank) /* This function is part of test_scalar(). It tests the contents of * the scalar var. */ -int check_scalar_var(int ncid, int varid, int flavor) +int check_scalar_var(int ncid, int varid, int flavor, int my_rank) { char var_name_in[PIO_MAX_NAME + 1]; int var_type_in; @@ -1417,20 +1707,19 @@ int check_scalar_var(int ncid, int varid, int flavor) /* Learn the var metadata. */ if ((ret = PIOc_inq_var(ncid, varid, var_name_in, &var_type_in, &ndims_in, NULL, &natts_in))) - return ret; + ERR(ret); /* Is the metadata correct? */ if (strcmp(var_name_in, VAR_NAME) || var_type_in != PIO_INT || ndims_in != 0 || natts_in != 0) - return ERR_WRONG; + ERR(ERR_WRONG); /* Get the value. 
*/ if ((ret = PIOc_get_var_int(ncid, varid, &val_in))) - return ret; - printf("val_in = %d\n", val_in); + ERR(ret); /* Is the value correct? */ if (val_in != TEST_VAL_42) - return ERR_WRONG; + ERR(ERR_WRONG); return 0; } @@ -1452,27 +1741,27 @@ int test_scalar(int iosysid, int num_flavors, int *flavor, int my_rank, int asyn int test_val_in; if ((ret = nc_create(test_file, NC_CLOBBER, &ncid))) - return ret; + ERR(ret); if ((ret = nc_def_var(ncid, VAR_NAME, NC_INT, 0, NULL, &varid))) - return ret; + ERR(ret); if ((ret = nc_enddef(ncid))) - return ret; + ERR(ret); if ((ret = nc_put_var(ncid, varid, &test_val))) - return ret; + ERR(ret); if ((ret = nc_close(ncid))) - return ret; + ERR(ret); if ((ret = nc_open(test_file, NC_NOWRITE, &ncid))) - return ret; + ERR(ret); /* if ((ret = nc_get_var(ncid, varid, &test_val_in))) */ - /* return ret; */ + /* ERR(ret); */ /* if (test_val_in != test_val) */ - /* return ERR_WRONG; */ + /* ERR(ERR_WRONG); */ if ((ret = nc_get_vars(ncid, varid, NULL, NULL, NULL, &test_val_in))) - return ret; + ERR(ret); if (test_val_in != test_val) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = nc_close(ncid))) - return ret; + ERR(ret); } /* Use pnetCDF to create a file with a scalar var, then set and @@ -1484,33 +1773,32 @@ int test_scalar(int iosysid, int num_flavors, int *flavor, int my_rank, int asyn int test_val_in; if ((ret = ncmpi_create(test_comm, test_file, NC_CLOBBER, MPI_INFO_NULL, &ncid))) - return ret; + ERR(ret); if ((ret = ncmpi_def_var(ncid, VAR_NAME, NC_INT, 0, NULL, &varid))) - return ret; + ERR(ret); if ((ret = ncmpi_enddef(ncid))) - return ret; + ERR(ret); if ((ret = ncmpi_put_var_int_all(ncid, varid, &test_val))) - return ret; + ERR(ret); if ((ret = ncmpi_close(ncid))) - return ret; + ERR(ret); if ((ret = ncmpi_open(test_comm, test_file, NC_NOWRITE, MPI_INFO_NULL, &ncid))) - return ret; + ERR(ret); /* Turn on independent access for pnetcdf file. 
*/ if ((ret = ncmpi_begin_indep_data(ncid))) - return ret; + ERR(ret); /* if ((ret = ncmpi_get_var_int(ncid, varid, &test_val_in))) */ - /* return ret; */ + /* ERR(ret); */ if ((ret = ncmpi_get_vars_int(ncid, varid, NULL, NULL, NULL, &test_val_in))) - return ret; + ERR(ret); if ((ret = ncmpi_end_indep_data(ncid))) - return ret; + ERR(ret); if (test_val_in != test_val) - return ERR_WRONG; - printf("ret = %d test_val_in = %d\n", ret, test_val_in); + ERR(ERR_WRONG); if (test_val_in != test_val) - return ERR_WRONG; + ERR(ERR_WRONG); if ((ret = ncmpi_close(ncid))) - return ret; + ERR(ret); } #endif /* _PNETCDF */ @@ -1523,11 +1811,10 @@ int test_scalar(int iosysid, int num_flavors, int *flavor, int my_rank, int asyn /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); sprintf(filename, "%s_%s_scalar_async_%d.nc", TEST_NAME, iotype_name, async); /* Create the netCDF output file. */ - printf("%d Creating test file %s.\n", my_rank, filename); if ((ret = PIOc_createfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_CLOBBER))) ERR(ret); @@ -1545,20 +1832,19 @@ int test_scalar(int iosysid, int num_flavors, int *flavor, int my_rank, int asyn ERR(ret); /* Check the scalar var. */ - if ((ret = check_scalar_var(ncid, varid, flavor[fmt]))) + if ((ret = check_scalar_var(ncid, varid, flavor[fmt], my_rank))) ERR(ret); /* Close the netCDF file. */ - printf("%d Closing the sample data file...\n", my_rank); if ((ret = PIOc_closefile(ncid))) ERR(ret); /* Reopen the file. */ - if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) ERR(ret); /* Check the scalar var again. */ - if ((ret = check_scalar_var(ncid, varid, flavor[fmt]))) + if ((ret = check_scalar_var(ncid, varid, flavor[fmt], my_rank))) ERR(ret); /* Close the netCDF file. 
*/ @@ -1577,40 +1863,39 @@ int test_malloc_iodesc2(int iosysid, int my_rank) { /* More types are available for netCDF-4 builds. */ #ifdef _NETCDF4 - int num_types = NUM_NETCDF_TYPES; + /* String type not supported. */ + int num_types = NUM_NETCDF_TYPES - 1; #else int num_types = NUM_CLASSIC_TYPES; #endif /* _NETCDF4 */ int test_type[NUM_NETCDF_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, PIO_UBYTE, PIO_USHORT, - PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; + PIO_UINT, PIO_INT64, PIO_UINT64}; MPI_Datatype mpi_type[NUM_NETCDF_TYPES] = {MPI_BYTE, MPI_CHAR, MPI_SHORT, MPI_INT, MPI_FLOAT, MPI_DOUBLE, MPI_UNSIGNED_CHAR, MPI_UNSIGNED_SHORT, MPI_UNSIGNED, MPI_LONG_LONG, MPI_UNSIGNED_LONG_LONG, MPI_CHAR}; - int ioid; iosystem_desc_t *ios; io_desc_t *iodesc; int ret; if (!(ios = pio_get_iosystem_from_id(iosysid))) return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__); - printf("test_malloc_iodesc2 num_types %d\n",num_types); /* Test with each type. */ for (int t = 0; t < num_types; t++) { if ((ret = malloc_iodesc(ios, test_type[t], 1, &iodesc))) - return ret; - if (iodesc->basetype != mpi_type[t]) - return ERR_WRONG; + ERR(ret); + if (iodesc->mpitype != mpi_type[t]) + ERR(ERR_WRONG); if (iodesc->ndims != 1) - return ERR_WRONG; - ioid = pio_add_to_iodesc_list(iodesc); + ERR(ERR_WRONG); + pio_add_to_iodesc_list(iodesc); if (iodesc->firstregion) free_region_list(iodesc->firstregion); - if ((ret = pio_delete_iodesc_from_list(ioid))) - return ret; + if ((ret = pio_delete_iodesc_from_list(iodesc->ioid))) + ERR(ret); } return 0; } @@ -1631,11 +1916,11 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) - return ret; + ERR(ret); /* Write the decomp file (on appropriate tasks). 
*/ if ((ret = PIOc_write_decomp(filename, iosysid, ioid, test_comm))) - return ret; + ERR(ret); /* Some values for the netCDF decomp file for this iosystem. */ char *title = "Very Simple Test Decompositon"; @@ -1652,7 +1937,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_write_nc_decomp_int(ios, nc_filename, 0, NDIM1, global_dimlen, TARGET_NTASKS, task_maplen, (int *)map, title, history, 0))) - return ret; + ERR(ret); int ndims_in; int num_tasks_in; @@ -1671,41 +1956,40 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if (pioc_read_nc_decomp_int(iosysid + TEST_VAL_42, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (pioc_read_nc_decomp_int(iosysid, NULL, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (!pioc_read_nc_decomp_int(iosysid, "no_file", &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in)) - return ERR_WRONG; + ERR(ERR_WRONG); /* Read the decomp file. */ if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); + - /* Did we get the correct answers? 
*/ - printf("source_in = %s\n", source_in); if (strcmp(title, title_in) || strcmp(history, history_in) || strcmp(source_in, expected_source)) - return ERR_WRONG; + ERR(ERR_WRONG); if (ndims_in != NDIM1 || num_tasks_in != TARGET_NTASKS || max_maplen_in != 1 || fortran_order_in) - return ERR_WRONG; + ERR(ERR_WRONG); for (int d = 0; d < ndims_in; d++) if (global_dimlen_in[d] != global_dimlen[d]) - return ERR_WRONG; + ERR(ERR_WRONG); for (int t = 0; t < num_tasks_in; t++) if (task_maplen_in[t] != 1) - return ERR_WRONG; + ERR(ERR_WRONG); for (int t = 0; t < num_tasks_in; t++) for (int l = 0; l < max_maplen_in; l++) if (map_in[t * max_maplen_in + l] != map[t][l]) - return ERR_WRONG; + ERR(ERR_WRONG); /* Free resources. */ free(global_dimlen_in); @@ -1716,7 +2000,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, NULL, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1724,14 +2008,14 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, NULL, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(task_maplen_in); free(map_in); if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, NULL, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1739,14 +2023,14 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, 
&num_tasks_in, NULL, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(map_in); if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, NULL, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1754,14 +2038,14 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, NULL, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, NULL, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1769,7 +2053,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, NULL, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1777,7 +2061,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, NULL, version_in, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1785,7 +2069,7 @@ int test_decomp_internal(int my_test_size, int 
my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, NULL, &fortran_order_in))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1793,7 +2077,7 @@ int test_decomp_internal(int my_test_size, int my_rank, int iosysid, int dim_len if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, NULL))) - return ret; + ERR(ret); free(global_dimlen_in); free(task_maplen_in); free(map_in); @@ -1818,7 +2102,7 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) - return ret; + ERR(ret); /* We will document our decomp file with metadata, like good * netCDF users should. */ @@ -1832,24 +2116,24 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, if (PIOc_write_nc_decomp(iosysid + TEST_VAL_42, nc_filename, 0, ioid, title, history, 0) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_write_nc_decomp(iosysid, NULL, 0, ioid, title, history, 0) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid + TEST_VAL_42, title, history, 0) != PIO_EBADID) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, too_long_name, history, 0) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); if (PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, title, too_long_name, 0) != PIO_EINVAL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Write a netCDF decomp file for this iosystem. 
*/ if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, title, history, 0))) - return ret; + ERR(ret); int ioid_in; char title_in[PIO_MAX_NAME + 1]; @@ -1859,22 +2143,22 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, /* These should not work. */ if (PIOc_read_nc_decomp(iosysid + TEST_VAL_42, nc_filename, &ioid_in, test_comm, PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EBADID) - return ret; + ERR(ret); if (PIOc_read_nc_decomp(iosysid, NULL, &ioid_in, test_comm, PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EINVAL) - return ret; + ERR(ret); if (PIOc_read_nc_decomp(iosysid, nc_filename, NULL, test_comm, PIO_INT, title_in, history_in, &fortran_order_in) != PIO_EINVAL) - return ret; + ERR(ret); /* Read it using the public read function. */ if ((ret = PIOc_read_nc_decomp(iosysid, nc_filename, &ioid_in, test_comm, PIO_INT, title_in, history_in, &fortran_order_in))) - return ret; + ERR(ret); /* Did we get expected results? */ if (strcmp(title, title_in) || strcmp(history, history_in)) - return ERR_WRONG; + ERR(ERR_WRONG); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid_in))) @@ -1883,19 +2167,19 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, /* These should also work. 
*/ if ((ret = PIOc_read_nc_decomp(iosysid, nc_filename, &ioid_in, test_comm, PIO_CHAR, NULL, history_in, &fortran_order_in))) - return ret; + ERR(ret); if ((ret = PIOc_freedecomp(iosysid, ioid_in))) ERR(ret); if ((ret = PIOc_read_nc_decomp(iosysid, nc_filename, &ioid_in, test_comm, PIO_BYTE, title_in, NULL, &fortran_order_in))) - return ret; + ERR(ret); if ((ret = PIOc_freedecomp(iosysid, ioid_in))) ERR(ret); if ((ret = PIOc_read_nc_decomp(iosysid, nc_filename, &ioid_in, test_comm, PIO_SHORT, title_in, history_in, NULL))) - return ret; + ERR(ret); if ((ret = PIOc_freedecomp(iosysid, ioid_in))) ERR(ret); @@ -1914,26 +2198,25 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, if ((ret = pioc_read_nc_decomp_int(iosysid, nc_filename, &ndims_in, &global_dimlen_in, &num_tasks_in, &task_maplen_in, &max_maplen_in, &map_in, title_in, history_in, source_in, version_in, &fortran_order_in))) - return ret; + ERR(ret); /* Did we get the correct answers? */ - printf("source_in = %s\n", source_in); if (strcmp(title, title_in) || strcmp(history, history_in) || strcmp(source_in, expected_source)) - return ERR_WRONG; + ERR(ERR_WRONG); if (ndims_in != NDIM1 || num_tasks_in != TARGET_NTASKS || max_maplen_in != 1 || fortran_order_in) - return ERR_WRONG; + ERR(ERR_WRONG); for (int d = 0; d < ndims_in; d++) if (global_dimlen_in[d] != DIM_LEN) - return ERR_WRONG; + ERR(ERR_WRONG); for (int t = 0; t < num_tasks_in; t++) if (task_maplen_in[t] != 1) - return ERR_WRONG; + ERR(ERR_WRONG); for (int t = 0; t < num_tasks_in; t++) for (int l = 0; l < max_maplen_in; l++) if (map_in[t * max_maplen_in + l] != t) - return ERR_WRONG; + ERR(ERR_WRONG); /* Free resources. */ free(global_dimlen_in); @@ -1942,7 +2225,7 @@ int test_decomp_public(int my_test_size, int my_rank, int iosysid, int dim_len, /* /\* These should also work. 
*\/ */ /* if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, title, history, 0))) */ - /* return ret; */ + /* ERR(ret); */ /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -1964,11 +2247,11 @@ int test_decomp_public_2(int my_test_size, int my_rank, int iosysid, int dim_len /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) - return ret; + ERR(ret); /* Write a netCDF decomp file for this iosystem. */ if ((ret = PIOc_write_nc_decomp(iosysid, nc_filename, 0, ioid, NULL, NULL, 0))) - return ret; + ERR(ret); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -1990,7 +2273,7 @@ int test_decomp_2(int my_test_size, int my_rank, int iosysid, int dim_len, /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, dim_len, &ioid))) - return ret; + ERR(ret); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -2013,7 +2296,7 @@ int test_decomp_public_async(int my_test_size, int my_rank, int iosysid, MPI_Com int ret; sprintf(filename, "async_decomp_%s_rank_%d_async_%d.nc", TEST_NAME, my_rank, async); - + /* Create the PIO decomposition for this test. */ if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, NDIM1, &dim_len, elements_per_pe, compdof, &ioid, PIO_REARR_BOX, NULL, NULL))) @@ -2021,7 +2304,7 @@ int test_decomp_public_async(int my_test_size, int my_rank, int iosysid, MPI_Com /* Write the decomp file (on appropriate tasks). */ if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0))) - return ret; + ERR(ret); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -2051,55 +2334,60 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te * async. 
*/ if (async) if ((ret = test_decomp_public_async(my_test_size, my_rank, iosysid, test_comm, async))) - return ret; - + ERR(ret); + /* Check iotypes. */ - printf("%d Testing iotypes. async = %d\n", my_rank, async); if ((ret = test_iotypes(my_rank))) ERR(ret); /* Test file deletes. */ - printf("%d Testing deletefile. async = %d\n", my_rank, async); if ((ret = test_deletefile(iosysid, num_flavors, flavor, my_rank))) - return ret; + ERR(ret); /* Test file stuff. */ - printf("%d Testing file creation. async = %d\n", my_rank, async); if ((ret = test_files(iosysid, num_flavors, flavor, my_rank))) - return ret; + ERR(ret); /* Test some misc stuff. */ if ((ret = test_malloc_iodesc2(iosysid, my_rank))) - return ret; + ERR(ret); + + /* Test internal function find_var_fillvalue(). */ + if ((ret = test_find_var_fillvalue(iosysid, num_flavors, flavor, my_rank, async))) + ERR(ret); - /* Test decomposition internal functions. */ + /* Run these tests for non-async cases only. */ if (!async) + { + + /* Test empty file stuff. */ + if ((ret = test_empty_files(iosysid, num_flavors, flavor, my_rank))) + ERR(ret); + + /* Test decomposition internal functions. */ if ((ret = test_decomp_internal(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) - return ret; - /* Test decomposition public API functions. */ - if (!async) + ERR(ret); + + /* Test decomposition public API functions. */ if ((ret = test_decomp_public(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) - return ret; + ERR(ret); - /* This is a simple test that just creates a decomp. */ - /* if ((ret = test_decomp_2(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) */ - /* return ret; */ + /* This is a simple test that just creates a decomp. */ + if ((ret = test_decomp_2(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) + ERR(ret); - /* This is a simple test that just writes the decomp. */ - if (!async) + /* This is a simple test that just writes the decomp. 
*/ if ((ret = test_decomp_public_2(my_test_size, my_rank, iosysid, DIM_LEN, test_comm, async))) - return ret; + ERR(ret); - /* Decompose the data over the tasks. */ - if (!async) - { + /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, DIM_LEN, &ioid))) - return ret; + ERR(ret); /* Run the darray tests. */ for (int fv = 0; fv < 2; fv++) if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, fv))) - return ret; + ERR(ret); /* Free the PIO decomposition. */ if ((ret = PIOc_freedecomp(iosysid, ioid))) @@ -2107,24 +2395,20 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te } /* Check the error string function. */ - printf("%d Testing streror. async = %d\n", my_rank, async); if ((ret = check_strerror(my_rank))) ERR(ret); /* Test name stuff. */ - printf("%d Testing names. async = %d\n", my_rank, async); if ((ret = test_names(iosysid, num_flavors, flavor, my_rank, test_comm, async))) - return ret; + ERR(ret); /* Test netCDF-4 functions. */ - printf("%d Testing nc4 functions. async = %d\n", my_rank, async); if ((ret = test_nc4(iosysid, num_flavors, flavor, my_rank))) - return ret; - + ERR(ret); + /* Test scalar var. */ - printf("%d Testing scalar var. async = %d\n", my_rank, async); if ((ret = test_scalar(iosysid, num_flavors, flavor, my_rank, async, test_comm))) - return ret; + ERR(ret); return PIO_NOERR; } @@ -2133,6 +2417,6 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te int main(int argc, char **argv) { /* Change the 5th arg to 3 to turn on logging. 
*/ - return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 3, + return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, -1, TEST_NAME, dim_len, COMPONENT_COUNT, NUM_IO_PROCS); } diff --git a/src/externals/pio2/tests/cunit/test_pioc_fill.c b/src/externals/pio2/tests/cunit/test_pioc_fill.c index c699a566be5..52e0571a6eb 100644 --- a/src/externals/pio2/tests/cunit/test_pioc_fill.c +++ b/src/externals/pio2/tests/cunit/test_pioc_fill.c @@ -3,6 +3,7 @@ * * Ed Hartnett */ +#include #include #include #include @@ -47,6 +48,9 @@ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; #define DIM_NAME "SonsOfTheDesert" #define DIM_LEN 1 +/* Test openfile with PIO_WRITE/PIO_NOWRITE. */ +#define NUM_OPEN_MODE_TESTS 2 + /* Some sample data values to write. */ char text[] = "hi"; char char_data = 2; @@ -114,49 +118,46 @@ void init_arrays() /* Use the vara functions to write some data to an open test file. */ int putget_write_vara(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, - int flavor) + int flavor, int my_rank) { int ret; - printf("now writing data\n"); if ((ret = PIOc_put_vara_schar(ncid, varid[0], start, count, (signed char *)byte_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_text(ncid, varid[1], start, count, (char *)text_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_short(ncid, varid[2], start, count, (short *)short_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_int(ncid, varid[3], start, count, (int *)int_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_float(ncid, varid[4], start, count, (float *)float_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_double(ncid, varid[5], start, count, (double *)double_array))) - return ret; + ERR(ret); - printf("now wrote classic data\n"); if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { - printf("now writing netcdf4 data\n"); if ((ret = PIOc_put_vara_uchar(ncid, varid[6], start, count, (unsigned char *)ubyte_array))) - return 
ret; + ERR(ret); if ((ret = PIOc_put_vara_ushort(ncid, varid[7], start, count, (unsigned short *)ushort_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_uint(ncid, varid[8], start, count, (unsigned int *)uint_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_longlong(ncid, varid[9], start, count, (long long *)int64_array))) - return ret; + ERR(ret); if ((ret = PIOc_put_vara_ulonglong(ncid, varid[10], start, count, (unsigned long long *)uint64_array))) - return ret; + ERR(ret); } return 0; } -int check_fill(int ncid, int *varid, int flavor, int default_fill) +int check_fill(int ncid, int *varid, int flavor, int default_fill, int nowrite, int my_rank) { int fill_mode; char char_fill_value_in; @@ -172,76 +173,73 @@ int check_fill(int ncid, int *varid, int flavor, int default_fill) unsigned long long uint64_fill_value_in; int ret; - printf("checking fill values for flavor %d default_fill %d\n", flavor, default_fill); if ((ret = PIOc_inq_var_fill(ncid, varid[0], &fill_mode, &byte_fill_value_in))) - return ret; - printf("byte_fill_value_in = %d\n", (int)byte_fill_value_in); - if (fill_mode != NC_FILL || byte_fill_value_in != (default_fill ? NC_FILL_BYTE : byte_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || byte_fill_value_in != (default_fill ? NC_FILL_BYTE : byte_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[1], &fill_mode, &char_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || char_fill_value_in != (default_fill ? NC_FILL_CHAR : char_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || char_fill_value_in != (default_fill ? NC_FILL_CHAR : char_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[2], &fill_mode, &short_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || short_fill_value_in != (default_fill ? 
NC_FILL_SHORT : short_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || short_fill_value_in != (default_fill ? NC_FILL_SHORT : short_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[3], &fill_mode, &int_fill_value_in))) - return ret; - printf("int_fill_value_in = %d\n", int_fill_value_in); - if (fill_mode != NC_FILL || int_fill_value_in != (default_fill ? NC_FILL_INT : int_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || int_fill_value_in != (default_fill ? NC_FILL_INT : int_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[4], &fill_mode, &float_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || float_fill_value_in != (default_fill ? NC_FILL_FLOAT : float_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || float_fill_value_in != (default_fill ? NC_FILL_FLOAT : float_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[5], &fill_mode, &double_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || double_fill_value_in != (default_fill ? NC_FILL_DOUBLE : double_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || double_fill_value_in != (default_fill ? NC_FILL_DOUBLE : double_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { if ((ret = PIOc_inq_var_fill(ncid, varid[6], &fill_mode, &ubyte_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || ubyte_fill_value_in != (default_fill ? NC_FILL_UBYTE : ubyte_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || ubyte_fill_value_in != (default_fill ? NC_FILL_UBYTE : ubyte_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[7], &fill_mode, &ushort_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || ushort_fill_value_in != (default_fill ? 
NC_FILL_USHORT : ushort_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || ushort_fill_value_in != (default_fill ? NC_FILL_USHORT : ushort_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[8], &fill_mode, &uint_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || uint_fill_value_in != (default_fill ? NC_FILL_UINT : uint_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || uint_fill_value_in != (default_fill ? NC_FILL_UINT : uint_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[9], &fill_mode, &int64_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || int64_fill_value_in != (default_fill ? NC_FILL_INT64 : int64_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || int64_fill_value_in != (default_fill ? NC_FILL_INT64 : int64_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; if ((ret = PIOc_inq_var_fill(ncid, varid[10], &fill_mode, &uint64_fill_value_in))) - return ret; - if (fill_mode != NC_FILL || uint64_fill_value_in != (default_fill ? NC_FILL_UINT64 : uint64_fill_value)) - return ERR_WRONG; + ERR(ret); + if (fill_mode != PIO_FILL || uint64_fill_value_in != (default_fill ? NC_FILL_UINT64 : uint64_fill_value)) + ERR(ERR_WRONG); fill_mode = -99; } @@ -250,7 +248,7 @@ int check_fill(int ncid, int *varid, int flavor, int default_fill) /* Use the vara functions to read some data from an open test file. */ int putget_read_vara(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, - int default_fill, int flavor) + int default_fill, int flavor, int nowrite, int my_rank) { signed char byte_array_in[X_DIM_LEN/2][Y_DIM_LEN]; char text_array_in[X_DIM_LEN/2][Y_DIM_LEN]; @@ -268,68 +266,68 @@ int putget_read_vara(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, /* Read the data we wrote. 
*/ if ((ret = PIOc_get_vara_schar(ncid, varid[0], start, count, (signed char *)byte_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_text(ncid, varid[1], start, count, (char *)text_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_short(ncid, varid[2], start, count, (short *)short_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_int(ncid, varid[3], start, count, (int *)int_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_float(ncid, varid[4], start, count, (float *)float_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_double(ncid, varid[5], start, count, (double *)double_array_in))) - return ret; + ERR(ret); for (x = 0; x < X_DIM_LEN/2; x++) { if (strncmp(text_array_in[x], text, strlen(text))) - return ERR_WRONG; + ERR(ERR_WRONG); for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != byte_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (short_array_in[x][y] != short_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (int_array_in[x][y] != int_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (float_array_in[x][y] != float_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (double_array_in[x][y] != double_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); } } if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { if ((ret = PIOc_get_vara_uchar(ncid, varid[6], start, count, (unsigned char *)ubyte_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_ushort(ncid, varid[7], start, count, (unsigned short *)ushort_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_uint(ncid, varid[8], start, count, (unsigned int *)uint_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_longlong(ncid, varid[9], start, count, (long long *)int64_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_ulonglong(ncid, varid[10], start, count, (unsigned long long *)uint64_array_in))) - return ret; + ERR(ret); for (x = 0; x < X_DIM_LEN/2; x++) for 
(y = 0; y < Y_DIM_LEN; y++) { if (ubyte_array_in[x][y] != ubyte_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (ushort_array_in[x][y] != ushort_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (uint_array_in[x][y] != uint_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (int64_array_in[x][y] != int64_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); if (uint64_array_in[x][y] != uint64_array[x][y]) - return ERR_WRONG; + ERR(ERR_WRONG); } } /* Check some fill value stuff. */ - if ((ret = check_fill(ncid, varid, flavor, default_fill))) - return ret; + if ((ret = check_fill(ncid, varid, flavor, default_fill, nowrite, my_rank))) + ERR(ret); return 0; } @@ -337,7 +335,7 @@ int putget_read_vara(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, /* Use the vara functions to read some data from an open test * file. Expect only fill data. */ int putget_read_vara_fill(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, - int default_fill, int flavor) + int default_fill, int flavor, int my_rank) { signed char byte_array_in[X_DIM_LEN/2][Y_DIM_LEN]; char text_array_in[X_DIM_LEN/2][Y_DIM_LEN]; @@ -355,62 +353,62 @@ int putget_read_vara_fill(int ncid, int *varid, PIO_Offset *start, PIO_Offset *c /* Read the data we wrote. 
*/ if ((ret = PIOc_get_vara_schar(ncid, varid[0], start, count, (signed char *)byte_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_text(ncid, varid[1], start, count, (char *)text_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_short(ncid, varid[2], start, count, (short *)short_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_int(ncid, varid[3], start, count, (int *)int_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_float(ncid, varid[4], start, count, (float *)float_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_double(ncid, varid[5], start, count, (double *)double_array_in))) - return ret; + ERR(ret); for (x = 0; x < X_DIM_LEN/2; x++) { for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != (default_fill ? NC_FILL_BYTE : byte_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (text_array_in[x][y] != (default_fill ? NC_FILL_CHAR : char_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (short_array_in[x][y] != (default_fill ? NC_FILL_SHORT : short_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (int_array_in[x][y] != (default_fill ? NC_FILL_INT : int_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (float_array_in[x][y] != (default_fill ? NC_FILL_FLOAT : float_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (double_array_in[x][y] != (default_fill ? 
NC_FILL_DOUBLE : double_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); } } if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { if ((ret = PIOc_get_vara_uchar(ncid, varid[6], start, count, (unsigned char *)ubyte_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_ushort(ncid, varid[7], start, count, (unsigned short *)ushort_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_uint(ncid, varid[8], start, count, (unsigned int *)uint_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_longlong(ncid, varid[9], start, count, (long long *)int64_array_in))) - return ret; + ERR(ret); if ((ret = PIOc_get_vara_ulonglong(ncid, varid[10], start, count, (unsigned long long *)uint64_array_in))) - return ret; + ERR(ret); for (x = 0; x < X_DIM_LEN/2; x++) for (y = 0; y < Y_DIM_LEN; y++) { if (ubyte_array_in[x][y] != (default_fill ? NC_FILL_UBYTE : ubyte_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (ushort_array_in[x][y] != (default_fill ? NC_FILL_USHORT : ushort_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (uint_array_in[x][y] != (default_fill ? NC_FILL_UINT : uint_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (int64_array_in[x][y] != (default_fill ? NC_FILL_INT64 : int64_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); if (uint64_array_in[x][y] != (default_fill ? NC_FILL_UINT64 : uint64_fill_value)) - return ERR_WRONG; + ERR(ERR_WRONG); } } @@ -433,35 +431,34 @@ int putget_read_vara_fill(int ncid, int *varid, PIO_Offset *start, PIO_Offset *c * @returns 0 for success, error code otherwise. */ int create_putget_file(int iosysid, int flavor, int *dim_len, int *varid, const char *filename, - int default_fill, int *ncidp) + int default_fill, int *ncidp, int my_rank) { int dimids[NDIM]; /* The dimension IDs. 
*/ int num_vars = NUM_CLASSIC_TYPES; int xtype[NUM_NETCDF_TYPES] = {PIO_BYTE, PIO_CHAR, PIO_SHORT, PIO_INT, PIO_FLOAT, PIO_DOUBLE, - PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; + PIO_UBYTE, PIO_USHORT, PIO_UINT, PIO_INT64, PIO_UINT64, PIO_STRING}; int ncid; int old_mode; int ret; /* Create the netCDF output file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &flavor, filename, PIO_CLOBBER))) - return ret; + ERR(ret); /* This should not work. */ if (PIOc_set_fill(ncid + TEST_VAL_42, NC_FILL, &old_mode) != PIO_EBADID) - return ret; + ERR(ret); /* Turn on fill mode. */ if ((ret = PIOc_set_fill(ncid, NC_FILL, &old_mode))) - return ret; - printf("old_mode = %d\n", old_mode); + ERR(ret); if (old_mode != NC_NOFILL) - return ERR_WRONG; + ERR(ERR_WRONG); /* Define netCDF dimensions and variable. */ for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) - return ret; + ERR(ret); /* For netcdf-4, there are extra types. */ if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) @@ -472,43 +469,42 @@ int create_putget_file(int iosysid, int flavor, int *dim_len, int *varid, const { char var_name[PIO_MAX_NAME + 1]; snprintf(var_name, PIO_MAX_NAME, "%s_%d", VAR_NAME, xtype[v]); - printf("defining var %s\n", var_name); if ((ret = PIOc_def_var(ncid, var_name, xtype[v], NDIM, dimids, &varid[v]))) - return ret; + ERR(ret); } /* Maybe set fill values. 
*/ if (!default_fill) { if ((ret = PIOc_def_var_fill(ncid, varid[0], NC_FILL, &byte_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[1], NC_FILL, &char_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[2], NC_FILL, &short_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[3], NC_FILL, &int_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[4], NC_FILL, &float_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[5], NC_FILL, &double_fill_value))) - return ret; + ERR(ret); if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) { if ((ret = PIOc_def_var_fill(ncid, varid[6], NC_FILL, &ubyte_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[7], NC_FILL, &ushort_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[8], NC_FILL, &uint_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[9], NC_FILL, &int64_fill_value))) - return ret; + ERR(ret); if ((ret = PIOc_def_var_fill(ncid, varid[10], NC_FILL, &uint64_fill_value))) - return ret; + ERR(ret); } } if ((ret = PIOc_enddef(ncid))) - return ret; + ERR(ret); /* Pass back the ncid. */ *ncidp = ncid; @@ -535,8 +531,6 @@ int test_fill(int iosysid, int num_flavors, int *flavor, int my_rank, { int dim_len[NDIM] = {NUM_TIMESTEPS, X_DIM_LEN, Y_DIM_LEN}; - printf("running tests for %d flavors\n", num_flavors); - /* Test with and without default fill values. */ for (int default_fill = 0; default_fill < 2; default_fill++) { @@ -552,56 +546,56 @@ int test_fill(int iosysid, int num_flavors, int *flavor, int my_rank, /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); snprintf(filename, PIO_MAX_NAME, "%s_default_fill_%d_%s.nc", TEST_NAME, default_fill, iotype_name); /* Create test file with dims and vars defined. 
*/ - printf("%d creating test file %s for flavor = %d...\n", my_rank, filename, flavor[fmt]); if ((ret = create_putget_file(iosysid, flavor[fmt], dim_len, varid, filename, - default_fill, &ncid))) - return ret; - printf("created file %s\n", filename); + default_fill, &ncid, my_rank))) + ERR(ret); /* Write some data. */ PIO_Offset start[NDIM] = {1, 0, 0}; PIO_Offset count[NDIM] = {1, X_DIM_LEN/2, Y_DIM_LEN}; - printf("writing data to %s\n", filename); /* Use the no-type vara functions to write some data. */ - if ((ret = putget_write_vara(ncid, varid, start, count, flavor[fmt]))) - return ret; + if ((ret = putget_write_vara(ncid, varid, start, count, flavor[fmt], my_rank))) + ERR(ret); - printf("wrote data to %s\n", filename); /* Make sure all data are written (pnetcdf needs this). */ if ((ret = PIOc_sync(ncid))) - return ret; + ERR(ret); /* Use the vara functions to read some data. */ - if ((ret = putget_read_vara(ncid, varid, start, count, default_fill, flavor[fmt]))) - return ret; + if ((ret = putget_read_vara(ncid, varid, start, count, default_fill, flavor[fmt], 0, my_rank))) + ERR(ret); /* Close the netCDF file. */ if ((ret = PIOc_closefile(ncid))) ERR(ret); /* Access to read it. */ - printf("about to try to open file %s\n", filename); - if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_WRITE))) - ERR(ret); + for (int omt = 0; omt < NUM_OPEN_MODE_TESTS; omt++) + { + int omode = omt ? PIO_NOWRITE : PIO_WRITE; - /* Use the vara functions to read some data. */ - if ((ret = putget_read_vara(ncid, varid, start, count, default_fill, flavor[fmt]))) - return ret; + if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, omode))) + ERR(ret); - /* Use the vara functions to read some data which are just fill values. */ - start[0] = 0; - if ((ret = putget_read_vara_fill(ncid, varid, start, count, default_fill, flavor[fmt]))) - return ret; + /* Use the vara functions to read some data. 
*/ + start[0] = 1; + if ((ret = putget_read_vara(ncid, varid, start, count, default_fill, flavor[fmt], omt, my_rank))) + ERR(ret); - /* Close the netCDF file. */ - if ((ret = PIOc_closefile(ncid))) - ERR(ret); + /* Use the vara functions to read some data which are just fill values. */ + start[0] = 0; + if ((ret = putget_read_vara_fill(ncid, varid, start, count, default_fill, flavor[fmt], my_rank))) + ERR(ret); + /* Close the netCDF file. */ + if ((ret = PIOc_closefile(ncid))) + ERR(ret); + } /* next open mode test */ } /* next flavor */ } @@ -629,8 +623,6 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, #define NUM_TYPES_TO_TEST 2 int xtype[NUM_TYPES_TO_TEST] = {PIO_INT, PIO_FLOAT}; - printf("test_fill_mode async = %d\n", async); - /* Test with and without default fill values. */ for (int default_fill = 0; default_fill < 2; default_fill++) { @@ -660,36 +652,36 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) - return ret; + ERR(ret); snprintf(filename, PIO_MAX_NAME, "%s_fill_mode_async_%d_default_fill_%d_extra_var_%d_%s.nc", TEST_NAME, async, default_fill, extra_var, iotype_name); /* Create the test file. */ if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER))) - return ret; + ERR(ret); /* Turn on fill mode. */ if ((ret = PIOc_set_fill(ncid, NC_FILL, NULL))) - return ret; + ERR(ret); /* Define a dimension. */ if ((ret = PIOc_def_dim(ncid, DIM_NAME, DIM_LEN, &dimid))) - return ret; + ERR(ret); /* Define a variable. */ if ((ret = PIOc_def_var(ncid, VAR_NAME, xtype[t], 1, &dimid, &varid))) - return ret; + ERR(ret); /* Do we want an extra variable? */ if (extra_var) if ((ret = PIOc_def_var(ncid, VAR_NAME_2, xtype[t], 1, &dimid, &varid2))) - return ret; + ERR(ret); /* Find the size of our type. 
*/ if ((ret = PIOc_inq_type(ncid, xtype[t], NULL, &type_size))) - return ret; + ERR(ret); if ((xtype[t] == PIO_INT || xtype[t] == PIO_FLOAT) && type_size != 4) - return ERR_WRONG; + ERR(ERR_WRONG); /* Determine fill value and extra data, depending on type. */ switch (xtype[t]) @@ -703,29 +695,28 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, extra_data = &extra_data_float; break; default: - return ERR_WRONG; + ERR(ERR_WRONG); } - + /* If not using a default fill value, set one. */ if (!default_fill) if ((ret = PIOc_def_var_fill(ncid, varid, NC_FILL, fillvalue))) - return ret; + ERR(ret); /* End define mode. */ if ((ret = PIOc_enddef(ncid))) - return ret; + ERR(ret); /* If there is an extra variable, write data to it. */ if (extra_var) if ((ret = PIOc_put_var(ncid, varid2, extra_data))) - return ret; - + ERR(ret); + /* Close the netCDF file. */ if ((ret = PIOc_closefile(ncid))) ERR(ret); /* Access to read it. */ - printf("about to try to open file %s\n", filename); if ((ret = PIOc_openfile(iosysid, &ncid, &flavor[fmt], filename, PIO_WRITE))) ERR(ret); @@ -736,11 +727,11 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, if (!(fillvalue_in = malloc(type_size))) return PIO_ENOMEM; if ((ret = PIOc_inq_var_fill(ncid, varid, &fill_mode_in, fillvalue_in))) - return ret; + ERR(ret); if (fill_mode_in != NC_FILL) - return ERR_WRONG; + ERR(ERR_WRONG); if (memcmp(fillvalue_in, fillvalue, type_size)) - return ERR_WRONG; + ERR(ERR_WRONG); free(fillvalue_in); /* Allocate space to read one element of data. */ @@ -752,9 +743,9 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, * the var we didn't write to. We should get a * fill value. */ if ((ret = PIOc_get_var(ncid, varid, data_in))) - return ret; + ERR(ret); if (memcmp(data_in, fillvalue, type_size)) - return ERR_WRONG; + ERR(ERR_WRONG); /* Use the vara functions to read 1 datum from the * var we did write to. 
We should get the value we @@ -762,9 +753,9 @@ int test_fill_mode(int iosysid, int num_flavors, int *flavor, int my_rank, if (extra_var) { if ((ret = PIOc_get_var(ncid, varid2, data_in))) - return ret; + ERR(ret); if (memcmp(data_in, extra_data, type_size)) - return ERR_WRONG; + ERR(ERR_WRONG); } /* Free memory. */ @@ -812,7 +803,7 @@ int main(int argc, char **argv) init_arrays(); /* Change the 5th arg to 3 to turn on logging. */ - if ((ret = run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 0, + if ((ret = run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, -1, TEST_NAME, dim_len, COMPONENT_COUNT, NUM_IO_PROCS))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_pioc_putget.c b/src/externals/pio2/tests/cunit/test_pioc_putget.c index 02e9177be54..15452eb99c4 100644 --- a/src/externals/pio2/tests/cunit/test_pioc_putget.c +++ b/src/externals/pio2/tests/cunit/test_pioc_putget.c @@ -1,8 +1,9 @@ /* * Tests for PIO data reading and writing routines. * - * Ed Hartnett + * @author Ed Hartnett */ +#include #include #include #include @@ -163,8 +164,6 @@ int test_att_conv_byte(int ncid, int flavor, char *name, int *expected, long lon unsigned long long uint64_array_in[ATT_LEN]; /* Read the att and check results. */ - printf("expecting %d got %d\n", expected[PIO_BYTE], PIOc_get_att_schar(ncid, NC_GLOBAL, name, byte_array_in)); - if (expected[PIO_BYTE] != PIOc_get_att_schar(ncid, NC_GLOBAL, name, byte_array_in)) return ERR_WRONG; @@ -325,9 +324,6 @@ int test_atts_byte(int iosysid, int num_flavors, int *flavor, int my_rank, int ret; /* Return code. */ /* Create test file with dims and vars defined. */ - printf("%d creating test file for flavor = %d...\n", my_rank, flavor[fmt]); - - /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) return ret; sprintf(filename, "%s_att_byte_%s.nc", TEST_NAME, iotype_name); @@ -388,7 +384,7 @@ int test_atts_byte(int iosysid, int num_flavors, int *flavor, int my_rank, ERR(ret); /* Reopen the file. 
*/ - if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) ERR(ret); /* Test the attribute conversions. */ @@ -443,9 +439,6 @@ int test_atts_int64(int iosysid, int num_flavors, int *flavor, int my_rank, if (flavor[fmt] != PIO_IOTYPE_NETCDF4C && flavor[fmt] != PIO_IOTYPE_NETCDF4P) continue; - /* Create test file with dims and vars defined. */ - printf("%d creating test file for flavor = %d...\n", my_rank, flavor[fmt]); - /* Create a filename. */ if ((ret = get_iotype_name(flavor[fmt], iotype_name))) return ret; @@ -493,7 +486,7 @@ int test_atts_int64(int iosysid, int num_flavors, int *flavor, int my_rank, ERR(ret); /* Reopen the file. */ - if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) ERR(ret); if ((ret = test_att_conv_int64(ncid, flavor[fmt], SCHAR_ATT_NAME, int64_expected, expected_data))) @@ -704,7 +697,7 @@ int putget_write_var_nt(int ncid, int *varid, int flavor) * @param varid an array of varids in the file. * @param flavor the PIO IO type of the test file. * @returns 0 for success, error code otherwise. 
-*/ + */ int test_write_atts(int ncid, int *varid, int flavor) { int ret; @@ -732,7 +725,7 @@ int test_write_atts(int ncid, int *varid, int flavor) return ret; if ((ret = PIOc_put_att_int(ncid, varid[3], INT_ATT_NAME, PIO_INT, - ATT_LEN, (int *)int_array))) + ATT_LEN, (int *)int_array))) return ret; if ((ret = PIOc_put_att_long(ncid, varid[4], LONG_ATT_NAME, PIO_INT, @@ -744,7 +737,7 @@ int test_write_atts(int ncid, int *varid, int flavor) return ret; if ((ret = PIOc_put_att_double(ncid, varid[6], DOUBLE_ATT_NAME, PIO_DOUBLE, - ATT_LEN, (double *)double_array))) + ATT_LEN, (double *)double_array))) return ret; if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) @@ -1066,7 +1059,6 @@ int putget_read_var1(int ncid, int *varid, PIO_Offset *index, int flavor) memset(text_data_in, 0, sizeof(text_data_in)); if ((ret = PIOc_get_var1_text(ncid, varid[1], index, text_data_in))) return ret; - printf("text_data_in = %s\n", text_data_in); if (strncmp(text_data_in, "h", 1)) return ERR_WRONG; @@ -1155,7 +1147,6 @@ int putget_read_var1_nt(int ncid, int *varid, PIO_Offset *index, int flavor) memset(text_data_in, 0, sizeof(text_data_in)); if ((ret = PIOc_get_var1(ncid, varid[1], index, text_data_in))) return ret; - printf("text_data_in = %s\n", text_data_in); if (strncmp(text_data_in, "h", 1)) return ERR_WRONG; @@ -1218,7 +1209,7 @@ int putget_read_var1_nt(int ncid, int *varid, PIO_Offset *index, int flavor) * @param unlim non-zero if unlimited dimension is in use. * @param flavor the PIO IO type of the test file. * @returns 0 for success, error code otherwise. 
-*/ + */ int putget_read_var(int ncid, int *varid, int unlim, int flavor) { signed char byte_array_in[X_DIM_LEN][Y_DIM_LEN]; @@ -1261,7 +1252,7 @@ int putget_read_var(int ncid, int *varid, int unlim, int flavor) for (x = 0; x < X_DIM_LEN; x++) { if (strncmp(text_array_in[x], text, strlen(text))) - return ERR_WRONG; + return ERR_WRONG; for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != byte_array[x][y]) @@ -1317,7 +1308,7 @@ int putget_read_var(int ncid, int *varid, int unlim, int flavor) * @param unlim non-zero if unlimited dimension is in use. * @param flavor the PIO IO type of the test file. * @returns 0 for success, error code otherwise. -*/ + */ int putget_read_var_nt(int ncid, int *varid, int unlim, int flavor) { signed char byte_array_in[X_DIM_LEN][Y_DIM_LEN]; @@ -1360,7 +1351,7 @@ int putget_read_var_nt(int ncid, int *varid, int unlim, int flavor) for (x = 0; x < X_DIM_LEN; x++) { if (strncmp(text_array_in[x], text, strlen(text))) - return ERR_WRONG; + return ERR_WRONG; for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != byte_array[x][y]) @@ -1446,7 +1437,7 @@ int putget_read_vara(int ncid, int *varid, PIO_Offset *start, PIO_Offset *count, for (x = 0; x < X_DIM_LEN; x++) { if (strncmp(text_array_in[x], text, strlen(text))) - return ERR_WRONG; + return ERR_WRONG; for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != byte_array[x][y]) @@ -1620,7 +1611,7 @@ int putget_read_vara_nt(int ncid, int *varid, PIO_Offset *start, PIO_Offset *cou for (x = 0; x < X_DIM_LEN; x++) { if (strncmp(text_array_in[x], text, strlen(text))) - return ERR_WRONG; + return ERR_WRONG; for (y = 0; y < Y_DIM_LEN; y++) { if (byte_array_in[x][y] != byte_array[x][y]) @@ -1795,7 +1786,6 @@ int create_putget_file(int iosysid, int access, int unlim, int flavor, int *dim_ if (!unlim) dim_len[0] = NUM_TIMESTEPS; - printf("filename = %s\n", filename); /* Define netCDF dimensions and variable. 
*/ for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) @@ -1805,13 +1795,11 @@ int create_putget_file(int iosysid, int access, int unlim, int flavor, int *dim_ if (flavor == PIO_IOTYPE_NETCDF4C || flavor == PIO_IOTYPE_NETCDF4P) num_vars = NUM_NETCDF4_TYPES + 1; - printf("filename = %s\n", filename); /* Define variables. */ for (int v = 0; v < num_vars; v++) { char var_name[PIO_MAX_NAME + 1]; snprintf(var_name, PIO_MAX_NAME, "%s_%d", VAR_NAME, xtype[v]); - printf("defining var %s\n", var_name); /*nc_type my_type = xtype[v] == PIO_LONG_INTERNAL ? PIO_INT : xtype[v];*/ nc_type my_type; if (xtype[v] == PIO_LONG_INTERNAL) @@ -1822,7 +1810,6 @@ int create_putget_file(int iosysid, int access, int unlim, int flavor, int *dim_ return ret; } - printf("filename = %s\n", filename); /* For the first access, also test attributes. */ if (access == 0) if ((ret = test_write_atts(ncid, varid, flavor))) @@ -1842,7 +1829,7 @@ int check_file(int access, int ncid, int *varid, int flavor, PIO_Offset *index, PIO_Offset *count, PIO_Offset *stride, int unlim) { int ret; - + switch (access) { case 0: @@ -1940,12 +1927,9 @@ int test_putget(int iosysid, int num_flavors, int *flavor, int my_rank, access, unlim, iotype_name); /* Create test file with dims and vars defined. */ - printf("%d Access %d creating test file %s for flavor = %d...\n", my_rank, access, - filename, flavor[fmt]); if ((ret = create_putget_file(iosysid, access, unlim, flavor[fmt], dim_len, varid, filename, &ncid))) return ret; - printf("created file %s\n", filename); /* Write some data. */ PIO_Offset index[NDIM] = {0, 0, 0}; @@ -1956,8 +1940,6 @@ int test_putget(int iosysid, int num_flavors, int *flavor, int my_rank, switch (access) { case 0: - printf("%d Access %d writing data with var functions for flavor = %d...\n", - my_rank, access, flavor[fmt]); /* Use the var functions to write some data. 
*/ if ((ret = putget_write_var(ncid, varid, flavor[fmt]))) return ret; @@ -2024,8 +2006,7 @@ int test_putget(int iosysid, int num_flavors, int *flavor, int my_rank, ERR(ret); /* /\* Access to read it. *\/ */ - printf("about to try to open file %s\n", filename); - if ((ret = PIOc_openfile(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) + if ((ret = PIOc_openfile2(iosysid, &ncid, &(flavor[fmt]), filename, PIO_NOWRITE))) ERR(ret); /* Check contents of the file. */ @@ -2053,16 +2034,13 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te MPIERR(ret); /* Test attribute stuff. */ - printf("%d Testing attributes with NC_BYTE data, async = %d\n", my_rank, async); if ((ret = test_atts_byte(iosysid, num_flavors, flavor, my_rank, test_comm))) return ret; - printf("%d Testing attributes with NC_INT64 data, async = %d\n", my_rank, async); if ((ret = test_atts_int64(iosysid, num_flavors, flavor, my_rank, test_comm))) return ret; /* Test read/write stuff. */ - printf("%d Testing putget. async = %d\n", my_rank, async); if ((ret = test_putget(iosysid, num_flavors, flavor, my_rank, test_comm))) return ret; @@ -2075,7 +2053,7 @@ int main(int argc, char **argv) /* Initialize data arrays with sample data. */ init_arrays(); - return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 0, + return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, -1, TEST_NAME, dim_len, COMPONENT_COUNT, NUM_IO_PROCS); return 0; diff --git a/src/externals/pio2/tests/cunit/test_pioc_unlim.c b/src/externals/pio2/tests/cunit/test_pioc_unlim.c index 3b4a4df347c..725132cdfce 100644 --- a/src/externals/pio2/tests/cunit/test_pioc_unlim.c +++ b/src/externals/pio2/tests/cunit/test_pioc_unlim.c @@ -3,8 +3,10 @@ * with an unlimited dimension. The data will have two timesteps, and * 4x4 elements each timestep. 
* - * Ed Hartnett, 2/14/17 + * @author Ed Hartnett + * @date 2/14/17 */ +#include #include #include #include @@ -47,6 +49,10 @@ char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y"}; /* Length of the dimensions in the sample data. */ int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN}; +#define NETCDF4_UNLIM_FILE_NAME "netcdf4_unlim_file.nc" +#define DIM_NAME1 "dim1" +#define DIM_NAME2 "dim2" + /* Create the decomposition to divide the 3-dimensional sample data * between the 4 tasks. * @@ -76,13 +82,10 @@ int create_decomposition(int ntasks, int my_rank, int iosysid, int dim1_len, compdof[i] = my_rank * elements_per_pe + i + 1; /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition elements_per_pe = %lld\n", my_rank, elements_per_pe); if ((ret = PIOc_InitDecomp(iosysid, PIO_FLOAT, NDIM - 1, &dim_len[1], elements_per_pe, compdof, ioid, NULL, NULL, NULL))) ERR(ret); - printf("%d decomposition initialized.\n", my_rank); - /* Free the mapping. */ free(compdof); @@ -101,13 +104,10 @@ int create_test_file(int iosysid, int ioid, int iotype, int my_rank, int *ncid, sprintf(filename, "%s_iotype_%d.nc", TEST_NAME, iotype); /* Create the netCDF output file. */ - printf("rank: %d Creating sample file %s with format %d...\n", my_rank, filename, - iotype); if ((ret = PIOc_createfile(iosysid, ncid, &iotype, filename, PIO_CLOBBER))) ERR(ret); /* Define netCDF dimensions and variable. */ - printf("rank: %d Defining netCDF metadata...\n", my_rank); for (int d = 0; d < NDIM; d++) if ((ret = PIOc_def_dim(*ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d]))) ERR(ret); @@ -123,6 +123,111 @@ int create_test_file(int iosysid, int ioid, int iotype, int my_rank, int *ncid, return PIO_NOERR; } +/* Tests with multiple unlimited dims. Only netcdf-4 IOTYPES support + * multiple unlimited dims. 
*/ +int run_multiple_unlim_test(int iosysid, int ioid, int iotype, int my_rank, + MPI_Comm test_comm) +{ +#define UDIM1_NAME "unlimited1" +#define UDIM2_NAME "unlimited2" +#define NUM_UNLIM_DIMS 2 + int ncid; + char filename[PIO_MAX_NAME + 1]; + int dimid[NUM_UNLIM_DIMS]; + int nunlimdims; + int unlimdimids[NUM_UNLIM_DIMS]; + int ret; + + /* Create filename. */ + sprintf(filename, "%s_multiple_unlim_dim_%d.nc", TEST_NAME, iotype); + + /* Create a test file. */ + if ((ret = PIOc_createfile(iosysid, &ncid, &iotype, filename, PIO_CLOBBER))) + ERR(ret); + + /* Add unlimited dimension. */ + if ((ret = PIOc_def_dim(ncid, UDIM1_NAME, NC_UNLIMITED, &dimid[0]))) + ERR(ret); + + /* Add another unlimited dimension. */ + if ((ret = PIOc_def_dim(ncid, UDIM2_NAME, NC_UNLIMITED, &dimid[1]))) + ERR(ret); + + /* Check for correctness. */ + if ((ret = PIOc_inq_unlimdims(ncid, &nunlimdims, unlimdimids))) + ERR(ret); + if (nunlimdims != NUM_UNLIM_DIMS) + ERR(ERR_WRONG); + for (int d = 0; d < NUM_UNLIM_DIMS; d++) + { + if (unlimdimids[d] != dimid[d]) + ERR(ERR_WRONG); + } + + /* Check some more stuff. */ + { + int nunlimdims; + int unlimdimids[NUM_UNLIM_DIMS]; + + /* These should also work. */ + if ((ret = PIOc_inq_unlimdims(ncid, NULL, NULL))) + ERR(ret); + if ((ret = PIOc_inq_unlimdims(ncid, &nunlimdims, NULL))) + ERR(ret); + if (nunlimdims != NUM_UNLIM_DIMS) + ERR(ERR_WRONG); + if ((ret = PIOc_inq_unlimdims(ncid, NULL, unlimdimids))) + ERR(ret); + for (int d = 0; d < NUM_UNLIM_DIMS; d++) + { + if (unlimdimids[d] != dimid[d]) + ERR(ERR_WRONG); + } + } + + /* Now try to add a var with two unlimited dims. It will fail. */ + int varid; + #define VAR_NAME2 "some_dumb_variable_name_def_var_will_fail_anyway" + if (PIOc_def_var(ncid, VAR_NAME2, PIO_INT, NUM_UNLIM_DIMS, unlimdimids, + &varid) != PIO_EINVAL) + ERR(ERR_WRONG); + + /* Close the file. */ + if ((PIOc_closefile(ncid))) + return ret; + + /* Use netCDF-4 directly to create a file that PIO can not create + * or read. 
*/ + if (my_rank == 0) + { + int ncid; + int dimids[NUM_UNLIM_DIMS]; + int varid; + + if ((ret = nc_create(NETCDF4_UNLIM_FILE_NAME, NC_CLOBBER|NC_NETCDF4, &ncid))) + ERR(ret); + + if ((ret = nc_def_dim(ncid, DIM_NAME1, NC_UNLIMITED, &dimids[0]))) + ERR(ret); + if ((ret = nc_def_dim(ncid, DIM_NAME2, NC_UNLIMITED, &dimids[1]))) + ERR(ret); + if ((ret = nc_def_var(ncid, VAR_NAME, PIO_INT, NUM_UNLIM_DIMS, dimids, &varid))) + ERR(ret); + if ((ret = nc_close(ncid))) + ERR(ret); + } + + /* Other tasks wait for task 0 to write file using netCDF-4... */ + MPI_Barrier(test_comm); + + /* Try to read file. It will not work. */ + if (PIOc_openfile2(iosysid, &ncid, &iotype, NETCDF4_UNLIM_FILE_NAME, + 0) != PIO_EINVAL) + ERR(ERR_WRONG); + + return PIO_NOERR; +} + /* Run all the tests. */ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm test_comm, int async) @@ -142,8 +247,6 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te if (!async) { - printf("%d Testing darray. async = %d\n", my_rank, async); - /* Decompose the data over the tasks. */ if ((ret = create_decomposition(my_test_size, my_rank, iosysid, X_DIM_LEN, &ioid))) return ret; @@ -161,14 +264,24 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te /* Look at the internals to check that the frame commands * worked. */ - file_desc_t *file; + file_desc_t *file; + var_desc_t *vdesc; /* Contains info about the variable. */ + if ((ret = pio_get_file(ncid, &file))) return ret; - if (file->varlist[varid].record != 1) + if ((ret = get_var_desc(varid, &file->varlist, &vdesc))) + return ret; + if (vdesc->record != 1) return ERR_WRONG; if ((PIOc_closefile(ncid))) return ret; + + /* Test file with multiple unlimited dims. Only netCDF-4 + * iotypes can run this test. 
*/ + if (flavor[fmt] == PIO_IOTYPE_NETCDF4C || flavor[fmt] == PIO_IOTYPE_NETCDF4P) + if ((ret = run_multiple_unlim_test(iosysid, ioid, flavor[fmt], my_rank, test_comm))) + return ret; } /* Free the PIO decomposition. */ @@ -183,6 +296,6 @@ int test_all(int iosysid, int num_flavors, int *flavor, int my_rank, MPI_Comm te int main(int argc, char **argv) { /* Change the 5th arg to 3 to turn on logging. */ - return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, 3, + return run_test_main(argc, argv, MIN_NTASKS, TARGET_NTASKS, -1, TEST_NAME, dim_len, COMPONENT_COUNT, NUM_IO_PROCS); } diff --git a/src/externals/pio2/tests/cunit/test_rearr.c b/src/externals/pio2/tests/cunit/test_rearr.c index 89e278bfc14..02397af6f8f 100644 --- a/src/externals/pio2/tests/cunit/test_rearr.c +++ b/src/externals/pio2/tests/cunit/test_rearr.c @@ -3,8 +3,10 @@ * to the box and subset rearranger, and the transfer of data betweeen * IO and computation tasks. * - * Ed Hartnett, 3/9/17 + * @author Ed Hartnett + * @date 3/9/17 */ +#include #include #include #include @@ -173,7 +175,6 @@ int test_create_mpi_datatypes() { if ((mpierr = MPI_Type_get_extent(mtype2[t], &lb, &extent))) MPIERR(mpierr); - printf("t = %d lb = %ld extent = %ld\n", t, lb, extent); if (lb != 0 || extent != 4) return ERR_WRONG; } @@ -209,8 +210,6 @@ int test_idx_to_dim_list() /* According to function docs, we should get 2,0 */ idx_to_dim_list(ndims2, gdims2, idx2, dim_list2); - printf("dim_list2[0] = %lld\n", dim_list2[0]); - printf("dim_list2[1] = %lld\n", dim_list2[1]); /* This is the correct result! */ if (dim_list2[0] != 2 || dim_list2[1] != 0) @@ -247,7 +246,6 @@ int test_coord_to_lindex() PIO_Offset lindex3; lindex3 = coord_to_lindex(ndims3, lcoord3, count3); - printf("lindex = %lld\n", lindex3); if (lindex3 != 3) return ERR_WRONG; @@ -349,7 +347,6 @@ int test_compute_maxIObuffersize(MPI_Comm test_comm, int my_rank) /* Run the function. 
*/ if ((ret = compute_maxIObuffersize(test_comm, &iodesc))) return ret; - printf("iodesc.maxiobuflen = %d\n", iodesc.maxiobuflen); if (iodesc.maxiobuflen != 520) return ERR_WRONG; @@ -456,7 +453,6 @@ int test_find_region() regionlen = find_region(ndims, gdimlen, maplen, map, start, count); /* Check results. */ - printf("regionlen = %lld start[0] = %lld count[0] = %lld\n", regionlen, start[0], count[0]); if (regionlen != 1 || start[0] != 0 || count[0] != 1) return ERR_WRONG; @@ -478,7 +474,6 @@ int test_expand_region() expand_region(dim, gdims, maplen, map, region_size, region_stride, max_size, count); if (count[0] != 1) return ERR_WRONG; - printf("max_size[0] = %d count[0] = %lld\n", max_size[0], count[0]); return 0; } @@ -499,10 +494,11 @@ int test_define_iodesc_datatypes() /* Set up test for IO task with BOX rearranger to create one type. */ ios.ioproc = 1; /* this is IO proc. */ + ios.compproc = 1; ios.num_iotasks = 4; /* The number of IO tasks. */ iodesc.rtype = NULL; /* Array of MPI types will be created here. */ iodesc.nrecvs = 1; /* Number of types created. */ - iodesc.basetype = MPI_INT; + iodesc.mpitype = MPI_INT; iodesc.stype = NULL; /* Array of MPI types will be created here. */ /* Allocate space for arrays in iodesc that will be filled in @@ -947,12 +943,12 @@ int test_rearrange_comp2io(MPI_Comm test_comm, int my_rank) ios->num_iotasks = TARGET_NTASKS; ios->num_uniontasks = TARGET_NTASKS; iodesc->rearranger = PIO_REARR_BOX; - iodesc->basetype = MPI_INT; + iodesc->mpitype = MPI_INT; /* Set up test for IO task with BOX rearranger to create one type. */ iodesc->rtype = NULL; /* Array of MPI types will be created here. */ iodesc->nrecvs = 1; /* Number of types created. */ - iodesc->basetype = MPI_INT; + iodesc->mpitype = MPI_INT; iodesc->stype = NULL; /* Array of MPI types will be created here. */ /* The two rearrangers create a different number of send types. 
*/ @@ -998,7 +994,6 @@ int test_rearrange_comp2io(MPI_Comm test_comm, int my_rank) /* Run the function to test. */ if ((ret = rearrange_comp2io(ios, iodesc, sbuf, rbuf, nvars))) return ret; - printf("returned from rearrange_comp2io\n"); /* We created send types, so free them. */ for (int st = 0; st < num_send_types; st++) @@ -1070,14 +1065,14 @@ int test_rearrange_io2comp(MPI_Comm test_comm, int my_rank) ios->union_comm = test_comm; ios->num_iotasks = TARGET_NTASKS; iodesc->rearranger = PIO_REARR_BOX; - iodesc->basetype = MPI_INT; + iodesc->mpitype = MPI_INT; /* Set up test for IO task with BOX rearranger to create one type. */ ios->ioproc = 1; /* this is IO proc. */ ios->num_iotasks = 4; /* The number of IO tasks. */ iodesc->rtype = NULL; /* Array of MPI types will be created here. */ iodesc->nrecvs = 1; /* Number of types created. */ - iodesc->basetype = MPI_INT; + iodesc->mpitype = MPI_INT; iodesc->stype = NULL; /* Array of MPI types will be created here. */ /* The two rearrangers create a different number of send types. */ @@ -1127,7 +1122,6 @@ int test_rearrange_io2comp(MPI_Comm test_comm, int my_rank) /* Run the function to test. */ if ((ret = rearrange_io2comp(ios, iodesc, sbuf, rbuf))) return ret; - printf("returned from rearrange_comp2io\n"); /* We created send types, so free them. 
*/ for (int st = 0; st < num_send_types; st++) @@ -1170,71 +1164,55 @@ int run_no_iosys_tests(int my_rank, MPI_Comm test_comm) { int ret; - printf("%d running idx_to_dim_list tests\n", my_rank); if ((ret = test_idx_to_dim_list())) return ret; - printf("%d running coord_to_lindex tests\n", my_rank); if ((ret = test_coord_to_lindex())) return ret; - printf("%d running compute_maxIObuffersize tests\n", my_rank); if ((ret = test_compute_maxIObuffersize(test_comm, my_rank))) return ret; - printf("%d running determine_fill\n", my_rank); if ((ret = test_determine_fill(test_comm))) return ret; - printf("%d running tests for expand_region()\n", my_rank); if ((ret = test_expand_region())) return ret; - printf("%d running tests for find_region()\n", my_rank); if ((ret = test_find_region())) return ret; - printf("%d running tests for get_regions()\n", my_rank); if ((ret = test_get_regions(my_rank))) return ret; - printf("%d running create_mpi_datatypes tests\n", my_rank); if ((ret = test_create_mpi_datatypes())) return ret; - printf("%d running define_iodesc_datatypes tests\n", my_rank); if ((ret = test_define_iodesc_datatypes())) return ret; - printf("%d running compare_offsets tests\n", my_rank); if ((ret = test_compare_offsets())) return ret; - printf("%d running compute_counts tests for box rearranger\n", my_rank); if ((ret = test_compute_counts(test_comm, my_rank))) return ret; - printf("%d running tests for box_rearrange_create\n", my_rank); if ((ret = test_box_rearrange_create(test_comm, my_rank))) return ret; - printf("%d running more tests for box_rearrange_create\n", my_rank); if ((ret = test_box_rearrange_create_2(test_comm, my_rank))) return ret; - printf("%d running tests for default_subset_partition\n", my_rank); if ((ret = test_default_subset_partition(test_comm, my_rank))) return ret; - printf("%d running tests for rearrange_comp2io\n", my_rank); if ((ret = test_rearrange_comp2io(test_comm, my_rank))) return ret; - printf("%d running tests for 
rearrange_io2comp\n", my_rank); if ((ret = test_rearrange_io2comp(test_comm, my_rank))) return ret; - return 0; + return 0; } /* Test scalar vars. */ @@ -1285,8 +1263,6 @@ int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, int varid; char filename[PIO_MAX_NAME + 1]; - printf("test with t = %d\n", t); - /* These iotypes only handle netCDF classic types. */ if (t >= NUM_CLASSIC_TYPES && (flavor[fmt] == PIO_IOTYPE_PNETCDF || flavor[fmt] == PIO_IOTYPE_NETCDF)) @@ -1335,7 +1311,7 @@ int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, if ((ret = PIOc_put_var_double(ncid, varid, &double_data))) return ret; break; -#ifdef _NETCDF4 +#ifdef _NETCDF4 case PIO_UBYTE: if ((ret = PIOc_put_var_uchar(ncid, varid, &ubyte_data))) return ret; @@ -1356,7 +1332,7 @@ int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, if ((ret = PIOc_put_var_ulonglong(ncid, varid, &uint64_data))) return ret; break; -#endif /* _NETCDF4 */ +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -1408,7 +1384,7 @@ int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, if (double_data_in != double_data) return ERR_WRONG; break; -#ifdef _NETCDF4 +#ifdef _NETCDF4 case PIO_UBYTE: if ((ret = PIOc_get_var_uchar(ncid, varid, &ubyte_data_in))) return ret; @@ -1439,7 +1415,7 @@ int test_scalar(int numio, int iosysid, MPI_Comm test_comm, int my_rank, if (uint64_data_in != uint64_data) return ERR_WRONG; break; -#endif /* _NETCDF4 */ +#endif /* _NETCDF4 */ default: return ERR_WRONG; } @@ -1461,15 +1437,12 @@ int run_iosys_tests(int numio, int iosysid, int my_rank, MPI_Comm test_comm, { int ret; - printf("%d running rearranger opts tests 1\n", my_rank); if ((ret = test_rearranger_opts1(iosysid))) return ret; - printf("%d running test for init_decomp\n", my_rank); if ((ret = test_init_decomp(iosysid, test_comm, my_rank))) return ret; - printf("%d running test for init_decomp\n", my_rank); if ((ret = test_scalar(numio, iosysid, test_comm, 
my_rank, num_flavors, flavor))) return ret; @@ -1488,7 +1461,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - TARGET_NTASKS, 3, &test_comm))) + TARGET_NTASKS, -1, &test_comm))) ERR(ERR_INIT); if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL))) return ret; @@ -1496,7 +1469,6 @@ int main(int argc, char **argv) /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); /* Test code runs on TARGET_NTASKS tasks. The left over tasks do * nothing. */ @@ -1532,7 +1504,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. */ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_shared.c b/src/externals/pio2/tests/cunit/test_shared.c index 0926e0be9fd..9d999700994 100644 --- a/src/externals/pio2/tests/cunit/test_shared.c +++ b/src/externals/pio2/tests/cunit/test_shared.c @@ -3,6 +3,7 @@ * * Ed Hartnett */ +#include #include #include #include @@ -30,14 +31,11 @@ int test_async2(int my_rank, int num_flavors, int *flavor, MPI_Comm test_comm, /* Is the current process a computation task? */ int comp_task = my_rank < num_io_procs ? 0 : 1; - printf("%d comp_task = %d\n", my_rank, comp_task); /* Initialize the IO system. */ if ((ret = PIOc_init_async(test_comm, num_io_procs, NULL, component_count, num_procs, NULL, &io_comm, comp_comm, PIO_REARR_BOX, iosysid))) ERR(ERR_INIT); - for (int c = 0; c < component_count; c++) - printf("%d iosysid[%d] = %d\n", my_rank, c, iosysid[c]); /* All the netCDF calls are only executed on the computation * tasks. 
The IO tasks have not returned from PIOc_Init_Intercomm, @@ -46,16 +44,12 @@ int test_async2(int my_rank, int num_flavors, int *flavor, MPI_Comm test_comm, { for (int c = 0; c < component_count; c++) { - printf("%d Running tests...\n", my_rank); if ((ret = test_all(iosysid[c], num_flavors, flavor, my_rank, comp_comm[0], 1))) return ret; /* Finalize the IO system. Only call this from the computation tasks. */ - printf("%d %s Freeing PIO resources\n", my_rank, test_name); if ((ret = PIOc_finalize(iosysid[c]))) ERR(ret); - printf("%d %s PIOc_finalize completed for iosysid = %d\n", my_rank, test_name, - iosysid[c]); if ((mpierr = MPI_Comm_free(&comp_comm[c]))) MPIERR(mpierr); } @@ -110,19 +104,16 @@ int test_no_async2(int my_rank, int num_flavors, int *flavor, MPI_Comm test_comm compdof[i] = my_rank * elements_per_pe + i; /* Create the PIO decomposition for this test. */ - printf("%d Creating decomposition...\n", my_rank); if ((ret = PIOc_init_decomp(iosysid, PIO_FLOAT, 2, slice_dimlen, (PIO_Offset)elements_per_pe, compdof, &ioid, 0, NULL, NULL))) return ret; free(compdof); /* Run tests. */ - printf("%d Running tests...\n", my_rank); if ((ret = test_all(iosysid, num_flavors, flavor, my_rank, test_comm, 0))) return ret; /* Free the PIO decomposition. */ - printf("%d Freeing PIO decomposition...\n", my_rank); if ((ret = PIOc_freedecomp(iosysid, ioid))) return ret; @@ -174,7 +165,6 @@ int run_test_main(int argc, char **argv, int min_ntasks, int max_ntasks, /* Figure out iotypes. */ if ((ret = get_iotypes(&num_flavors, flavor))) ERR(ret); - printf("Runnings tests for %d flavors\n", num_flavors); /* Run tests without async feature. */ if ((ret = test_no_async2(my_rank, num_flavors, flavor, test_comm, max_ntasks, @@ -189,7 +179,6 @@ int run_test_main(int argc, char **argv, int min_ntasks, int max_ntasks, } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. 
*/ - printf("%d %s Finalizing...\n", my_rank, test_name); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/cunit/test_spmd.c b/src/externals/pio2/tests/cunit/test_spmd.c index 2c51ce2ccab..e7e64664f4e 100644 --- a/src/externals/pio2/tests/cunit/test_spmd.c +++ b/src/externals/pio2/tests/cunit/test_spmd.c @@ -1,9 +1,10 @@ /* * This program tests some internal functions in the PIO library. * - * Jim Edwards - * Ed Hartnett, 11/23/16 + * @author Jim Edwards, Ed Hartnett + * @date 11/23/16 */ +#include #include #include #include @@ -78,23 +79,15 @@ int run_spmd_tests(MPI_Comm test_comm) * with msg_cnt = 1!). */ for (int msg_cnt = 0; msg_cnt < TARGET_NTASKS; msg_cnt = msg_cnt ? msg_cnt * 2 : 4) { - if (!my_rank) - printf("message count %d\n",msg_cnt); - for (int itest = 0; itest < NUM_TEST_CASES; itest++) { - rearr_comm_fc_opt_t fc; + rearr_comm_fc_opt_t fc; fc.hs = false; fc.isend = false; /* Wait for all tasks. */ MPI_Barrier(test_comm); - /* Print results. */ - if (!my_rank) - for (int e = 0; e < num_elem; e++) - printf("sbuf[%d] = %d\n", e, sbuf[e]); - /* Set the parameters different for each test case. */ if (itest == 1) { @@ -140,9 +133,9 @@ int run_spmd_tests(MPI_Comm test_comm) return 0; } -/* Test some of the functions in the file pioc_sc.c. +/* Test some of the functions in the file pioc_sc.c. * - * @param test_comm the MPI communicator that the test code is running on. + * @param test_comm the MPI communicator that the test code is running on. * @returns 0 for success, error code otherwise. */ int run_sc_tests(MPI_Comm test_comm) @@ -151,10 +144,10 @@ int run_sc_tests(MPI_Comm test_comm) int my_rank; /* 0-based rank in test_comm. */ int ntasks; /* Number of tasks in test_comm. */ int mpierr; /* Return value from MPI calls. 
*/ - int array1[SC_ARRAY_LEN] = {7, 42, 14}; - int array2[SC_ARRAY_LEN] = {2, 3, 7}; - int array3[SC_ARRAY_LEN] = {90, 180, 270}; - int array4[SC_ARRAY_LEN] = {1, 180, 270}; + long long larray1[SC_ARRAY_LEN] = {7, 42, 14}; + long long larray2[SC_ARRAY_LEN] = {2, 3, 7}; + long long larray3[SC_ARRAY_LEN] = {90, 180, 270}; + long long larray4[SC_ARRAY_LEN] = {1, 180, 270}; /* Learn rank and size. */ if ((mpierr = MPI_Comm_size(test_comm, &ntasks))) @@ -179,13 +172,13 @@ int run_sc_tests(MPI_Comm test_comm) return ERR_WRONG; /* Test the gcd_array() function. */ - if (gcd_array(SC_ARRAY_LEN, array1) != 7) + if (lgcd_array(SC_ARRAY_LEN, larray1) != 7) return ERR_WRONG; - if (gcd_array(SC_ARRAY_LEN, array2) != 1) + if (lgcd_array(SC_ARRAY_LEN, larray2) != 1) return ERR_WRONG; - if (gcd_array(SC_ARRAY_LEN, array3) != 90) + if (lgcd_array(SC_ARRAY_LEN, larray3) != 90) return ERR_WRONG; - if (gcd_array(SC_ARRAY_LEN, array4) != 1) + if (lgcd_array(SC_ARRAY_LEN, larray4) != 1) return ERR_WRONG; /* Test compute_one_dim. */ @@ -200,15 +193,14 @@ int run_sc_tests(MPI_Comm test_comm) compute_one_dim(5, 4, my_rank, &start, &count); if (start != my_rank || count != (my_rank == 3 ? 2 : 1)) return ERR_WRONG; - printf("my_rank = %d start = %lld count = %lld\n", my_rank, start, count); return 0; } -/* Tesst some list stuff. */ +/* Test some list stuff. */ int test_lists() { file_desc_t *fdesc; - + /* Test that bad input is correctly rejected. */ if (pio_delete_iodesc_from_list(42) != PIO_EBADID) return ERR_WRONG; @@ -223,6 +215,278 @@ int test_lists() return 0; } +/* Test some list stuff. 
*/ +int test_determine_procs() +{ +#define ONE_COMPONENT 1 +#define TWO_COMPONENTS 2 +#define THREE_PROCS 3 + int ret; + + { + int num_io_procs = 1; + int component_count = ONE_COMPONENT; + int num_procs_per_comp[ONE_COMPONENT] = {1}; + int *my_proc_list[ONE_COMPONENT]; + + if ((ret = determine_procs(num_io_procs, component_count, num_procs_per_comp, NULL, + my_proc_list))) + return ret; + + /* Check results and free resources. */ + for (int c = 0; c < ONE_COMPONENT; c++) + { + if (my_proc_list[c][0] != 1) + return ERR_WRONG; + free(my_proc_list[c]); + } + } + + { + int num_io_procs = 3; + int component_count = TWO_COMPONENTS; + int num_procs_per_comp[TWO_COMPONENTS] = {1, 1}; + int *my_proc_list[TWO_COMPONENTS]; + + if ((ret = determine_procs(num_io_procs, component_count, num_procs_per_comp, NULL, + my_proc_list))) + return ret; + + /* Check results and free resources. */ + for (int c = 0; c < TWO_COMPONENTS; c++) + { + if (my_proc_list[c][0] != c + 3) + return ERR_WRONG; + free(my_proc_list[c]); + } + } + + { + int num_io_procs = 3; + int component_count = TWO_COMPONENTS; + int num_procs_per_comp[TWO_COMPONENTS] = {THREE_PROCS, THREE_PROCS}; + int *my_proc_list[TWO_COMPONENTS]; + + if ((ret = determine_procs(num_io_procs, component_count, num_procs_per_comp, NULL, + my_proc_list))) + return ret; + + /* Check results and free resources. 
*/ + for (int c = 0; c < TWO_COMPONENTS; c++) + { + for (int p = 0; p < THREE_PROCS; p++) + if (my_proc_list[c][p] != 3 + c * THREE_PROCS + p) + return ERR_WRONG; + free(my_proc_list[c]); + } + } + + { + int num_io_procs = 3; + int component_count = TWO_COMPONENTS; + int num_procs_per_comp[TWO_COMPONENTS] = {THREE_PROCS, THREE_PROCS}; + int proc_list_1[THREE_PROCS] = {8, 9, 10}; + int proc_list_2[THREE_PROCS] = {11, 12, 13}; + int *proc_list[TWO_COMPONENTS] = {proc_list_1, proc_list_2}; + int *my_proc_list[TWO_COMPONENTS]; + + if ((ret = determine_procs(num_io_procs, component_count, num_procs_per_comp, + (int **)proc_list, my_proc_list))) + return ret; + + /* Check results and free resources. */ + for (int c = 0; c < TWO_COMPONENTS; c++) + { + for (int p = 0; p < THREE_PROCS; p++) + if (my_proc_list[c][p] != proc_list[c][p]) + return ERR_WRONG; + free(my_proc_list[c]); + } + } + + return PIO_NOERR; +} + +/* + * Test some list stuff for varlists. + * + * @author Ed Hartnett + */ +int test_varlists() +{ + var_desc_t *varlist = NULL; + var_desc_t *var_desc; + int ret; + + /* Try to delete a non-existing var. */ + if (delete_var_desc(2, &varlist) != PIO_ENOTVAR) + return ERR_WRONG; + + /* Add a var to the list. */ + if ((ret = add_to_varlist(0, 1, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + + /* Find that var_desc_t. */ + if ((ret = get_var_desc(0, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 0 || !var_desc->rec_var || var_desc->pio_type != PIO_INT || + var_desc->pio_type_size != 4 || var_desc->mpi_type != MPI_INT || var_desc->mpi_type_size != 4) + return ERR_WRONG; + + /* Try to delete a non-existing var - should fail. */ + if (delete_var_desc(2, &varlist) != PIO_ENOTVAR) + return ERR_WRONG; + + /* Delete it. */ + if ((ret = delete_var_desc(0, &varlist))) + return ret; + + /* Make sure it is gone. 
*/ + if (get_var_desc(0, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + + return 0; +} + +/* + * Test some more list stuff for varlists. + * + * @author Ed Hartnett + */ +int test_varlists2() +{ + var_desc_t *varlist = NULL; + var_desc_t *var_desc; + int ret; + + /* Add some vars to the list. */ + if ((ret = add_to_varlist(0, 1, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + if ((ret = add_to_varlist(1, 0, PIO_DOUBLE, 8, MPI_DOUBLE, 8, &varlist))) + return ret; + if ((ret = add_to_varlist(2, 1, PIO_BYTE, 1, MPI_CHAR, 1, &varlist))) + return ret; + + /* Find those var_desc_t. */ + if ((ret = get_var_desc(0, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 0 || !var_desc->rec_var || var_desc->pio_type != PIO_INT || + var_desc->pio_type_size != 4 || var_desc->mpi_type != MPI_INT) + return ERR_WRONG; + + if ((ret = get_var_desc(1, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 1 || var_desc->rec_var || var_desc->pio_type != PIO_DOUBLE || + var_desc->pio_type_size != 8) + return ERR_WRONG; + + if ((ret = get_var_desc(2, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 2 || !var_desc->rec_var || var_desc->pio_type != PIO_BYTE || + var_desc->pio_type_size != 1) + return ERR_WRONG; + + /* Try to delete a non-existing var - should fail. */ + if (delete_var_desc(3, &varlist) != PIO_ENOTVAR) + return ERR_WRONG; + + /* Delete one of the vars. */ + if ((ret = delete_var_desc(0, &varlist))) + return ret; + + /* Make sure it is gone. */ + if (get_var_desc(0, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + + /* Make sure the others are still there. */ + var_desc = NULL; + if ((ret = get_var_desc(1, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 1 || var_desc->rec_var) + return ERR_WRONG; + + var_desc = NULL; + if ((ret = get_var_desc(2, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 2 || !var_desc->rec_var) + return ERR_WRONG; + + /* Delete the other two vars from the varlist. 
*/ + if ((ret = delete_var_desc(1, &varlist))) + return ret; + if ((ret = delete_var_desc(2, &varlist))) + return ret; + + return 0; +} + +/* + * Test even more list stuff for varlists. + * + * @author Ed Hartnett + */ +int test_varlists3() +{ + var_desc_t *varlist = NULL; + var_desc_t *var_desc; + int ret; + + /* Add some vars to the list. */ + if ((ret = add_to_varlist(0, 1, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + if ((ret = add_to_varlist(1, 0, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + if ((ret = add_to_varlist(2, 1, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + if ((ret = add_to_varlist(3, 0, PIO_INT, 4, MPI_INT, 4, &varlist))) + return ret; + + /* Delete one of the vars. */ + if ((ret = delete_var_desc(1, &varlist))) + return ret; + + /* Make sure it is gone. */ + if (get_var_desc(1, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + + /* Make sure the others are still there. */ + var_desc = NULL; + if ((ret = get_var_desc(0, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 0 || !var_desc->rec_var) + return ERR_WRONG; + + var_desc = NULL; + if ((ret = get_var_desc(2, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 2 || !var_desc->rec_var) + return ERR_WRONG; + + var_desc = NULL; + if ((ret = get_var_desc(3, &varlist, &var_desc))) + return ret; + if (var_desc->varid != 3 || var_desc->rec_var) + return ERR_WRONG; + + /* Delete the other vars from the varlist. */ + if ((ret = delete_var_desc(0, &varlist))) + return ret; + if ((ret = delete_var_desc(2, &varlist))) + return ret; + if ((ret = delete_var_desc(3, &varlist))) + return ret; + + /* Make sure they are gone. */ + if (get_var_desc(0, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + if (get_var_desc(2, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + if (get_var_desc(3, &varlist, &var_desc) != PIO_ENOTVAR) + return ERR_WRONG; + + return 0; +} + /* Test the ceil2() and pair() functions. 
*/ int test_ceil2_pair() { @@ -245,7 +509,7 @@ int test_ceil2_pair() return ERR_WRONG; if (pair(4, 2, 2) != 1) return ERR_WRONG; - + return 0; } @@ -374,9 +638,6 @@ int test_CalcStartandCount() if ((ret = CalcStartandCount(PIO_DOUBLE, ndims, gdims, num_io_procs, iorank, start, kount, &numaiotasks))) return ret; - if (iorank < numaiotasks) - printf("iorank %d start %lld %lld count %lld %lld\n", iorank, start[0], - start[1], kount[0], kount[1]); if (numaiotasks < 0) return numaiotasks; @@ -395,7 +656,6 @@ int test_CalcStartandCount() converged = true; else { - printf("Failed to converge %ld %ld %d\n", tpsize, pgdims, num_io_procs); tpsize = 0; num_io_procs--; } @@ -411,7 +671,7 @@ int run_GDCblocksize_tests(MPI_Comm test_comm) int arrlen = 1; PIO_Offset arr_in[1] = {0}; PIO_Offset blocksize; - + blocksize = GCDblocksize(arrlen, arr_in); if (blocksize != 1) return ERR_WRONG; @@ -421,12 +681,12 @@ int run_GDCblocksize_tests(MPI_Comm test_comm) int arrlen = 4; PIO_Offset arr_in[4] = {0, 1, 2, 3}; PIO_Offset blocksize; - + blocksize = GCDblocksize(arrlen, arr_in); if (blocksize != 4) return ERR_WRONG; } - + { int arrlen = 4; PIO_Offset arr_in[4] = {0, 2, 3, 4}; @@ -436,7 +696,7 @@ int run_GDCblocksize_tests(MPI_Comm test_comm) if (blocksize != 1) return ERR_WRONG; } - + { int arrlen = 4; PIO_Offset arr_in[4] = {0, 1, 3, 4}; @@ -446,7 +706,7 @@ int run_GDCblocksize_tests(MPI_Comm test_comm) if (blocksize != 1) return ERR_WRONG; } - + { int arrlen = 4; PIO_Offset arr_in[4] = {0, 1, 2, 4}; @@ -456,7 +716,7 @@ int run_GDCblocksize_tests(MPI_Comm test_comm) if (blocksize != 1) return ERR_WRONG; } - + return 0; } @@ -470,7 +730,7 @@ int main(int argc, char **argv) /* Initialize test. */ if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS, - TARGET_NTASKS, 3, &test_comm))) + TARGET_NTASKS, -1, &test_comm))) ERR(ERR_INIT); /* Test code runs on TARGET_NTASKS tasks. 
The left over tasks do @@ -483,35 +743,39 @@ int main(int argc, char **argv) if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, 1, 0, PIO_REARR_BOX, &iosysid))) return ret; - printf("%d running tests for functions in pioc_sc.c\n", my_rank); if ((ret = run_sc_tests(test_comm))) return ret; - printf("%d running tests for GCDblocksize()\n", my_rank); if ((ret = run_GDCblocksize_tests(test_comm))) return ret; - printf("%d running spmd test code\n", my_rank); if ((ret = run_spmd_tests(test_comm))) return ret; - - printf("%d running CalcStartandCount test code\n", my_rank); + if ((ret = test_CalcStartandCount())) return ret; - printf("%d running list tests\n", my_rank); if ((ret = test_lists())) return ret; - printf("%d running ceil2/pair tests\n", my_rank); + if ((ret = test_determine_procs())) + return ret; + + if ((ret = test_varlists())) + return ret; + + if ((ret = test_varlists2())) + return ret; + + if ((ret = test_varlists3())) + return ret; + if ((ret = test_ceil2_pair())) return ret; - printf("%d running find_mpi_type tests\n", my_rank); if ((ret = test_find_mpi_type())) return ret; - printf("%d running misc tests\n", my_rank); if ((ret = test_misc())) return ret; @@ -522,7 +786,6 @@ int main(int argc, char **argv) } /* endif my_rank < TARGET_NTASKS */ /* Finalize the MPI library. 
*/ - printf("%d %s Finalizing...\n", my_rank, TEST_NAME); if ((ret = pio_test_finalize(&test_comm))) return ret; diff --git a/src/externals/pio2/tests/general/CMakeLists.txt b/src/externals/pio2/tests/general/CMakeLists.txt index b71382d3064..09358d5b9fd 100644 --- a/src/externals/pio2/tests/general/CMakeLists.txt +++ b/src/externals/pio2/tests/general/CMakeLists.txt @@ -50,7 +50,7 @@ endif () #============================================================================== # Test Timeout (4 min = 240 sec) -set (DEFAULT_TEST_TIMEOUT 240) +set (DEFAULT_TEST_TIMEOUT 480) #===== pio_init_finalize ===== add_executable (pio_init_finalize EXCLUDE_FROM_ALL diff --git a/src/externals/pio2/tests/general/ncdf_simple_tests.F90.in b/src/externals/pio2/tests/general/ncdf_simple_tests.F90.in index 5ed9c401cc2..b1d8133194a 100644 --- a/src/externals/pio2/tests/general/ncdf_simple_tests.F90.in +++ b/src/externals/pio2/tests/general/ncdf_simple_tests.F90.in @@ -99,6 +99,68 @@ PIO_TF_AUTO_TEST_SUB_BEGIN test_def_var PIO_TF_AUTO_TEST_SUB_END test_def_var +PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN test_data_conversion + use ncdf_simple_tests_tgv + implicit none + integer, parameter :: VEC_LOCAL_SZ = 7 + type(var_desc_t) :: pio_var + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: data_fname = "pio_test_data_conversion.nc" + type(io_desc_t) :: wiodesc, riodesc + integer, dimension(VEC_LOCAL_SZ) :: compdof, compdof_rel_disps + integer, dimension(VEC_LOCAL_SZ) :: wbuf + PIO_TF_FC_DATA_TYPE, dimension(VEC_LOCAL_SZ) :: rbuf, exp_val + integer, dimension(1) :: dims + integer :: pio_dim + integer :: i, ierr + + do i=1,VEC_LOCAL_SZ + compdof_rel_disps(i) = i + end do + dims(1) = VEC_LOCAL_SZ * pio_tf_world_sz_ + compdof = VEC_LOCAL_SZ * pio_tf_world_rank_ + compdof_rel_disps + wbuf = pio_tf_world_rank_; + exp_val = pio_tf_world_rank_; + + ! Set the decomposition for writing data as PIO_int + call PIO_initdecomp(pio_tf_iosystem_, PIO_int, dims, compdof, wiodesc) + + ! 
Set the decomposition for reading data as various types + call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, riodesc) + + ierr = PIO_createfile(pio_tf_iosystem_, pio_file, tgv_iotype, data_fname, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(data_fname)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim', dims(1), pio_dim) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(data_fname)) + + ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_int, (/pio_dim/), pio_var) + PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(data_fname)) + + ierr = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(data_fname)) + + ! Write the variable out + call PIO_write_darray(pio_file, pio_var, wiodesc, wbuf, ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(data_fname)) + + call PIO_syncfile(pio_file) + + ! Read the variable back (data conversion might occur) + call PIO_read_darray(pio_file, pio_var, riodesc, rbuf, ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(data_fname)) + + PIO_TF_CHECK_VAL((rbuf, exp_val), "Got wrong val") + + call PIO_closefile(pio_file) + call PIO_deletefile(pio_tf_iosystem_, data_fname); + + call PIO_freedecomp(pio_tf_iosystem_, wiodesc) + call PIO_freedecomp(pio_tf_iosystem_, riodesc) + +PIO_TF_AUTO_TEST_SUB_END test_data_conversion + PIO_TF_TEST_DRIVER_BEGIN use ncdf_simple_tests_tgv Implicit none diff --git a/src/externals/pio2/tests/general/pio_decomp_fillval.F90.in b/src/externals/pio2/tests/general/pio_decomp_fillval.F90.in index 4347c5c3b32..b1ec78fd79f 100644 --- a/src/externals/pio2/tests/general/pio_decomp_fillval.F90.in +++ b/src/externals/pio2/tests/general/pio_decomp_fillval.F90.in @@ -58,6 +58,9 @@ PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_1d_explicit_fval ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, (/pio_dim/), pio_var) PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename)) + ierr = 
PIO_put_att(pio_file, pio_var, '_FillValue', BUF_FILLVAL) + PIO_TF_CHECK_ERR(ierr, "Failed to define fill value : " // trim(filename)) + ierr = PIO_enddef(pio_file) PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename)) @@ -157,6 +160,9 @@ PIO_TF_AUTO_TEST_SUB_BEGIN nc_write_1d_implicit_fval ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, (/pio_dim/), pio_var) PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename)) + ierr = PIO_put_att(pio_file, pio_var, '_FillValue', BUF_FILLVAL) + PIO_TF_CHECK_ERR(ierr, "Failed to define fill value : " // trim(filename)) + ierr = PIO_enddef(pio_file) PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename)) @@ -243,6 +249,9 @@ PIO_TF_AUTO_TEST_SUB_BEGIN nc_read_1d_explicit_fval ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, (/pio_dim/), pio_var) PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename)) + ierr = PIO_put_att(pio_file, pio_var, '_FillValue', BUF_FILLVAL) + PIO_TF_CHECK_ERR(ierr, "Failed to define fill value : " // trim(filename)) + ierr = PIO_enddef(pio_file) PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename)) diff --git a/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in b/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in index e75fdce61de..49e77885cf7 100644 --- a/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in +++ b/src/externals/pio2/tests/general/pio_decomp_frame_tests.F90.in @@ -96,7 +96,7 @@ PIO_TF_TEMPLATE +PIO_TF_AUTO_TEST_SUB_BEGIN nc_test_limited_time_dim + implicit none + integer, parameter :: NDIMS = 4 + integer, parameter :: NFRAMES = 6 + type(var_desc_t) :: pio_var + type(file_desc_t) :: pio_file + character(len=PIO_TF_MAX_STR_LEN) :: filename + type(io_desc_t) :: wr_iodesc, rd_iodesc + integer, dimension(:), allocatable :: compdof + integer, dimension(NDIMS) :: start, count + PIO_TF_FC_DATA_TYPE, dimension(:,:,:,:), allocatable :: rbuf, wbuf, 
exp_val + integer, dimension(NDIMS-1) :: dims + integer, dimension(NDIMS) :: pio_dims + integer :: i, j, k, tmp_idx, ierr, lsz, nrows, ncols, nhgts + integer(kind=pio_offset_kind) :: f + ! iotypes = valid io types + integer, dimension(:), allocatable :: iotypes + character(len=PIO_TF_MAX_STR_LEN), dimension(:), allocatable :: iotype_descs + integer :: num_iotypes + + ! Set the decomposition for writing data - forcing rearrangement + call get_3d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .true.) + nrows = count(1) + ncols = count(2) + nhgts = count(3) + + allocate(wbuf(nrows, ncols, nhgts, NFRAMES)) + allocate(compdof(nrows * ncols * nhgts)) + do f=1,NFRAMES + do k=1,nhgts + do j=1,ncols + do i=1,nrows + wbuf(i,j,k,f) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + wbuf(i,j,k,f) = wbuf(i,j,k,f) + (f - 1) * (dims(1) * dims(2) * dims(3)) + tmp_idx = (k - 1) * (ncols * nrows) + (j - 1) * nrows + i + compdof(tmp_idx) = wbuf(i,j,k,1) + end do + end do + end do + end do + + call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, wr_iodesc) + deallocate(compdof) + + ! Set the decomposition for reading data - different from the write decomp + call get_3d_col_decomp_info(pio_tf_world_rank_, pio_tf_world_sz_, dims, start, count, .false.) 
+ nrows = count(1) + ncols = count(2) + nhgts = count(3) + + allocate(rbuf(nrows, ncols, nhgts, NFRAMES)) + allocate(compdof(nrows * ncols * nhgts)) + allocate(exp_val(nrows, ncols, nhgts, NFRAMES)) + + do f=1,NFRAMES + do k=1,nhgts + do j=1,ncols + do i=1,nrows + tmp_idx = (k - 1) * (ncols * nrows) + (j - 1) * nrows + i + compdof(tmp_idx) = (start(3) - 1 + k - 1) * (dims(1) * dims(2)) +& + (start(2) - 1 + j - 1) * dims(1) + i + exp_val(i,j,k,f) = compdof(tmp_idx) + (f - 1) * (dims(1) * dims(2) * dims(3)) + end do + end do + end do + end do + + call PIO_initdecomp(pio_tf_iosystem_, PIO_TF_DATA_TYPE, dims, compdof, rd_iodesc) + deallocate(compdof) + + num_iotypes = 0 + call PIO_TF_Get_nc_iotypes(iotypes, iotype_descs, num_iotypes) + filename = "test_pio_decomp_simple_tests.testfile" + do i=1,num_iotypes + PIO_TF_LOG(0,*) "Testing : PIO_TF_DATA_TYPE : ", iotype_descs(i) + ierr = PIO_createfile(pio_tf_iosystem_, pio_file, iotypes(i), filename, PIO_CLOBBER) + PIO_TF_CHECK_ERR(ierr, "Could not create file " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_row', dims(1), pio_dims(1)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_col', dims(2), pio_dims(2)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_hgt', dims(3), pio_dims(3)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_dim(pio_file, 'PIO_TF_test_dim_time_limited', NFRAMES, pio_dims(4)) + PIO_TF_CHECK_ERR(ierr, "Failed to define a dim : " // trim(filename)) + + ierr = PIO_def_var(pio_file, 'PIO_TF_test_var', PIO_TF_DATA_TYPE, pio_dims, pio_var) + PIO_TF_CHECK_ERR(ierr, "Failed to define a var : " // trim(filename)) + + ierr = PIO_enddef(pio_file) + PIO_TF_CHECK_ERR(ierr, "Failed to end redef mode : " // trim(filename)) + + do f=1,NFRAMES + call PIO_setframe(pio_file, pio_var, f) + ! 
Write the current frame + call PIO_write_darray(pio_file, pio_var, wr_iodesc, wbuf(:,:,:,f), ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to write darray : " // trim(filename)) + end do + + call PIO_syncfile(pio_file) + + do f=1,NFRAMES + call PIO_setframe(pio_file, pio_var, f) + call PIO_read_darray(pio_file, pio_var, rd_iodesc, rbuf(:,:,:,f), ierr) + PIO_TF_CHECK_ERR(ierr, "Failed to read darray : " // trim(filename)) + end do + + do f=1,NFRAMES + PIO_TF_CHECK_VAL((rbuf(:,:,:,f), exp_val(:,:,:,f)), "Got wrong val, frame=", f) + end do + + call PIO_closefile(pio_file) + + call PIO_deletefile(pio_tf_iosystem_, filename); + end do + + if(allocated(iotypes)) then + deallocate(iotypes) + deallocate(iotype_descs) + end if + + call PIO_freedecomp(pio_tf_iosystem_, rd_iodesc) + call PIO_freedecomp(pio_tf_iosystem_, wr_iodesc) + deallocate(exp_val) + deallocate(rbuf) + deallocate(wbuf) +PIO_TF_AUTO_TEST_SUB_END nc_test_limited_time_dim diff --git a/src/externals/pio2/tests/general/util/pio_tutil.F90 b/src/externals/pio2/tests/general/util/pio_tutil.F90 index e4a076f3a10..43c0b634b4a 100644 --- a/src/externals/pio2/tests/general/util/pio_tutil.F90 +++ b/src/externals/pio2/tests/general/util/pio_tutil.F90 @@ -129,7 +129,7 @@ SUBROUTINE PIO_TF_Init_(rearr) - pio_tf_log_level_ = 3 + pio_tf_log_level_ = 0 pio_tf_num_aggregators_ = 0 pio_tf_num_io_tasks_ = 0 pio_tf_stride_ = 1 diff --git a/tools/cprnc/summarize_cprnc_diffs b/tools/cprnc/summarize_cprnc_diffs index 8adaf87b75f..84933192cc6 100755 --- a/tools/cprnc/summarize_cprnc_diffs +++ b/tools/cprnc/summarize_cprnc_diffs @@ -175,7 +175,7 @@ sub process_cprnc_output { } # foreach test_dir if ($num_files == 0) { - die "ERROR: no base.cprnc.out files found\n"; + die "ERROR: no cprnc.out files found\n"; } return \%diffs; diff --git a/tools/load_balancing_tool/README b/tools/load_balancing_tool/README index 78347c31e5a..c9f8815cab8 100644 --- a/tools/load_balancing_tool/README +++ b/tools/load_balancing_tool/README @@ -1,44 +1,13 @@ 
####################################################################### ## -## CESM Load Balancing Tool +## CIME Load Balancing Tool ## -## Developed by Sheri Mickelson mickelso@ucar.edu +## Originally Developed by Sheri Mickelson mickelso@ucar.edu ## Yuri Alekseev (ALCF/Argonne National Laboratory ## +## Updated 2017 Jason Sarich sarich@mcs.anl.gov (Argonne National Laboratory) ######################################################################## -To Run: - -Edit global_variables.csh - >In the "Set case variables" section, set each env variable to correctly - describe the setup you're interested in. - - >In the "Set the location of the load balancing results" section, set - "results_dir" to where you would like the scaling plots and load balancing - results be produced. - - >In the "Set the test layouts to produce the scaling curves" section, set the - thread count to be used by all components. You will also need to set the - pe counts/root locations for the scaling tests that will be ran by the - "run_first.csh" script. - - >In the last section, you'll want to set the pes counts that you'd like to find - a balanced layout for. This can be a single value or mult. values. - - -After the global_variabes.csh file is setup, run the "run_first.csh" script. This script -will setup the scaling test runs for you, build them, and submit them to the queue. - -Once all of your runs have ran successfully, run the "run_second.csh" script. This script -will copy the timing files over to the "results_dir" you set in the global_variables.csh -script. They will be analyzed and the scaling curves will be plotted for reference. It -will give you a text file with the load balanced layout and create an evn_mach_pes.xml file -you can use. These files will have the "target_tasks" pe counts appended to the name and -a set will be produced for each TARGET_TASK you listed. - -It is very important that you look at the scaling curves that are plotted out. 
You want to make sure -all curves are smooth. Sometimes machine variability can cause one of the scaling runs to run -longer than expected. If this happens, the solver will not give you a balanced result -because the curve is incorrect. If the curve does not look correct, re-run the faulty CESM run(s) manually -and then re-run "run_second.csh" once the run(s) complete. +This Load Balancing tool finds reasonable PE layouts for CIME-driven models. It will find these from timing files you provide or from runs done by the tool. +For more information, see the help message or complete documentation in the Miscellaneous Tools section of the CIME user's guide. diff --git a/tools/load_balancing_tool/code/cesm_scaling.gplot b/tools/load_balancing_tool/code/cesm_scaling.gplot deleted file mode 100644 index c19b6b634fa..00000000000 --- a/tools/load_balancing_tool/code/cesm_scaling.gplot +++ /dev/null @@ -1,38 +0,0 @@ -set term gif - -set xlabel "Cores" - -set ylabel "simulated years/day" -set output 'tp.gif' -set title "Model Throughput" -plot 'tp.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set ylabel "pe-hrs/simulated year" -set output 'cost.gif' -set title "Model Cost" -plot 'cost.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set ylabel "runtime (seconds)" -set output 'atm.gif' -set title "CAM" -plot 'ATM.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set title "POP2" -set output 'ocn.gif' -plot 'OCN.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set title "CPL" -set output 'cpl.gif' -plot 'CPL.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set title "CICE" -set output 'ice.gif' -plot 'ICE.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set title "CLM" -set output 'lnd.gif' -plot 'LND.dat' using 1:2 with linespoints lc rgb "red" lw 2 - -set title "ROF" -set output 'rof.gif' -plot 'ROF.dat' using 1:2 with linespoints lc rgb "red" lw 2 diff --git a/tools/load_balancing_tool/code/create_dataFile.pl 
b/tools/load_balancing_tool/code/create_dataFile.pl deleted file mode 100755 index f41b8edf4c7..00000000000 --- a/tools/load_balancing_tool/code/create_dataFile.pl +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/env perl -use strict; - -my $timingDir = $ARGV[0]; -my $CPUS = $ARGV[1]; -my $current = $ARGV[2]; -my $codeDir = "$current/code/"; - -opendir(D,$timingDir); -my @files = grep /_timing/, readdir(D); -closedir(D); -my $component; -foreach my $file (@files){ - my $full_fn = $timingDir . $file; - open(F,"$full_fn") or die "could not open $full_fn"; - my $tasks; - my $threads; - my $tp; - my $mc; - my $pes; - foreach(){ - if(/(\w+) = (\w+)\s+\d+\s+\d+\s+(\d+)\s+x\s+(\d+)/){ - my $comp = $1; - $comp =~ tr/a-z/A-Z/; - $tasks->{$comp}=$3; - $threads->{$comp}=1; - } - if(/(\w+) Run Time:\s+(\d+\.\d+) seconds \s+(\d+\.\d+) seconds/){ - my $comp = $1; - next if ($comp eq 'TOT' or $comp eq 'GLC'); - $component->{$comp}{$tasks->{$comp}}{$threads->{$comp}} = $3; - } - } - close(F); -} - - -open(ATM_FILE, ") { - $Tcount++; -} - -my $x = 1; -my $TcountS; -while ($x <= $Tcount){ - $TcountS = $TcountS." 
".$x; - $x++; -} - -open(S,">all.dat"); -print S "Tasks: \n"; - -my $dataFile = "$codeDir/model.data"; - -open(I, ">model.data"); - -print I "data;\n\n"; -print I "param D := $Tcount;\n"; -print I "param CPUS := $CPUS;\n"; -print I "param CPN := 16;\n"; -print I "param Tsync := 3.0;\n"; -print I "param Etarget := 0.5;\n"; -print I "param MinNodes := 64;\n"; -print I "param MaxNodes := 48160;\n"; -print I " \n"; -print I "param rawx: $TcountS :=\n"; - -foreach my $comp (keys %$component){ - print S "$comp "; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - my $compLC = lc $comp; - print I " \'$compLC\' "; - } - foreach my $tasks (sort numerically keys %{$component->{$comp}}){ - my $nodes = $tasks; - print S "$nodes "; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - print I " $nodes"; - } - } - print S "\n"; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - print I "\n"; - } -} - -print I ";\n"; -print I "\n"; -print I "param rawy: $TcountS :=\n"; - -print S "Timings: \n"; -foreach my $comp (keys %$component){ - print S "$comp "; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - my $compLC = lc $comp; - print I " \'$compLC\' "; - } - foreach my $tasks (sort numerically keys %{$component->{$comp}}){ - my $nodes = $tasks; - print S "$component->{$comp}{$tasks}{1} "; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - print I " $component->{$comp}{$tasks}{1}"; - } - } - print S "\n"; - if ($comp eq 'LND' or $comp eq 'ICE' or $comp eq 'ATM' or $comp eq 'OCN'){ - print I "\n"; - } -} -close(S); - -print I ";\n"; -print I "\n"; - -close(I); - -sub numerically{ $a <=> $b; } diff --git a/tools/load_balancing_tool/code/f02_peList.txt b/tools/load_balancing_tool/code/f02_peList.txt deleted file mode 100644 index 4deda42c8e7..00000000000 --- a/tools/load_balancing_tool/code/f02_peList.txt +++ /dev/null @@ -1,2946 +0,0 @@ -1 -2 -3 -4 
-5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -37 -38 -39 -40 -41 -42 -43 -44 -45 -46 -47 -48 -49 -50 -51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 -64 -65 -66 -67 -68 -69 -70 -71 -72 -73 -74 -75 -76 -77 -78 -79 -80 -81 -82 -83 -84 -85 -86 -87 -88 -89 -90 -91 -92 -93 -94 -95 -96 -97 -98 -99 -100 -101 -102 -103 -104 -105 -106 -107 -108 -109 -110 -111 -112 -113 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -127 -128 -129 -130 -131 -132 -133 -134 -135 -136 -137 -138 -139 -140 -141 -142 -143 -144 -145 -146 -147 -148 -149 -150 -151 -152 -153 -154 -155 -156 -157 -158 -159 -160 -161 -162 -163 -164 -165 -166 -167 -168 -169 -170 -171 -172 -173 -174 -175 -176 -177 -178 -179 -180 -181 -182 -183 -184 -185 -186 -187 -188 -189 -190 -191 -192 -193 -194 -195 -196 -197 -198 -199 -200 -201 -202 -203 -204 -205 -206 -207 -208 -209 -210 -211 -212 -213 -214 -215 -216 -217 -218 -219 -220 -221 -222 -223 -224 -225 -226 -227 -228 -229 -230 -231 -232 -233 -234 -235 -236 -237 -238 -239 -240 -241 -242 -243 -244 -245 -246 -247 -248 -249 -250 -251 -252 -253 -254 -255 -256 -258 -259 -260 -261 -262 -264 -265 -266 -267 -268 -270 -272 -273 -274 -275 -276 -278 -279 -280 -282 -284 -285 -286 -287 -288 -289 -290 -291 -292 -294 -295 -296 -297 -298 -299 -300 -301 -302 -303 -304 -305 -306 -308 -309 -310 -312 -314 -315 -316 -318 -319 -320 -321 -322 -323 -324 -325 -326 -327 -328 -329 -330 -332 -333 -334 -335 -336 -338 -339 -340 -341 -342 -343 -344 -345 -346 -348 -350 -351 -352 -354 -355 -356 -357 -358 -360 -361 -362 -363 -364 -365 -366 -368 -369 -370 -371 -372 -374 -375 -376 -377 -378 -380 -381 -382 -384 -385 -386 -387 -388 -390 -391 -392 -393 -394 -395 -396 -398 -399 -400 -402 -403 -404 -405 -406 -407 -408 -410 -411 -412 -413 -414 -415 -416 -417 -418 -420 -422 -423 -424 -425 -426 -427 -428 -429 -430 -432 -434 -435 -436 -437 -438 -440 -441 -442 -444 -445 -446 -447 -448 -450 -451 -452 -453 -454 -455 -456 -458 
-459 -460 -462 -464 -465 -466 -468 -469 -470 -471 -472 -473 -474 -475 -476 -477 -478 -480 -481 -482 -483 -484 -485 -486 -488 -489 -490 -492 -493 -494 -495 -496 -497 -498 -500 -501 -502 -504 -505 -506 -507 -508 -510 -511 -512 -513 -515 -516 -517 -518 -519 -520 -522 -524 -525 -527 -528 -529 -530 -531 -532 -533 -534 -535 -536 -537 -539 -540 -543 -544 -545 -546 -548 -549 -550 -551 -552 -553 -555 -556 -558 -559 -560 -561 -564 -565 -567 -568 -570 -572 -573 -574 -575 -576 -578 -579 -580 -581 -582 -583 -584 -585 -588 -589 -590 -591 -592 -594 -595 -596 -597 -598 -600 -602 -603 -604 -605 -606 -608 -609 -610 -611 -612 -615 -616 -618 -620 -621 -623 -624 -625 -627 -628 -629 -630 -632 -633 -635 -636 -637 -638 -639 -640 -642 -644 -645 -646 -648 -649 -650 -651 -652 -654 -655 -656 -657 -658 -660 -663 -664 -665 -666 -667 -668 -669 -670 -671 -672 -675 -676 -678 -679 -680 -681 -682 -684 -685 -686 -687 -688 -689 -690 -692 -693 -695 -696 -697 -699 -700 -702 -703 -704 -705 -707 -708 -710 -711 -712 -713 -714 -715 -716 -717 -720 -721 -722 -723 -724 -725 -726 -728 -729 -730 -731 -732 -735 -736 -737 -738 -740 -741 -742 -744 -745 -747 -748 -749 -750 -752 -753 -754 -755 -756 -759 -760 -762 -763 -764 -765 -767 -768 -770 -772 -774 -775 -776 -777 -779 -780 -781 -782 -783 -784 -785 -786 -788 -790 -791 -792 -793 -795 -796 -798 -799 -800 -801 -803 -804 -805 -806 -808 -810 -812 -814 -815 -816 -817 -819 -820 -822 -824 -825 -826 -828 -830 -832 -833 -834 -835 -836 -837 -840 -844 -845 -846 -847 -848 -850 -851 -852 -854 -855 -856 -858 -860 -861 -864 -865 -867 -868 -869 -870 -871 -872 -873 -874 -875 -876 -880 -882 -884 -885 -888 -889 -890 -891 -892 -893 -894 -895 -896 -897 -900 -901 -902 -903 -904 -905 -906 -908 -909 -910 -912 -913 -915 -916 -917 -918 -920 -923 -924 -925 -927 -928 -930 -931 -932 -935 -936 -938 -940 -942 -943 -944 -945 -946 -948 -949 -950 -952 -954 -955 -956 -957 -959 -960 -962 -963 -964 -965 -966 -968 -969 -970 -972 -973 -975 -976 -978 -979 -980 -981 -984 -985 -986 -987 -988 -989 -990 -992 
-994 -995 -996 -999 -1000 -1001 -1002 -1003 -1004 -1005 -1007 -1008 -1010 -1012 -1014 -1015 -1016 -1017 -1020 -1022 -1023 -1024 -1025 -1026 -1027 -1029 -1030 -1032 -1034 -1035 -1036 -1037 -1038 -1040 -1043 -1044 -1045 -1048 -1050 -1053 -1054 -1055 -1056 -1057 -1058 -1060 -1062 -1064 -1065 -1066 -1067 -1068 -1070 -1071 -1072 -1074 -1075 -1078 -1079 -1080 -1081 -1083 -1085 -1086 -1088 -1089 -1090 -1092 -1095 -1096 -1098 -1099 -1100 -1102 -1104 -1105 -1106 -1107 -1110 -1111 -1112 -1113 -1115 -1116 -1118 -1120 -1121 -1122 -1125 -1127 -1128 -1130 -1131 -1133 -1134 -1135 -1136 -1139 -1140 -1141 -1143 -1144 -1145 -1146 -1148 -1150 -1152 -1155 -1156 -1157 -1158 -1159 -1160 -1161 -1162 -1164 -1165 -1166 -1168 -1169 -1170 -1173 -1175 -1176 -1177 -1178 -1179 -1180 -1182 -1183 -1184 -1185 -1188 -1190 -1192 -1194 -1195 -1196 -1197 -1199 -1200 -1204 -1205 -1206 -1207 -1208 -1209 -1210 -1211 -1212 -1215 -1216 -1218 -1219 -1220 -1221 -1222 -1224 -1225 -1230 -1232 -1233 -1235 -1236 -1239 -1240 -1241 -1242 -1243 -1245 -1246 -1248 -1250 -1251 -1253 -1254 -1255 -1256 -1258 -1260 -1261 -1264 -1265 -1266 -1267 -1269 -1270 -1272 -1273 -1274 -1275 -1276 -1278 -1280 -1281 -1284 -1287 -1288 -1290 -1292 -1295 -1296 -1298 -1300 -1302 -1304 -1305 -1308 -1309 -1310 -1311 -1312 -1313 -1314 -1316 -1320 -1323 -1325 -1326 -1328 -1330 -1331 -1332 -1334 -1335 -1336 -1337 -1338 -1339 -1340 -1341 -1342 -1343 -1344 -1349 -1350 -1351 -1352 -1353 -1356 -1357 -1358 -1359 -1360 -1362 -1364 -1365 -1368 -1370 -1372 -1374 -1375 -1376 -1377 -1378 -1379 -1380 -1384 -1386 -1387 -1390 -1391 -1392 -1393 -1394 -1395 -1397 -1398 -1400 -1403 -1404 -1406 -1407 -1408 -1410 -1411 -1413 -1414 -1416 -1417 -1419 -1420 -1421 -1422 -1424 -1425 -1426 -1428 -1430 -1431 -1432 -1434 -1435 -1440 -1441 -1442 -1443 -1444 -1445 -1446 -1448 -1449 -1450 -1452 -1455 -1456 -1458 -1460 -1462 -1463 -1464 -1467 -1469 -1470 -1472 -1474 -1475 -1476 -1477 -1479 -1480 -1482 -1484 -1485 -1488 -1490 -1491 -1494 -1495 -1496 -1498 -1500 -1501 -1503 
-1504 -1505 -1506 -1507 -1508 -1510 -1512 -1513 -1515 -1518 -1519 -1520 -1521 -1524 -1525 -1526 -1528 -1529 -1530 -1533 -1534 -1536 -1539 -1540 -1541 -1544 -1545 -1547 -1548 -1550 -1551 -1552 -1554 -1557 -1558 -1560 -1561 -1562 -1564 -1566 -1568 -1570 -1572 -1573 -1575 -1576 -1577 -1580 -1581 -1582 -1584 -1586 -1587 -1589 -1590 -1592 -1593 -1595 -1596 -1598 -1599 -1600 -1602 -1603 -1605 -1606 -1608 -1610 -1611 -1612 -1615 -1616 -1617 -1620 -1624 -1625 -1628 -1629 -1630 -1631 -1632 -1633 -1634 -1635 -1638 -1639 -1640 -1644 -1645 -1647 -1648 -1649 -1650 -1651 -1652 -1653 -1656 -1659 -1660 -1661 -1664 -1665 -1666 -1668 -1670 -1672 -1673 -1674 -1675 -1677 -1679 -1680 -1683 -1687 -1688 -1690 -1691 -1692 -1694 -1695 -1696 -1700 -1701 -1702 -1703 -1704 -1705 -1708 -1710 -1712 -1715 -1716 -1717 -1719 -1720 -1722 -1725 -1727 -1728 -1729 -1730 -1734 -1736 -1737 -1738 -1740 -1742 -1743 -1744 -1746 -1748 -1749 -1750 -1751 -1752 -1755 -1757 -1760 -1764 -1767 -1768 -1770 -1771 -1773 -1775 -1776 -1778 -1780 -1781 -1782 -1784 -1785 -1786 -1788 -1790 -1791 -1792 -1793 -1794 -1800 -1802 -1804 -1805 -1806 -1807 -1808 -1809 -1810 -1812 -1815 -1816 -1817 -1818 -1819 -1820 -1824 -1825 -1826 -1827 -1830 -1832 -1833 -1834 -1836 -1837 -1840 -1843 -1845 -1846 -1848 -1850 -1853 -1854 -1856 -1859 -1860 -1862 -1863 -1864 -1869 -1870 -1872 -1875 -1876 -1880 -1881 -1884 -1885 -1886 -1887 -1888 -1890 -1892 -1896 -1898 -1899 -1900 -1903 -1904 -1905 -1908 -1909 -1910 -1911 -1912 -1914 -1917 -1918 -1919 -1920 -1921 -1924 -1925 -1926 -1928 -1930 -1932 -1935 -1936 -1937 -1938 -1940 -1944 -1946 -1947 -1950 -1952 -1953 -1955 -1956 -1957 -1958 -1960 -1962 -1963 -1965 -1968 -1969 -1970 -1971 -1972 -1974 -1975 -1976 -1978 -1980 -1984 -1988 -1989 -1990 -1991 -1992 -1995 -1998 -2000 -2001 -2002 -2004 -2006 -2007 -2008 -2010 -2013 -2014 -2015 -2016 -2020 -2023 -2024 -2025 -2028 -2030 -2032 -2033 -2034 -2035 -2037 -2040 -2041 -2043 -2044 -2046 -2047 -2048 -2050 -2052 -2054 -2055 -2057 -2058 -2060 -2061 -2064 
-2067 -2068 -2070 -2071 -2072 -2074 -2075 -2076 -2079 -2080 -2085 -2086 -2088 -2090 -2091 -2093 -2096 -2097 -2100 -2101 -2106 -2108 -2109 -2110 -2112 -2114 -2115 -2116 -2119 -2120 -2121 -2123 -2124 -2125 -2128 -2130 -2132 -2133 -2134 -2136 -2139 -2140 -2142 -2144 -2145 -2147 -2148 -2150 -2151 -2156 -2158 -2159 -2160 -2162 -2163 -2166 -2167 -2169 -2170 -2171 -2172 -2175 -2176 -2178 -2180 -2184 -2185 -2187 -2189 -2190 -2192 -2193 -2196 -2197 -2198 -2200 -2204 -2205 -2208 -2210 -2211 -2212 -2214 -2220 -2222 -2223 -2224 -2225 -2226 -2227 -2230 -2231 -2232 -2233 -2235 -2236 -2240 -2241 -2242 -2244 -2247 -2249 -2250 -2254 -2255 -2256 -2259 -2260 -2261 -2262 -2265 -2266 -2268 -2270 -2272 -2275 -2277 -2278 -2280 -2282 -2286 -2288 -2289 -2290 -2292 -2295 -2296 -2299 -2300 -2301 -2304 -2310 -2312 -2314 -2316 -2318 -2320 -2321 -2322 -2323 -2324 -2325 -2327 -2328 -2329 -2330 -2331 -2332 -2336 -2337 -2338 -2340 -2343 -2346 -2350 -2352 -2353 -2354 -2355 -2356 -2358 -2360 -2363 -2364 -2365 -2366 -2368 -2369 -2370 -2373 -2375 -2376 -2379 -2380 -2384 -2385 -2387 -2388 -2390 -2392 -2394 -2397 -2398 -2400 -2405 -2408 -2409 -2410 -2412 -2413 -2414 -2415 -2416 -2418 -2420 -2422 -2424 -2425 -2430 -2431 -2432 -2436 -2438 -2440 -2442 -2444 -2445 -2448 -2450 -2451 -2453 -2457 -2460 -2461 -2464 -2465 -2466 -2470 -2472 -2475 -2478 -2480 -2482 -2483 -2484 -2486 -2489 -2490 -2492 -2496 -2497 -2499 -2500 -2502 -2505 -2506 -2507 -2508 -2509 -2510 -2512 -2516 -2519 -2520 -2522 -2525 -2527 -2528 -2530 -2532 -2533 -2534 -2535 -2538 -2540 -2541 -2544 -2546 -2548 -2550 -2552 -2553 -2556 -2560 -2561 -2562 -2563 -2565 -2567 -2568 -2574 -2575 -2576 -2580 -2583 -2584 -2585 -2587 -2590 -2592 -2595 -2596 -2599 -2600 -2601 -2603 -2604 -2607 -2608 -2610 -2613 -2616 -2618 -2620 -2622 -2624 -2625 -2626 -2628 -2629 -2632 -2635 -2639 -2640 -2641 -2645 -2646 -2650 -2651 -2652 -2655 -2656 -2660 -2662 -2664 -2665 -2667 -2668 -2669 -2670 -2672 -2673 -2674 -2675 -2676 -2678 -2679 -2680 -2682 -2684 -2685 -2686 -2688 
-2691 -2695 -2698 -2700 -2702 -2703 -2704 -2706 -2709 -2712 -2714 -2715 -2716 -2717 -2718 -2720 -2724 -2725 -2728 -2730 -2736 -2737 -2739 -2740 -2743 -2744 -2745 -2748 -2750 -2751 -2752 -2754 -2755 -2756 -2758 -2760 -2761 -2768 -2769 -2771 -2772 -2774 -2775 -2780 -2782 -2783 -2784 -2786 -2788 -2790 -2793 -2794 -2795 -2796 -2800 -2805 -2806 -2808 -2812 -2814 -2816 -2820 -2821 -2822 -2825 -2826 -2828 -2829 -2831 -2832 -2834 -2835 -2838 -2839 -2840 -2842 -2844 -2847 -2848 -2850 -2852 -2856 -2860 -2862 -2864 -2865 -2868 -2869 -2870 -2873 -2875 -2877 -2880 -2882 -2884 -2886 -2888 -2890 -2892 -2895 -2896 -2898 -2899 -2900 -2904 -2907 -2910 -2912 -2916 -2919 -2920 -2921 -2924 -2925 -2926 -2928 -2934 -2938 -2940 -2941 -2944 -2945 -2948 -2950 -2951 -2952 -2954 -2955 -2958 -2960 -2961 -2964 -2967 -2968 -2970 -2975 -2976 -2977 -2980 -2982 -2983 -2985 -2988 -2990 -2992 -2996 -3000 -3002 -3003 -3006 -3008 -3009 -3010 -3012 -3013 -3014 -3015 -3016 -3020 -3021 -3024 -3025 -3026 -3029 -3030 -3036 -3038 -3040 -3042 -3043 -3045 -3048 -3050 -3052 -3055 -3056 -3058 -3059 -3060 -3066 -3068 -3072 -3075 -3077 -3078 -3080 -3081 -3082 -3087 -3088 -3090 -3094 -3096 -3097 -3100 -3102 -3104 -3105 -3107 -3108 -3111 -3114 -3116 -3120 -3122 -3124 -3125 -3128 -3129 -3132 -3133 -3135 -3136 -3140 -3144 -3145 -3146 -3150 -3151 -3152 -3154 -3159 -3160 -3162 -3164 -3165 -3168 -3171 -3172 -3173 -3174 -3175 -3178 -3179 -3180 -3184 -3185 -3186 -3190 -3192 -3195 -3196 -3197 -3198 -3200 -3204 -3206 -3210 -3211 -3212 -3213 -3216 -3220 -3222 -3224 -3225 -3230 -3232 -3234 -3237 -3240 -3243 -3247 -3248 -3249 -3250 -3255 -3256 -3258 -3260 -3262 -3263 -3264 -3266 -3268 -3270 -3275 -3276 -3278 -3280 -3281 -3285 -3287 -3288 -3289 -3290 -3294 -3296 -3297 -3298 -3300 -3302 -3304 -3306 -3312 -3315 -3318 -3320 -3322 -3325 -3328 -3330 -3332 -3335 -3336 -3339 -3340 -3344 -3345 -3346 -3348 -3349 -3350 -3354 -3358 -3360 -3363 -3366 -3374 -3375 -3376 -3380 -3381 -3382 -3383 -3384 -3388 -3390 -3392 -3400 -3401 -3402 -3404 
-3405 -3406 -3408 -3410 -3416 -3417 -3420 -3423 -3424 -3425 -3427 -3430 -3432 -3434 -3435 -3438 -3439 -3440 -3444 -3450 -3451 -3454 -3456 -3458 -3460 -3465 -3468 -3472 -3473 -3474 -3475 -3476 -3477 -3480 -3484 -3485 -3486 -3488 -3492 -3495 -3496 -3498 -3500 -3502 -3504 -3507 -3510 -3514 -3515 -3519 -3520 -3525 -3528 -3534 -3536 -3540 -3542 -3546 -3549 -3550 -3552 -3553 -3555 -3556 -3560 -3562 -3564 -3565 -3568 -3570 -3572 -3575 -3576 -3580 -3582 -3584 -3585 -3586 -3587 -3588 -3591 -3600 -3604 -3608 -3610 -3611 -3612 -3614 -3615 -3616 -3618 -3620 -3621 -3624 -3625 -3629 -3630 -3632 -3633 -3634 -3636 -3638 -3640 -3645 -3648 -3650 -3652 -3654 -3655 -3657 -3660 -3664 -3666 -3667 -3672 -3674 -3675 -3680 -3686 -3689 -3690 -3692 -3696 -3700 -3703 -3705 -3706 -3708 -3712 -3717 -3718 -3720 -3723 -3724 -3725 -3726 -3728 -3735 -3738 -3740 -3743 -3744 -3749 -3750 -3757 -3759 -3760 -3762 -3765 -3768 -3770 -3772 -3774 -3775 -3776 -3780 -3781 -3784 -3791 -3792 -3795 -3796 -3798 -3800 -3801 -3806 -3808 -3810 -3816 -3818 -3819 -3820 -3822 -3824 -3825 -3828 -3834 -3838 -3840 -3841 -3842 -3843 -3848 -3850 -3852 -3856 -3857 -3859 -3860 -3864 -3870 -3872 -3874 -3875 -3876 -3880 -3885 -3887 -3888 -3893 -3894 -3895 -3900 -3904 -3906 -3910 -3912 -3914 -3916 -3920 -3924 -3925 -3926 -3927 -3933 -3936 -3938 -3940 -3942 -3944 -3948 -3950 -3952 -3956 -3960 -3961 -3968 -3969 -3971 -3975 -3978 -3979 -3980 -3982 -3984 -3990 -3995 -3996 -4000 -4002 -4004 -4008 -4009 -4011 -4012 -4014 -4016 -4020 -4025 -4026 -4028 -4029 -4030 -4032 -4040 -4046 -4047 -4048 -4050 -4053 -4056 -4060 -4063 -4064 -4066 -4068 -4070 -4071 -4074 -4075 -4080 -4082 -4085 -4086 -4092 -4094 -4095 -4096 -4097 -4100 -4104 -4108 -4114 -4116 -4117 -4120 -4122 -4123 -4125 -4128 -4131 -4134 -4136 -4137 -4140 -4142 -4148 -4150 -4152 -4158 -4160 -4161 -4163 -4165 -4175 -4176 -4179 -4180 -4182 -4186 -4194 -4199 -4200 -4202 -4209 -4212 -4216 -4218 -4220 -4221 -4224 -4225 -4230 -4232 -4233 -4237 -4238 -4240 -4242 -4246 -4248 -4250 -4255 
-4256 -4260 -4263 -4264 -4266 -4267 -4268 -4272 -4275 -4278 -4280 -4284 -4290 -4294 -4296 -4300 -4301 -4302 -4305 -4312 -4313 -4316 -4318 -4320 -4324 -4325 -4326 -4332 -4334 -4335 -4338 -4340 -4342 -4344 -4347 -4350 -4351 -4352 -4356 -4360 -4368 -4370 -4374 -4375 -4378 -4380 -4389 -4392 -4393 -4394 -4400 -4408 -4410 -4416 -4420 -4422 -4425 -4427 -4428 -4431 -4439 -4440 -4444 -4446 -4450 -4452 -4460 -4462 -4464 -4465 -4466 -4472 -4473 -4475 -4480 -4482 -4484 -4485 -4488 -4494 -4498 -4500 -4503 -4508 -4510 -4512 -4515 -4518 -4520 -4522 -4524 -4525 -4531 -4532 -4536 -4540 -4541 -4550 -4554 -4557 -4560 -4572 -4575 -4576 -4577 -4578 -4579 -4580 -4584 -4590 -4598 -4599 -4600 -4602 -4608 -4617 -4620 -4623 -4625 -4628 -4632 -4636 -4640 -4641 -4642 -4646 -4650 -4654 -4655 -4656 -4660 -4662 -4664 -4669 -4674 -4675 -4680 -4683 -4686 -4692 -4693 -4700 -4704 -4706 -4708 -4712 -4715 -4720 -4725 -4728 -4730 -4731 -4732 -4738 -4740 -4746 -4750 -4752 -4758 -4760 -4761 -4767 -4769 -4774 -4775 -4776 -4780 -4784 -4788 -4796 -4800 -4807 -4809 -4810 -4818 -4820 -4824 -4825 -4826 -4830 -4836 -4840 -4845 -4848 -4850 -4851 -4853 -4860 -4862 -4864 -4872 -4875 -4876 -4880 -4884 -4888 -4893 -4896 -4899 -4900 -4906 -4914 -4920 -4922 -4925 -4928 -4935 -4940 -4944 -4945 -4950 -4956 -4960 -4966 -4968 -4972 -4975 -4977 -4980 -4991 -4992 -4994 -4998 -5000 -5014 -5016 -5018 -5019 -5020 -5025 -5037 -5038 -5040 -5044 -5050 -5060 -5061 -5064 -5070 -5075 -5080 -5082 -5083 -5088 -5096 -5100 -5103 -5104 -5106 -5112 -5120 -5122 -5124 -5125 -5126 -5129 -5136 -5145 -5148 -5150 -5152 -5160 -5166 -5170 -5174 -5175 -5184 -5187 -5192 -5198 -5200 -5208 -5214 -5221 -5225 -5226 -5229 -5232 -5236 -5244 -5250 -5252 -5256 -5258 -5267 -5271 -5275 -5278 -5280 -5290 -5292 -5300 -5302 -5304 -5313 -5324 -5325 -5328 -5330 -5334 -5336 -5346 -5350 -5352 -5355 -5356 -5359 -5368 -5375 -5376 -5382 -5390 -5400 -5405 -5408 -5412 -5424 -5425 -5428 -5434 -5448 -5450 -5451 -5456 -5460 -5472 -5474 -5475 -5478 -5486 -5496 -5497 -5500 
-5512 -5520 -5522 -5525 -5538 -5543 -5544 -5550 -5564 -5566 -5568 -5575 -5588 -5589 -5590 -5592 -5600 -5610 -5612 -5616 -5625 -5632 -5635 -5640 -5642 -5650 -5658 -5664 -5668 -5675 -5681 -5688 -5694 -5700 -5704 -5712 -5720 -5725 -5727 -5736 -5746 -5750 -5760 -5772 -5773 -5775 -5784 -5796 -5798 -5800 -5808 -5819 -5824 -5825 -5832 -5842 -5850 -5856 -5865 -5875 -5876 -5880 -5888 -5900 -5902 -5904 -5925 -5928 -5950 -5952 -5954 -5975 -5976 -5980 -6000 -6006 -6024 -6025 -6032 -6048 -6050 -6058 -6072 -6075 -6084 -6096 -6100 -6110 -6120 -6125 -6136 -6144 -6150 -6162 -6175 -6188 -6200 -6214 -6225 -6240 -6250 -6266 -6275 -6292 -6300 -6318 -6325 -6344 -6350 -6370 -6375 -6396 -6400 -6422 -6448 -6474 -6500 -6526 -6552 -6578 -6604 -6630 -6656 diff --git a/tools/load_balancing_tool/code/f05_peList.txt b/tools/load_balancing_tool/code/f05_peList.txt deleted file mode 100644 index 9269987c3b9..00000000000 --- a/tools/load_balancing_tool/code/f05_peList.txt +++ /dev/null @@ -1,1367 +0,0 @@ -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -37 -38 -39 -40 -41 -42 -43 -44 -45 -46 -47 -48 -49 -50 -51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 -64 -65 -66 -67 -68 -69 -70 -71 -72 -73 -74 -75 -76 -77 -78 -79 -80 -81 -82 -83 -84 -85 -86 -87 -88 -89 -90 -91 -92 -93 -94 -95 -96 -97 -98 -99 -100 -101 -102 -103 -104 -105 -106 -107 -108 -109 -110 -111 -112 -113 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -128 -129 -130 -132 -133 -134 -135 -136 -138 -140 -141 -142 -143 -144 -145 -146 -147 -148 -150 -152 -153 -154 -155 -156 -158 -159 -160 -161 -162 -164 -165 -166 -168 -169 -170 -171 -172 -174 -175 -176 -177 -178 -180 -182 -183 -184 -185 -186 -187 -188 -189 -190 -192 -194 -195 -196 -198 -200 -201 -202 -203 -204 -205 -206 -207 -208 -209 -210 -212 -213 -214 -215 -216 -217 -218 -219 -220 -221 -222 -224 -225 -226 -228 -230 -231 -232 -234 -235 -236 -237 -238 -240 -242 -243 -244 -245 -246 -247 
-248 -249 -250 -252 -253 -255 -256 -258 -259 -260 -261 -264 -265 -266 -267 -268 -270 -272 -273 -275 -276 -279 -280 -282 -284 -285 -286 -287 -288 -289 -290 -291 -292 -294 -295 -296 -297 -299 -300 -301 -303 -304 -305 -306 -308 -309 -310 -312 -315 -316 -318 -319 -320 -321 -322 -323 -324 -325 -327 -328 -329 -330 -332 -333 -335 -336 -338 -339 -340 -341 -342 -343 -344 -345 -348 -350 -351 -352 -354 -355 -356 -357 -360 -361 -363 -364 -365 -366 -368 -369 -370 -371 -372 -374 -375 -376 -377 -378 -380 -384 -385 -387 -388 -390 -391 -392 -395 -396 -399 -400 -402 -403 -404 -405 -406 -407 -408 -410 -412 -413 -414 -415 -416 -418 -420 -423 -424 -425 -426 -427 -428 -429 -430 -432 -434 -435 -436 -437 -438 -440 -441 -442 -444 -445 -448 -450 -451 -452 -455 -456 -459 -460 -462 -464 -465 -468 -469 -470 -472 -473 -474 -475 -476 -477 -480 -481 -483 -484 -485 -486 -488 -490 -492 -493 -494 -495 -496 -497 -498 -500 -504 -505 -506 -507 -510 -511 -512 -513 -515 -516 -517 -518 -520 -522 -525 -527 -528 -529 -530 -531 -532 -533 -534 -535 -536 -539 -540 -544 -545 -546 -549 -550 -551 -552 -553 -555 -558 -559 -560 -561 -564 -565 -567 -568 -570 -572 -574 -575 -576 -578 -580 -581 -582 -583 -584 -585 -588 -589 -590 -592 -594 -595 -598 -600 -602 -603 -605 -606 -608 -609 -610 -611 -612 -615 -616 -618 -620 -621 -623 -624 -625 -627 -629 -630 -632 -636 -637 -638 -639 -640 -642 -644 -645 -646 -648 -649 -650 -651 -654 -656 -657 -658 -660 -663 -664 -665 -666 -667 -670 -671 -672 -675 -676 -678 -679 -680 -682 -684 -686 -688 -689 -690 -693 -696 -697 -700 -702 -703 -704 -705 -707 -708 -710 -711 -712 -713 -714 -715 -720 -721 -722 -725 -726 -728 -729 -730 -731 -732 -735 -736 -737 -738 -740 -741 -742 -744 -747 -748 -749 -750 -752 -754 -756 -759 -760 -763 -765 -767 -768 -770 -774 -775 -776 -777 -779 -780 -781 -782 -783 -784 -790 -791 -792 -793 -795 -798 -799 -800 -801 -803 -804 -805 -806 -808 -810 -812 -814 -816 -817 -819 -820 -824 -825 -826 -828 -830 -832 -833 -836 -837 -840 -845 -846 -847 -848 -850 -851 -852 -854 -855 
-856 -858 -860 -861 -864 -867 -868 -869 -870 -871 -872 -873 -874 -875 -876 -880 -882 -884 -885 -888 -890 -891 -893 -896 -897 -900 -901 -902 -903 -904 -909 -910 -912 -913 -915 -918 -920 -923 -924 -925 -927 -928 -930 -931 -935 -936 -938 -940 -943 -944 -945 -946 -948 -949 -950 -952 -954 -957 -960 -962 -963 -966 -968 -969 -970 -972 -975 -976 -979 -980 -981 -984 -986 -987 -988 -989 -990 -992 -994 -996 -999 -1000 -1001 -1003 -1005 -1007 -1008 -1010 -1012 -1014 -1017 -1020 -1022 -1023 -1024 -1025 -1026 -1027 -1029 -1030 -1032 -1034 -1035 -1036 -1037 -1040 -1044 -1045 -1050 -1053 -1054 -1056 -1058 -1060 -1062 -1064 -1065 -1066 -1067 -1068 -1070 -1071 -1072 -1075 -1078 -1079 -1080 -1081 -1083 -1088 -1089 -1090 -1092 -1095 -1098 -1100 -1102 -1104 -1105 -1106 -1110 -1111 -1113 -1116 -1118 -1120 -1121 -1122 -1125 -1127 -1128 -1130 -1131 -1133 -1134 -1136 -1139 -1140 -1144 -1148 -1150 -1152 -1155 -1156 -1157 -1159 -1160 -1162 -1164 -1166 -1168 -1170 -1173 -1175 -1176 -1177 -1178 -1180 -1183 -1184 -1185 -1188 -1190 -1196 -1197 -1199 -1200 -1204 -1206 -1207 -1209 -1210 -1212 -1215 -1216 -1218 -1219 -1220 -1221 -1222 -1224 -1225 -1230 -1232 -1235 -1236 -1239 -1240 -1241 -1242 -1243 -1245 -1246 -1248 -1250 -1254 -1258 -1260 -1261 -1264 -1265 -1272 -1273 -1274 -1275 -1276 -1278 -1280 -1281 -1284 -1287 -1288 -1290 -1292 -1296 -1298 -1300 -1302 -1305 -1308 -1309 -1311 -1312 -1313 -1314 -1316 -1320 -1323 -1325 -1326 -1328 -1330 -1332 -1334 -1335 -1339 -1340 -1342 -1343 -1344 -1349 -1350 -1352 -1356 -1357 -1358 -1360 -1364 -1365 -1368 -1372 -1375 -1376 -1377 -1378 -1380 -1386 -1387 -1391 -1392 -1394 -1395 -1400 -1403 -1404 -1406 -1407 -1408 -1410 -1411 -1414 -1416 -1417 -1420 -1422 -1424 -1425 -1426 -1428 -1430 -1440 -1442 -1443 -1444 -1445 -1449 -1450 -1452 -1455 -1456 -1458 -1460 -1462 -1463 -1464 -1469 -1470 -1472 -1474 -1475 -1476 -1479 -1480 -1482 -1484 -1485 -1488 -1491 -1494 -1495 -1496 -1498 -1500 -1501 -1504 -1508 -1512 -1513 -1515 -1518 -1520 -1521 -1525 -1526 -1530 -1533 
-1534 -1536 -1539 -1540 -1541 -1545 -1547 -1548 -1550 -1552 -1554 -1558 -1560 -1562 -1564 -1566 -1568 -1575 -1577 -1580 -1581 -1582 -1584 -1586 -1587 -1590 -1596 -1598 -1600 -1602 -1605 -1606 -1608 -1610 -1612 -1615 -1616 -1617 -1620 -1624 -1625 -1628 -1632 -1633 -1634 -1635 -1638 -1640 -1648 -1649 -1650 -1652 -1653 -1656 -1659 -1660 -1664 -1665 -1666 -1672 -1674 -1675 -1679 -1680 -1683 -1690 -1691 -1692 -1694 -1695 -1696 -1700 -1701 -1702 -1704 -1710 -1712 -1716 -1717 -1720 -1722 -1725 -1728 -1729 -1734 -1738 -1740 -1742 -1743 -1744 -1746 -1748 -1750 -1751 -1752 -1755 -1760 -1764 -1767 -1768 -1770 -1771 -1775 -1776 -1780 -1782 -1785 -1786 -1792 -1794 -1800 -1802 -1804 -1805 -1806 -1808 -1817 -1818 -1819 -1820 -1824 -1825 -1826 -1827 -1836 -1840 -1843 -1846 -1848 -1850 -1853 -1854 -1856 -1860 -1862 -1863 -1869 -1870 -1872 -1875 -1880 -1881 -1886 -1887 -1888 -1890 -1892 -1896 -1898 -1900 -1904 -1908 -1909 -1911 -1914 -1919 -1920 -1921 -1924 -1925 -1926 -1932 -1936 -1938 -1940 -1944 -1950 -1953 -1955 -1957 -1958 -1960 -1962 -1968 -1972 -1974 -1975 -1976 -1978 -1980 -1989 -1992 -1995 -1998 -2000 -2001 -2002 -2006 -2014 -2016 -2020 -2023 -2024 -2025 -2028 -2033 -2034 -2037 -2040 -2046 -2047 -2050 -2052 -2054 -2058 -2060 -2064 -2068 -2070 -2071 -2075 -2079 -2080 -2088 -2090 -2093 -2100 -2106 -2109 -2112 -2116 -2120 -2121 -2124 -2125 -2128 -2132 -2134 -2136 -2139 -2140 -2142 -2147 -2150 -2156 -2158 -2160 -2162 -2163 -2166 -2175 -2178 -2180 -2184 -2185 -2200 -2204 -2205 -2208 -2210 -2220 -2222 -2223 -2225 -2226 -2231 -2232 -2236 -2240 -2242 -2244 -2247 -2250 -2254 -2256 -2260 -2261 -2262 -2266 -2268 -2275 -2277 -2280 -2288 -2289 -2300 -2304 -2310 -2314 -2320 -2323 -2325 -2328 -2331 -2332 -2340 -2346 -2350 -2352 -2354 -2360 -2366 -2369 -2373 -2375 -2376 -2380 -2392 -2394 -2398 -2400 -2415 -2418 -2420 -2424 -2425 -2436 -2438 -2442 -2444 -2448 -2450 -2457 -2461 -2464 -2470 -2472 -2475 -2478 -2484 -2486 -2496 -2499 -2500 -2507 -2508 -2520 -2522 -2525 -2530 -2544 -2548 -2550 
-2552 -2553 -2568 -2574 -2575 -2576 -2592 -2596 -2599 -2600 -2616 -2618 -2622 -2625 -2626 -2640 -2645 -2650 -2652 -2664 -2668 -2675 -2678 -2688 -2691 -2700 -2704 -2712 -2714 -2725 -2730 -2736 -2737 -2750 -2756 -2760 -2775 -2782 -2784 -2800 -2808 -2825 -2832 -2834 -2850 -2856 -2860 -2875 -2880 -2886 -2900 -2912 -2925 -2938 -2950 -2964 -2975 -2990 -3000 -3016 -3042 -3068 -3094 -3120 diff --git a/tools/load_balancing_tool/code/f09_peList.txt b/tools/load_balancing_tool/code/f09_peList.txt deleted file mode 100644 index ba773f02cd4..00000000000 --- a/tools/load_balancing_tool/code/f09_peList.txt +++ /dev/null @@ -1,710 +0,0 @@ -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -37 -38 -39 -40 -41 -42 -43 -44 -45 -46 -47 -48 -49 -50 -51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 -64 -65 -66 -68 -69 -70 -72 -74 -75 -76 -77 -78 -80 -81 -82 -84 -85 -86 -87 -88 -90 -91 -92 -93 -94 -95 -96 -98 -99 -100 -102 -104 -105 -106 -108 -110 -111 -112 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -128 -129 -130 -132 -133 -135 -136 -138 -140 -141 -143 -144 -145 -147 -148 -150 -152 -153 -154 -155 -156 -159 -160 -161 -162 -164 -165 -168 -169 -170 -171 -172 -174 -175 -176 -177 -180 -182 -183 -184 -185 -186 -187 -188 -189 -190 -192 -195 -196 -198 -200 -203 -204 -205 -207 -208 -209 -210 -212 -215 -216 -217 -220 -221 -222 -224 -225 -228 -230 -231 -232 -234 -235 -236 -238 -240 -242 -243 -244 -245 -246 -247 -248 -250 -252 -253 -255 -256 -258 -259 -260 -261 -264 -265 -266 -270 -272 -273 -275 -276 -279 -280 -282 -285 -286 -287 -288 -289 -290 -294 -295 -296 -297 -299 -300 -301 -304 -305 -306 -308 -310 -312 -315 -318 -319 -320 -322 -323 -324 -325 -328 -329 -330 -333 -336 -338 -340 -341 -342 -343 -344 -345 -348 -350 -351 -352 -354 -357 -360 -361 -363 -364 -366 -368 -369 -370 -371 -372 -374 -375 -376 -377 -378 -380 -384 -385 -387 -390 -391 -392 -396 -399 -400 -403 -405 -406 -407 -408 -410 -413 
-414 -416 -418 -420 -423 -424 -425 -427 -429 -430 -432 -434 -435 -437 -440 -441 -442 -444 -448 -450 -451 -455 -456 -459 -460 -462 -464 -465 -468 -470 -472 -473 -475 -476 -477 -480 -481 -483 -484 -486 -488 -490 -492 -493 -494 -495 -496 -500 -504 -506 -507 -510 -512 -513 -516 -517 -518 -520 -522 -525 -527 -528 -529 -530 -531 -532 -533 -539 -540 -544 -546 -549 -550 -551 -552 -555 -558 -559 -560 -561 -564 -567 -570 -572 -574 -575 -576 -578 -580 -583 -585 -588 -589 -590 -592 -594 -595 -598 -600 -602 -605 -608 -609 -610 -611 -612 -615 -616 -620 -621 -624 -625 -627 -629 -630 -636 -637 -638 -640 -644 -645 -646 -648 -649 -650 -651 -656 -658 -660 -663 -665 -666 -667 -671 -672 -675 -676 -680 -682 -684 -686 -688 -689 -690 -693 -696 -697 -700 -702 -703 -704 -705 -708 -713 -714 -715 -720 -722 -725 -726 -728 -731 -732 -735 -736 -738 -740 -741 -742 -744 -748 -750 -752 -754 -756 -759 -760 -765 -767 -768 -770 -774 -775 -777 -779 -780 -782 -784 -792 -793 -795 -798 -799 -800 -805 -806 -810 -812 -814 -816 -817 -819 -820 -825 -826 -828 -832 -833 -836 -840 -846 -848 -850 -851 -854 -855 -858 -860 -861 -864 -867 -868 -870 -874 -875 -880 -882 -884 -885 -888 -893 -896 -897 -900 -901 -902 -903 -910 -912 -915 -918 -920 -924 -925 -928 -930 -931 -935 -936 -940 -943 -944 -945 -946 -950 -952 -954 -960 -962 -966 -968 -969 -972 -975 -976 -980 -984 -986 -987 -988 -989 -990 -992 -1000 -1003 -1007 -1008 -1012 -1014 -1020 -1024 -1025 -1026 -1029 -1032 -1034 -1035 -1037 -1040 -1044 -1045 -1050 -1054 -1056 -1058 -1060 -1062 -1064 -1066 -1071 -1075 -1078 -1080 -1081 -1083 -1088 -1092 -1098 -1100 -1102 -1104 -1113 -1116 -1118 -1120 -1121 -1122 -1125 -1127 -1128 -1134 -1140 -1144 -1150 -1152 -1155 -1159 -1160 -1166 -1170 -1173 -1175 -1176 -1178 -1180 -1188 -1196 -1197 -1200 -1210 -1216 -1218 -1219 -1220 -1222 -1224 -1225 -1232 -1239 -1240 -1242 -1248 -1250 -1254 -1260 -1265 -1272 -1274 -1275 -1276 -1280 -1281 -1288 -1296 -1298 -1300 -1302 -1311 -1320 -1323 -1325 -1326 -1334 -1342 -1344 -1350 -1352 -1357 
-1364 -1368 -1375 -1378 -1380 -1386 -1392 -1400 -1403 -1404 -1408 -1416 -1425 -1426 -1430 -1440 -1449 -1450 -1456 -1464 -1472 -1475 -1482 -1488 -1500 -1508 -1512 -1525 -1534 -1536 -1550 -1560 -1575 -1586 -1600 -1612 -1638 -1664 diff --git a/tools/load_balancing_tool/code/f19_peList.txt b/tools/load_balancing_tool/code/f19_peList.txt deleted file mode 100644 index 9003645849d..00000000000 --- a/tools/load_balancing_tool/code/f19_peList.txt +++ /dev/null @@ -1,333 +0,0 @@ -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -38 -39 -40 -42 -44 -45 -46 -48 -49 -50 -51 -52 -54 -55 -56 -57 -58 -60 -62 -63 -64 -65 -66 -68 -69 -70 -72 -75 -76 -77 -78 -80 -81 -84 -85 -87 -88 -90 -91 -92 -93 -95 -96 -98 -99 -100 -102 -104 -105 -108 -110 -112 -114 -115 -116 -117 -119 -120 -121 -124 -125 -126 -128 -130 -132 -133 -135 -136 -138 -140 -143 -144 -145 -147 -150 -152 -153 -154 -155 -156 -160 -161 -162 -165 -168 -169 -170 -171 -174 -175 -176 -180 -182 -184 -186 -187 -189 -190 -192 -195 -196 -198 -200 -203 -204 -207 -208 -209 -210 -216 -217 -220 -221 -224 -225 -228 -230 -231 -232 -234 -238 -240 -242 -243 -247 -248 -250 -252 -253 -255 -256 -260 -261 -264 -266 -270 -272 -273 -275 -276 -279 -280 -285 -286 -288 -289 -290 -294 -297 -299 -300 -304 -306 -308 -310 -312 -315 -319 -320 -322 -323 -324 -325 -330 -336 -338 -340 -341 -342 -345 -348 -350 -351 -352 -357 -360 -361 -364 -368 -372 -374 -375 -377 -378 -380 -384 -390 -391 -392 -396 -399 -400 -403 -405 -406 -408 -414 -416 -418 -420 -425 -432 -434 -435 -437 -440 -441 -442 -448 -450 -456 -459 -460 -462 -464 -465 -468 -475 -476 -480 -483 -484 -486 -493 -494 -496 -500 -504 -506 -510 -512 -513 -520 -522 -525 -527 -528 -529 -532 -540 -544 -546 -550 -551 -552 -558 -560 -567 -570 -572 -575 -576 -580 -588 -589 -594 -598 -600 -608 -609 -616 -620 -621 -624 -625 -630 -638 -640 -644 -648 -650 -651 -660 -667 -672 -675 -676 -682 -690 -696 -700 -702 -704 -713 -720 -725 -728 
-736 -744 -750 -754 -768 -775 -780 -800 -806 -832 diff --git a/tools/load_balancing_tool/code/fv_model.mod b/tools/load_balancing_tool/code/fv_model.mod deleted file mode 100644 index ec056e7c203..00000000000 --- a/tools/load_balancing_tool/code/fv_model.mod +++ /dev/null @@ -1,92 +0,0 @@ -# math model to find optimal allocation of cores for CESM components - -param D >= 1, integer; # number of data points -param CPUS >= 1, integer; # number of nodes -param CPN >=1, integer; # number of cores per node -param Tsync >= 0.0; # time to sync ice and lnd in seconds -param Nlat >= 1, integer; # max lat -param Nlev >= 1, integer; # max lev -param Etarget >= 0.0; # target Efficiency -param MinNodes >=1, integer; # minimum number of nodes -param MaxNodes >=1, integer; # maximum number of nodes -param AtmMaxPart >= 1, integer; # maximum number of Atm partition points - -set M := {'lnd','ice','atm','ocn'}; # set of components -set DD := 1..D; # set of data points - -param rawx{M, DD}; # given by user -param rawy{M, DD}; # given by user - -param x{DD}; # extracted automatically -param y{DD}; # extracted automatically - -param A{M}; # best fit value -param B{M}; # best fit value -param C{M}; # best fit value -param K{M}; # best fit value - -# special ordered set variables -var maxtasks >= 0; -var ntasks >= 0; -var nz >= 0; -var remainder >= 0; -var ny >= 0; -var taskcounter >= 0; -var ntasksrestrict >= 0; - -# ... 
fitting parameters (bounds and initial values) -var a >= 0; -var b >= 0; -var c >= 0; -var k >= 0; -var eta_m1{M} >= 0; -var eta_m2{M} >= 0; -var eta_m3{M} >= 0; -var etaT >= 0; -var etaTi_m1 >= 0; - -var n{M} >= 1, integer; - -# efficiency var -var xcounter >= 1, integer; -var maxx >= 1, integer; -var fmod_xcounter >= 0; -var fmod_MinNodes >= 0; -var fmod_eff >= 0; - -### special ordered sets -### Ocn -set OcnSet := 1..25; -param OcnPart{OcnSet}; -var z2{OcnSet} binary; -subject to SOS2: 1 = sum{i in OcnSet} z2[i]; -subject to DefNocn: n['ocn'] = sum{i in OcnSet} z2[i]*OcnPart[i]; -### Atm -set AtmSet := 1..AtmMaxPart; -param AtmPart{AtmSet}; -var z1_2{AtmSet} binary; - -# ... objective function is the least-squares error -minimize L2Error: sum{i in DD} (y[i] - a/x[i] - b*x[i]^c - k)^2; - -minimize MaxTime: etaT; - - -### max(max(ice,lnd)+atm,ocn) model -# added suffix m1, meaning model 1, to all constraints -# time constraints -subject to -DefEta_m1{i in M}: eta_m1[i] = A[i]/n[i] + B[i]*(n[i]^C[i]) + K[i]; -DefEtaTi1_m1: etaTi_m1 >= eta_m1['ice']; -DefEtaTi2_m1: etaTi_m1 >= eta_m1['lnd']; -DefEta1_m1: etaT >= etaTi_m1 + eta_m1['atm']; -DefEtaT_m1: etaT >= eta_m1['ocn']; - -# add constraint to force eta[ice] = eta[land] -EqualT1_m1: eta_m1['lnd'] >= eta_m1['ice'] - Tsync; -EqualT2_m1: eta_m1['lnd'] <= eta_m1['ice'] + Tsync; - -# constrain number of nodes -TotalNumber_m1: n['atm'] + n['ocn'] <= CPUS; -IceLndNumber_m1: n['ice'] + n['lnd'] <= n['atm']; - diff --git a/tools/load_balancing_tool/code/fv_model.run b/tools/load_balancing_tool/code/fv_model.run deleted file mode 100644 index 88bcfea43fa..00000000000 --- a/tools/load_balancing_tool/code/fv_model.run +++ /dev/null @@ -1,75 +0,0 @@ -### special ordered sets -### Atm -#let ntasksrestrict := CPUS/2; -#let maxtasks := (Nlat/3)*Nlev; -#let ntasks := 0; -#let taskcounter := 0; -#repeat while ntasks<=maxtasks { -# let ntasks := ntasks+1; -# let nz := 0; -# repeat while nz<=Nlev { -# let nz := nz+1; -# # test that 
$nz divides $ntasks -# let remainder := ntasks mod nz; -# if remainder == 0.0 then { -# let ny := ntasks/nz; -# # test that y subdomains contain at least 3 latitudes -# if (ny*3 <= Nlat) and (ntasks > ntasksrestrict) then { -# let taskcounter := taskcounter + 1; -# let AtmPart[taskcounter] := ntasks; -# printf "%s %s\n", taskcounter, ntasks; -# break; -# }; -# }; -# }; -#}; -subject to SOS1_2: 1 = sum {i in AtmSet} z1_2[i]; -subject to DefNatm: n['atm'] = sum {i in AtmSet} z1_2[i]*AtmPart[i]; - -#---------- declare SPECIAL ORDERED SET TYPE 1 to solver minlp for better branching -suffix sosno IN, integer, >= 0, <= 9999; -suffix ref IN, integer, >=0, <= 9999; -let {i in AtmSet} z1_2[i].sosno := 1; # ... identifier for SOS-set number 1 -let {i in AtmSet} z1_2[i].ref := AtmPart[i]; -let {i in OcnSet} z2[i].sosno := 2; # ... identifier for SOS-set number 2 -#let {i in OcnSet} z2[i].ref := OcnPart[i]; -#---------- - -problem fitData: L2Error, a, b, c, k; - -for {i in M} { - printf "Solving for component %s\n", i; - for {j in DD} { - let x[j] := rawx[i,j]; - let y[j] := rawy[i,j]; - printf "x = %d y = %f\n", x[j], y[j]; - }; - let a := 1000; - let b := 1; - let c := .01; - let k := 10; - solve fitData; - let A[i] := a; - let B[i] := b; - let C[i] := c; - let K[i] := k; -}; - -printf "%d\n", CPUS; -printf "##%4s %12s %12s %12s %12s\n", "Comp", "A[f]", "B[f]", "C[f]", "K[f]"; -for {i in M} { - printf "%s %12.6f %12.6f %12.6f %12.6f\n", i, A[i], B[i], C[i], K[i]; -} - -printf "\n Solving for core allocations for max(max(ice,lnd)+atm,ocn) model:\n"; -option solver minlp; -problem ice_lnd_ocn: MaxTime, DefEta_m1, DefEtaT_m1, DefEta1_m1, DefEtaTi1_m1, DefEtaTi2_m1, TotalNumber_m1, n, eta_m1, etaT, etaTi_m1, IceLndNumber_m1, EqualT1_m1, EqualT2_m1, z1_2, SOS1_2, DefNatm; -solve ice_lnd_ocn; -display etaT, etaTi_m1, eta_m1; - -printf "##%4s %8s %10s\n", "Comp", "NTASKS", "seconds/model-day"; -for {i in M} { - printf "%s %8.0f %8.0f %10.3f\n", i, n[i], n[i]*CPN, (A[i]/n[i] + 
B[i]*(n[i]^C[i]) + K[i]); -} -printf "Total esimated total time: %10.3f\n", etaT; - diff --git a/tools/load_balancing_tool/code/fv_second_pass.pl b/tools/load_balancing_tool/code/fv_second_pass.pl deleted file mode 100755 index 7c2cc3154c7..00000000000 --- a/tools/load_balancing_tool/code/fv_second_pass.pl +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env perl - -my $res = $ARGV[0]; -my $target = $ARGV[1]; -my $codeDir = $ARGV[2]; -my $current = $ARGV[3]; -my $rdir = $ARGV[4]; - -my $count_file = $codeDir."/".$res."_peList.txt"; -open my $list_handle, '<', $count_file; -chomp(my @test_list = <$list_handle>); - -$index = 0; -I: foreach my $count (@test_list){ - if ($count >= $target){ - last I; - } else { - $index = $index + 1; - } - } - -if ($test_list[$index] != $target){ - if ($index < 10){ - $lower = 0; - } else { - $lower = $index - 10; - } - $upper = $index + 9; - - my $dataFile = $codeDir."/model.data"; - open(MD, ">>$dataFile"); - - print MD "\n"; - print MD "param AtmPart := \n"; - $counter = 0; - for ($i=$lower; $i<=$upper; ++$i){ - $counter = $counter + 1 if exists $test_list[$i]; - print MD " $counter $test_list[$i] \n" if exists $test_list[$i]; - } - print MD ";\n"; - print MD "\n"; - print MD "param AtmMaxPart := ".$counter.";\n"; - print MD "\n"; - close(MD); - - system("/usr/bin/python $current/code/merge.py $current/code/fv_model.mod $current/code/model.data $current/code/fv_model.run >& $rdir/job.xml \n"); - system("/usr/bin/python $current/code/neos.py $rdir/job.xml >& $rdir/job.out \n"); - system("tail -n 6 $rdir/job.out >& $rdir/minmax_times.txt \n"); -} - diff --git a/tools/load_balancing_tool/code/get_cesm_times.pl b/tools/load_balancing_tool/code/get_cesm_times.pl deleted file mode 100755 index ff966eacabe..00000000000 --- a/tools/load_balancing_tool/code/get_cesm_times.pl +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/env perl -use strict; - -my $timingDir = $ARGV[0]; - -opendir(D,$timingDir); -my @files = grep /_timing/, readdir(D); -closedir(D); -my 
$component; -open(T,">tp.dat"); -open(C,">cost.dat"); -foreach my $file (@files){ - my $full_fn = $timingDir . $file; - open(F,"$full_fn") or die "could not open $full_fn"; - my $tasks; - my $threads; - my $tp; - my $mc; - my $pes; - foreach(){ - if(/(\w+) = (\w+)\s+\d+\s+\d+\s+(\d+)\s+x\s+(\d+)/){ - my $comp = $1; - $comp =~ tr/a-z/A-Z/; - $tasks->{$comp}=$3; - $threads->{$comp}=1; - } - if(/(\w+) Run Time:\s+(\d+\.\d+) seconds \s+(\d+\.\d+) seconds/){ - my $comp = $1; - next if ($comp eq 'TOT' or $comp eq 'GLC'); - $component->{$comp}{$tasks->{$comp}}{$threads->{$comp}} = $3; - } - if(/ Model Throughput:\s+(\d+\.\d+)/){ - $tp .= $1; - } - if(/Model Cost:\s+(\d+\.\d+)/){ - $mc .= $1; - } - if(/total pes active :\s+(\d+)/){ - $pes .= $1; - } - } - print T "$pes $tp \n" ; - print C "$pes $mc \n" ; - close(F); -} - -close(T); -close(C); - - -foreach my $comp (keys %$component){ - open(F,">$comp.dat"); - print F "Tasks seconds/model-day \n"; - foreach my $tasks (sort numerically keys %{$component->{$comp}}){ - my $nodes = $tasks; - print F "$nodes $component->{$comp}{$tasks}{1} $component->{$comp}{$tasks}{2} \n"; - } - close(F); -} - -sub numerically{ $a <=> $b; } diff --git a/tools/load_balancing_tool/code/load_balance.pl b/tools/load_balancing_tool/code/load_balance.pl deleted file mode 100755 index 2940291e2cd..00000000000 --- a/tools/load_balancing_tool/code/load_balance.pl +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/perl - -$cpus_array = $ARGV[0]; -$rdir = $ARGV[1]; -$current = $ARGV[2]; - -# Copy over the first case's env_mach_pes file to modify -# with new layouts - -#my $case = 't'.$ARGV[3].'1_'.$ARGV[4]; -#my $casebase = $ARGV[5].'_'.$ARGV[6].'_'.$ARGV[7]; -#my $caseDir_1 = $ARGV[8].'/'.$case.'_'.$casebase; - -my $case = $ARGV[3]; -my $casebase = $ARGV[4]; -my $caseDir_1 = $ARGV[5]; -my $fv_constraints = $ARGV[6]; -my $full_res = $ARGV[7]; - -system("cp $caseDir_1/env_mach_pes.xml $rdir \n"); -system("cp $caseDir_1/env_mach_pes.xml . 
\n"); -system("cp $caseDir_1/env_run.xml . \n"); -system("cp $caseDir_1/env_build.xml . \n"); -system("cp $caseDir_1/env_case.xml . \n"); -system("cp $caseDir_1/env_archive.xml . \n"); -system("cp $caseDir_1/xmlchange $rdir \n"); -system("cp -r $caseDir_1/Tools $rdir \n"); - -# Copy over the timing files from the scaling curve runs -my $test_file = "$rdir/test_list.out"; -open my $list_handle, '<', $test_file; -chomp(my @test_list = <$list_handle>); -foreach my $f (@test_list){ - my $file_string = $f."/timing/"; - if (-e $file_string){ - system("cp $f/timing/*_timing.* $rdir \n"); - } -} - -system("perl $current/code/get_cesm_times.pl $rdir \n"); -@cpus = split(/,/, $cpus_array); -foreach my $c (@cpus){ - system("perl $current/code/create_dataFile.pl $rdir $c $current \n"); - system("mv model.data $current/code/ \n"); - system("/usr/bin/python $current/code/merge.py $current/code/model.mod $current/code/model.data $current/code/model.run >& $rdir/job.xml \n"); - system("/usr/bin/python $current/code/neos.py $rdir/job.xml >& $rdir/job.out \n"); - system("tail -n 6 $rdir/job.out >& $rdir/minmax_times.txt \n"); - - # If trying to find a FV layout, run twice to make sure the the ATM pe task count can be decomposed - if ($fv_constraints == 1){ - my $minmax = "$rdir/minmax_times.txt"; - open (F1, $minmax); - my $cpus; - while() { - if (/atm \s+(\d+)/){ - $cpus = $1; - } - } - my $res = substr($full_res,0,3); - my $codeDir = "$current/code/"; - system("perl $current/code/fv_second_pass.pl $res $cpus $codeDir $current $rdir \n"); - } - - my $minmax = "$rdir/minmax_times.txt"; - - open (F, $minmax); - - while() { - - if (/atm \s+(\d+)/){ - my $cpus = $1; - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_ATM -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_ATM -val 0 \n"); - - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_CPL -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_CPL -val 0 \n"); - - 
system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_OCN -val $cpus \n"); - - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_GLC -val 1 \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_GLC -val 0 \n"); - } - - if (/lnd \s+(\d+)/){ - my $cpus = $1; - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_LND -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_LND -val 0 \n"); - - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_ROF -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_ROF -val 0 \n"); - - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_ICE -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id ROOTPE_WAV -val $cpus \n"); - } - - if (/ice \s+(\d+)/){ - my $cpus = $1; - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_ICE -val $cpus \n"); - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_WAV -val $cpus \n"); - } - - if (/ocn \s+(\d+)/){ - my $cpus = $1; - system("$rdir/xmlchange -file env_mach_pes.xml -id NTASKS_OCN -val $cpus \n"); - } - - } - close(F); - system("mv $rdir/minmax_times.txt $rdir/minmax_times_$c.txt"); - system("cp env_mach_pes.xml $rdir/env_mach_pes_$c.xml"); -} - -system("rm -f $current/env_* \n"); -system("gnuplot $current/code/cesm_scaling.gplot \n"); -system("mv *.gif $rdir \n"); -system("mv *.dat $rdir \n"); - - diff --git a/tools/load_balancing_tool/code/merge.py b/tools/load_balancing_tool/code/merge.py deleted file mode 100755 index d4d84c046d2..00000000000 --- a/tools/load_balancing_tool/code/merge.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -# script written by Yuri Alexeev to merge .mod/.dat/.run files in one file with XML headers - -import sys - -fmod = open(sys.argv[1],"r") -fdat = open(sys.argv[2],"r") -frun = open(sys.argv[3],"r") - -print('\n\ -minco\n\ -MINLP\n\ -AMPL\n\ -\n\ -\n\ -\n\ -\n\ -\n\ -\n\ -\n\ -\n\ -\n\ -') - -fmod.close() -fdat.close() -frun.close() diff 
--git a/tools/load_balancing_tool/code/model.data b/tools/load_balancing_tool/code/model.data deleted file mode 100644 index 15765e3ad57..00000000000 --- a/tools/load_balancing_tool/code/model.data +++ /dev/null @@ -1,24 +0,0 @@ -data; - -param D := 5; -param CPUS := 1024; -param CPN := 16; -param Tsync := 3.0; -param Etarget := 0.5; -param MinNodes := 64; -param MaxNodes := 48160; - -param rawx: 1 2 3 4 5 := - 'atm' 32 64 128 256 512 - 'ocn' 32 64 128 256 512 - 'lnd' 32 64 128 256 512 - 'ice' 32 64 160 320 640 -; - -param rawy: 1 2 3 4 5 := - 'atm' 427.471 223.332 119.580 66.182 37.769 - 'ocn' 15.745 7.782 4.383 3.181 2.651 - 'lnd' 4.356 2.191 1.191 0.705 0.560 - 'ice' 8.018 4.921 2.368 1.557 1.429 -; - diff --git a/tools/load_balancing_tool/code/model.mod b/tools/load_balancing_tool/code/model.mod deleted file mode 100644 index f1cc65ebad9..00000000000 --- a/tools/load_balancing_tool/code/model.mod +++ /dev/null @@ -1,91 +0,0 @@ -# math model to find optimal allocation of cores for CESM components - -param D >= 1, integer; # number of data points -param CPUS >= 1, integer; # number of nodes -param CPN >=1, integer; # number of cores per node -param Tsync >= 0.0; # time to sync ice and lnd in seconds -param Nlat >= 1, integer; # max lat -param Nlev >= 1, integer; # max lev -param Etarget >= 0.0; # target Efficiency -param MinNodes >=1, integer; # minimum number of nodes -param MaxNodes >=1, integer; # maximum number of nodes - -set M := {'lnd','ice','atm','ocn'}; # set of components -set DD := 1..D; # set of data points - -param rawx{M, DD}; # given by user -param rawy{M, DD}; # given by user - -param x{DD}; # extracted automatically -param y{DD}; # extracted automatically - -param A{M}; # best fit value -param B{M}; # best fit value -param C{M}; # best fit value -param K{M}; # best fit value - -# special ordered set variables -var maxtasks >= 0; -var ntasks >= 0; -var nz >= 0; -var remainder >= 0; -var ny >= 0; -var taskcounter >= 0; -var ntasksrestrict >= 0; - 
-# ... fitting parameters (bounds and initial values) -var a >= 0; -var b >= 0; -var c >= 0; -var k >= 0; -var eta_m1{M} >= 0; -var eta_m2{M} >= 0; -var eta_m3{M} >= 0; -var etaT >= 0; -var etaTi_m1 >= 0; - -var n{M} >= 1, integer; - -# efficiency var -var xcounter >= 1, integer; -var maxx >= 1, integer; -var fmod_xcounter >= 0; -var fmod_MinNodes >= 0; -var fmod_eff >= 0; - -### special ordered sets -### Ocn -set OcnSet := 1..25; -param OcnPart{OcnSet}; -var z2{OcnSet} binary; -subject to SOS2: 1 = sum{i in OcnSet} z2[i]; -subject to DefNocn: n['ocn'] = sum{i in OcnSet} z2[i]*OcnPart[i]; -### Atm -set AtmSet := 1..149; -param AtmPart{AtmSet}; -var z1_2{AtmSet} binary; - -# ... objective function is the least-squares error -minimize L2Error: sum{i in DD} (y[i] - a/x[i] - b*x[i]^c - k)^2; - -minimize MaxTime: etaT; - - -### max(max(ice,lnd)+atm,ocn) model -# added suffix m1, meaning model 1, to all constraints -# time constraints -subject to -DefEta_m1{i in M}: eta_m1[i] = A[i]/n[i] + B[i]*(n[i]^C[i]) + K[i]; -DefEtaTi1_m1: etaTi_m1 >= eta_m1['ice']; -DefEtaTi2_m1: etaTi_m1 >= eta_m1['lnd']; -DefEta1_m1: etaT >= etaTi_m1 + eta_m1['atm']; -DefEtaT_m1: etaT >= eta_m1['ocn']; - -# add constraint to force eta[ice] = eta[land] -EqualT1_m1: eta_m1['lnd'] >= eta_m1['ice'] - Tsync; -EqualT2_m1: eta_m1['lnd'] <= eta_m1['ice'] + Tsync; - -# constrain number of nodes -TotalNumber_m1: n['atm'] + n['ocn'] <= CPUS; -IceLndNumber_m1: n['ice'] + n['lnd'] <= n['atm']; - diff --git a/tools/load_balancing_tool/code/model.run b/tools/load_balancing_tool/code/model.run deleted file mode 100644 index b58c219d798..00000000000 --- a/tools/load_balancing_tool/code/model.run +++ /dev/null @@ -1,76 +0,0 @@ -#### special ordered sets -#### Atm -#let ntasksrestrict := CPUS/2; -#let maxtasks := (Nlat/3)*Nlev; -#let ntasks := 0; -#let taskcounter := 0; -#repeat while ntasks<=maxtasks { -# let ntasks := ntasks+1; -# let nz := 0; -# repeat while nz<=Nlev { -# let nz := nz+1; -# # test that $nz 
divides $ntasks -# let remainder := ntasks mod nz; -# if remainder == 0.0 then { -# let ny := ntasks/nz; -# # test that y subdomains contain at least 3 latitudes -# if (ny*3 <= Nlat) and (ntasks > ntasksrestrict) then { -# let taskcounter := taskcounter + 1; -# let AtmPart[taskcounter] := ntasks; -## printf "%s %s\n", taskcounter, ntasks; -# break; -# }; -# }; -# }; -#}; -#subject to SOS1_2: 1 = sum {i in AtmSet} z1_2[i]; -#subject to DefNatm: n['atm'] = sum {i in AtmSet} z1_2[i]*AtmPart[i]; -# -##---------- declare SPECIAL ORDERED SET TYPE 1 to solver minlp for better branching -#suffix sosno IN, integer, >= 0, <= 9999; -#suffix ref IN, integer, >=0, <= 9999; -#let {i in AtmSet} z1_2[i].sosno := 1; # ... identifier for SOS-set number 1 -#let {i in AtmSet} z1_2[i].ref := AtmPart[i]; -#let {i in OcnSet} z2[i].sosno := 2; # ... identifier for SOS-set number 2 -#let {i in OcnSet} z2[i].ref := OcnPart[i]; -##---------- - -problem fitData: L2Error, a, b, c, k; - -for {i in M} { - printf "Solving for component %s\n", i; - for {j in DD} { - let x[j] := rawx[i,j]; - let y[j] := rawy[i,j]; - printf "x = %d y = %f\n", x[j], y[j]; - }; - let a := 1000; - let b := 1; - let c := .01; - let k := 10; - solve fitData; - let A[i] := a; - let B[i] := b; - let C[i] := c; - let K[i] := k; -}; - -printf "%d\n", CPUS; -printf "##%4s %12s %12s %12s %12s\n", "Comp", "A[f]", "B[f]", "C[f]", "K[f]"; -for {i in M} { - printf "%s %12.6f %12.6f %12.6f %12.6f\n", i, A[i], B[i], C[i], K[i]; -} - -printf "\n Solving for core allocations for max(max(ice,lnd)+atm,ocn) model:\n"; -option solver minlp; -#problem ice_lnd_ocn: MaxTime, DefEta_m1, DefEtaT_m1, DefEta1_m1, DefEtaTi1_m1, DefEtaTi2_m1, TotalNumber_m1, n, eta_m1, etaT, etaTi_m1, z2, SOS2, DefNocn, IceLndNumber_m1, EqualT1_m1, EqualT2_m1, z1_2, SOS1_2, DefNatm; -problem ice_lnd_ocn: MaxTime, DefEta_m1, DefEtaT_m1, DefEta1_m1, DefEtaTi1_m1, DefEtaTi2_m1, TotalNumber_m1, n, eta_m1, etaT, etaTi_m1, IceLndNumber_m1, EqualT1_m1, EqualT2_m1; -solve 
ice_lnd_ocn; -display etaT, etaTi_m1, eta_m1; - -printf "##%4s %8s %10s\n", "Comp", "NTASKS", "seconds/model-day"; -for {i in M} { - printf "%s %8.0f %10.3f\n", i, n[i], (A[i]/n[i] + B[i]*(n[i]^C[i]) + K[i]); -} -printf "Total esimated total time: %10.3f\n", etaT; - diff --git a/tools/load_balancing_tool/code/neos.py b/tools/load_balancing_tool/code/neos.py deleted file mode 100755 index 65b7d0bcb92..00000000000 --- a/tools/load_balancing_tool/code/neos.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -######################################### - -# NeosClient.py -######################################### -import sys -import xmlrpclib -import time - -NEOS_HOST="neos-server.org" -NEOS_PORT=3332 - -if len(sys.argv) != 2: - sys.stderr.write("Usage: NeosClient ") - sys.exit(1) - -neos=xmlrpclib.Server("http://{}:{:d}".format(NEOS_HOST, NEOS_PORT)) - -if sys.argv[1] == "queue": - #Print NEOS job queue - msg = neos.printQueue() - sys.stdout.write(msg) -else: - #Read XML file - xmlfile = open(sys.argv[1],"r") - xml="" - buffer=1 - - while buffer: - buffer = xmlfile.read() - xml+= buffer - xmlfile.close() - - (jobNumber,password) = neos.submitJob(xml) - sys.stdout.write("JobNumber = {:d} ".format(jobNumber)) - - offset=0 - - status="" - #Print out partial job output while job is running - while status != "Done": - (msg,offset) = neos.getIntermediateResults(jobNumber,password,offset) - sys.stdout.write(msg.data) - status = neos.getJobStatus(jobNumber, password) - - #Print out the final result - msg = neos.getFinalResults(jobNumber, password).data - - sys.stdout.write(msg) - diff --git a/tools/load_balancing_tool/global_variables.csh b/tools/load_balancing_tool/global_variables.csh deleted file mode 100755 index 8daa22441e9..00000000000 --- a/tools/load_balancing_tool/global_variables.csh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/csh -f - -#################################################### -# Set case variables -#################################################### 
-setenv cesmsrc /global/u1/m/mickelso/cesm1_3_alpha04b/ -setenv res ne30_g16 -setenv compset B1850C5 -setenv mach edison_intel -setenv casedir $SCRATCH/tests/timing_tests/B1850C5.ne30_g16/ -setenv casestr _B1850C5_ne30_g16__ -setenv run_len 10 - -# Select either FV or SE below -#setenv DYCORE FV -setenv DYCORE SE - -#################################################### -# Set the location of the load balancing results -#################################################### -setenv results_dir /global/u1/m/mickelso/neos-version-example/results/ - -#################################################### -# Set the test layouts to produce the scaling curves -#################################################### -setenv NTHRDS_VAL 1 - -# Set the Task Counts -setenv TASK_ATM "128,256,512,1024,2048" -setenv TASK_LND "128,256,512,1024,2048" -setenv TASK_ROF "128,256,512,1024,2048" -setenv TASK_ICE "128,256,512,1024,2048" -setenv TASK_OCN "128,256,512,1024,2048" -setenv TASK_CPL "128,256,512,1024,2048" -setenv TASK_WAV "128,256,512,1024,2048" -setenv TASK_GLC "1,1,1,1,1" - -# Set Root Locations -setenv ROOT_ATM "0,0,0,0,0" -setenv ROOT_LND "0,0,0,0,0" -setenv ROOT_ROF "0,0,0,0,0" -setenv ROOT_ICE "0,0,0,0,0" -setenv ROOT_OCN "0,0,0,0,0" -setenv ROOT_CPL "0,0,0,0,0" -setenv ROOT_WAV "0,0,0,0,0" -setenv ROOT_GLC "0,0,0,0,0" - -#################################################### -# Set the target task counts (ATM(LND+ICE) + OCN) -#################################################### -setenv TARGET_TASKS "256,512,1024" - diff --git a/tools/load_balancing_tool/layouts.py b/tools/load_balancing_tool/layouts.py new file mode 100644 index 00000000000..6e53b6a20bb --- /dev/null +++ b/tools/load_balancing_tool/layouts.py @@ -0,0 +1,325 @@ +import optimize_model +import pulp + +class IceLndAtmOcn(optimize_model.OptimizeModel): + """ + Optimized the problem based on the Layout + ____________________ + | ICE | LND | | + |______|_______| | + | | OCN | + | ATM | | + |______________|_____| + + 
Min T + s.t. T[ice] <= T1 + T[lnd] <= T1 + T1 + T[atm] <= T + T[ocn] <= T + + NB[c] >= 1 for c in [ice,lnd,ocn,atm] + NB[ice] + NB[lnd] <= NB[atm] + atm_blocksize*NB[atm] + ocn_blocksize*NB[ocn] <= TotalTasks + (NB[*] is number of processor blocks) + + T[c] >= C[c]_{i} - NB[c]_{i} * + (C[c]_{i+1} - C[c]_{i}) / (NB[c]_{i+1} - NB[c]_{i}) + + NB[c] * (C[c]_{i+1} - C[c]_{i}) + / (NB[c]_{i+1} - NB[c]_{i}), + i=1..ord(NB), c in [ice,lnd,ocn,atm] + + These assumptions are checked when solver is initialized + . Assuming cost is monotonic decreasing vs ntasks + . Assuming perfect scalability for ntasks < tasks[0] + . Assuming same scalability factor for ntasks > ntasks[last] as for + last two data points + + Returns state (STATE_SOLVED_OK, STATE_SOLVED_BAD, STATE_UNSOLVED) + If solved, then solution will be stored in self.X dictionary, indexed + by variable name. Suggested convention: + 'Tice', 'Tlnd', ... for cost per component + 'Nice', 'Nlnd', ... for ntasks per component + 'NBice', 'NBlnd', ... for number of blocks per component + """ + + def get_required_components(self): + return ['LND', 'ICE', 'ATM', 'OCN'] + + def optimize(self): + """ + Run the optimization. + set solution in self.X + set state STATE_SOLVED_OK if solved, + otherwise STATE_SOLVED_BAD + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before optimize()!" 
+ self.atm = self.models['ATM'] + self.lnd = self.models['LND'] + self.ice = self.models['ICE'] + self.ocn = self.models['OCN'] + self.real_variables = ['TotalTime', 'T1', 'Tice', 'Tlnd', 'Tatm', + 'Tocn'] + self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', + 'Nice', 'Nlnd', 'Natm', 'Nocn'] + self.X = {} + X = self.X + self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) + for rv in self.real_variables: + X[rv] = pulp.LpVariable(rv, lowBound=0) + + for iv in self.integer_variables: + X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) + + + # cost function + self.prob += X['TotalTime'] + + #constraints + self.constraints = [] + # Layout-dependent constraints. Choosing another layout to model + # will require editing these constraints + self.constraints.append([X['Tice'] - X['T1'] <= 0, "Tice - T1 == 0"]) + self.constraints.append([X['Tlnd'] - X['T1'] <= 0, "Tlnd - T1 == 0"]) + self.constraints.append([X['T1'] + X['Tatm'] - X['TotalTime'] <= 0, + "T1 + Tatm - TotalTime <= 0"]) + self.constraints.append([X['Tocn'] - X['TotalTime'] <= 0, + "Tocn - TotalTime == 0"]) + self.constraints.append([X['Nice'] + X['Nlnd'] - X['Natm'] == 0, + "Nice + Nlnd - Natm == 0"]) + self.constraints.append([X['Natm'] + X['Nocn'] == self.maxtasks, + "Natm + Nocn <= %d" % (self.maxtasks)]) + self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, + "Natm = %d * NBatm" % self.atm.blocksize]) + self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, + "Nice = %d * NBice" % self.ice.blocksize]) + self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, + "Nlnd = %d * NBlnd" % self.lnd.blocksize]) + self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize]) + + # These are the constraints based on the timing data. + # They should be the same no matter what the layout of the components. 
+ self.add_model_constraints() + + for c, s in self.constraints: + self.prob += c, s + + # Write the program to file and solve (using coin-cbc) + self.prob.writeLP("IceLndAtmOcn_model.lp") + self.prob.solve() + self.set_state(self.prob.status) + return self.state + + def get_solution(self): + """ + Return a dictionary of the solution variables. + """ + assert self.state == self.STATE_SOLVED_OK,\ + "solver failed, no solution available" + return {'NBLOCKS_ICE':self.X['NBice'].varValue, + 'NBLOCKS_LND':self.X['NBlnd'].varValue, + 'NBLOCKS_ATM':self.X['NBatm'].varValue, + 'NBLOCKS_OCN':self.X['NBocn'].varValue, + 'NTASKS_ICE':self.X['Nice'].varValue, + 'NTASKS_LND':self.X['Nlnd'].varValue, + 'NTASKS_ATM':self.X['Natm'].varValue, + 'NTASKS_OCN':self.X['Nocn'].varValue, + 'NTASKS_TOTAL':self.maxtasks, + 'COST_ICE':self.X['Tice'].varValue, + 'COST_LND':self.X['Tlnd'].varValue, + 'COST_ATM':self.X['Tatm'].varValue, + 'COST_OCN':self.X['Tocn'].varValue, + 'COST_TOTAL':self.X['TotalTime'].varValue} + + def write_pe_file(self, pefilename): + """ + Write out a pe_file that can be used to implement the + optimized layout + """ + assert self.state == self.STATE_SOLVED_OK,\ + "solver failed, no solution available" + natm = int(self.X['Natm'].varValue) + nlnd = int(self.X['Nlnd'].varValue) + nice = int(self.X['Nice'].varValue) + nocn = int(self.X['Nocn'].varValue) + ntasks = {'atm':natm, 'lnd':nlnd, 'rof':1, 'ice':nice, + 'ocn':nocn, 'glc':1, 'wav':1, 'cpl':1} + roots = {'atm':0, 'lnd':nice, 'rof':0, 'ice':0, + 'ocn':natm, 'glc':0, 'wav':0, 'cpl':0} + nthrds = {} + for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + if c.upper() in self.models: + nthrds[c] = self.models[c.upper()].nthrds + else: + nthrds[c] = 1 + self.write_pe_template(pefilename, ntasks, nthrds, roots) + +class IceLndWavAtmOcn(optimize_model.OptimizeModel): + """ + Optimized the problem based on the Layout + __________________________ + | ICE | LND | WAV | | + |______|_______|_____| | + | | OCN 
| + | ATM | | + |____________________|_____| + + Min T + s.t. T[ice] <= T1 + T[lnd] <= T1 + T[wav] <= T1 + T1 + T[atm] <= T + T[ocn] <= T + + NB[c] >= 1 for c in [ice,lnd,wav,ocn,atm] + NB[ice] + NB[lnd] + NB[wav] <= NB[atm] + atm_blocksize*NB[atm] + ocn_blocksize*NB[ocn] <= TotalTasks + (NB[*] is number of processor blocks) + + T[c] >= C[c]_{i} - NB[c]_{i} * + (C[c]_{i+1} - C[c]_{i}) / (NB[c]_{i+1} - NB[c]_{i}) + + NB[c] * (C[c]_{i+1} - C[c]_{i}) + / (NB[c]_{i+1} - NB[c]_{i}), + i=1..ord(NB), c in [ice,lnd,wav,ocn,atm] + + These assumptions are checked when solver is initialized + . Assuming cost is monotonic decreasing vs ntasks + . Assuming perfect scalability for ntasks < tasks[0] + . Assuming same scalability factor for ntasks > ntasks[last] as for + last two data points + . Assuming components are capable of running on ntasks + + Returns state (STATE_SOLVED_OK, STATE_SOLVED_BAD, STATE_UNSOLVED) + If solved, then solution will be stored in self.X dictionary, indexed + by variable name. Suggested convention: + 'Tice', 'Tlnd', ... for cost per component + 'Nice', 'Nlnd', ... for ntasks per component + 'NBice', 'NBlnd', ... for number of blocks per component + """ + + def __init__(self): + self.models = {} + + + def get_required_components(self): + return ['LND', 'ICE', 'WAV', 'ATM', 'OCN'] + + def optimize(self): + """ + Run the optimization. + set solution in self.X + set state STATE_SOLVED_OK if solved, + otherwise STATE_SOLVED_BAD + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before optimize()!" 
+ self.atm = self.models['ATM'] + self.lnd = self.models['LND'] + self.ice = self.models['ICE'] + self.ocn = self.models['OCN'] + self.wav = self.models['WAV'] + self.real_variables = ['TotalTime', 'T1', 'Tice', 'Tlnd', 'Tatm', + 'Tocn', 'Twav'] + self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', 'NBwav', + 'Nice', 'Nlnd', 'Natm', 'Nocn', 'Nwav'] + self.X = {} + X = self.X + self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) + for rv in self.real_variables: + X[rv] = pulp.LpVariable(rv, lowBound=0) + + for iv in self.integer_variables: + X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) + + + # cost function + self.prob += X['TotalTime'] + + #constraints + self.constraints = [] + # Layout-dependent constraints. Choosing another layout to model + # will require editing these constraints + self.constraints.append([X['Tice'] - X['T1'] <= 0, "Tice - T1 == 0"]) + self.constraints.append([X['Tlnd'] - X['T1'] <= 0, "Tlnd - T1 == 0"]) + self.constraints.append([X['Twav'] - X['T1'] <= 0, "Twav - T1 == 0"]) + self.constraints.append([X['T1'] + X['Tatm'] - X['TotalTime'] <= 0, + "T1 + Tatm - TotalTime <= 0"]) + self.constraints.append([X['Tocn'] - X['TotalTime'] <= 0, + "Tocn - TotalTime == 0"]) + self.constraints.append([X['Nice'] + X['Nlnd'] + X['Nwav'] - X['Natm'] == 0, + "Nice + Nlnd + Nwav - Natm == 0"]) + self.constraints.append([X['Natm'] + X['Nocn'] == self.maxtasks, + "Natm + Nocn <= %d" % (self.maxtasks)]) + self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, + "Natm = %d * NBatm" % self.atm.blocksize]) + self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, + "Nice = %d * NBice" % self.ice.blocksize]) + self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, + "Nlnd = %d * NBlnd" % self.lnd.blocksize]) + self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize]) + 
self.constraints.append([self.wav.blocksize * X['NBwav'] - X['Nwav'] == 0, + "Nwav = %d * NBwav" % self.wav.blocksize]) + + # These are the constraints based on the timing data. + # They should be the same no matter what the layout of the components. + self.add_model_constraints() + + for c, s in self.constraints: + self.prob += c, s + + # Write the program to file and solve (using coin-cbc) + self.prob.writeLP("IceLndWavAtmOcn_model.lp") + self.prob.solve() + self.set_state(self.prob.status) + return self.state + + def get_solution(self): + """ + Return a dictionary of the solution variables. + """ + assert self.state == self.STATE_SOLVED_OK,\ + "solver failed, no solution available" + return {'NBLOCKS_ICE':self.X['NBice'].varValue, + 'NBLOCKS_LND':self.X['NBlnd'].varValue, + 'NBLOCKS_WAV':self.X['NBwav'].varValue, + 'NBLOCKS_ATM':self.X['NBatm'].varValue, + 'NBLOCKS_OCN':self.X['NBocn'].varValue, + 'NTASKS_ICE':self.X['Nice'].varValue, + 'NTASKS_LND':self.X['Nlnd'].varValue, + 'NTASKS_WAV':self.X['Nwav'].varValue, + 'NTASKS_ATM':self.X['Natm'].varValue, + 'NTASKS_OCN':self.X['Nocn'].varValue, + 'NTASKS_TOTAL':self.maxtasks, + 'COST_ICE':self.X['Tice'].varValue, + 'COST_LND':self.X['Tlnd'].varValue, + 'COST_WAV':self.X['Twav'].varValue, + 'COST_ATM':self.X['Tatm'].varValue, + 'COST_OCN':self.X['Tocn'].varValue, + 'COST_TOTAL':self.X['TotalTime'].varValue} + + def write_pe_file(self, pefilename): + """ + Write out a pe_file that can be used to implement the + optimized layout + """ + assert self.state == self.STATE_SOLVED_OK,\ + "solver failed, no solution available" + natm = int(self.X['Natm'].varValue) + nlnd = int(self.X['Nlnd'].varValue) + nice = int(self.X['Nice'].varValue) + nocn = int(self.X['Nocn'].varValue) + nwav = int(self.X['Nwav'].varValue) + + ntasks = {'atm':natm, 'lnd':nlnd, 'rof':1, 'ice':nice, + 'ocn':nocn, 'glc':1, 'wav':nwav, 'cpl':1} + roots = {'atm':0, 'lnd':0, 'rof':0, 'ice':nlnd, + 'ocn':natm, 'glc':0, 'wav':nlnd+nice, 'cpl':0} + nthrds = {} 
+ for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + if c.upper() in self.models: + nthrds[c] = self.models[c.upper()].nthrds + else: + nthrds[c] = 1 + self.write_pe_template(pefilename, ntasks, nthrds, roots) diff --git a/tools/load_balancing_tool/load_balancing_solve.py b/tools/load_balancing_tool/load_balancing_solve.py new file mode 100755 index 00000000000..bef12a3c1d5 --- /dev/null +++ b/tools/load_balancing_tool/load_balancing_solve.py @@ -0,0 +1,341 @@ +#!/usr/bin/env python +""" +Reads timing data created with load_balancing_submit.py (or otherwise, +see --timing_files option) and solves an mixed integer optimization problem +using these timings. The default layout (IceLndAtmOcn) minimizes the cost per +model day assuming the layout: + ____________________ + | ICE | LND | | + |______|_______| | + | | OCN | + | ATM | | + |______________|_____| + +It is possible to extend this tool to solve for other layouts. +""" +import re +import json + +try: + from Tools.standard_script_setup import * +except ImportError, e: + print "Error importing Tools.standard_script_setup" + print "May need to add cime/scripts to PYTHONPATH\n" + raise ImportError(e) + +from CIME.utils import expect +from CIME.XML.machines import Machines +logger = logging.getLogger(__name__) + +# These values can be overridden on the command line +DEFAULT_TESTID = "lbt" +DEFAULT_BLOCKSIZE = 1 +DEFAULT_LAYOUT = "IceLndAtmOcn" +COMPONENT_LIST = ['ATM', 'ICE', 'CPL', 'LND', 'WAV', 'ROF', 'OCN', 'GLC', 'ESP'] + +############################################################################### +def parse_command_line(args, description): +############################################################################### + help_str = """ + Solve a Mixed Integer Linear Program to find a PE layout that minimizes + the wall-clock time per model day. 
+ """ + parser = argparse.ArgumentParser(usage=help_str, + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + CIME.utils.setup_standard_logging_options(parser) + + parser.add_argument('--test-id', default=DEFAULT_TESTID, + help='test-id to use for all timing runs') + + parser.add_argument("-r", "--test-root", + help="Where test cases were created." + " Will default to output root as defined in the config_machines file") + + parser.add_argument('--timing-dir', help='alternative to using casename ' + 'to find timing data, instead read all files in' + ' this directory') + + parser.add_argument('--blocksize', + help='default minimum size of blocks to assign to all ' + 'components. Components can be assigned different ' + 'blocksizes using --blocksize_XXX. Default 1', type=int) + + for c in COMPONENT_LIST: + parser.add_argument('--blocksize-%s' % c.lower(), + help='minimum blocksize for component %s, if ' + 'different from --blocksize', type=int) + + parser.add_argument('--total-tasks', type=int, + help='Number of pes available for assignment') + + parser.add_argument("--layout", + help="name of layout to solve (default selected internally)") + + parser.add_argument("--graph-models", action="store_true", + help="plot cost v. ntasks models. 
requires matplotlib") + + parser.add_argument("--print-models", action="store_true", + help="print all costs and ntasks") + + parser.add_argument("--pe-output", help="write pe layout to file") + + parser.add_argument('--json-output', help="write MILP data to .json file") + + parser.add_argument('--json-input', help="solve using data from .json file") + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, + parser) + if args.total_tasks is None and args.json_input is None: + expect(args.total_tasks is not None or args.json_input is not None, + "--total-tasks or --json-input option must be set") + + blocksizes = {} + for c in COMPONENT_LIST: + attrib = 'blocksize_%s' % c.lower() + if getattr(args, attrib) is not None: + blocksizes[c] = getattr(args, attrib) + elif args.blocksize is not None: + blocksizes[c] = args.blocksize + test_root = args.test_root + if test_root is None: + machobj = Machines() + test_root = machobj.get_value("CIME_OUTPUT_ROOT") + + return (args.test_id, test_root, args.timing_dir, blocksizes, + args.total_tasks, args.layout, args.graph_models, + args.print_models, args.pe_output, args.json_output, + args.json_input) + + +def _locate_timing_files(test_root, test_id, timing_dir): + """ + Find all possible directories for timing files + """ + timing_files = [] + timing_cases_tmp = [] + timing_dirs = [] + + # Add command-line timing directory if it exists + if timing_dir is not None: + logger.info('found directory ' + timing_dir) + timing_dirs.append(timing_dir) + else: + # Add script_dir/casename_prefix_*/timing + for fn in os.listdir(test_root): + if fn.endswith(test_id): + fn = os.path.join(test_root, fn, "timing") + if os.path.isdir(fn): + print "found {}".format(fn) + timing_cases_tmp.append(fn) + timing_dirs = sorted(timing_cases_tmp) + + # Now add all non-.gz files in the directories to be read in + for td in timing_dirs: + full_fn = None + for fn in os.listdir(td): + full_fn = os.path.join(td, fn) + if full_fn.find('.gz') 
< 0: + timing_files.append(full_fn) + if full_fn is None: + logger.warning("WARNING: no timing files found in directory %s", (td)) + return timing_files + +def _parse_timing_files(timing_files): + """ + Parse every file in list for timing information and return data dict + """ + data = {} + for timing_file in timing_files: + timing = _read_timing_file(timing_file) + logger.debug('ntasks: %s' % "; ".join([str(k) + ":" + + str(timing[k]['ntasks']) + for k in timing.keys()])) + logger.debug('cost: %s' % "; ".join([str(k) + ":" + + str(timing[k]['cost']) + for k in timing.keys()])) + for key in timing: + if key not in data: + data[key] = {'cost':[], 'ntasks':[], 'nthrds':[]} + + if timing[key]['ntasks'] in data[key]['ntasks']: + logger.warning('WARNING: duplicate timing run data in %s ' + 'for %s ntasks=%d.', timing_file, key, + timing[key]['ntasks']) + index = data[key]['ntasks'].index(timing[key]['ntasks']) + logger.warning('Existing value: cost=%s. Ignoring new value: ' + 'cost=%s', data[key]['cost'][index], + timing[key]['cost']) + elif 'name' in data[key] and data[key]['name'] != timing[key]['name']: + expect(False, "Timing files have inconsistant model components {} has {} vs {}" + .format(key, data[key]['name'], timing[key]['name'])) + else: + data[key]['name'] = timing[key]['name'] + data[key]['cost'].append(timing[key]['cost']) + data[key]['ntasks'].append(timing[key]['ntasks']) + data[key]['nthrds'].append(timing[key]['nthrds']) + return data + +def _set_blocksizes(data, blocksizes): + """ + Set blocksizes according to command line arguments. 
+ Specific command line arguments override current data, but + do not set to default if it already exists + """ + for key in COMPONENT_LIST: + if key in data: + if key in blocksizes: + data[key]['blocksize'] = blocksizes[key] + elif 'blocksize' not in data[key]: + data[key]['blocksize'] = DEFAULT_BLOCKSIZE + +def _read_timing_file(filename): + """ + Read in timing files to get the costs (time/mday) for each test + + return model dictionaries. Example + {'ICE':{'ntasks':8,'nthrds':1,'cost':40.6}, + 'ATM':{'ntasks':8,'nthrds':1,'cost':120.4}, + ... + } + """ + + logger.info('Reading timing file %s', filename) + try: + timing_file = open(filename, "r") + timing_lines = timing_file.readlines() + timing_file.close() + except Exception, e: + logger.critical("Unable to open file %s", filename) + raise e + models = {} + for line in timing_lines: + # Get number of tasks and thrds + # atm = xatm 8 0 8 x 1 1 (1 ) + #(\w+) = (\w+) \s+ \d+ \s+ \d+ \s+ (\d+)\s+ x\s+(\d+) + m = re.search(r"(\w+) = (\w+)\s+\d+\s+\d+\s+(\d+)\s+x\s+(\d+)", line) + if m: + component = m.groups()[0].upper() + name = m.groups()[1].upper() + ntasks = int(m.groups()[2]) + nthrds = int(m.groups()[3]) + if component in models: + models[component]['ntasks'] = ntasks + models[component]['nthrds'] = nthrds + models[component]['name'] = name + else: + models[component] = {'name':name,'ntasks':ntasks, 'nthrds':nthrds} + continue + + # get cost + # ATM Run Time: 17.433 seconds 1.743 seconds/mday + #(\w+)Run Time: \s \d+.\d+ seconds \s+(\d+.\d+) seconds/mday + m = re.search(r"(\w+) Run Time:\s+(\d+\.\d+) seconds \s+(\d+\.\d+)" + " seconds/mday", line) + if m: + component = m.groups()[0] + cost = float(m.groups()[1]) + if component != "TOT": + if component in models: + models[component]['cost'] = cost + else: + models[component] = {'cost':cost} + return models + +################################################################################ +def load_balancing_solve(test_id, test_root, timing_dir, blocksizes, 
total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input): +################################################################################ + if json_input is not None: + # All data is read from given json file + with open(json_input, "r") as jsonfile: + try: + data = json.load(jsonfile) + except ValueError, e: + logger.critical("Unable to parse json file %s", jsonfile) + raise e + # layout, totaltasks, blocksizes may already be set by json file + # but can be overriden by options + if layout is not None: + data['layout'] = layout + if total_tasks is not None: + data['totaltasks'] = total_tasks + + else: + # find and parse timing files + timing_files = _locate_timing_files(test_root, + test_id, + timing_dir) + + expect(len(timing_files) > 0, "No timing data found") + + data = _parse_timing_files(timing_files) + + data['totaltasks'] = total_tasks + if layout is None: + # try to determine layout automatically + if 'ATM' in data and 'OCN' in data and 'WAV' in data: + aname = data['ATM']['name'] + oname = data['OCN']['name'] + wname = data['WAV']['name'] + if aname not in ('DATM', 'XATM', 'SATM') and \ + oname not in ('DOCN', 'XOCN', 'SOCN'): + if wname in ('DWAV', 'XWAV', 'SWAV'): + data['layout'] = "IceLndAtmOcn" + else: + data['layout'] = "IceLndWavAtmOcn" + + logger.info("Using layout = {}".format(data['layout'])) + else: + expect(False, "Could not automatically determine layout") + else: + data['layout'] = layout + + _set_blocksizes(data, blocksizes) + + # Allow dumping to json file before trying to load optimization + if json_output is not None: + logger.info("Writing MILP data to %s", json_output) + with open(json_output, "w") as outfile: + json.dump(data, outfile, indent=4) + + import optimize_model + + # Use atm-lnd-ocn-ice linear program + opt = optimize_model.solver_factory(data) + opt.optimize() + if graph_models: + opt.graph_costs() + if print_models: + opt.write_timings(fd=None, level=logging.INFO) + else: + 
opt.write_timings(fd=None, level=logging.DEBUG) + + logger.info("Solving Mixed Integer Linear Program using PuLP interface to " + "COIN-CBC") + + status = opt.optimize() + logger.info("PuLP solver status: " + opt.get_state_string(status)) + solution = opt.get_solution() + for k in sorted(solution): + if k[0] == 'N': + logger.info("%s = %d", k, solution[k]) + else: + logger.info("%s = %f", k, solution[k]) + + if pe_output: + opt.write_pe_file(pe_output) + + return 0 + +############################################################################### +def _main_func(description): +############################################################################### + test_id, test_root, timing_dir, blocksizes, total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input = parse_command_line(sys.argv, description) + + sys.exit(load_balancing_solve(test_id, test_root, timing_dir, blocksizes, total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input)) + +############################################################################### + +if __name__ == "__main__": + _main_func(__doc__) diff --git a/tools/load_balancing_tool/load_balancing_submit.py b/tools/load_balancing_tool/load_balancing_submit.py new file mode 100755 index 00000000000..6f99739f1bf --- /dev/null +++ b/tools/load_balancing_tool/load_balancing_submit.py @@ -0,0 +1,320 @@ +#!/usr/bin/env python +""" +Script to submit a series of ACME runs to get data for +time vs nprocessors model. 
This data will be used to generate +a processor layout that achieves high efficiency +""" +from xml.etree.ElementTree import ParseError +import shutil + +try: + from Tools.standard_script_setup import * +except ImportError, e: + print 'Error importing Tools.standard_script_setup' + print 'May need to add cime/scripts to PYTHONPATH\n' + raise ImportError(e) + +from CIME.utils import expect, get_full_test_name +from CIME.case import Case +from CIME.XML.pes import Pes +from CIME.XML.machines import Machines +from CIME.test_scheduler import TestScheduler + +logger = logging.getLogger(__name__) + + +# Default CIME variables, these can be overridden using the +# --extra-options-file option +CIME_DEFAULTS = { + 'STOP_OPTION':'ndays', + 'STOP_N':'10', + 'REST_OPTION':'never', + 'DOUT_S':'FALSE', + 'COMP_RUN_BARRIERS':'TRUE', + 'TIMER_LEVEL':'9' +} + +DEFAULT_TESTID = 'lbt' + +############################################################################### +def parse_command_line(args, description): +############################################################################### + help_str = """ +Requires a pes xml file listing the timing runs you will submit and +their corresponding pe layouts. Use the 'pesize' tag to name each run. + +After running this submission tool, run load_balancing_solve.py to +solve the mixed integer linear program minimizing the simulation time. 
+ +example_pes.xml: + + + + + none + + 8 + 8 + 8 + 8 + 8 + 8 + 8 + 8 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + + + none + + 32 + 32 + 32 + 32 + 32 + 32 + 32 + 32 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + + + none + + 128 + 128 + 128 + 128 + 128 + 128 + 128 + 128 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + +""" + parser = argparse.ArgumentParser(usage=help_str, + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + CIME.utils.setup_standard_logging_options(parser) + # Required arguments + parser.add_argument('--compset', + help='Specify compset', required=True) + parser.add_argument('--res', + help='Specify resolution', required=True) + parser.add_argument('--pesfile', required=True) + + # Optional pass-through arguments to create_newcase + parser.add_argument('--compiler', help='Choose compiler to build with') + + parser.add_argument('--project', help='Specify project id') + + parser.add_argument('--machine', help='machine name') + + parser.add_argument('--mpilib', help='mpi library name') + + parser.add_argument("-r", "--test-root", + help="Where test cases will be created." 
+ " Will default to output root as defined in the config_machines file") + + parser.add_argument('--extra-options-file', + help='file listing options to be run using xmlchange') + parser.add_argument('--test-id', default=DEFAULT_TESTID, + help='test-id to use for all timing runs') + parser.add_argument('--force-purge', action='store_true') + + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + + return (args.compset, args.res, args.pesfile, args.mpilib, + args.compiler, args.project, args.machine, args.extra_options_file, + args.test_id, args.force_purge, args.test_root) + +################################################################################ +def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project, machine, + extra_options_file, test_id, force_purge, test_root): +################################################################################ + # Read in list of pes from given file + if not os.access(pesfile, os.R_OK): + logger.critical('ERROR: File %s not found', pesfile) + raise SystemExit(1) + logger.info('Reading XML file %s. 
Searching for pesize entries:', pesfile) + try: + pesobj = Pes(pesfile) + except ParseError: + logger.critical('ERROR: File %s not parseable', pesfile) + raise SystemExit(1) + + pesize_list = [] + for node in pesobj.get_nodes('pes'): + pesize = node.get('pesize') + if not pesize: + logger.critical('No pesize for pes node in file %s', pesfile) + if pesize in pesize_list: + logger.critical('pesize %s duplicated in file %s', pesize, pesfile) + pesize_list.append(pesize) + + if not pesize_list: + logger.critical('ERROR: No grid entries found in pes file %s', pesfile) + raise SystemExit(1) + + machobj = Machines(machine=machine) + if test_root is None: + test_root = machobj.get_value("CIME_OUTPUT_ROOT") + if machine is None: + machine = machobj.get_machine_name() + print "machine is {}".format(machine) + if compiler is None: + compiler = machobj.get_default_compiler() + print "compiler is {}".format(compiler) + if mpilib is None: + mpilib = machobj.get_default_MPIlib() + + + + + test_names = [] + for i in xrange(len(pesize_list)): + test_names.append(get_full_test_name("PFS_I{}".format(i),grid=res, compset=compset, + machine=machine, compiler=compiler)) + casedir = os.path.join(test_root, test_names[-1] + "." + test_id) + print "casedir is {}".format(casedir) + if os.path.isdir(casedir): + if force_purge: + logger.info('Removing directory %s', casedir) + shutil.rmtree(casedir) + else: + expect(False, + "casedir {} already exists, use the --force-purge option, --test-root or" + " --test-id options".format(casedir)) + + tests = TestScheduler(test_names, no_setup = True, + compiler=compiler, machine_name=machine, mpilib=mpilib, + test_root=test_root, test_id=test_id, project=project) + success = tests.run_tests(wait=True) + expect(success, "Error in creating cases") + testnames = [] + for test in tests.get_testnames(): + testname = os.path.join(test_root, test + "." 
+ test_id) + testnames.append( testname) + logger.info("test is {}".format(testname)) + with Case(testname) as case: + pes_ntasks, pes_nthrds, pes_rootpe, _ = \ + pesobj.find_pes_layout('any', 'any', 'any', pesize_opts=pesize_list.pop(0)) + for key in pes_ntasks: + case.set_value(key, pes_ntasks[key]) + for key in pes_nthrds: + case.set_value(key, pes_nthrds[key]) + for key in pes_rootpe: + case.set_value(key, pes_rootpe[key]) + + if extra_options_file is not None: + try: + extras = open(extra_options_file, 'r') + for line in extras.readlines(): + split = line.split('=') + if len(split) == 2: + logger.info('setting %s=%s', split[0], split[1]) + case.set_value(split[0], split[1]) + else: + logger.debug('ignoring line in {}: {}'.format( + extra_options_file, line)) + extras.close() + except IOError: + expect(False, "ERROR: Could not read file {}".format(extra_options_file)) + + + tests = TestScheduler(test_names, use_existing=True, test_root=test_root, test_id=test_id) + success = tests.run_tests(wait=False) + expect(success, "Error in running cases") + + # need to fix + logger.info('Timing jobs submitted. After jobs completed, run to optimize ' + 'pe layout:\n load_balancing_solve --test-id {} --test-root {}'. 
+ format(test_id, test_root)) + +############################################################################### +def _main_func(description): +############################################################################### + compset, res, pesfile, mpilib, compiler, project, machine, extra_options_file, casename_prefix, \ + force_purge, test_root = parse_command_line(sys.argv, description) + + sys.exit(load_balancing_submit(compset, res, pesfile, mpilib, + compiler, project, machine, + extra_options_file, casename_prefix, + force_purge, test_root)) + +############################################################################### + +if __name__ == '__main__': + _main_func(__doc__) diff --git a/tools/load_balancing_tool/optimize_model.py b/tools/load_balancing_tool/optimize_model.py new file mode 100644 index 00000000000..99413129594 --- /dev/null +++ b/tools/load_balancing_tool/optimize_model.py @@ -0,0 +1,354 @@ +#!/usr/bin/env python +""" +Optimization algorithm for solving MILP from timing data. +""" + +import sys +import copy +import logging +import operator +import importlib +from CIME.utils import expect +try: + import pulp +except ImportError, e: + sys.stderr.write("pulp library not installed or located. 
" + "Try pip install [--user] pulp\n") + raise e + +logger = logging.getLogger(__name__) + +def solver_factory(data): + """ + load data either from a json file or dictionary + """ + expect(data.has_key('totaltasks'),"totaltasks not found in data") + + layout = data['layout'] + sp = layout.rsplit('.', 1) + try: + if len(sp) > 1: + layout_module = importlib.import_module(sp[0]) + layout = sp[1] + else: + import layouts + layout_module = layouts + except ImportError: + expect(False,"cannot import %s\n") + + try: + solverclass = getattr(layout_module, layout) + except KeyError: + expect(False, "layout class %s not found in %s\n", + layout, layout_module) + + solver = solverclass() + + for c in solver.get_required_components(): + assert data.has_key(c), "ERROR: component %s not found in data" % c + + solver.set_data(data) + return solver + +class ModelData: + """ + Convert dictionary data entry into usable object + """ + def __init__(self, name, model_dict): + self.name = name + self.blocksize = model_dict['blocksize'] + self.nthrds = model_dict['nthrds'][0] + ntasks = copy.deepcopy(model_dict['ntasks']) + cost = copy.deepcopy(model_dict['cost']) + assert len(ntasks) == len(cost), "ntasks data not same length as cost for %s" % name + # sort smallest ntasks to largest + tup = zip(*sorted(zip(cost, ntasks), + key=operator.itemgetter(1))) + self.cost = list(tup[0]) + self.ntasks = list(tup[1]) + for j in self.ntasks: + if j > 1 and j % self.blocksize: + logger.warning("WARNING: %s pe %d not divisible by " + "blocksize %d. Results may be invalid\n", + name, j, self.blocksize) + +class OptimizeModel(object): + STATE_UNDEFINED = 0 + STATE_UNSOLVED = 1 + STATE_SOLVED_OK = 2 + STATE_SOLVED_BAD = 3 + states = ['Undefined', 'Unsolved', 'Solved', 'No Solution'] + + def __init__(self): + self.models = {} + self.state = self.STATE_UNDEFINED + self.X = {} + self.constraints = [] + self.maxtasks = 0 + + def set_data(self, data_dict): + """ + Add data to the model. 
+ data_dict is dictionary of components with their data + example: {'totaltasks':64 + 'ICE': {'ntasks': [2,4,8], + 'costs': [10.0,6.0,4.0], + 'nthrds': [1,1,1], + 'blocksize': 8} + 'LND': {...} + } + + data is extrapolated as needed for n=1 and n=totaltasks + sets state to STATE_UNSOLVED + """ + # get deep copy, because we need to divide ntasks by blocksize + self.maxtasks = data_dict['totaltasks'] + + for key in data_dict: + if isinstance(data_dict[key], dict) and 'ntasks' in data_dict[key]: + self.models[key] = ModelData(key, data_dict[key]) + + + # extrapolate for n=1 and n=maxtasks + for m in self.models.values(): + m.extrapolated = [False] * len(m.cost) + + # add in data for ntasks=1 if not provided + if m.ntasks[0] > 1: + m.cost.insert(0, m.ntasks[0] * m.cost[0]) + m.ntasks.insert(0, 1) + m.extrapolated.insert(0, True) + + # add in data for maxtasks if not available + # assume same scaling factor as previous interval + + if len(m.ntasks) > 1 and m.ntasks[-1] < self.maxtasks: + if m.cost[-2] <= 0.0: + factor = 1.0 + elif len(m.ntasks) > 1: + factor = (1.0 - m.cost[-1]/m.cost[-2]) / \ + (1.0 - 1. * m.ntasks[-2] / m.ntasks[-1]) + else: + # not much information to go on ... + factor = 1.0 + m.cost.append(m.cost[-1] * (1.0 - factor + + factor * m.ntasks[-1] / self.maxtasks)) + m.ntasks.append(self.maxtasks) + m.extrapolated.append(True) + + self.check_requirements() + self.state = self.STATE_UNSOLVED + + def add_model_constraints(self): + """ + Build constraints based on the cost vs ntask models + This should be the same for any layout so is provided in base class + Assumes cost variables are 'Txxx' and ntask variables are 'Nxxx' + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before add_model_constraints()" + for k in self.get_required_components(): + m = self.models[k] + tk = 'T' + k.lower() # cost(time) key + nk = 'N' + k.lower() # nprocs key + for i in range(0, len(m.cost) - 1): + slope = (m.cost[i+1] - m.cost[i]) / (1. 
* m.ntasks[i+1] - m.ntasks[i]) + self.constraints.append([self.X[tk] - slope * self.X[nk] >= \ + m.cost[i] - slope * m.ntasks[i], + "T%s - %f*N%s >= %f" % \ + (k.lower(), slope, k.lower(), + m.cost[i] - slope * m.ntasks[i])]) + if slope > 0: + logger.warning("WARNING: Nonconvex cost function for model " + "%s. Review costs to ensure data is correct " + "(--graph_models or --print_models)", k) + + break + if slope == 0: + break + + def get_required_components(self): + """ + Should be overridden by derived class. Return a list of required + components (capitalized) used in the layout. + + Example: return ['ATM', 'LND', 'ICE'] + """ + return [] + + def check_requirements(self): + """ + Check to make sure that each element of the subclass's list of + required components has some data provided. + """ + for r in self.get_required_components(): + if r not in self.models: + logger.critical("Data for component %s not available", r) + + def write_timings(self, fd=sys.stdout, level=logging.DEBUG): + """ + Print out the data used for the ntasks/cost models. + Can be used to check that the data provided to the + model is reasonable. Also see graph_costs() + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before write_timings()" + for k in self.models: + m = self.models[k] + message = "***%s***" % k + if fd is not None: + fd.write("\n" + message + "\n") + logger.log(level, message) + + for i in range(len(m.cost)): + extra = "" + if m.extrapolated[i]: + extra = " (extrapolated)" + message = "%4d: %f%s" % \ + (m.ntasks[i], m.cost[i], extra) + if fd is not None: + fd.write(message + "\n") + logger.log(level, message) + + def graph_costs(self): + """ + Use matplotlib to graph the ntasks/cost data. + This provides a quick visual to check that the + data used for the optimization is reasonable. 
+ + If matplotlib is not available, nothing will happen + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before graph_costs()" + try: + import matplotlib.pyplot as pyplot + except ImportError: + logger.info("matplotlib not found, skipping graphs") + return + + nplots = len(self.models) + nrows = (nplots + 1) / 2 + ncols = 2 + fig, ax = pyplot.subplots(nrows, ncols) + row = 0; col = 0 + for k in self.models: + m = self.models[k] + p = ax[row, col] + p.loglog(m.ntasks, m.cost, 'k-') + for i in range(len(m.ntasks)): + if not m.extrapolated[i]: + p.plot(m.ntasks[i], m.cost[i], 'bx') + else: + p.plot(m.ntasks[i], m.cost[i], 'rx') + p.set_title(m.name) + p.set_xlabel('ntasks') + p.set_ylabel('cost (s/mday)') + p.set_xlim([1, self.maxtasks]) + row += 1 + if row == nrows: + row = 0 + col += 1 + + fig.suptitle("log-log plot of Cost/mday vs ntasks for designated " + "components.\nPerfectly scalable components would have a " + "straight line. Blue 'X's designate points\nfrom data, " + "red 'X's designate extrapolated data. Areas above the " + "line plots represent\nthe feasible region. Global " + "optimality of solution depends on the convexity of " + "these line plots.\nClose graph to continue on to solve.") + fig.tight_layout() + fig.subplots_adjust(top=0.75) + logger.info("close graph window to continue") + pyplot.show() + + def optimize(self): + """ + Run the optimization. + Must set self.state using LpStatus object: + LpStatusOptimal -> STATE_SOLVED_OK + LpStatusNotSolved -> STATE_UNSOLVED + LpStatusInfeasible -> STATE_SOLVED_BAD + LpStatusUnbounded -> STATE_SOLVED_BAD + LpStatusUndefined -> STATE_UNDEFINED + -- use self.set_state(lpstatus) -- + Returns state + + If solved, then solution will be stored in self.X dictionary, indexed + by variable name. Suggested convention: + 'Tice', 'Tlnd', ... for cost per component + 'Nice', 'Nlnd', ... for ntasks per component + 'NBice', 'NBlnd', ... 
for number of blocks per component + + The default implementation of get_solution() returns a dictionary + of these variable keys and their values. + """ + raise NotImplementedError + + def get_solution(self): + """ + Return a dictionary of the solution variables, can be overridden. + Default implementation returns values in self.X + """ + assert self.state == self.STATE_SOLVED_OK,\ + "solver failed, no solution available" + retval = {} + if hasattr(self,'X') and isinstance(self.X, dict): + for k in self.X: + retval[k] = self.X[k].varValue + return retval + + def set_state(self, lpstatus): + if lpstatus == pulp.constants.LpStatusOptimal: + self.state = self.STATE_SOLVED_OK + elif lpstatus == pulp.constants.LpStatusNotSolved: + self.state = self.STATE_UNSOLVED + elif lpstatus == pulp.constants.LpStatusUndefined: + self.state = self.STATE_UNDEFINED + else: + self.state = self.STATE_SOLVED_BAD + + def get_state(self): + return self.state + + def get_state_string(self, state): + return self.states[state] + + def write_pe_file(self, pefilename): + raise NotImplementedError + + def write_xml_changes(self, outfile): + """ + Write out a list of xmlchange commands to implement + the optimal layout + """ + raise NotImplementedError + + def write_pe_template(self, pefilename, ntasks, nthrds, roots): + from distutils.spawn import find_executable + from xml.etree import ElementTree as ET + from CIME.utils import run_cmd + logger.info("Writing pe node info to %s", pefilename) + root = ET.Element('config_pes') + grid = ET.SubElement(root, 'grid') + grid.set('name', 'any') + mach = ET.SubElement(grid, 'mach') + mach.set('name', 'any') + pes = ET.SubElement(mach, 'pes') + pes.set('compset', 'any') + pes.set('pesize', '') + ntasks_node = ET.SubElement(pes, 'ntasks') + for k in ntasks: + node = ET.SubElement(ntasks_node, 'ntasks_' + k) + node.text = str(ntasks[k]) + nthrds_node = ET.SubElement(pes, 'nthrds') + for k in nthrds: + node = ET.SubElement(nthrds_node, 'nthrds_' + k) + 
node.text = str(nthrds[k]) + rootpe_node = ET.SubElement(pes, 'rootpe') + for k in roots: + node = ET.SubElement(rootpe_node, 'rootpe_' + k) + node.text = str(roots[k]) + xmllint = find_executable("xmllint") + if xmllint is not None: + run_cmd("%s --format --output %s -" % (xmllint, pefilename), + input_str=ET.tostring(root)) diff --git a/tools/load_balancing_tool/run_first.csh b/tools/load_balancing_tool/run_first.csh deleted file mode 100755 index 1aa1158277d..00000000000 --- a/tools/load_balancing_tool/run_first.csh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/csh -f - -source global_variables.csh - -set casebase = ${mach}_${compset}_${res} -set bldrun = 1 - -mkdir -p $casedir -mkdir -p ${results_dir} - -set NATM = `echo $TASK_ATM:q | sed 's/,/ /g'` -set NLND = `echo $TASK_LND:q | sed 's/,/ /g'` -set NROF = `echo $TASK_ROF:q | sed 's/,/ /g'` -set NICE = `echo $TASK_ICE:q | sed 's/,/ /g'` -set NOCN = `echo $TASK_OCN:q | sed 's/,/ /g'` -set NCPL = `echo $TASK_CPL:q | sed 's/,/ /g'` -set NWAV = `echo $TASK_WAV:q | sed 's/,/ /g'` -set NGLC = `echo $TASK_GLC:q | sed 's/,/ /g'` - -set RATM = `echo $ROOT_ATM:q | sed 's/,/ /g'` -set RLND = `echo $ROOT_LND:q | sed 's/,/ /g'` -set RROF = `echo $ROOT_LND:q | sed 's/,/ /g'` -set RICE = `echo $ROOT_ICE:q | sed 's/,/ /g'` -set ROCN = `echo $ROOT_OCN:q | sed 's/,/ /g'` -set RCPL = `echo $ROOT_CPL:q | sed 's/,/ /g'` -set RWAV = `echo $ROOT_WAV:q | sed 's/,/ /g'` -set RGLC = `echo $ROOT_LND:q | sed 's/,/ /g'` - - -set CNT = 0 - -if (-e ${results_dir}/test_list.out) then - rm -f ${results_dir}/test_list.out -endif - -foreach NA (${NATM}) -@ CNT = $CNT + 1 - set NT = ${NTHRDS_VAL} - @ EXPN = $CNT - - echo "case for $NA $CNT $NT " - @ NTASKS_A = $NATM[$CNT] - @ NTASKS_L = $NLND[$CNT] - @ NTASKS_R = $NROF[$CNT] - @ NTASKS_O = $NOCN[$CNT] - @ NTASKS_I = $NICE[$CNT] - @ NTASKS_C = $NCPL[$CNT] - @ NTASKS_W = $NWAV[$CNT] - @ NTASKS_G = $NGLC[$CNT] - - @ ROOTPE_ATM = $RATM[$CNT] - @ ROOTPE_LND = $RLND[$CNT] - @ ROOTPE_ROF = $RROF[$CNT] - @ 
ROOTPE_ICE = $RICE[$CNT] - @ ROOTPE_OCN = $ROCN[$CNT] - @ ROOTPE_CPL = $RCPL[$CNT] - @ ROOTPE_WAV = $RWAV[$CNT] - @ ROOTPE_GLC = $RGLC[$CNT] - - set go = 1 - if ($NTASKS_A < 1) set NTASKS_A = 1 - if ($NTASKS_L < 1) set NTASKS_L = 1 - if ($NTASKS_I < 1) set NTASKS_I = 1 - if ($NTASKS_C < 1) set NTASKS_C = 1 - - if ($go == 1) then - echo "setting up case for $NA $CNT $NT " - - set case = t${casestr}${EXPN}_${NT}_${casebase} - cd ${cesmsrc}/cime/scripts - echo ${casedir}/${case} >> $results_dir/test_list.out - ./create_newcase -case ${casedir}/${case} -res ${res} -compset {$compset} -mach ${mach} - - cd ${casedir}/${case} - ./case_setup -clean - - #generic stuff - ./xmlchange -file env_run.xml -id STOP_N -val $run_len - ./xmlchange -file env_run.xml -id STOP_OPTION -val ndays - ./xmlchange -file env_run.xml -id REST_OPTION -val never - ./xmlchange -file env_run.xml -id TIMER_LEVEL -val 9 - ./xmlchange -file env_run.xml -id DOUT_S -val FALSE - ./xmlchange -file env_run.xml -id COMP_RUN_BARRIERS -val TRUE - - ./xmlchange -file env_mach_pes.xml -id NTASKS_ATM -val $NTASKS_A - ./xmlchange -file env_mach_pes.xml -id NTHRDS_ATM -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_ATM -val ${ROOTPE_ATM} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_LND -val $NTASKS_L - ./xmlchange -file env_mach_pes.xml -id NTHRDS_LND -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_LND -val ${ROOTPE_LND} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_ROF -val $NTASKS_R - ./xmlchange -file env_mach_pes.xml -id NTHRDS_ROF -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_ROF -val ${ROOTPE_ROF} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_ICE -val $NTASKS_I - ./xmlchange -file env_mach_pes.xml -id NTHRDS_ICE -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_ICE -val ${ROOTPE_ICE} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_OCN -val $NTASKS_O - ./xmlchange -file env_mach_pes.xml -id NTHRDS_OCN -val $NT - ./xmlchange -file env_mach_pes.xml -id 
ROOTPE_OCN -val ${ROOTPE_OCN} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_CPL -val $NTASKS_C - ./xmlchange -file env_mach_pes.xml -id NTHRDS_CPL -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_CPL -val ${ROOTPE_CPL} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_WAV -val $NTASKS_W - ./xmlchange -file env_mach_pes.xml -id NTHRDS_WAV -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_WAV -val ${ROOTPE_WAV} - - ./xmlchange -file env_mach_pes.xml -id NTASKS_GLC -val $NTASKS_G - ./xmlchange -file env_mach_pes.xml -id NTHRDS_GLC -val $NT - ./xmlchange -file env_mach_pes.xml -id ROOTPE_GLC -val ${ROOTPE_GLC} - - ./case_setup - - if ($bldrun == "1") then - ./${case}*.build - - rm tmpsubmit >& /dev/null -cat > tmpsubmit << EOF - ./${case}*.submit -EOF - source tmpsubmit - endif #bldrun - - endif # go - -end #NATM - diff --git a/tools/load_balancing_tool/run_second.csh b/tools/load_balancing_tool/run_second.csh deleted file mode 100755 index 43a7127f882..00000000000 --- a/tools/load_balancing_tool/run_second.csh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/csh -f - -source global_variables.csh - -setenv case t${casestr}1_${NTHRDS_VAL} -setenv casebase ${mach}_${compset}_${res} -setenv case_dir ${casedir}/${case}_${casebase} - -echo "Finding balanced layouts for these total task counts: "$TARGET_TASKS -echo "Results will be written to: "$results_dir - -set numFiles = `ls -lt /glade/u/home/mickelso/test/restults/*.dat | wc -l` -if ($numFiles > 2) then - set curDir = $PWD - echo "Cleaning *.dat files from the results directory: "$results_dir - cd $results_dir - rm -fr *.dat - cd $curDir -endif - -echo "Finding new layouts ..." 
-if ($DYCORE == "FV") then - set fv_constraints = 1 -else - set fv_constraints = 0 -endif - -perl $PWD/code/load_balance.pl $TARGET_TASKS $results_dir $PWD $case $casebase $case_dir $fv_constraints $res - diff --git a/tools/load_balancing_tool/tests/__init__.py b/tools/load_balancing_tool/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tools/load_balancing_tool/tests/atm_lnd.py b/tools/load_balancing_tool/tests/atm_lnd.py new file mode 100644 index 00000000000..55aacc99b68 --- /dev/null +++ b/tools/load_balancing_tool/tests/atm_lnd.py @@ -0,0 +1,117 @@ +import sys, logging +import pulp +import optimize_model +logger = logging.getLogger(__name__) + +class AtmLnd(optimize_model.OptimizeModel): + def get_required_components(self): + return ['ATM', 'LND', 'ROF', 'ICE', 'CPL', 'OCN'] + + def optimize(self): + """ + Run the optimization. + Must set self.state using LpStatus object: + LpStatusOptimal -> STATE_SOLVED_OK + LpStatusNotSolved -> STATE_UNSOLVED + LpStatusInfeasible -> STATE_SOLVED_BAD + LpStatusUnbounded -> STATE_SOLVED_BAD + LpStatusUndefined -> STATE_UNDEFINED + -- use self.set_state(lpstatus) -- + Returns state + """ + assert self.state != self.STATE_UNDEFINED,\ + "set_data() must be called before optimize()!" 
+ self.atm = self.models['ATM'] + self.lnd = self.models['LND'] + self.ice = self.models['ICE'] + self.ocn = self.models['OCN'] + self.rof = self.models['ROF'] + self.cpl = self.models['CPL'] + + self.real_variables = ['TotalTime', 'Tice', 'Tlnd', 'Tatm', + 'Tocn', 'Trof', 'Tcpl'] + self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', + 'NBrof', 'NBcpl', 'Nrof', 'Ncpl', + 'Nice', 'Nlnd', 'Natm', 'Nocn', 'N1'] + self.X = {} + X = self.X + self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) + for rv in self.real_variables: + X[rv] = pulp.LpVariable(rv, lowBound=0) + + for iv in self.integer_variables: + X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) + + + # cost function + self.prob += X['TotalTime'] + + #constraints + self.constraints = [] + # Layout-dependent constraints. Choosing another layout to model + # will require editing these constraints + self.constraints.append([X['Tatm'] + X['Trof'] + X['Tcpl'] - X['TotalTime'] <= 0, "Tatm + Trof + Tcpl - TotalTime <= 0"]) + self.constraints.append([X['Tlnd'] + X['Tice'] + X['Tocn'] - X['TotalTime'] <= 0, "Tlnd + Tice + Tocn - TotalTime <= 0"]) + + self.constraints.append([X['Natm'] - X['N1'] == 0, + "Natm - N1 <= 0"]) + self.constraints.append([X['Nrof'] - X['N1'] == 0, + "Nrof - N1 <= 0"]) + self.constraints.append([X['Ncpl'] - X['N1'] == 0, + "Ncpl - N1 <= 0"]) + + self.constraints.append([X['Nlnd'] + X['N1'] == self.maxtasks, + "Nlnd + N1 <= MAXN"]) + self.constraints.append([X['Nice'] + X['N1'] == self.maxtasks, + "Nice + N1 <= MAXN"]) + self.constraints.append([X['Nocn'] + X['N1'] == self.maxtasks, + "Nocn + N1 <= MAXN"]) + + self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, + "Natm = %d * NBatm" % self.atm.blocksize]) + self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, + "Nice = %d * NBice" % self.ice.blocksize]) + self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, + "Nlnd = %d * NBlnd" % 
self.lnd.blocksize]) + self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize]) + self.constraints.append([self.rof.blocksize * X['NBrof'] - X['Nrof'] == 0, + "Nrof = %d * NBrof" % self.rof.blocksize]) + self.constraints.append([self.cpl.blocksize * X['NBcpl'] - X['Ncpl'] == 0, + "Ncpl = %d * NBcpl" % self.cpl.blocksize]) + + # These are the constraints based on the timing data. + # They should be the same no matter what the layout of the components. + self.add_model_constraints() + + for c, s in self.constraints: + self.prob += c, s + + # Write the program to file and solve (using glpk) + self.prob.writeLP("IceLndAtmOcn_model.lp") + self.prob.solve() + self.set_state(self.prob.status) + return self.state + + + def write_pe_file(self, pefilename): + """ + Write out a pe_file that can be used to implement the + optimized layout + """ + natm = self.X['Natm'].varValue + nlnd = self.X['Nlnd'].varValue + nice = self.X['Nice'].varValue + nocn = self.X['Nocn'].varValue + ncpl = self.X['Ncpl'].varValue + nrof = self.X['Nrof'].varValue + npart = max(natm, nrof, ncpl) + ntasks = {'atm':natm, 'lnd':nldn, 'rof':nrof, 'ice':nice, + 'ocn':nocn, 'glc':1, 'wav':1, 'cpl':ncpl} + roots = {'atm':0, 'lnd':npart, 'rof':0, 'ice':npart, + 'ocn':npart, 'glc':0, 'wav':0, 'cpl':0} + nthrds = {} + for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + nthrds[c] = self.models[c.upper()].nthrds + + self.write_pe_template(pefilename, ntasks, nthrds, roots) diff --git a/tools/load_balancing_tool/tests/example.json b/tools/load_balancing_tool/tests/example.json new file mode 100644 index 00000000000..606d01a9ee2 --- /dev/null +++ b/tools/load_balancing_tool/tests/example.json @@ -0,0 +1,29 @@ +{ + "description" : "Optimize using data available from original load balancing tool. 
The original tool solved the problem using a different model, so we do not expect exact replication: (Original solution: NTASKS_ATM: 1006 NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 TOTAL_COST: 28.749 s/mday)", + "layout" : "IceLndAtmOcn", + "totaltasks" : 1024, + "ATM" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [427.471, 223.332, 119.580, 66.182, 37.769] + }, + "OCN" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [ 15.745, 7.782, 4.383, 3.181, 2.651] + }, + "LND" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [ 4.356, 2.191, 1.191, 0.705, 0.560] + }, + "ICE" : { + "ntasks" : [32,64,160,320,640], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [8.018, 4.921, 2.368, 1.557, 1.429] + } +} diff --git a/tools/load_balancing_tool/tests/load_balancing_test.py b/tools/load_balancing_tool/tests/load_balancing_test.py new file mode 100755 index 00000000000..040e5361517 --- /dev/null +++ b/tools/load_balancing_tool/tests/load_balancing_test.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python +""" +Test for pulp, glpk + +IceLndAtmOcn: +* Test writing to json file, then running from it +* test running solve from timing dir files +* test for pulp +* test can save json file if pulp is not available +* test running submit using X +* test running solve from X +* test writing pes file +* test extended algorithm +""" +try: + from Tools.standard_script_setup import * +except ImportError, e: + print 'Error importing Tools.standard_script_setup' + print 'May need to add cime/scripts to PYTHONPATH\n' + raise ImportError(e) +try: + import optimize_model +except ImportError, e: + print 'Error importing optimize_model' + print 'May need to add cime/tools/load_balancing_tool to PYTHONPATH\n' + raise ImportError(e) + + + +from CIME.utils import run_cmd_no_fail, get_full_test_name +from CIME.XML.machines import Machines +from CIME.XML import pes +import unittest, json, tempfile, 
sys, re, copy + +SCRIPT_DIR = CIME.utils.get_scripts_root() +MACHINE = Machines() +CODE_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool") +TEST_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool", + "tests") +X_OPTIONS = """ +STOP_N=1 +""" +JSON_DICT = { + "description" : "Optimize using data available from original load balancing tool. The original tool solved the problem using a different model, so we do not expect exact replication: (Original solution: NTASKS_ATM: 1006 NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 TOTAL_COST: 28.749 s/mday)", + "layout" : "IceLndAtmOcn", + "totaltasks" : 1024, + "ATM" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [427.471, 223.332, 119.580, 66.182, 37.769] + }, + "OCN" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [ 15.745, 7.782, 4.383, 3.181, 2.651] + }, + "LND" : { + "ntasks" : [32,64,128,256,512], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [ 4.356, 2.191, 1.191, 0.705, 0.560] + }, + "ICE" : { + "ntasks" : [32,64,160,320,640], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [8.018, 4.921, 2.368, 1.557, 1.429] + } +} + +PES_XML = """ + + + + + none + + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + + + none + + 4 + 4 + 4 + 4 + 4 + 4 + 4 + 4 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + +""" + +############################################################################### +def _main_func(description): +############################################################################### + + unittest.main(verbosity=2, catchbreak=True) + +############################################################################### + +class LoadBalanceTests(unittest.TestCase): + def _check_solution(self, output, var, val): + """ + Utility function, checks output of milp solve to make sure solution + value is expected + """ + 
pattern = var + ' = (\d+)' + m = re.search(pattern, output) + if not m: + self.fail("pattern '%s' not found in output" % (pattern)) + check = int(m.groups()[0]) + self.assertTrue(check == val, "%s = %d, expected %d" % (var, check, val)) + + + def test_pulp(self): + try: + import pulp + except ImportError, e: + self.fail("ERROR: pulp not found. Install or set PYTHONPATH") + x = pulp.LpVariable('x') + p = pulp.LpProblem('p', pulp.LpMinimize) + p.solve() + self.assertTrue(p.status == 1, "ERROR: simple pulp solve failed") + + + def test_read_and_write_json(self): + "Solve from json file, writing to new json file, solve from new file" + with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as jsonfile2: + json.dump(JSON_DICT, jsonfile1) + jsonfile1.flush() + cmd = "./load_balancing_solve.py --json-input %s --json-output %s" % (jsonfile1.name, jsonfile2.name) + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "NTASKS_ATM", 992) + cmd = "./load_balancing_solve.py --json-input %s" % jsonfile2.name + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "NTASKS_ATM", 992) + + + def test_solve_from_timing_dir(self): + cmd = "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --layout IceLndAtmOcn" % os.path.join(TEST_DIR, "timing") + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "NTASKS_ATM", 62) + + def test_write_pes(self): + with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as pes_file: + json.dump(JSON_DICT, jsonfile1) + jsonfile1.flush() + cmd = "./load_balancing_solve.py --json-input %s --pe-output %s" % (jsonfile1.name, pes_file.name) + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + + self.assertTrue(os.access(pes_file.name, os.R_OK), "pesfile %s not written" % pes_file.name) + pesobj = CIME.XML.pes.Pes(pes_file.name) + for node in pesobj.get_nodes('pes'): + pesize = 
node.get('pesize') + pes_ntasks, pes_nthrds, pes_rootpe, _ = \ + pesobj.find_pes_layout('any', 'any', 'any', pesize_opts=pesize) + + def test_set_blocksize_atm(self): + cmd = "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --blocksize-atm 4 --layout IceLndAtmOcn" % os.path.join(TEST_DIR, "timing") + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "NTASKS_ATM", 60) + self._check_solution(output, "NBLOCKS_ATM", 15) + self._check_solution(output, "NTASKS_OCN", 4) + self._check_solution(output, "NBLOCKS_OCN", 2) + + def test_graph_models(self): + try: + import matplotlib + except ImportError, e: + self.skipTest("matplotlib not found") + + with tempfile.NamedTemporaryFile('w+') as jsonfile: + json.dump(JSON_DICT, jsonfile) + jsonfile.flush() + cmd = "./load_balancing_solve.py --json-input %s --graph-models" % (jsonfile.name) + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "NTASKS_ATM", 992) + + def test_xcase_submit(self): + test_root = MACHINE.get_value("CIME_OUTPUT_ROOT") + machine = MACHINE.get_machine_name() + compiler = MACHINE.get_default_compiler() + + test_name = get_full_test_name("PFS_I0",grid="f19_g16", compset="X", + machine=machine, compiler=compiler) + expected_dir = os.path.join(test_root, + "{}.test_lbt".format(test_name), + "timing") + if not os.path.isdir(expected_dir): + with tempfile.NamedTemporaryFile('w+') as tfile, tempfile.NamedTemporaryFile('w+') as xfile: + tfile.write(PES_XML) + tfile.flush() + xfile.write(X_OPTIONS) + xfile.flush() + cmd = "./load_balancing_submit.py --pesfile {} --res f19_g16 --compset X --test-id test_lbt --extra-options-file {} --test-root {}".format(tfile.name, xfile.name, test_root) + if MACHINE.has_batch_system(): + sys.stdout.write("Jobs will be submitted to queue. 
Rerun " + "load_balancing_test.py after jobs have " + "finished.") + else: + cmd += " --force-purge" + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + + self.assertTrue(output.find("Timing jobs submitted") >= 0, + "Expected 'Timing jobs submitted' in output") + + if os.path.isdir(expected_dir): + + cmd = "./load_balancing_solve.py --total-tasks 32 --blocksize 1 --test-id test_lbt --print-models --test-root {} --layout IceLndAtmOcn".format(test_root) + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self.assertTrue(output.find("***ATM***") > 0, + "--print-models failed to print ATM data") + self._check_solution(output, "NTASKS_ATM", 31) + + def test_use_atm_lnd(self): + "Solve layout atm_lnd from json file" + with tempfile.NamedTemporaryFile('w+') as jsonfile1: + atmlnd_dict = copy.deepcopy(JSON_DICT) + # Fake data for ROF, CPL + atmlnd_dict['ROF'] = {"ntasks" : [32,64,128,256], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [8.0, 4.0, 2.0, 1.0]} + atmlnd_dict['CPL'] = {"ntasks" : [32,64,128,256], + "blocksize" : 8, + "nthrds" : [1], + "cost" : [8.0, 4.0, 2.0, 1.0]} + json.dump(atmlnd_dict, jsonfile1) + jsonfile1.flush() + cmd = "./load_balancing_solve.py --json-input %s --print-models --layout tests.atm_lnd.AtmLnd" % (jsonfile1.name) + output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) + self._check_solution(output, "Natm", 976) + self._check_solution(output, "NBatm", 976/8) + +if __name__ == '__main__': + _main_func(__doc__) diff --git a/tools/load_balancing_tool/tests/test.xml b/tools/load_balancing_tool/tests/test.xml new file mode 100644 index 00000000000..25ae265bdc6 --- /dev/null +++ b/tools/load_balancing_tool/tests/test.xml @@ -0,0 +1,39 @@ + + + + + + + 1 + 1 + 872 + 32 + 120 + 992 + 1 + 1 + + + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + + + 0 + 0 + 0 + 992 + 872 + 0 + 0 + 0 + + + + + diff --git a/tools/load_balancing_tool/tests/timing/timing_1 b/tools/load_balancing_tool/tests/timing/timing_1 new file mode 100644 index 00000000000..be66400a530 --- /dev/null +++ 
b/tools/load_balancing_tool/tests/timing/timing_1 @@ -0,0 +1,190 @@ +---------------- TIMING PROFILE --------------------- + Case : lbt_timing_run_1 + LID : 170911-105102 + Machine : pauling + Caseroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_1 + Timeroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_1/Tools + User : sarich + Curr Date : Mon Sep 11 10:58:44 2017 + grid : a%ne30np4_l%ne30np4_oi%gx1v6_r%r05_m%gx1v6_g%null_w%null + compset : 2000_XATM_XLND_XICE_XOCN_XROF_XGLC_XWAV + run_type : startup, continue_run = FALSE (inittype = TRUE) + stop_option : ndays, stop_n = 10 + run_length : 10 days (9 for ocean) + + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 2 0 2 x 1 1 (1 ) + atm = xatm 2 0 2 x 1 1 (1 ) + lnd = xlnd 2 0 2 x 1 1 (1 ) + ice = xice 2 0 2 x 1 1 (1 ) + ocn = xocn 2 0 2 x 1 1 (1 ) + rof = xrof 2 0 2 x 1 1 (1 ) + glc = xglc 2 0 2 x 1 1 (1 ) + wav = xwav 2 0 2 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 36.51 pe-hrs/simulated_year + Model Throughput: 5.26 simulated_years/day + + Init Time : 9.675 seconds + Run Time : 450.174 seconds 45.017 seconds/day + Final Time : 0.001 seconds + + Actual Ocn Init Wait Time : 0.000 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 450.174 seconds 45.017 seconds/mday 5.26 myears/wday + CPL Run Time: 324.956 seconds 32.496 seconds/mday 7.28 myears/wday + ATM Run Time: 20.444 seconds 2.044 seconds/mday 115.79 myears/wday + LND Run Time: 29.597 seconds 2.960 seconds/mday 79.98 
myears/wday + ICE Run Time: 45.316 seconds 4.532 seconds/mday 52.24 myears/wday + OCN Run Time: 0.383 seconds 0.038 seconds/mday 6180.48 myears/wday + ROF Run Time: 5.402 seconds 0.540 seconds/mday 438.19 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 17.674 seconds 1.767 seconds/mday 133.93 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 1) + OCN (pes 0 to 1) + LND (pes 0 to 1) + ROF (pes 0 to 1) + ICE (pes 0 to 1) + ATM (pes 0 to 1) + GLC (pes 0 to 1) + WAV (pes 0 to 1) + + CPL:CLOCK_ADVANCE 0.004: 0.005 + CPL:OCNPRE1_BARRIER 0.000: 0.000 + CPL:OCNPRE1 3.571: 3.655 + CPL:OCNPREP_BARRIER 0.000: 0.000 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.000: 0.001 + CPL:C2O <----> 0.010: 0.010 + CPL:LNDPREP_BARRIER 0.003: 0.087 + CPL:LNDPREP 0.405: 0.409 + CPL:C2L_BARRIER <----> 0.013: 0.015 + CPL:C2L <----> 0.299: 0.299 + CPL:ICEPREP_BARRIER 0.000: 0.000 + CPL:ICEPREP 0.958: 0.960 + CPL:C2I_BARRIER <----> 0.025: 0.028 + CPL:C2I <----> 0.439: 0.439 + CPL:ROFPREP_BARRIER 0.000: 0.000 + CPL:ROFPREP 1.751: 1.757 + CPL:C2R_BARRIER <----> 0.032: 0.038 + CPL:C2R <----> 0.119: 0.119 + CPL:ICE_RUN_BARRIER 0.000: 0.000 + CPL:ICE_RUN 3.970: 4.532 + CPL:LND_RUN_BARRIER 0.007: 0.576 + CPL:LND_RUN 2.920: 2.960 + CPL:ROF_RUN_BARRIER 0.059: 0.106 + CPL:ROF_RUN 0.522: 0.540 + CPL:ATMOCNP_BARRIER 0.016: 0.026 + CPL:ATMOCNP 10.243: 10.402 + CPL:L2C_BARRIER <----> 0.158: 0.318 + CPL:L2C 2.655: 2.667 + CPL:LNDPOST_BARRIER 0.002: 0.003 + CPL:LNDPOST 0.048: 0.048 + CPL:R2C_BARRIER <----> 0.001: 0.001 + CPL:R2C <----> 0.142: 0.142 + CPL:ROFPOST_BARRIER 0.001: 0.001 + CPL:ROFPOST 5.444: 5.647 + CPL:I2C_BARRIER <----> 0.000: 0.000 + CPL:I2C <----> 0.311: 0.311 + CPL:ICEPOST_BARRIER 0.003: 0.003 + CPL:ICEPOST 0.000: 
0.000 + CPL:FRACSET_BARRIER 0.000: 0.000 + CPL:FRACSET 0.368: 0.370 + CPL:ATMPREP_BARRIER 0.004: 0.006 + CPL:ATMPREP 9.441: 9.458 + CPL:C2A_BARRIER <----> 0.040: 0.057 + CPL:C2A <----> 0.082: 0.083 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.039: 0.043 + CPL:ATM_RUN_BARRIER 0.002: 0.004 + CPL:ATM_RUN 1.518: 2.044 + CPL:A2C_BARRIER <----> 0.002: 0.537 + CPL:A2C <----> 0.096: 0.098 + CPL:ATMPOST_BARRIER 0.000: 0.002 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.003 + CPL:OCNPOST_BARRIER 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.000 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.001: 45.013 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 + + +More info on coupler timing: + + CPL:OCNPRE1 3.571: 3.655 + CPL:ocnpre1_atm2ocn 3.571: 3.655 + + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 + + CPL:LNDPREP 0.405: 0.409 + CPL:lndprep_atm2lnd 0.105: 0.107 + CPL:lndprep_mrgx2l 0.298: 0.304 + + CPL:ICEPREP 0.958: 0.960 + CPL:iceprep_ocn2ice 0.079: 0.086 + CPL:iceprep_atm2ice 0.247: 0.255 + CPL:iceprep_mrgx2i 0.624: 0.626 + + + CPL:ROFPREP 1.751: 1.757 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 1.682: 1.692 + CPL:rofprep_mrgx2r 0.064: 0.069 + + + CPL:ATMPREP 9.441: 9.458 + CPL:atmprep_xao2atm 2.299: 2.318 + CPL:atmprep_ocn2atm 1.458: 1.474 + CPL:atmprep_alb2atm 0.955: 0.961 + CPL:atmprep_ice2atm 2.669: 2.714 + CPL:atmprep_lnd2atm 0.835: 0.850 + CPL:atmprep_mrgx2a 1.156: 1.209 + + CPL:ATMOCNP 10.243: 10.402 + CPL:atmocnp_ice2ocn 0.204: 0.215 + CPL:atmocnp_fluxo 3.843: 3.855 + CPL:atmocnp_mrgx2o 4.861: 4.932 + CPL:atmocnp_accum 0.706: 0.713 + CPL:atmocnp_ocnalb 0.609: 0.707 + + + CPL:OCNPOST 0.000: 0.000 + + CPL:LNDPOST 0.048: 0.048 + + CPL:rofpost_rof2lnd 0.818: 0.959 + CPL:rofpost_rof2ocn 4.625: 4.689 + + CPL:ICEPOST 0.000: 0.000 + + + + CPL:ATMPOST 0.000: 0.000 + + + diff --git a/tools/load_balancing_tool/tests/timing/timing_2 
b/tools/load_balancing_tool/tests/timing/timing_2 new file mode 100644 index 00000000000..f0420a4e7cd --- /dev/null +++ b/tools/load_balancing_tool/tests/timing/timing_2 @@ -0,0 +1,190 @@ +---------------- TIMING PROFILE --------------------- + Case : lbt_timing_run_2 + LID : 170911-105938 + Machine : pauling + Caseroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_2 + Timeroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_2/Tools + User : sarich + Curr Date : Mon Sep 11 11:05:03 2017 + grid : a%ne30np4_l%ne30np4_oi%gx1v6_r%r05_m%gx1v6_g%null_w%null + compset : 2000_XATM_XLND_XICE_XOCN_XROF_XGLC_XWAV + run_type : startup, continue_run = FALSE (inittype = TRUE) + stop_option : ndays, stop_n = 10 + run_length : 10 days (9 for ocean) + + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 4 0 4 x 1 1 (1 ) + atm = xatm 4 0 4 x 1 1 (1 ) + lnd = xlnd 4 0 4 x 1 1 (1 ) + ice = xice 4 0 4 x 1 1 (1 ) + ocn = xocn 4 0 4 x 1 1 (1 ) + rof = xrof 4 0 4 x 1 1 (1 ) + glc = xglc 4 0 4 x 1 1 (1 ) + wav = xwav 4 0 4 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 25.80 pe-hrs/simulated_year + Model Throughput: 7.44 simulated_years/day + + Init Time : 5.366 seconds + Run Time : 318.103 seconds 31.810 seconds/day + Final Time : 0.000 seconds + + Actual Ocn Init Wait Time : 0.006 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 318.103 seconds 31.810 seconds/mday 7.44 myears/wday + CPL Run Time: 230.786 seconds 23.079 seconds/mday 10.26 myears/wday + ATM Run 
Time: 10.763 seconds 1.076 seconds/mday 219.93 myears/wday + LND Run Time: 15.610 seconds 1.561 seconds/mday 151.64 myears/wday + ICE Run Time: 25.715 seconds 2.571 seconds/mday 92.05 myears/wday + OCN Run Time: 0.200 seconds 0.020 seconds/mday 11835.62 myears/wday + ROF Run Time: 3.775 seconds 0.378 seconds/mday 627.05 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 20.367 seconds 2.037 seconds/mday 116.22 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 3) + OCN (pes 0 to 3) + LND (pes 0 to 3) + ROF (pes 0 to 3) + ICE (pes 0 to 3) + ATM (pes 0 to 3) + GLC (pes 0 to 3) + WAV (pes 0 to 3) + + CPL:CLOCK_ADVANCE 0.004: 0.005 + CPL:OCNPRE1_BARRIER 0.001: 0.001 + CPL:OCNPRE1 2.454: 2.531 + CPL:OCNPREP_BARRIER 0.000: 0.000 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.000: 0.001 + CPL:C2O <----> 0.010: 0.010 + CPL:LNDPREP_BARRIER 0.020: 0.098 + CPL:LNDPREP 0.364: 0.385 + CPL:C2L_BARRIER <----> 0.026: 0.047 + CPL:C2L <----> 0.353: 0.361 + CPL:ICEPREP_BARRIER 0.000: 0.010 + CPL:ICEPREP 0.867: 0.903 + CPL:C2I_BARRIER <----> 0.057: 0.091 + CPL:C2I <----> 0.516: 0.529 + CPL:ROFPREP_BARRIER 0.000: 0.015 + CPL:ROFPREP 1.097: 1.122 + CPL:C2R_BARRIER <----> 0.042: 0.068 + CPL:C2R <----> 0.144: 0.146 + CPL:ICE_RUN_BARRIER 0.001: 0.004 + CPL:ICE_RUN 2.072: 2.571 + CPL:LND_RUN_BARRIER 0.036: 0.533 + CPL:LND_RUN 1.514: 1.561 + CPL:ROF_RUN_BARRIER 0.125: 0.177 + CPL:ROF_RUN 0.337: 0.378 + CPL:ATMOCNP_BARRIER 0.040: 0.077 + CPL:ATMOCNP 6.880: 7.078 + CPL:L2C_BARRIER <----> 0.401: 0.598 + CPL:L2C 3.004: 3.072 + CPL:LNDPOST_BARRIER 0.021: 0.027 + CPL:LNDPOST 0.033: 0.034 + CPL:R2C_BARRIER <----> 0.002: 0.004 + CPL:R2C <----> 0.153: 0.159 + CPL:ROFPOST_BARRIER 0.013: 0.018 + CPL:ROFPOST 3.295: 4.113 + 
CPL:I2C_BARRIER <----> 0.001: 0.002 + CPL:I2C <----> 0.339: 0.352 + CPL:ICEPOST_BARRIER 0.025: 0.038 + CPL:ICEPOST 0.000: 0.000 + CPL:FRACSET_BARRIER 0.001: 0.002 + CPL:FRACSET 0.330: 0.331 + CPL:ATMPREP_BARRIER 0.009: 0.009 + CPL:ATMPREP 6.625: 6.677 + CPL:C2A_BARRIER <----> 0.069: 0.121 + CPL:C2A <----> 0.073: 0.079 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.020: 0.022 + CPL:ATM_RUN_BARRIER 0.004: 0.009 + CPL:ATM_RUN 0.718: 1.076 + CPL:A2C_BARRIER <----> 0.043: 0.405 + CPL:A2C <----> 0.091: 0.098 + CPL:ATMPOST_BARRIER 0.002: 0.009 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.003 + CPL:OCNPOST_BARRIER 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.001 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.002: 31.806 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 + + +More info on coupler timing: + + CPL:OCNPRE1 2.454: 2.531 + CPL:ocnpre1_atm2ocn 2.454: 2.531 + + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 + + CPL:LNDPREP 0.364: 0.385 + CPL:lndprep_atm2lnd 0.102: 0.110 + CPL:lndprep_mrgx2l 0.262: 0.275 + + CPL:ICEPREP 0.867: 0.903 + CPL:iceprep_ocn2ice 0.084: 0.086 + CPL:iceprep_atm2ice 0.240: 0.268 + CPL:iceprep_mrgx2i 0.542: 0.553 + + + CPL:ROFPREP 1.097: 1.122 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 1.053: 1.079 + CPL:rofprep_mrgx2r 0.043: 0.043 + + + CPL:ATMPREP 6.625: 6.677 + CPL:atmprep_xao2atm 1.546: 1.563 + CPL:atmprep_ocn2atm 0.973: 0.988 + CPL:atmprep_alb2atm 0.565: 0.579 + CPL:atmprep_ice2atm 1.925: 1.954 + CPL:atmprep_lnd2atm 0.778: 0.830 + CPL:atmprep_mrgx2a 0.779: 0.829 + + CPL:ATMOCNP 6.880: 7.078 + CPL:atmocnp_ice2ocn 0.200: 0.222 + CPL:atmocnp_fluxo 2.093: 2.228 + CPL:atmocnp_mrgx2o 3.654: 3.837 + CPL:atmocnp_accum 0.537: 0.572 + CPL:atmocnp_ocnalb 0.323: 0.418 + + + CPL:OCNPOST 0.000: 0.000 + + CPL:LNDPOST 0.033: 0.034 + + CPL:rofpost_rof2lnd 0.716: 0.842 + CPL:rofpost_rof2ocn 2.531: 3.271 + + CPL:ICEPOST 0.000: 0.000 + + + + 
CPL:ATMPOST 0.000: 0.000 + + + diff --git a/tools/load_balancing_tool/tests/timing/timing_3 b/tools/load_balancing_tool/tests/timing/timing_3 new file mode 100644 index 00000000000..66ce956e9c8 --- /dev/null +++ b/tools/load_balancing_tool/tests/timing/timing_3 @@ -0,0 +1,190 @@ +---------------- TIMING PROFILE --------------------- + Case : lbt_timing_run_3 + LID : 170911-110557 + Machine : pauling + Caseroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_3 + Timeroot : /sandbox/sarich/ACME-LBT/cime/scripts/lbt_timing_run_3/Tools + User : sarich + Curr Date : Mon Sep 11 11:10:30 2017 + grid : a%ne30np4_l%ne30np4_oi%gx1v6_r%r05_m%gx1v6_g%null_w%null + compset : 2000_XATM_XLND_XICE_XOCN_XROF_XGLC_XWAV + run_type : startup, continue_run = FALSE (inittype = TRUE) + stop_option : ndays, stop_n = 10 + run_length : 10 days (9 for ocean) + + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 8 0 8 x 1 1 (1 ) + atm = xatm 8 0 8 x 1 1 (1 ) + lnd = xlnd 8 0 8 x 1 1 (1 ) + ice = xice 8 0 8 x 1 1 (1 ) + ocn = xocn 8 0 8 x 1 1 (1 ) + rof = xrof 8 0 8 x 1 1 (1 ) + glc = xglc 8 0 8 x 1 1 (1 ) + wav = xwav 8 0 8 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 21.61 pe-hrs/simulated_year + Model Throughput: 8.89 simulated_years/day + + Init Time : 5.442 seconds + Run Time : 266.378 seconds 26.638 seconds/day + Final Time : 0.000 seconds + + Actual Ocn Init Wait Time : 0.006 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 266.378 seconds 26.638 seconds/mday 8.89 
myears/wday + CPL Run Time: 192.504 seconds 19.250 seconds/mday 12.30 myears/wday + ATM Run Time: 6.064 seconds 0.606 seconds/mday 390.36 myears/wday + LND Run Time: 9.020 seconds 0.902 seconds/mday 262.43 myears/wday + ICE Run Time: 15.208 seconds 1.521 seconds/mday 155.65 myears/wday + OCN Run Time: 0.132 seconds 0.013 seconds/mday 17932.75 myears/wday + ROF Run Time: 3.640 seconds 0.364 seconds/mday 650.31 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 24.289 seconds 2.429 seconds/mday 97.46 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 7) + OCN (pes 0 to 7) + LND (pes 0 to 7) + ROF (pes 0 to 7) + ICE (pes 0 to 7) + ATM (pes 0 to 7) + GLC (pes 0 to 7) + WAV (pes 0 to 7) + + CPL:CLOCK_ADVANCE 0.005: 0.005 + CPL:OCNPRE1_BARRIER 0.001: 0.002 + CPL:OCNPRE1 2.048: 2.204 + CPL:OCNPREP_BARRIER 0.000: 0.001 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.001: 0.002 + CPL:C2O <----> 0.011: 0.012 + CPL:LNDPREP_BARRIER 0.023: 0.178 + CPL:LNDPREP 0.376: 0.386 + CPL:C2L_BARRIER <----> 0.041: 0.050 + CPL:C2L <----> 0.412: 0.425 + CPL:ICEPREP_BARRIER 0.007: 0.022 + CPL:ICEPREP 0.870: 0.888 + CPL:C2I_BARRIER <----> 0.063: 0.084 + CPL:C2I <----> 0.634: 0.652 + CPL:ROFPREP_BARRIER 0.005: 0.028 + CPL:ROFPREP 0.738: 0.891 + CPL:C2R_BARRIER <----> 0.042: 0.196 + CPL:C2R <----> 0.167: 0.174 + CPL:ICE_RUN_BARRIER 0.004: 0.012 + CPL:ICE_RUN 1.287: 1.521 + CPL:LND_RUN_BARRIER 0.118: 0.342 + CPL:LND_RUN 0.872: 0.902 + CPL:ROF_RUN_BARRIER 0.128: 0.156 + CPL:ROF_RUN 0.345: 0.364 + CPL:ATMOCNP_BARRIER 0.058: 0.074 + CPL:ATMOCNP 5.643: 5.764 + CPL:L2C_BARRIER <----> 0.224: 0.344 + CPL:L2C 3.486: 3.592 + CPL:LNDPOST_BARRIER 0.034: 0.045 + CPL:LNDPOST 0.029: 0.030 + CPL:R2C_BARRIER <----> 0.007: 0.009 + CPL:R2C 
<----> 0.191: 0.196 + CPL:ROFPOST_BARRIER 0.019: 0.026 + CPL:ROFPOST 2.026: 3.835 + CPL:I2C_BARRIER <----> 0.004: 0.008 + CPL:I2C <----> 0.436: 0.444 + CPL:ICEPOST_BARRIER 0.060: 0.070 + CPL:ICEPOST 0.000: 0.000 + CPL:FRACSET_BARRIER 0.001: 0.004 + CPL:FRACSET 0.282: 0.319 + CPL:ATMPREP_BARRIER 0.012: 0.049 + CPL:ATMPREP 5.355: 5.427 + CPL:C2A_BARRIER <----> 0.055: 0.127 + CPL:C2A <----> 0.068: 0.079 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.013: 0.015 + CPL:ATM_RUN_BARRIER 0.011: 0.022 + CPL:ATM_RUN 0.423: 0.606 + CPL:A2C_BARRIER <----> 0.068: 0.254 + CPL:A2C <----> 0.091: 0.093 + CPL:ATMPOST_BARRIER 0.005: 0.010 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.004 + CPL:OCNPOST_BARRIER 0.000: 0.001 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.001 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.001: 0.001 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 + + +More info on coupler timing: + + CPL:OCNPRE1 2.048: 2.204 + CPL:ocnpre1_atm2ocn 2.047: 2.203 + + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 + + CPL:LNDPREP 0.376: 0.386 + CPL:lndprep_atm2lnd 0.100: 0.113 + CPL:lndprep_mrgx2l 0.271: 0.275 + + CPL:ICEPREP 0.870: 0.888 + CPL:iceprep_ocn2ice 0.079: 0.090 + CPL:iceprep_atm2ice 0.235: 0.266 + CPL:iceprep_mrgx2i 0.529: 0.555 + + + CPL:ROFPREP 0.738: 0.891 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 0.712: 0.848 + CPL:rofprep_mrgx2r 0.026: 0.048 + + + CPL:ATMPREP 5.355: 5.427 + CPL:atmprep_xao2atm 1.154: 1.236 + CPL:atmprep_ocn2atm 0.769: 0.812 + CPL:atmprep_alb2atm 0.397: 0.409 + CPL:atmprep_ice2atm 1.548: 1.682 + CPL:atmprep_lnd2atm 0.730: 0.907 + CPL:atmprep_mrgx2a 0.532: 0.615 + + CPL:ATMOCNP 5.643: 5.764 + CPL:atmocnp_ice2ocn 0.193: 0.210 + CPL:atmocnp_fluxo 1.350: 1.448 + CPL:atmocnp_mrgx2o 3.238: 3.314 + CPL:atmocnp_accum 0.557: 0.626 + CPL:atmocnp_ocnalb 0.230: 0.282 + + + CPL:OCNPOST 0.000: 0.000 + + CPL:LNDPOST 0.029: 0.030 + + CPL:rofpost_rof2lnd 0.702: 
0.837 + CPL:rofpost_rof2ocn 1.212: 3.047 + + CPL:ICEPOST 0.000: 0.000 + + + + CPL:ATMPOST 0.000: 0.000 + + +