diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index da9db6cd3d0..f2b03b8dcc6 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -213,7 +213,7 @@ typedef struct H5D_chunk_index_info_t { * hh - A handle for hash tables provided by the uthash.h header * */ -typedef struct H5D_filtered_collective_io_info_t { +typedef struct H5D_filtered_collective_chunk_info_t { H5D_chunk_index_info_t index_info; H5D_piece_info_t *chunk_info; @@ -229,14 +229,14 @@ typedef struct H5D_filtered_collective_io_info_t { void *buf; UT_hash_handle hh; -} H5D_filtered_collective_io_info_t; +} H5D_filtered_collective_chunk_info_t; /* - * Top-level structure that contains an array of H5D_filtered_collective_chunk_list_t + * Top-level structure that contains an array of H5D_filtered_collective_chunk_info_t * chunk info structures for collective filtered I/O, as well as other useful information. * The struct's fields are as follows: * - * chunk_infos - An array of H5D_filtered_collective_io_info_t structures that contain + * chunk_infos - An array of H5D_filtered_collective_chunk_info_t structures that contain * information about a single chunk when performing collective filtered I/O. * * num_chunks_infos - The number of entries in the `chunk_infos` array. @@ -246,7 +246,7 @@ typedef struct H5D_filtered_collective_io_info_t { * occur during dataset reads or during dataset writes when a chunk * has to go through a read - modify - write cycle). The value for * this field is based on a chunk's `need_read` field in its particular - * H5D_filtered_collective_io_info_t structure, but may be adjusted + * H5D_filtered_collective_chunk_info_t structure, but may be adjusted * later depending on file space allocation timing and other factors. * * This field can be helpful to avoid needing to scan through the list @@ -254,12 +254,12 @@ typedef struct H5D_filtered_collective_io_info_t { * allocate during read operations, as an example. 
* */ -typedef struct H5D_filtered_collective_chunk_list_t { - H5D_filtered_collective_io_info_t *chunk_infos; - H5D_filtered_collective_io_info_t *chunk_hash_table; - size_t num_chunk_infos; - size_t num_chunks_to_read; -} H5D_filtered_collective_chunk_list_t; +typedef struct H5D_filtered_collective_io_info_t { + H5D_filtered_collective_chunk_info_t *chunk_infos; + H5D_filtered_collective_chunk_info_t *chunk_hash_table; + size_t num_chunk_infos; + size_t num_chunks_to_read; +} H5D_filtered_collective_io_info_t; /* * Information necessary for redistributing shared chunks during @@ -304,27 +304,27 @@ static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, int *sum_chu static herr_t H5D__mpio_get_sum_chunk_dset(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *dset_info, int *sum_chunkf); static herr_t H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, - int mpi_rank, H5D_filtered_collective_chunk_list_t *chunk_list); -static herr_t H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_chunk_list_t *chunk_list, + int mpi_rank, H5D_filtered_collective_io_info_t *chunk_list); +static herr_t H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_list, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size, size_t **rank_chunks_assigned_map); -static herr_t H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, hbool_t all_ranks_involved, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size); -static herr_t H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list, H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, int H5_ATTR_NDEBUG_UNUSED mpi_size, unsigned char ***chunk_msg_bufs, int *chunk_msg_bufs_len); -static herr_t H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list, const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, int mpi_rank); -static herr_t H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list, unsigned char **chunk_msg_bufs, int chunk_msg_bufs_len, const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, int H5_ATTR_NDEBUG_UNUSED mpi_rank); -static herr_t H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, H5D_chk_idx_info_t *idx_info, int mpi_rank, int mpi_size); -static herr_t H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, H5D_dset_io_info_t *di, H5D_chk_idx_info_t *idx_info, int mpi_rank, int mpi_size); @@ -337,7 +337,7 @@ static herr_t H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hb static herr_t 
H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived, MPI_Datatype *resized_type, hbool_t *resized_type_derived); -static herr_t H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_io_info_t *chunk_list, H5F_shared_t *f_sh, H5D_io_op_type_t op_type); static int H5D__cmp_piece_addr(const void *chunk_addr_info1, const void *chunk_addr_info2); static int H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_info_entry1, @@ -347,7 +347,7 @@ static int H5D__cmp_chunk_redistribute_info_orig_owner(const void *entry1, co #ifdef H5Dmpio_DEBUG static herr_t H5D__mpio_debug_init(void); -static herr_t H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_chunk_list_t *chunk_list, +static herr_t H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_io_info_t *chunk_list, int mpi_rank); #endif @@ -1736,7 +1736,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran * * TODO: Note that steps D. and F. here are both collective * operations that partially share data from the - * H5D_filtered_collective_io_info_t structure. To + * H5D_filtered_collective_chunk_info_t structure. To * try to conserve on memory a bit, the distributed * arrays these operations create are discarded after * each operation is performed. If memory consumption @@ -1754,11 +1754,11 @@ static herr_t H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, int mpi_size) { - H5D_filtered_collective_chunk_list_t chunk_list = {0}; - unsigned char **chunk_msg_bufs = NULL; - size_t *rank_chunks_assigned_map = NULL; - int chunk_msg_bufs_len = 0; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_io_info_t chunk_list = {0}; + unsigned char **chunk_msg_bufs = NULL; + size_t *rank_chunks_assigned_map = NULL; + int chunk_msg_bufs_len = 0; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE_TAG(dset_info->dset->oloc.addr) @@ -2155,7 +2155,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_ * * TODO: Note that steps E. and G. here are both collective * operations that partially share data from the - * H5D_filtered_collective_io_info_t structure. To + * H5D_filtered_collective_chunk_info_t structure. To * try to conserve on memory a bit, the distributed * arrays these operations create are discarded after * each operation is performed. 
If memory consumption @@ -2173,13 +2173,13 @@ static herr_t H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, int mpi_size) { - H5D_filtered_collective_chunk_list_t chunk_list = {0}; - unsigned char **chunk_msg_bufs = NULL; - hbool_t have_chunk_to_process; - size_t max_num_chunks; - int chunk_msg_bufs_len = 0; - int mpi_code; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_io_info_t chunk_list = {0}; + unsigned char **chunk_msg_bufs = NULL; + hbool_t have_chunk_to_process; + size_t max_num_chunks; + int chunk_msg_bufs_len = 0; + int mpi_code; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE_TAG(dset_info->dset->oloc.addr) @@ -2215,7 +2215,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */ for (size_t i = 0; i < max_num_chunks; i++) { - H5D_filtered_collective_chunk_list_t single_chunk_list = {0}; + H5D_filtered_collective_io_info_t single_chunk_list = {0}; /* Check if this rank has a chunk to work on for this iteration */ have_chunk_to_process = (i < chunk_list.num_chunk_infos); @@ -2267,7 +2267,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info * collective re-allocation and re-insertion of chunks modified by other ranks. */ for (size_t i = 0; i < max_num_chunks; i++) { - H5D_filtered_collective_chunk_list_t single_chunk_list = {0}; + H5D_filtered_collective_io_info_t single_chunk_list = {0}; /* Check if this rank has a chunk to work on for this iteration */ have_chunk_to_process = (i < chunk_list.num_chunk_infos) && (mpi_rank == chunk_list.chunk_infos[i].new_owner); @@ -2565,16 +2565,16 @@ static int H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_info_entry1, const void *filtered_collective_io_info_entry2) { - const H5D_filtered_collective_io_info_t *entry1; - const H5D_filtered_collective_io_info_t *entry2; + const H5D_filtered_collective_chunk_info_t *entry1; + const H5D_filtered_collective_chunk_info_t *entry2; haddr_t addr1 = HADDR_UNDEF; haddr_t addr2 = HADDR_UNDEF; int ret_value; FUNC_ENTER_PACKAGE_NOERR - entry1 = (const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry1; - entry2 = (const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry2; + entry1 = (const H5D_filtered_collective_chunk_info_t *)filtered_collective_io_info_entry1; + entry2 = (const H5D_filtered_collective_chunk_info_t *)filtered_collective_io_info_entry2; addr1 = entry1->chunk_new.offset; addr2 = entry2->chunk_new.offset; @@ -2924,14 +2924,14 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as */ static herr_t H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, - int mpi_rank, H5D_filtered_collective_chunk_list_t *chunk_list) + int mpi_rank, H5D_filtered_collective_io_info_t *chunk_list) { - H5D_filtered_collective_io_info_t *local_info_array = NULL; - H5D_chunk_ud_t udata; - hbool_t filter_partial_edge_chunks; - size_t num_chunks_selected; - size_t num_chunks_to_read = 0; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_chunk_info_t *local_info_array = NULL; + H5D_chunk_ud_t udata; + hbool_t filter_partial_edge_chunks; + size_t num_chunks_selected; + size_t num_chunks_to_read = 0; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -3082,7 +3082,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const /* Ensure the 
chunk list is sorted in ascending order of offset in the file */ if (need_sort) - qsort(local_info_array, num_chunks_selected, sizeof(H5D_filtered_collective_io_info_t), + qsort(local_info_array, num_chunks_selected, sizeof(H5D_filtered_collective_chunk_info_t), H5D__cmp_filtered_collective_io_info_entry); } else if (H5F_get_coll_metadata_reads(di->dset->oloc.file)) { @@ -3148,7 +3148,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_list, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size, size_t **rank_chunks_assigned_map) { @@ -3298,7 +3298,7 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_chunk_list_t *chunk *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, hbool_t all_ranks_involved, const H5D_io_info_t *io_info, int mpi_rank, int mpi_size) { @@ -3651,29 +3651,29 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_chunk_list_t *c *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list, H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, int mpi_rank, int H5_ATTR_NDEBUG_UNUSED mpi_size, unsigned char ***chunk_msg_bufs, int *chunk_msg_bufs_len) { #if H5_CHECK_MPI_VERSION(3, 0) - H5D_filtered_collective_io_info_t *chunk_table = NULL; - H5S_sel_iter_t *mem_iter = NULL; - unsigned char **msg_send_bufs = NULL; - unsigned char **msg_recv_bufs = NULL; - MPI_Request *send_requests = NULL; - MPI_Request *recv_requests = NULL; - MPI_Request ibarrier = MPI_REQUEST_NULL; - hbool_t mem_iter_init = FALSE; - hbool_t ibarrier_posted = FALSE; - size_t send_bufs_nalloc = 0; - size_t num_send_requests = 0; - size_t num_recv_requests = 0; - size_t num_msgs_incoming = 0; - size_t last_assigned_idx; - size_t i; - int mpi_code; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_chunk_info_t *chunk_table = NULL; + H5S_sel_iter_t *mem_iter = NULL; + unsigned char **msg_send_bufs = NULL; + unsigned char **msg_recv_bufs = NULL; + MPI_Request *send_requests = NULL; + MPI_Request *recv_requests = NULL; + MPI_Request ibarrier = MPI_REQUEST_NULL; + hbool_t mem_iter_init = FALSE; + hbool_t ibarrier_posted = FALSE; + size_t send_bufs_nalloc = 0; + size_t num_send_requests = 0; + size_t num_recv_requests = 0; + size_t num_msgs_incoming = 0; + size_t last_assigned_idx; + size_t i; + int mpi_code; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -3725,7 +3725,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_chunk_list_t *ch * the rank that does own the chunk. 
*/ for (i = 0, last_assigned_idx = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; if (mpi_rank == chunk_entry->new_owner) { num_msgs_incoming += (size_t)(chunk_entry->num_writers - 1); @@ -4065,7 +4065,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_chunk_list_t *ch *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list, const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, int mpi_rank) { H5D_fill_buf_info_t fb_info; @@ -4123,7 +4123,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_chunk_list_t *c * chunk size would of course be bad. */ for (size_t i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; assert(chunk_entry->need_read); @@ -4216,8 +4216,8 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_chunk_list_t *c * data out to the application's read buffer. */ for (size_t i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list->chunk_infos[i]; - H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; /* Unfilter the chunk, unless we didn't read it from the file */ if (chunk_entry->need_read && !chunk_entry->skip_filter_pline) { @@ -4272,7 +4272,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_chunk_list_t *c *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list, unsigned char **chunk_msg_bufs, int chunk_msg_bufs_len, const H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, int H5_ATTR_NDEBUG_UNUSED mpi_rank) @@ -4346,7 +4346,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t * chunk size would of course be bad. 
*/ for (i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; assert(mpi_rank == chunk_entry->new_owner); @@ -4459,8 +4459,8 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t /* Process all chunks with data from the owning rank first */ for (i = 0; i < chunk_list->num_chunk_infos; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list->chunk_infos[i]; - H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; + H5D_filtered_collective_chunk_info_t *chunk_entry = &chunk_list->chunk_infos[i]; + H5D_piece_info_t *chunk_info = chunk_entry->chunk_info; assert(mpi_rank == chunk_entry->new_owner); @@ -4489,9 +4489,9 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t /* Now process all received chunk message buffers */ for (i = 0; i < (size_t)chunk_msg_bufs_len; i++) { - H5D_filtered_collective_io_info_t *chunk_entry = NULL; - const unsigned char *msg_ptr = chunk_msg_bufs[i]; - hsize_t chunk_idx; + H5D_filtered_collective_chunk_info_t *chunk_entry = NULL; + const unsigned char *msg_ptr = chunk_msg_bufs[i]; + hsize_t chunk_idx; if (msg_ptr) { /* Retrieve the chunk's index value */ @@ -4609,7 +4609,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_chunk_list_t *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, H5D_chk_idx_info_t *idx_info, int mpi_rank, int mpi_size) { @@ -4725,7 +4725,7 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_chunk_lis (coll_entry->chunk_idx == chunk_list->chunk_infos[num_local_chunks_processed].index_info.chunk_idx); if (update_local_chunk) { - H5D_filtered_collective_io_info_t *local_chunk; + H5D_filtered_collective_chunk_info_t *local_chunk; local_chunk = &chunk_list->chunk_infos[num_local_chunks_processed]; @@ -4761,7 +4761,7 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_chunk_lis * ascending order of offset in the file */ if (need_sort) - qsort(chunk_list->chunk_infos, chunk_list->num_chunk_infos, sizeof(H5D_filtered_collective_io_info_t), + qsort(chunk_list->chunk_infos, chunk_list->num_chunk_infos, sizeof(H5D_filtered_collective_chunk_info_t), H5D__cmp_filtered_collective_io_info_entry); done: @@ -4799,7 +4799,7 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_chunk_lis *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list, size_t *num_chunks_assigned_map, H5D_io_info_t *io_info, H5D_dset_io_info_t *di, H5D_chk_idx_info_t *idx_info, int mpi_rank, int mpi_size) @@ -4995,7 +4995,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_chunk_list_ * Function: H5D__mpio_get_chunk_redistribute_info_types * * Purpose: Constructs MPI derived datatypes for communicating the - * info from a H5D_filtered_collective_io_info_t structure + * info from a H5D_filtered_collective_chunk_info_t structure * that is necessary for 
redistributing shared chunks during a * collective write of filtered chunks. * @@ -5005,7 +5005,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_chunk_list_ * type. * * The datatype returned through `resized_type` has an extent - * equal to the size of an H5D_filtered_collective_io_info_t + * equal to the size of an H5D_filtered_collective_chunk_info_t * structure. This makes it suitable for sending an array of * those structures, while extracting out just the info * necessary for the chunk redistribution operation during @@ -5076,7 +5076,7 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t * HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) /* Create struct type to extract the chunk_current, chunk_idx, orig_owner, - * new_owner and num_writers fields from a H5D_filtered_collective_io_info_t + * new_owner and num_writers fields from a H5D_filtered_collective_chunk_info_t * structure */ block_lengths[0] = 1; @@ -5084,11 +5084,11 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t * block_lengths[2] = 1; block_lengths[3] = 1; block_lengths[4] = 1; - displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_current); - displacements[1] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx); - displacements[2] = offsetof(H5D_filtered_collective_io_info_t, orig_owner); - displacements[3] = offsetof(H5D_filtered_collective_io_info_t, new_owner); - displacements[4] = offsetof(H5D_filtered_collective_io_info_t, num_writers); + displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_current); + displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); + displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, orig_owner); + displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, new_owner); + displacements[4] = offsetof(H5D_filtered_collective_chunk_info_t, num_writers); types[0] = chunk_block_type; types[1] = HSIZE_AS_MPI_TYPE; types[2] = MPI_INT; @@ -5100,7 +5100,7 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t * struct_type_derived = TRUE; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized( - struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type))) + struct_type, 0, sizeof(H5D_filtered_collective_chunk_info_t), resized_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code) *resized_type_derived = TRUE; @@ -5137,16 +5137,16 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t * * Function: H5D__mpio_get_chunk_alloc_info_types * * Purpose: Constructs MPI derived datatypes for communicating the info - * from a H5D_filtered_collective_io_info_t structure that is - * necessary for re-allocating file space during a collective - * write of filtered chunks. + * from a H5D_filtered_collective_chunk_info_t structure that + * is necessary for re-allocating file space during a + * collective write of filtered chunks. * * The datatype returned through `contig_type` has an extent * equal to the size of an H5D_chunk_alloc_info_t structure * and is suitable for communicating that structure type. * * The datatype returned through `resized_type` has an extent - * equal to the size of an H5D_filtered_collective_io_info_t + * equal to the size of an H5D_filtered_collective_chunk_info_t * structure. 
This makes it suitable for sending an array of * those structures, while extracting out just the info * necessary for the chunk file space reallocation operation @@ -5211,14 +5211,14 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_ /* * Create struct type to extract the chunk_current, chunk_new and chunk_idx - * fields from a H5D_filtered_collective_io_info_t structure + * fields from a H5D_filtered_collective_chunk_info_t structure */ block_lengths[0] = 1; block_lengths[1] = 1; block_lengths[2] = 1; - displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_current); - displacements[1] = offsetof(H5D_filtered_collective_io_info_t, chunk_new); - displacements[2] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx); + displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_current); + displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_new); + displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); types[0] = chunk_block_type; types[1] = chunk_block_type; types[2] = HSIZE_AS_MPI_TYPE; @@ -5228,7 +5228,7 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_ struct_type_derived = TRUE; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized( - struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type))) + struct_type, 0, sizeof(H5D_filtered_collective_chunk_info_t), resized_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code) *resized_type_derived = TRUE; @@ -5268,7 +5268,7 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_ * information necessary when reinserting chunks into a * dataset's chunk index. This includes the chunk's new offset * and size (H5F_block_t) and the inner `index_info` structure - * of a H5D_filtered_collective_io_info_t structure. + * of a H5D_filtered_collective_chunk_info_t structure. * * The datatype returned through `contig_type` has an extent * equal to the size of an H5D_chunk_insert_info_t structure @@ -5276,9 +5276,9 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_ * * The datatype returned through `resized_type` has an extent * equal to the size of the encompassing - * H5D_filtered_collective_io_info_t structure. This makes it - * suitable for sending an array of - * H5D_filtered_collective_io_info_t structures, while + * H5D_filtered_collective_chunk_info_t structure. This makes + * it suitable for sending an array of + * H5D_filtered_collective_chunk_info_t structures, while * extracting out just the information needed during * communication. * @@ -5357,20 +5357,20 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig /* * Create struct type to correctly extract all needed - * information from a H5D_filtered_collective_io_info_t + * information from a H5D_filtered_collective_chunk_info_t * structure. 
*/ - displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_new); - displacements[1] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx); - displacements[2] = offsetof(H5D_filtered_collective_io_info_t, index_info.filter_mask); - displacements[3] = offsetof(H5D_filtered_collective_io_info_t, index_info.need_insert); + displacements[0] = offsetof(H5D_filtered_collective_chunk_info_t, chunk_new); + displacements[1] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.chunk_idx); + displacements[2] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.filter_mask); + displacements[3] = offsetof(H5D_filtered_collective_chunk_info_t, index_info.need_insert); if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) struct_type_derived = TRUE; if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized( - struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type))) + struct_type, 0, sizeof(H5D_filtered_collective_chunk_info_t), resized_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code) *resized_type_derived = TRUE; @@ -5406,7 +5406,7 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig /*------------------------------------------------------------------------- * Function: H5D__mpio_collective_filtered_vec_io * - * Purpose: Given a pointer to a H5D_filtered_collective_chunk_list_t + * Purpose: Given a pointer to a H5D_filtered_collective_io_info_t * structure with information about collective filtered chunk * I/O, populates I/O vectors and performs vector I/O on those * chunks. @@ -5416,7 +5416,7 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_io_info_t *chunk_list, H5F_shared_t *f_sh, H5D_io_op_type_t op_type) { const void **io_wbufs = NULL; @@ -5532,12 +5532,12 @@ H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_chunk_list_t #ifdef H5Dmpio_DEBUG static herr_t -H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_chunk_list_t *chunk_list, +H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_io_info_t *chunk_list, int mpi_rank) { - H5D_filtered_collective_io_info_t *chunk_entry; - size_t i; - herr_t ret_value = SUCCEED; + H5D_filtered_collective_chunk_info_t *chunk_entry; + size_t i; + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE_NOERR
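
To keep the renamed types straight: after this change, H5D_filtered_collective_chunk_info_t describes a single chunk, while H5D_filtered_collective_io_info_t is the top-level aggregate that owns the per-chunk array, the uthash table head, and the cached counts. A minimal sketch of that relationship, using illustrative stand-in types (sketch_chunk_info_t, sketch_io_info_t, and count_chunks_to_read are not HDF5 names, and only a subset of the real fields is shown):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include "uthash.h"   /* provides UT_hash_handle, as referenced in the struct comment */

    /* One entry per chunk (small subset of the real H5D_filtered_collective_chunk_info_t) */
    typedef struct sketch_chunk_info_t {
        uint64_t       chunk_idx;   /* stand-in for index_info.chunk_idx */
        bool           need_read;   /* whether this chunk must be read from the file */
        void          *buf;         /* chunk data buffer */
        UT_hash_handle hh;          /* uthash handle, as in the real per-chunk struct */
    } sketch_chunk_info_t;

    /* Top-level aggregate (subset of the real H5D_filtered_collective_io_info_t) */
    typedef struct sketch_io_info_t {
        sketch_chunk_info_t *chunk_infos;        /* array with one entry per selected chunk */
        sketch_chunk_info_t *chunk_hash_table;   /* uthash head built over chunk_infos */
        size_t               num_chunk_infos;    /* entries in chunk_infos */
        size_t               num_chunks_to_read; /* cached count of entries with need_read set */
    } sketch_io_info_t;

    /* num_chunks_to_read can be maintained up front so later phases do not have
     * to rescan the list, matching the intent described in the struct comment. */
    static void
    count_chunks_to_read(sketch_io_info_t *io_info)
    {
        io_info->num_chunks_to_read = 0;
        for (size_t i = 0; i < io_info->num_chunk_infos; i++)
            if (io_info->chunk_infos[i].need_read)
                io_info->num_chunks_to_read++;
    }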
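
The per-chunk entries carry a UT_hash_handle (hh), and the aggregate keeps a chunk_hash_table head, so the owning rank can locate an entry by chunk index instead of scanning the array, for example when matching received chunk-modification messages against local entries. A minimal sketch of that lookup pattern with uthash, assuming hypothetical helper names (build_chunk_table and find_chunk are illustrative, not HDF5 functions):

    #include <stddef.h>
    #include <stdint.h>
    #include "uthash.h"

    typedef struct chunk_entry_t {
        uint64_t       chunk_idx;  /* key: chunk index within the dataset */
        UT_hash_handle hh;
    } chunk_entry_t;

    /* Build the table over an existing entry array. No copies are made; uthash
     * links the entries in place through their hh handles, so if the array is
     * later reallocated or re-sorted the table must be rebuilt. */
    static void
    build_chunk_table(chunk_entry_t *entries, size_t n, chunk_entry_t **table)
    {
        *table = NULL;
        for (size_t i = 0; i < n; i++)
            HASH_ADD(hh, *table, chunk_idx, sizeof(uint64_t), &entries[i]);
    }

    /* Look up the local entry for a chunk index, e.g. one decoded from a
     * received modification message. Returns NULL if no such entry exists. */
    static chunk_entry_t *
    find_chunk(chunk_entry_t *table, uint64_t chunk_idx)
    {
        chunk_entry_t *entry = NULL;
        HASH_FIND(hh, table, &chunk_idx, sizeof(uint64_t), entry);
        return entry;
    }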
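
Several hunks above build MPI derived datatypes by taking offsetof() displacements into the per-chunk structure and then resizing the type's extent to sizeof(H5D_filtered_collective_chunk_info_t), which lets an array of the full structures be passed straight to MPI while only the selected fields are actually transmitted. A standalone sketch of that pattern under simplified assumptions (the struct, its field names, and the MPI base types here are illustrative, not the actual HDF5 layout):

    #include <stddef.h>
    #include <mpi.h>

    typedef struct chunk_alloc_sketch_t {
        long long          chunk_offset;      /* stand-in for the chunk_current/chunk_new blocks */
        long long          chunk_size;
        unsigned long long chunk_idx;
        int                other_local_state; /* fields MPI should skip over */
        void              *buf;
    } chunk_alloc_sketch_t;

    /* Build a datatype that communicates only {chunk_offset, chunk_size, chunk_idx}
     * from each element, but whose extent equals the full struct size so an array
     * of chunk_alloc_sketch_t can be handed to MPI directly. */
    static int
    make_resized_type(MPI_Datatype *resized_type)
    {
        int          block_lengths[3] = {1, 1, 1};
        MPI_Aint     displacements[3];
        MPI_Datatype types[3] = {MPI_LONG_LONG, MPI_LONG_LONG, MPI_UNSIGNED_LONG_LONG};
        MPI_Datatype struct_type;
        int          mpi_code;

        displacements[0] = offsetof(chunk_alloc_sketch_t, chunk_offset);
        displacements[1] = offsetof(chunk_alloc_sketch_t, chunk_size);
        displacements[2] = offsetof(chunk_alloc_sketch_t, chunk_idx);

        if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(3, block_lengths, displacements,
                                                              types, &struct_type)))
            return mpi_code;

        /* Resize the extent to the full struct so consecutive array elements line
         * up; this mirrors the MPI_Type_create_resized() calls in the patch. */
        if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized(struct_type, 0,
                                                               (MPI_Aint)sizeof(chunk_alloc_sketch_t),
                                                               resized_type))) {
            MPI_Type_free(&struct_type);
            return mpi_code;
        }

        MPI_Type_free(&struct_type);
        return MPI_Type_commit(resized_type);
    }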