Temp. commit - Added support to gather all of the offsets to write in 'files.dat'
sergiorg-hpc committed Sep 25, 2023
1 parent 56dd241 commit f100cc9
Showing 6 changed files with 97 additions and 68 deletions.
4 changes: 3 additions & 1 deletion src/coreneuron/io/nrn_filehandler.cpp
@@ -7,6 +7,7 @@
*/

#include <iostream>
#include <regex>
#include "coreneuron/io/nrn_filehandler.hpp"
#include "coreneuron/nrnconf.h"

@@ -30,6 +31,7 @@ void FileHandler::open(const std::string& filename, std::ios::openmode mode) {
std::cerr << "cannot open file '" << filename << "'" << std::endl;
}
nrn_assert(F.is_open());
current_filename = filename;

Check failure on line 34 in src/coreneuron/io/nrn_filehandler.cpp (GitHub Actions / ubuntu-22.04 - cmake -DNRN_ENABLE_CORENEURON=ON -DNRN_ENABLE_INTERVIEWS=OFF -DNMODL_SANITIZERS=undefined): use of undeclared identifier 'current_filename'
current_mode = mode;
char version[256];
if (current_mode & std::ios::in) {
@@ -47,7 +49,7 @@ bool FileHandler::eof() {
return true;
}
int a = F.get();
if (F.eof()) {
if (F.eof() || (char)a == '\0') {
return true;
}
F.putback(a);
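The check failure above points at the new current_filename assignment in FileHandler::open. A minimal sketch of the declaration that assignment seems to need, assuming FileHandler owns the stream F in nrn_filehandler.hpp; the surrounding members are illustrative, not the real header:

#include <fstream>
#include <string>

// Sketch only: not the actual FileHandler from nrn_filehandler.hpp.
class FileHandler {
  public:
    void open(const std::string& filename, std::ios::openmode mode = std::ios::in) {
        F.open(filename.c_str(), mode);
        current_filename = filename;  // the assignment added by this commit
        current_mode = mode;
    }

  private:
    std::fstream F;
    std::string current_filename;  // the declaration the failing build is missing
    std::ios::openmode current_mode = std::ios::in;
};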
2 changes: 1 addition & 1 deletion src/nrniv/nrncore_write.cpp
@@ -254,7 +254,7 @@ size_t get_filesize(const char* fname) {
}

static void part2(const char* path) {
std::array<size_t, 5> offsets;
std::array<size_t, 4> offsets;

CellGroup* cgs = cellgroups_;
for (int i = 0; i < nrn_nthread; ++i) {
148 changes: 84 additions & 64 deletions src/nrniv/nrncore_write/io/nrncore_io.cpp
@@ -165,6 +165,9 @@ std::array<size_t, 2> write_nrnthread(const char* path, NrnThread& nt, CellGroup
cg.netcon_srcgid = NULL;
}

// Mark the end of the file with '\0' <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
fputc(0, f);

// Set the second offset inside the file
offsets[1] = ftell(f);

@@ -307,6 +310,9 @@

nrnbbcore_vecplay_write(f, nt);

// Mark the end of the file with '\0' <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
fputc(0, f);

fclose(f);

return offsets;
@@ -375,72 +381,82 @@ static void fgets_no_newline(char* s, int size, FILE* f) {
* ...
* idN
*/
void write_nrnthread_task(const char* path, CellGroup* cgs, bool append) {
// ids of datasets that will be created
std::vector<int> iSend;

// ignore empty nrnthread (has -1 id)
for (int iInt = 0; iInt < nrn_nthread; ++iInt) {
if (cgs[iInt].group_id >= 0) {
iSend.push_back(cgs[iInt].group_id);
}
}
void write_nrnthread_task(const char* path, CellGroup* cgs, bool append, std::vector<size_t> file_offsets) {
// // ids of datasets that will be created
// std::vector<int> iSend;

// // ignore empty nrnthread (has -1 id)
// for (int iInt = 0; iInt < nrn_nthread; ++iInt) {
// if (cgs[iInt].group_id >= 0) {
// iSend.push_back(cgs[iInt].group_id);
// }
// }

// // receive and displacement buffers for mpi
// std::vector<int> iRecv, iDispl;

// if (nrnmpi_myid == 0) {
// iRecv.resize(nrnmpi_numprocs);
// iDispl.resize(nrnmpi_numprocs);
// }

// // number of datasets on the current rank
// int num_datasets = iSend.size();

// #ifdef NRNMPI
// // gather number of datasets from each task
// if (nrnmpi_numprocs > 1) {
// nrnmpi_int_gather(&num_datasets, begin_ptr(iRecv), 1, 0);
// } else {
// iRecv[0] = num_datasets;
// }
// #else
// iRecv[0] = num_datasets;
// #endif

// // total number of datasets across all ranks
// int iSumThread = 0;

// // calculate mpi displacements
// if (nrnmpi_myid == 0) {
// for (int iInt = 0; iInt < nrnmpi_numprocs; ++iInt) {
// iDispl[iInt] = iSumThread;
// iSumThread += iRecv[iInt];
// }
// }

// // buffer for receiving all dataset ids
// std::vector<int> iRecvVec(iSumThread);

// #ifdef NRNMPI
// // gather ids into the array with correspondent offsets
// if (nrnmpi_numprocs > 1) {
// nrnmpi_int_gatherv(begin_ptr(iSend),
// num_datasets,
// begin_ptr(iRecvVec),
// begin_ptr(iRecv),
// begin_ptr(iDispl),
// 0);
// } else {
// for (int iInt = 0; iInt < num_datasets; ++iInt) {
// iRecvVec[iInt] = iSend[iInt];
// }
// }
// #else
// for (int iInt = 0; iInt < num_datasets; ++iInt) {
// iRecvVec[iInt] = iSend[iInt];
// }
// #endif

int num_offsets = file_offsets.size();
file_offsets.resize(file_offsets.size() * (nrnmpi_myid == 0 ? nrnmpi_numprocs : 1ULL));
nrnmpi_sizet_gather(file_offsets.data(), file_offsets.data(), num_offsets, 0);



// <<<<<<<<<<<<<<<<<<<<<<<<< The writing part is not done!! Just collecting the offsets from all of the ranks, and we have to write that to the file, somehow

// receive and displacement buffers for mpi
std::vector<int> iRecv, iDispl;

if (nrnmpi_myid == 0) {
iRecv.resize(nrnmpi_numprocs);
iDispl.resize(nrnmpi_numprocs);
}

// number of datasets on the current rank
int num_datasets = iSend.size();

#ifdef NRNMPI
// gather number of datasets from each task
if (nrnmpi_numprocs > 1) {
nrnmpi_int_gather(&num_datasets, begin_ptr(iRecv), 1, 0);
} else {
iRecv[0] = num_datasets;
}
#else
iRecv[0] = num_datasets;
#endif

// total number of datasets across all ranks
int iSumThread = 0;

// calculate mpi displacements
if (nrnmpi_myid == 0) {
for (int iInt = 0; iInt < nrnmpi_numprocs; ++iInt) {
iDispl[iInt] = iSumThread;
iSumThread += iRecv[iInt];
}
}

// buffer for receiving all dataset ids
std::vector<int> iRecvVec(iSumThread);

#ifdef NRNMPI
// gather ids into the array with correspondent offsets
if (nrnmpi_numprocs > 1) {
nrnmpi_int_gatherv(begin_ptr(iSend),
num_datasets,
begin_ptr(iRecvVec),
begin_ptr(iRecv),
begin_ptr(iDispl),
0);
} else {
for (int iInt = 0; iInt < num_datasets; ++iInt) {
iRecvVec[iInt] = iSend[iInt];
}
}
#else
for (int iInt = 0; iInt < num_datasets; ++iInt) {
iRecvVec[iInt] = iSend[iInt];
}
#endif

/// Writing the file with task, correspondent number of threads and list of correspondent first
/// gids
@@ -584,6 +600,10 @@ size_t nrn_write_mapping_info(const char* path, int gid, NrnMappingInfo& minfo)
}
}
}

// Mark the end of the file with '\0' <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
fputc(0, f);

fclose(f);

return offset;
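The gather added to write_nrnthread_task above only collects the per-rank offsets onto rank 0; as the TODO in that hunk says, nothing writes them out yet. One possible shape for the missing step, purely as a sketch: the helper name write_gathered_offsets and the one-line-of-offsets-per-rank layout of 'files.dat' are assumptions, not part of this commit.

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical helper: rank 0 holds num_offsets entries per rank, in rank
// order, exactly as nrnmpi_sizet_gather deposits them, and appends them to
// 'files.dat' under path.
static void write_gathered_offsets(const std::string& path,
                                   const std::vector<size_t>& all_offsets,
                                   size_t num_offsets,
                                   bool append) {
    std::string fname = path + "/files.dat";
    FILE* f = std::fopen(fname.c_str(), append ? "a" : "w");
    if (!f) {
        return;
    }
    for (size_t i = 0; i < all_offsets.size(); ++i) {
        // one whitespace-separated line of offsets per rank (assumed format)
        std::fprintf(f, "%zu%c", all_offsets[i], (i + 1) % num_offsets == 0 ? '\n' : ' ');
    }
    std::fclose(f);
}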
2 changes: 1 addition & 1 deletion src/nrniv/nrncore_write/io/nrncore_io.h
@@ -39,7 +39,7 @@ struct Memb_list;
using bbcore_write_t =
void (*)(double*, int*, int*, int*, Memb_list*, std::size_t, Datum*, Datum*, NrnThread*);

void write_nrnthread_task(const char*, CellGroup* cgs, bool append);
void write_nrnthread_task(const char*, CellGroup* cgs, bool append, std::vector<size_t> file_offsets);
void nrnbbcore_vecplay_write(FILE* f, NrnThread& nt);

size_t nrn_write_mapping_info(const char* path, int gid, NrnMappingInfo& minfo);
5 changes: 4 additions & 1 deletion src/nrniv/partrans.cpp
@@ -996,7 +996,7 @@ size_t nrnbbcore_gap_write(const char* path, int* group_ids) {
}

// print the files
for (int tid = 0; tid < nrn_nthread; ++tid) {
for (int tid = 0; tid < nrn_nthread; ++tid) { // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Sometimes more than one has data!!
auto& g = gi[tid];

if (g.src_sid.empty() && g.tar_sid.empty()) { // no file
@@ -1034,6 +1034,9 @@
CHKPNT fwrite(g.tar_index.data(), ntar, sizeof(int), f);
}

// Mark the end of the file with '\0' <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
fputc(0, f);

fclose(f);
}

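The fputc(0, f) calls added here and in nrncore_io.cpp pair up with the FileHandler::eof() change in the first file: every writer now terminates its file with a single '\0' byte, and the reader treats a peeked '\0' like a real end-of-file. A standalone illustration of that convention (file name and payload are placeholders):

#include <cstdio>

int main() {
    // Writer side: payload followed by the '\0' end-of-file marker.
    FILE* f = std::fopen("example.dat", "wb");
    const int payload[3] = {1, 2, 3};
    std::fwrite(payload, sizeof(int), 3, f);
    std::fputc(0, f);  // same marker as fputc(0, f) in the hunks above
    std::fclose(f);

    // Reader side: stop on real EOF or on the '\0' sentinel, as the updated
    // FileHandler::eof() does.
    f = std::fopen("example.dat", "rb");
    int buf[3];
    std::fread(buf, sizeof(int), 3, f);
    int c = std::fgetc(f);
    bool at_end = (c == EOF) || (static_cast<char>(c) == '\0');
    std::fclose(f);
    return at_end ? 0 : 1;
}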
4 changes: 4 additions & 0 deletions src/nrnmpi/mpispike.cpp
@@ -396,6 +396,10 @@ extern void nrnmpi_int_gatherv(int* s, int scnt, int* r, int* rcnt, int* rdispl,
MPI_Gatherv(s, scnt, MPI_INT, r, rcnt, rdispl, MPI_INT, root, nrnmpi_comm);
}

extern void nrnmpi_sizet_gather(size_t* s, size_t* r, int cnt, int root) {
MPI_Gather(s, cnt, MPI_UNSIGNED_LONG_LONG, r, cnt, MPI_UNSIGNED_LONG_LONG, root, nrnmpi_comm);
}

extern void nrnmpi_char_gatherv(char* s, int scnt, char* r, int* rcnt, int* rdispl, int root) {
MPI_Gatherv(s, scnt, MPI_CHAR, r, rcnt, rdispl, MPI_CHAR, root, nrnmpi_comm);
}
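write_nrnthread_task above calls this new wrapper with the same vector as both send and receive buffer on rank 0, a case for which the MPI standard requires MPI_IN_PLACE. A hedged sketch of a variant that covers it; the function name is illustrative, and the static_assert guards the assumption that MPI_UNSIGNED_LONG_LONG matches size_t:

#include <mpi.h>
#include <cstddef>

static_assert(sizeof(size_t) == sizeof(unsigned long long),
              "pick an MPI datatype that matches size_t on this platform");

// Illustrative variant only: gathers cnt size_t values per rank into buf on
// root, letting root reuse its own buffer via MPI_IN_PLACE.
void sizet_gather_in_place(size_t* buf, int cnt, int root, MPI_Comm comm) {
    int rank;
    MPI_Comm_rank(comm, &rank);
    if (rank == root) {
        MPI_Gather(MPI_IN_PLACE, cnt, MPI_UNSIGNED_LONG_LONG, buf, cnt, MPI_UNSIGNED_LONG_LONG, root, comm);
    } else {
        MPI_Gather(buf, cnt, MPI_UNSIGNED_LONG_LONG, buf, cnt, MPI_UNSIGNED_LONG_LONG, root, comm);
    }
}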
