1
0
Fork 0
mirror of https://github.com/cosmo-sims/monofonIC.git synced 2024-09-19 17:03:45 +02:00

In the SWIFT i/o plugin, get each rank to write its data in a round-robin fashion using an offset in the previously-created array

This commit is contained in:
Matthieu Schaller 2021-03-14 18:34:34 +01:00
parent c1ebf38aa1
commit 5207492b8e
2 changed files with 138 additions and 33 deletions

View file

@ -940,6 +940,96 @@ inline void HDFCreateEmptyDatasetVector( const std::string Filename, const std::
}
template< typename T >
inline void HDFWriteDatasetChunk( const std::string Filename, const std::string ObjName, const std::vector<T> &Data, const size_t offset )
{
    //! Write the contents of \a Data into an already-existing 1D dataset
    //! \a ObjName of file \a Filename, starting at element index \a offset
    //! (a hyperslab write). The dataset must have been created beforehand
    //! (e.g. via HDFCreateEmptyDataset) and must be large enough to hold
    //! offset + Data.size() elements.
    hid_t
        HDF_FileID,
        HDF_DatasetID,
        HDF_MemDataspaceID,
        HDF_FileDataspaceID,
        HDF_Type;

    hsize_t HDF_Dims,
        HDF_Shape,
        HDF_Offset;

    HDF_FileID = H5Fopen( Filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT );
    HDF_Type = GetDataType<T>();

    // memory dataspace: a flat array holding all of Data
    HDF_Dims = (hsize_t)Data.size();
    HDF_MemDataspaceID = H5Screate_simple(1, &HDF_Dims, NULL);

    // file dataspace: select the [offset, offset + Data.size()) hyperslab
    HDF_Shape = (hsize_t)Data.size();
    HDF_Offset = (hsize_t)offset;

    HDF_DatasetID = H5Dopen( HDF_FileID, ObjName.c_str() );
    HDF_FileDataspaceID = H5Dget_space(HDF_DatasetID);
    H5Sselect_hyperslab(HDF_FileDataspaceID, H5S_SELECT_SET, &HDF_Offset, NULL, &HDF_Shape, NULL);

    // Data.data() is well-defined even when Data is empty, unlike &Data[0]
    // (operator[] on an empty vector is undefined behavior).
    H5Dwrite( HDF_DatasetID, HDF_Type, HDF_MemDataspaceID, HDF_FileDataspaceID, H5P_DEFAULT, Data.data() );

    H5Dclose( HDF_DatasetID );
    H5Sclose( HDF_MemDataspaceID );
    H5Sclose( HDF_FileDataspaceID );
    H5Fclose( HDF_FileID );
}
template< typename T >
inline void HDFWriteDatasetVectorChunk( const std::string Filename, const std::string ObjName, const std::vector<T> &Data, const size_t offset )
{
    //! Write the contents of \a Data, interpreted as Data.size()/3 rows of
    //! 3-component vectors, into an already-existing (N,3) dataset
    //! \a ObjName of file \a Filename, starting at row index \a offset.
    //! The dataset must have been created beforehand (e.g. via
    //! HDFCreateEmptyDatasetVector) and be large enough to hold
    //! offset + Data.size()/3 rows.
    hid_t
        HDF_FileID,
        HDF_DatasetID,
        HDF_MemDataspaceID,
        HDF_FileDataspaceID,
        HDF_Type;

    hsize_t HDF_Dims[2],
        HDF_Shape[2],
        HDF_Offset[2];

    // Warn early if the flat array cannot be reshaped into (N,3) rows;
    // the write still proceeds, truncating the trailing remainder.
    if( Data.size() % 3 != 0 ){
        std::cerr << " - Warning: Trying to write vector data in HDFWriteDatasetVectorChunk\n"
                  << "            but array length not divisible by 3!\n\n";
    }

    HDF_FileID = H5Fopen( Filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT );
    HDF_Type = GetDataType<T>();

    // memory dataspace: Data viewed as a (Data.size()/3, 3) array
    HDF_Dims[0] = (hsize_t)(Data.size()/3);
    HDF_Dims[1] = 3;
    HDF_MemDataspaceID = H5Screate_simple(2, HDF_Dims, NULL);

    // file dataspace: select rows [offset, offset + Data.size()/3), all 3 columns
    HDF_Shape[0] = (hsize_t)(Data.size()/3);
    HDF_Shape[1] = (hsize_t)3;
    HDF_Offset[0] = (hsize_t)offset;
    HDF_Offset[1] = (hsize_t)0;

    HDF_DatasetID = H5Dopen( HDF_FileID, ObjName.c_str() );
    HDF_FileDataspaceID = H5Dget_space(HDF_DatasetID);
    H5Sselect_hyperslab(HDF_FileDataspaceID, H5S_SELECT_SET, HDF_Offset, NULL, HDF_Shape, NULL);

    // Data.data() is well-defined even when Data is empty, unlike &Data[0]
    // (operator[] on an empty vector is undefined behavior).
    H5Dwrite( HDF_DatasetID, HDF_Type, HDF_MemDataspaceID, HDF_FileDataspaceID, H5P_DEFAULT, Data.data() );

    H5Dclose( HDF_DatasetID );
    H5Sclose( HDF_MemDataspaceID );
    H5Sclose( HDF_FileDataspaceID );
    H5Fclose( HDF_FileID );
}
inline void HDFCreateGroup( const std::string Filename, const std::string GroupName )
{
hid_t HDF_FileID, HDF_GroupID;

View file

@ -285,48 +285,63 @@ public:
// note: despite this being a constant array we still need to handle it in a distributed way
HDFCreateEmptyDataset<write_real_t>(fname_, std::string("PartType") + std::to_string(sid) + std::string("/InternalEnergy"), global_num_particles);
HDFCreateEmptyDataset<write_real_t>(fname_, std::string("PartType") + std::to_string(sid) + std::string("/SmoothinLength"), global_num_particles);
HDFCreateEmptyDataset<write_real_t>(fname_, std::string("PartType") + std::to_string(sid) + std::string("/SmoothingLength"), global_num_particles);
}
}
// Now each node writes its own chunk in a round-robin fashion, appending at the end of the currently existing data
// compute each rank's offset in the global array
const size_t n_local = pc.get_local_num_particles();
size_t offset = 0;
MPI_Exscan(&n_local, &offset, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
//... write positions and velocities.....
if (this->has_64bit_reals())
{
HDFWriteDatasetVector(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions64_);
HDFWriteDatasetVector(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities64_);
}
else
{
HDFWriteDatasetVector(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions32_);
HDFWriteDatasetVector(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities32_);
}
// now each node writes its own chunk in a round-robin fashion, appending at the end of the currently existing data
for (int rank = 0; rank < num_ranks_; ++rank) {
//... write ids.....
if (this->has_64bit_ids())
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids64_);
else
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids32_);
MPI_Barrier(MPI_COMM_WORLD);
//... write masses.....
if( pc.bhas_individual_masses_ ){
if (this->has_64bit_reals()){
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Masses"), pc.mass64_);
}else{
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Masses"), pc.mass32_);
if (rank == this_rank_) {
//... write positions and velocities.....
if (this->has_64bit_reals())
{
HDFWriteDatasetVectorChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions64_, offset);
HDFWriteDatasetVectorChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities64_, offset);
}
else
{
HDFWriteDatasetVectorChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions32_, offset);
HDFWriteDatasetVectorChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities32_, offset);
}
//... write ids.....
if (this->has_64bit_ids())
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids64_, offset);
else
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids32_, offset);
//... write masses.....
if( pc.bhas_individual_masses_ ){
if (this->has_64bit_reals()){
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Masses"), pc.mass64_, offset);
}else{
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/Masses"), pc.mass32_, offset);
}
}
// write GAS internal energy and smoothing length if baryons are enabled
if(bdobaryons_ && s == cosmo_species::baryon) {
std::vector<write_real_t> data( pc.get_local_num_particles(), ceint_ );
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/InternalEnergy"), data, offset);
data.assign( pc.get_local_num_particles(), h_);
HDFWriteDatasetChunk(fname_, std::string("PartType") + std::to_string(sid) + std::string("/SmoothingLength"), data, offset);
}
}
}
// write GAS internal energy and smoothing length if baryons are enabled
if( bdobaryons_ && s == cosmo_species::baryon) {
std::vector<write_real_t> data( npart_[0], ceint_ );
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/InternalEnergy"), data);
data.assign( npart_[0], h_);
HDFWriteDataset(fname_, std::string("PartType") + std::to_string(sid) + std::string("/SmoothingLength"), data);
}
// end with a barrier to make sure everyone is done before the destructor does its job
MPI_Barrier(MPI_COMM_WORLD);
}
};