1
0
Fork 0
mirror of https://github.com/cosmo-sims/MUSIC.git synced 2024-09-19 17:03:46 +02:00

Added support for very large arrays to enzo output plugin.

Enzo plugin now writes in slabs rather than in one monolithic block (saving memory).
This commit is contained in:
Oliver Hahn 2011-06-03 18:41:39 -07:00
parent 00fd30f333
commit 03fb47cc3d
2 changed files with 113 additions and 14 deletions

View file

@ -44,8 +44,15 @@ hid_t GetDataType( void )
if( typeid(T) == typeid(double) )
return H5T_NATIVE_DOUBLE;
//if( typeid(T) == typeid(long long) )
// return H5T_NATIVE_LLONG;
if( typeid(T) == typeid(long long) )
return H5T_NATIVE_LLONG;
if( typeid(T) == typeid(unsigned long long) )
return H5T_NATIVE_ULLONG;
if( typeid(T) == typeid(size_t) )
return H5T_NATIVE_ULLONG;
std::cerr << " - Error: [HDF_IO] trying to evaluate unsupported type in GetDataType\n\n";
return -1;
@ -90,6 +97,8 @@ inline void HDFReadVector( const std::string Filename, const std::string ObjName
}
inline void HDFGetDatasetExtent( const std::string Filename, const std::string ObjName, std::vector<int> &Extent )
{
hid_t HDF_FileID, HDF_DatasetID, HDF_DataspaceID;
@ -751,6 +760,51 @@ inline void HDFWriteDataset3D( const std::string Filename, const std::string Obj
H5Fclose( HDF_FileID );
}
//! Incremental writer for a 3D dataset stored as a 4D HDF5 array {1, nz, ny, nx}.
/*! Creates the dataset on construction, lets callers stream data in slabs via
 *  write_slab(), and closes the dataset/file handles on destruction, so the
 *  full array never has to reside in memory at once.
 *  NOTE(review): owns three HDF5 handles; do not copy instances — copying
 *  would double-close the handles in the destructor.
 */
template< typename T >
struct HDFHyperslabWriter3Ds
{
	hid_t dset_id_, type_id_, file_id_;
	
	//! open \a Filename (must already exist) and create dataset \a ObjName
	//! with extents {1, nd[0], nd[1], nd[2]}
	HDFHyperslabWriter3Ds( const std::string Filename, const std::string ObjName, size_t nd[3] )
	{
		hid_t filespace;
		hsize_t sizes[4] = { 1, nd[0], nd[1], nd[2] };
		
		type_id_ = GetDataType<T>();
		file_id_ = H5Fopen( Filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT );
		
		filespace = H5Screate_simple( 4, sizes, NULL );
		dset_id_ = H5Dcreate( file_id_, ObjName.c_str(), type_id_, filespace, H5P_DEFAULT );
		
		//... report failure instead of silently carrying an invalid handle
		if( dset_id_ < 0 )
			std::cerr << " - Error: [HDF_IO] could not create dataset in HDFHyperslabWriter3Ds\n\n";
		
		H5Sclose(filespace);
	}
	
	//! release the dataset and file handles
	~HDFHyperslabWriter3Ds()
	{
		H5Dclose( dset_id_ );
		H5Fclose( file_id_ );
	}
	
	//! write one slab of extents \a count at position \a offset
	//! (both are 3-element arrays in the same axis order as the dataset extents)
	void write_slab( T* data, size_t* count, size_t* offset )
	{
		//... prepend the unit leading dimension of the 4D dataset
		hsize_t counts[4] = { 1, count[0], count[1], count[2] };
		hsize_t offsets[4] = { 0, offset[0], offset[1], offset[2] };
		
		hid_t filespace = H5Dget_space(dset_id_);
		hid_t memspace = H5Screate_simple(4, counts, NULL);
		
		H5Sselect_hyperslab( filespace, H5S_SELECT_SET, offsets, NULL, counts, NULL );
		
		//... check the write status rather than discarding it: a failed
		//... H5Dwrite previously went completely unnoticed
		herr_t status;
		status = H5Dwrite(dset_id_, type_id_, memspace, filespace, H5P_DEFAULT, reinterpret_cast<void*>(data));
		if( status < 0 )
			std::cerr << " - Error: [HDF_IO] H5Dwrite failed in HDFHyperslabWriter3Ds::write_slab\n\n";
		
		H5Sclose(filespace);
		H5Sclose(memspace);
	}
};
template< typename T >
inline void HDFWriteDataset3Ds( const std::string Filename, const std::string ObjName, unsigned nd[3], const std::vector< T > &Data )
{

View file

@ -17,13 +17,19 @@
#include "HDF_IO.hh"
//#define MAX_SLAB_SIZE 134217728 // = 128 MBytes
//#define MAX_SLAB_SIZE 33554432 // = 32 Mbytes
#define MAX_SLAB_SIZE 1048576
class enzo_output_plugin : public output_plugin
{
protected:
struct patch_header{
int component_rank;
int component_size;
size_t component_size;
std::vector<int> dimensions;
int rank;
std::vector<int> top_grid_dims;
@ -88,26 +94,65 @@ protected:
//... need to copy data because we need to get rid of the ghost zones
std::vector<double> data;
data.reserve( ng[0]*ng[1]*ng[2] );
//... write in slabs if data is more than MAX_SLAB_SIZE (default 128 MB)
for( int k=0; k<ng[2]; ++k )
for( int j=0; j<ng[1]; ++j )
for( int i=0; i<ng[0]; ++i )
data.push_back( (add+(*gh.get_grid(ilevel))(i,j,k))*factor );
size_t all_data_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
size_t max_slab_size = std::min((size_t)MAX_SLAB_SIZE/sizeof(double), all_data_size );
size_t slices_in_slab = (size_t)((double)max_slab_size / ((size_t)ng[0] * (size_t)ng[1]));
size_t nsz[3] = { ng[2], ng[1], ng[0] };
if( levelmin_ != levelmax_ )
sprintf( enzoname, "%s.%d", fieldname.c_str(), ilevel-levelmin_ );
else
sprintf( enzoname, "%s", fieldname.c_str() );
sprintf( enzoname, "%s.%d", fieldname.c_str(), ilevel-levelmin_ );
sprintf( filename, "%s/%s", fname_.c_str(), enzoname );
HDFCreateFile( filename );
write_sim_header( filename, the_sim_header );
HDFWriteDataset3Ds( filename, enzoname, reinterpret_cast<unsigned*>(&ng_fortran[0]), data );
HDFHyperslabWriter3Ds<double> *slab_writer = new HDFHyperslabWriter3Ds<double>( filename, enzoname, nsz );
double *data_buf = new double[ slices_in_slab * (size_t)ng[0] * (size_t)ng[1] ];
size_t slices_written = 0;
while( slices_written < (size_t)ng[2] )
{
slices_in_slab = std::min( (size_t)ng[2]-slices_written, slices_in_slab );
#pragma omp parallel for
for( int k=0; k<(int)slices_in_slab; ++k )
for( int j=0; j<ng[1]; ++j )
for( int i=0; i<ng[0]; ++i )
data_buf[ (size_t)(k*ng[1]+j)*(size_t)ng[0]+(size_t)i ] =
(add+(*gh.get_grid(ilevel))(i,j,k+slices_written))*factor;
size_t count[3], offset[3];
count[0] = slices_in_slab;
count[1] = ng[1];
count[2] = ng[0];
offset[0] = slices_written;;
offset[1] = 0;
offset[2] = 0;
slab_writer->write_slab( data_buf, count, offset );
slices_written += slices_in_slab;
}
delete[] data_buf;
delete slab_writer;
//... header data for the patch
patch_header ph;
ph.component_rank = 1;
ph.component_size = ng[0]*ng[1]*ng[2];
ph.component_size = (size_t)ng[0]*(size_t)ng[1]*(size_t)ng[2];
ph.dimensions = ng;
ph.rank = 3;
@ -120,7 +165,7 @@ protected:
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 0)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 1)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 2)*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[0] + (int)(ng[0]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[1] + (int)(ng[1]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[2] + (int)(ng[2]*rfac) );
@ -185,7 +230,7 @@ public:
sprintf( filename, "%s/parameter_file.txt", fname_.c_str() );
std::ofstream ofs( filename, std::ios::trunc );
std::ofstream ofs( filename, std::ios::trunc );
ofs
<< "#\n"