Mirror of https://github.com/cosmo-sims/MUSIC.git

modified directory structure, fixed compiler warnings in g++ 9

Oliver Hahn 2020-02-13 13:44:07 +01:00
parent 6ad3934f23
commit b77fedd700
71 changed files with 4308 additions and 4349 deletions

Makefile

@@ -9,11 +9,11 @@ BOXLIB_HOME = ${HOME}/nyx_tot_sterben/BoxLib
 ##############################################################################
 ### compiler and path settings
-CC = g++
+CC = g++-9
 OPT = -Wall -Wno-unknown-pragmas -O3 -g -mtune=native
 CFLAGS =
 LFLAGS = -lgsl -lgslcblas
-CPATHS = -I. -I$(HOME)/local/include -I/opt/local/include -I/usr/local/include
+CPATHS = -I./src -I$(HOME)/local/include -I/opt/local/include -I/usr/local/include
 LPATHS = -L$(HOME)/local/lib -L/opt/local/lib -L/usr/local/lib
 ##############################################################################
@@ -78,20 +78,20 @@ TARGET = MUSIC
 OBJS = output.o transfer_function.o Numerics.o defaults.o constraints.o random.o\
 	convolution_kernel.o region_generator.o densities.o cosmology.o poisson.o\
 	densities.o cosmology.o poisson.o log.o main.o \
-	$(patsubst plugins/%.cc,plugins/%.o,$(wildcard plugins/*.cc))
+	$(patsubst src/plugins/%.cc,src/plugins/%.o,$(wildcard src/plugins/*.cc))
 ##############################################################################
 # stuff for BoxLib
 BLOBJS = ""
 ifeq ($(strip $(HAVEBOXLIB)), yes)
 IN_MUSIC = YES
-TOP = ${PWD}/plugins/nyx_plugin
+TOP = ${PWD}/src/plugins/nyx_plugin
 CCbla := $(CC)
-include plugins/nyx_plugin/Make.ic
+include src/plugins/nyx_plugin/Make.ic
 CC := $(CCbla)
 CPATHS += $(INCLUDE_LOCATIONS)
 LPATHS += -L$(objEXETempDir)
-BLOBJS = $(foreach obj,$(objForExecs),plugins/boxlib_stuff/$(obj))
+BLOBJS = $(foreach obj,$(objForExecs),src/plugins/boxlib_stuff/$(obj))
 #
 endif
@@ -102,23 +102,29 @@ all: $(OBJS) $(TARGET) Makefile
 bla:
 	echo $(BLOBJS)
 blabla:
 	echo $(OBJS)
 ifeq ($(strip $(HAVEBOXLIB)), yes)
-$(TARGET): $(OBJS) plugins/nyx_plugin/*.cpp
-	cd plugins/nyx_plugin; make BOXLIB_HOME=$(BOXLIB_HOME) FFTW3=$(FFTW3) SINGLE=$(SINGLEPRECISION)
+$(TARGET): $(OBJS) src/plugins/nyx_plugin/*.cpp
+	cd src/plugins/nyx_plugin; make BOXLIB_HOME=$(BOXLIB_HOME) FFTW3=$(FFTW3) SINGLE=$(SINGLEPRECISION)
 	$(CC) $(LPATHS) -o $@ $^ $(LFLAGS) $(BLOBJS) -lifcore
 else
 $(TARGET): $(OBJS)
 	$(CC) $(LPATHS) -o $@ $^ $(LFLAGS)
 endif
-%.o: %.cc *.hh Makefile
+%.o: src/%.cc src/*.hh Makefile
 	$(CC) $(CFLAGS) $(CPATHS) -c $< -o $@
+src/plugins/%.o: src/plugins/%.cc src/*.hh Makefile
+	$(CC) $(CFLAGS) $(CPATHS) -c $< -o $@
 clean:
 	rm -rf $(OBJS)
 ifeq ($(strip $(HAVEBOXLIB)), yes)
 	oldpath=`pwd`
-	cd plugins/nyx_plugin; make realclean BOXLIB_HOME=$(BOXLIB_HOME)
+	cd src/plugins/nyx_plugin; make realclean BOXLIB_HOME=$(BOXLIB_HOME)
 endif
 	cd $(oldpath)

plugins/output_arepo.cc (deleted)

@@ -1,803 +0,0 @@
/*
* output_arepo.cc - This file is part of MUSIC -
* a code to generate multi-scale initial conditions
* for cosmological simulations
*
* Copyright (C) 2010 Oliver Hahn
*
* Plugin: Dylan Nelson (dnelson@cfa.harvard.edu)
*/
#ifdef HAVE_HDF5
#define GAS_PARTTYPE 0
#define HIGHRES_DM_PARTTYPE 1
#define COARSE_DM_DEFAULT_PARTTYPE 2
#define STAR_PARTTYPE 4
#define NTYPES 6
#include <sstream>
#include <string>
#include <algorithm>
#include "output.hh"
#include "HDF_IO.hh"
class arepo_output_plugin : public output_plugin
{
protected:
// header/config
std::vector< std::vector<unsigned int> > nPart;
std::vector<long long> nPartTotal;
std::vector<double> massTable;
double time, redshift, boxSize;
unsigned int numFiles, coarsePartType;
double omega0, omega_L, hubbleParam;
// configuration
double UnitLength_in_cm, UnitMass_in_g, UnitVelocity_in_cm_per_s;
double omega_b, rhoCrit;
double posFac, velFac;
long long nPartTotAllTypes;
bool doBaryons, useLongIDs, doublePrec;
size_t npfine, npart, npcoarse;
std::vector<size_t> levelcounts;
// parameter file hints
int pmgrid, gridboost;
float softening, Tini;
using output_plugin::cf_;
// Nx1 vector (e.g. masses,particleids)
template< typename T >
void writeHDF5_a( std::string fieldName, int partTypeNum, const std::vector<T> &data )
{
hid_t HDF_FileID, HDF_GroupID, HDF_DatasetID, HDF_DataspaceID, HDF_Type;
hsize_t HDF_Dims, offset = 0;
std::stringstream GrpName;
GrpName << "PartType" << partTypeNum;
for( unsigned i=0; i < numFiles; i++ )
{
std::string filename = fname_;
HDF_Dims = data.size();
// modify local filename and write size
if( numFiles > 1 )
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
HDF_Dims = ceil( data.size() / numFiles );
if( i == numFiles-1 )
HDF_Dims = data.size() - offset;
}
HDF_FileID = H5Fopen( filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT );
HDF_GroupID = H5Gopen( HDF_FileID, GrpName.str().c_str() );
HDF_Type = GetDataType<T>();
HDF_DataspaceID = H5Screate_simple(1, &HDF_Dims, NULL);
HDF_DatasetID = H5Dcreate(HDF_GroupID, fieldName.c_str(), HDF_Type, HDF_DataspaceID, H5P_DEFAULT);
// write and close
H5Dwrite( HDF_DatasetID, HDF_Type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data[offset] );
H5Dclose( HDF_DatasetID );
H5Sclose( HDF_DataspaceID );
H5Gclose( HDF_GroupID );
H5Fclose( HDF_FileID );
offset += HDF_Dims;
}
}
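// Worked example (illustrative, hypothetical numbers): with data.size()==10
// and numFiles==3, ceil( data.size() / numFiles ) is an integer division and
// yields 3, so files 0 and 1 receive 3 elements each, while the last file
// absorbs the remainder: 10 - 6 = 4 elements. The running 'offset' guarantees
// that the per-file datasets concatenate back to 'data' exactly.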
// Nx3 vector (e.g. pos,vel), where coord = index of the second dimension (written one at a time)
template<typename T>
void writeHDF5_b( std::string fieldName, int coord, int partTypeNum, std::vector<T> &data, bool readFlag = false )
{
hid_t HDF_FileID, HDF_GroupID, HDF_DatasetID, HDF_DataspaceID, HDF_Type;
hsize_t HDF_Dims[2], HDF_DimsMem[2], w_offset = 0;
std::stringstream GrpName;
GrpName << "PartType" << partTypeNum;
for( unsigned i=0; i < numFiles; i++ )
{
std::string filename = fname_;
HDF_Dims[0] = data.size();
// modify local filename and write size
if( numFiles > 1 )
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
HDF_Dims[0] = ceil( data.size() / numFiles );
if( i == numFiles-1 )
HDF_Dims[0] = data.size() - w_offset;
}
HDF_FileID = H5Fopen( filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT );
HDF_GroupID = H5Gopen( HDF_FileID, GrpName.str().c_str() );
HDF_Type = GetDataType<T>();
HDF_Dims[1] = 3;
// if dataset does not yet exist, create it (on first coord call)
if( !(H5Lexists(HDF_GroupID, fieldName.c_str(), H5P_DEFAULT)) )
{
HDF_DataspaceID = H5Screate_simple(2, HDF_Dims, NULL);
HDF_DatasetID = H5Dcreate( HDF_GroupID, fieldName.c_str(), HDF_Type, HDF_DataspaceID, H5P_DEFAULT );
H5Sclose( HDF_DataspaceID );
H5Dclose( HDF_DatasetID );
}
// make memory space (just indicates the size/shape of data)
HDF_DimsMem[0] = HDF_Dims[0];
HDF_DimsMem[1] = 1;
hid_t HDF_MemoryspaceID = H5Screate_simple(2, HDF_DimsMem, NULL);
// open hyperslab
hsize_t count[2]={1,1}, stride[2]={1,1}, offset[2]={0,0};
offset[1] = coord; // set where in the second dimension to write
count[0] = HDF_Dims[0]; // set size in the first dimension (num particles of this type)
HDF_DatasetID = H5Dopen(HDF_GroupID, fieldName.c_str());
HDF_DataspaceID = H5Dget_space(HDF_DatasetID);
H5Sselect_hyperslab(HDF_DataspaceID, H5S_SELECT_SET, offset, stride, count, NULL);
// write (or read) and close
if( readFlag )
H5Dread( HDF_DatasetID, HDF_Type, HDF_MemoryspaceID, HDF_DataspaceID, H5P_DEFAULT,
&data[w_offset] );
else
H5Dwrite( HDF_DatasetID, HDF_Type, HDF_MemoryspaceID, HDF_DataspaceID, H5P_DEFAULT,
&data[w_offset] );
H5Dclose( HDF_DatasetID );
H5Gclose( HDF_GroupID );
H5Fclose( HDF_FileID );
w_offset += HDF_Dims[0];
}
}
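// Worked example (illustrative): to write the y-components (coord==1) of 100
// positions, the hyperslab selection on the 100x3 dataset is
// offset={0,1}, stride={1,1}, count={100,1}, i.e. one full column. Calling the
// routine once per coord in {0,1,2} fills the Nx3 array column by column, so
// all three components never have to be held in memory at once.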
// called from finalize()
void generateAndWriteIDs( void )
{
long long offset = 1; // don't use ID==0
nPartTotAllTypes = 0;
for( size_t i=0; i < nPartTotal.size(); i++ )
{
if( !nPartTotal[i] )
continue;
nPartTotAllTypes += nPartTotal[i];
if( !useLongIDs )
{
std::vector<int> ids = std::vector<int>(nPartTotal[i]);
for( int j=0; j < nPartTotal[i]; j++ )
ids[j] = offset + j;
writeHDF5_a( "ParticleIDs", i, ids );
}
else
{
std::vector<long long> ids = std::vector<long long>(nPartTotal[i]);
for( long long j=0; j < nPartTotal[i]; j++ )
ids[j] = offset + j;
writeHDF5_a( "ParticleIDs", i, ids );
}
// make IDs of all particle types sequential (unique); not strictly necessary, but consistent with the gadget output format
offset += nPartTotal[i];
}
}
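// Worked example (illustrative, hypothetical counts): with nPartTotal =
// {1000 gas, 1000 high-res DM, 500 coarse DM, 0, 0, 0}, gas receives IDs
// 1..1000, high-res DM 1001..2000 and coarse DM 2001..2500 -- contiguous and
// unique across all types, matching the gadget-style convention noted above.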
void countLeafCells( const grid_hierarchy& gh )
{
npfine = 0; npart = 0; npcoarse = 0;
npfine = gh.count_leaf_cells(gh.levelmax(), gh.levelmax());
npart = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
if( levelmax_ != levelmin_ ) // multimass
npcoarse = gh.count_leaf_cells(gh.levelmin(), gh.levelmax()-1);
}
template< typename T >
void __write_dm_mass( const grid_hierarchy& gh )
{
countLeafCells(gh);
// fill levelcount for header
levelcounts = std::vector<size_t>(levelmax_-levelmin_+1);
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
levelcounts[gh.levelmax()-ilevel] = gh.count_leaf_cells(ilevel, ilevel);
if( levelmax_ > levelmin_ +1 ) // morethan2bnd
{
// DM particles will have variable masses
size_t count = 0;
std::vector<T> data(npcoarse);
for( int ilevel=gh.levelmax()-1; ilevel>=(int)gh.levelmin(); --ilevel )
{
// baryon particles live only on finest grid, these particles here are total matter particles
T pmass = omega0 * rhoCrit * pow(boxSize*posFac,3.0)/pow(2,3*ilevel);
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
data[count++] = pmass;
}
}
if( count != npcoarse )
throw std::runtime_error("Internal consistency error while writing masses");
writeHDF5_a( "Masses", coarsePartType, data ); // write DM
}
else
{
// DM particles will all have the same mass, just write to massTable
if( levelmax_ != levelmin_ ) // multimass
massTable[coarsePartType] = omega0 * rhoCrit * pow(boxSize*posFac,3.0)/pow(2,3*levelmin_);
}
}
template< typename T >
void __write_dm_position( int coord, const grid_hierarchy& gh )
{
countLeafCells(gh);
// update header
hsize_t offset_fine = 0, offset_coarse = 0;
for( unsigned i=0; i < numFiles; i++ ) {
hsize_t dims_fine = ceil( npfine / numFiles );
hsize_t dims_coarse = ceil( npcoarse / numFiles );
if( i == numFiles-1 ) {
dims_fine = npfine - offset_fine;
dims_coarse = npcoarse - offset_coarse;
}
nPart[i][HIGHRES_DM_PARTTYPE] = dims_fine;
nPart[i][coarsePartType] = dims_coarse;
offset_fine += dims_fine;
offset_coarse += dims_coarse;
}
nPartTotal[HIGHRES_DM_PARTTYPE] = npfine;
nPartTotal[coarsePartType] = npcoarse;
// FINE: collect displacements and convert to absolute coordinates with correct units
int ilevel = gh.levelmax();
std::vector<T> data(npfine);
size_t count = 0;
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i,j,k)) * boxSize;
xx[coord] = fmod( xx[coord] + boxSize,boxSize );
data[count++] = (T) (xx[coord] * posFac);
}
writeHDF5_b( "Coordinates", coord, HIGHRES_DM_PARTTYPE, data ); // write fine DM
if( count != npfine )
throw std::runtime_error("Internal consistency error while writing fine DM pos");
// COARSE: collect displacements and convert to absolute coordinates with correct units
if( levelmax_ != levelmin_ ) // multimass
{
data = std::vector<T> (npcoarse,0.0);
count = 0;
for( int ilevel=gh.levelmax()-1; ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i,j,k)) * boxSize;
if ( !doBaryons ) // if so, we will handle the mod in write_gas_position
xx[coord] = fmod( xx[coord] + boxSize,boxSize ) * posFac;
data[count++] = (T) xx[coord];
}
if( count != npcoarse )
throw std::runtime_error("Internal consistency error while writing coarse DM pos");
writeHDF5_b( "Coordinates", coord, coarsePartType, data ); // write coarse DM
}
}
template< typename T >
void __write_dm_velocity( int coord, const grid_hierarchy& gh )
{
countLeafCells(gh);
// FINE: collect velocities and convert to correct units
int ilevel = gh.levelmax();
std::vector<T> data(npfine);
size_t count = 0;
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
data[count++] = (T) (*gh.get_grid(ilevel))(i,j,k) * velFac;
}
writeHDF5_b( "Velocities", coord, HIGHRES_DM_PARTTYPE, data ); // write fine DM
if( count != npfine )
throw std::runtime_error("Internal consistency error while writing fine DM vel");
// COARSE: collect velocities and convert to correct units
if( levelmax_ != levelmin_ ) // multimass
{
data = std::vector<T> (npcoarse,0.0);
count = 0;
for( int ilevel=gh.levelmax()-1; ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
data[count++] = (T) (*gh.get_grid(ilevel))(i,j,k) * velFac;
}
if( count != npcoarse )
throw std::runtime_error("Internal consistency error while writing coarse DM vel");
writeHDF5_b( "Velocities", coord, coarsePartType, data ); // write coarse DM
}
}
template< typename T >
void __write_gas_velocity( int coord, const grid_hierarchy& gh )
{
countLeafCells(gh);
std::vector<T> gas_data(npart); // read/write gas at all levels from the gh
size_t count = 0;
for( int ilevel=levelmax_; ilevel>=(int)levelmin_; --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
gas_data[count++] = (T) (*gh.get_grid(ilevel))(i,j,k) * velFac;
}
if( count != npart )
throw std::runtime_error("Internal consistency error while writing gas vel");
// calculate modified DM velocities if: multimass and baryons present
if( doBaryons && npcoarse )
{
double facb = omega_b / omega0;
double facc = (omega0 - omega_b) / omega0;
std::vector<T> dm_data(npcoarse);
writeHDF5_b( "Velocities", coord, coarsePartType, dm_data, true ); // read coarse DM vels
// overwrite
for( size_t i=0; i < npcoarse; i++ )
dm_data[i] = facc*dm_data[i] + facb*gas_data[npfine + i];
writeHDF5_b( "Velocities", coord, coarsePartType, dm_data ); // overwrite coarse DM vels
} // dm_data deallocated
// restrict gas_data to fine only and request write
std::vector<T> data( gas_data.begin() + 0, gas_data.begin() + npfine );
std::vector<T>().swap( gas_data ); // deallocate
writeHDF5_b( "Velocities", coord, GAS_PARTTYPE, data ); // write highres gas
}
template< typename T >
void __write_gas_position( int coord, const grid_hierarchy& gh )
{
countLeafCells(gh);
// update header (will actually write only gas at levelmax)
hsize_t offset = 0;
for( unsigned i=0; i < numFiles; i++ ) {
hsize_t dims = ceil( npfine / numFiles );
if( i == numFiles-1 )
dims = npfine - offset;
nPart[i][GAS_PARTTYPE] = dims;
offset += dims;
}
nPartTotal[GAS_PARTTYPE] = npfine;
std::vector<double> gas_data(npart); // read/write gas at all levels from the gh
size_t count = 0;
double h = 1.0/(1ul<<gh.levelmax());
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
// shift particle positions (this has to be done as the same shift
// is used when computing the convolution kernel for SPH baryons)
xx[coord] += 0.5*h;
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i,j,k)) * boxSize;
gas_data[count++] = xx[coord];
}
if( count != npart )
throw std::runtime_error("Internal consistency error while writing gas pos");
// calculate modified DM coordinates if: multimass and baryons present
if( doBaryons && npcoarse )
{
double facb = omega_b / omega0;
double facc = (omega0 - omega_b) / omega0;
std::vector<T> dm_data(npcoarse);
writeHDF5_b( "Coordinates", coord, coarsePartType, dm_data, true ); // read coarse DM vels
// overwrite
for( size_t i=0; i < npcoarse; i++ ) {
dm_data[i] = facc*dm_data[i] + facb*gas_data[npfine + i];
dm_data[i] = fmod( dm_data[i] + boxSize, boxSize ) * posFac;
}
writeHDF5_b( "Coordinates", coord, coarsePartType, dm_data ); // overwrite coarse DM vels
}
// restrict gas_data to fine only and request write
//std::vector<float> data( gas_data.begin() + 0, gas_data.begin() + npfine );
std::vector<T> data(npfine);
for( size_t i = 0; i < npfine; i++ )
data[i] = (T) ( fmod( gas_data[i] + boxSize, boxSize ) * posFac );
std::vector<double>().swap( gas_data ); // deallocate
writeHDF5_b( "Coordinates", coord, GAS_PARTTYPE, data ); // write highres gas
}
public:
arepo_output_plugin( config_file& cf ) : output_plugin( cf )
{
// ensure that everyone knows we want to do SPH, implies: bsph=1, bbshift=1, decic_baryons=1
// -> instead of just writing gas densities (which are here ignored), the gas displacements are also written
cf.insertValue("setup","do_SPH","yes");
// init header and config parameters
nPartTotal = std::vector<long long>(NTYPES,0);
massTable = std::vector<double>(NTYPES,0.0);
coarsePartType = cf.getValueSafe<unsigned>("output","arepo_coarsetype",COARSE_DM_DEFAULT_PARTTYPE);
UnitLength_in_cm = cf.getValueSafe<double>("output","arepo_unitlength",3.085678e21); // 1.0 kpc
UnitMass_in_g = cf.getValueSafe<double>("output","arepo_unitmass",1.989e43); // 1.0e10 solar masses
UnitVelocity_in_cm_per_s = cf.getValueSafe<double>("output","arepo_unitvel",1e5); // 1 km/sec
omega0 = cf.getValue<double>("cosmology","Omega_m");
omega_b = cf.getValue<double>("cosmology","Omega_b");
omega_L = cf.getValue<double>("cosmology","Omega_L");
redshift = cf.getValue<double>("setup","zstart");
boxSize = cf.getValue<double>("setup","boxlength");
doBaryons = cf.getValueSafe<bool>("setup","baryons",false);
useLongIDs = cf.getValueSafe<bool>("output","arepo_longids",false);
numFiles = cf.getValueSafe<unsigned>("output","arepo_num_files",1);
doublePrec = cf.getValueSafe<bool>("output","arepo_doubleprec",0);
for( unsigned i=0; i < numFiles; i++ )
nPart.push_back( std::vector<unsigned int>(NTYPES,0) );
// factors which multiply positions and velocities
time = 1.0/(1.0+redshift);
posFac = 3.085678e24 / UnitLength_in_cm; // MUSIC uses Mpc internally, i.e. posFac=1e3 for kpc output
velFac = ( 1.0f / sqrt(time) ) * boxSize;
// critical density
rhoCrit = 27.7519737e-9; // in h^2 1e10 M_sol / kpc^3
rhoCrit *= pow(UnitLength_in_cm/3.085678e21, 3.0);
rhoCrit *= (1.989e43/UnitMass_in_g);
// calculate PMGRID suggestion
pmgrid = pow(2,levelmin_) * 2; // unigrid
gridboost = 1;
if( levelmin_ != levelmax_ )
{
double lxref[3], x0ref[3], x1ref[3];
double pmgrid_new;
the_region_generator->get_AABB(x0ref,x1ref,levelmax_); // generalized beyond box
for (int i=0; i < 3; i++)
lxref[i] = x1ref[i] - x0ref[i];
// effective fractional box length of the zoom region (cube root of its volume)
lxref[0] = pow( (lxref[0]*lxref[1]*lxref[2]),0.333 );
pmgrid_new = pow(2,levelmax_) * 2; // to cover entire box at highest resolution
pmgrid_new *= lxref[0]; // only need to cover a fraction
if( (gridboost=round(pmgrid_new/pmgrid)) > 1 )
gridboost = pow(2, ceil(log(gridboost)/log(2.0))); // round to nearest, higher power of 2
if( gridboost == 0 )
gridboost = 1;
}
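// Worked example (illustrative): for levelmin_=7, levelmax_=10 and a zoom
// region with an effective side length of 0.25 of the box,
// pmgrid = 2^7 * 2 = 256, pmgrid_new = 2^10 * 2 * 0.25 = 512, and
// gridboost = round(512/256) = 2, which is already a power of two, so the
// header hints become PMGRID=256 with GRIDBOOST=2.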
// calculate Tini for gas
hubbleParam = cf.getValue<double>("cosmology","H0")/100.0;
double astart = 1.0/(1.0+redshift);
double h2 = hubbleParam*hubbleParam;
double adec = 1.0/( 160.0*pow(omega_b*h2/0.022,2.0/5.0) );
double Tcmb0 = 2.726;
Tini = astart<adec? Tcmb0/astart : Tcmb0/astart/astart*adec;
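// Worked example (illustrative): for omega_b=0.045, h=0.7 and zstart=99,
// h2=0.49 and omega_b*h2/0.022 ~ 1.002, so adec ~ 1/160.1 ~ 6.24e-3.
// Then astart=0.01 > adec, hence Tini = Tcmb0/astart^2 * adec
// ~ 2.726 * 6.24e-3 / 1e-4 ~ 170 K.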
// calculate softening suggestion
softening = (boxSize * posFac) / pow(2,levelmax_) / 40.0;
// header and sanity checks
if ( !doBaryons )
massTable[HIGHRES_DM_PARTTYPE] = omega0 * rhoCrit * pow(boxSize*posFac,3.0)/pow(2,3*levelmax_);
else
massTable[HIGHRES_DM_PARTTYPE] = (omega0-omega_b) * rhoCrit * pow(boxSize*posFac,3.0)/pow(2,3*levelmax_);
if ( coarsePartType == GAS_PARTTYPE || coarsePartType == HIGHRES_DM_PARTTYPE)
throw std::runtime_error("Error: Specified illegal Arepo particle type for coarse particles.");
if ( coarsePartType == STAR_PARTTYPE )
LOGWARN("WARNING: Specified coarse particle type will collide with stars if USE_SFR enabled.");
// create file(s)
for( unsigned i=0; i < numFiles; i++ )
{
std::string filename = fname_;
if( numFiles > 1 )
{
size_t pos = filename.find(".hdf5");
if( pos != filename.length()-5 )
throw std::runtime_error("Error: Unexpected output filename (doesn't end in .hdf5).");
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(pos, 5, s.str());
}
HDFCreateFile(filename);
// create particle type groups
std::stringstream GrpName;
GrpName << "PartType" << HIGHRES_DM_PARTTYPE;
HDFCreateGroup(filename, GrpName.str().c_str()); // highres or unigrid DM
if( doBaryons )
{
GrpName.str("");
GrpName << "PartType" << GAS_PARTTYPE;
HDFCreateGroup(filename, GrpName.str().c_str()); // gas
}
if( levelmax_ != levelmin_ ) // multimass
{
GrpName.str("");
GrpName << "PartType" << coarsePartType;
HDFCreateGroup(filename, GrpName.str().c_str()); // coarse DM
}
}
}
~arepo_output_plugin()
{ }
/* ------------------------------------------------------------------------------- */
void write_dm_mass( const grid_hierarchy& gh )
{
if(!doublePrec)
__write_dm_mass<float>(gh);
else
__write_dm_mass<double>(gh);
}
void write_dm_position( int coord, const grid_hierarchy& gh )
{
if(!doublePrec)
__write_dm_position<float>(coord, gh);
else
__write_dm_position<double>(coord, gh);
}
void write_dm_velocity( int coord, const grid_hierarchy& gh )
{
if(!doublePrec)
__write_dm_velocity<float>(coord, gh);
else
__write_dm_velocity<double>(coord, gh);
}
void write_dm_density( const grid_hierarchy& gh )
{ /* skip */ }
void write_dm_potential( const grid_hierarchy& gh )
{ /* skip */ }
/* ------------------------------------------------------------------------------- */
void write_gas_velocity( int coord, const grid_hierarchy& gh )
{
if(!doublePrec)
__write_gas_velocity<float>(coord, gh);
else
__write_gas_velocity<double>(coord, gh);
}
void write_gas_position( int coord, const grid_hierarchy& gh )
{
if(!doublePrec)
__write_gas_position<float>(coord, gh);
else
__write_gas_position<double>(coord, gh);
}
void write_gas_density( const grid_hierarchy& gh )
{
// if only saving highres gas, then all gas cells have the same initial mass
// do not write out densities as we write out displacements
if( doBaryons )
massTable[GAS_PARTTYPE] = omega_b * rhoCrit * pow(boxSize*posFac,3.0)/pow(2,3*levelmax_);
}
void write_gas_potential( const grid_hierarchy& gh )
{ /* skip */ }
void finalize( void )
{
// generate and add contiguous IDs for each particle type we have written
generateAndWriteIDs();
std::vector<unsigned int> nPartTotalLW(nPartTotal.size());
std::vector<unsigned int> nPartTotalHW(nPartTotal.size());
for( size_t i=0; i < nPartTotalHW.size(); i++ ) {
nPartTotalHW[i] = (unsigned)( (size_t)nPartTotal[i] >> 32 );
nPartTotalLW[i] = (unsigned int)( (size_t)nPartTotal[i] );
}
// output particle counts
std::cout << " - Arepo : wrote " << nPartTotAllTypes << " particles..." << std::endl;
for( size_t i=0; i < nPartTotal.size(); i++ )
std::cout << " type [" << i << "] : " << std::setw(12) << nPartTotal[i] << std::endl;
std::cout << std::endl;
// write final header (some of these fields are required, others are extra info)
for( unsigned i=0; i < numFiles; i++ )
{
std::string filename = fname_;
if( numFiles > 1 )
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
std::cout << " " << filename;
for( size_t j=0; j < nPart[i].size(); j++ )
std::cout << " " << std::setw(10) << nPart[i][j];
std::cout << std::endl;
}
HDFCreateGroup(filename, "Header");
HDFWriteGroupAttribute(filename, "Header", "NumPart_ThisFile", nPart[i] );
HDFWriteGroupAttribute(filename, "Header", "NumPart_Total", nPartTotalLW );
HDFWriteGroupAttribute(filename, "Header", "NumPart_Total_HighWord", nPartTotalHW );
HDFWriteGroupAttribute(filename, "Header", "MassTable", massTable );
HDFWriteGroupAttribute(filename, "Header", "BoxSize", boxSize );
HDFWriteGroupAttribute(filename, "Header", "NumFilesPerSnapshot", numFiles );
HDFWriteGroupAttribute(filename, "Header", "Time", time );
HDFWriteGroupAttribute(filename, "Header", "Redshift", redshift );
HDFWriteGroupAttribute(filename, "Header", "Omega0", omega0 );
HDFWriteGroupAttribute(filename, "Header", "OmegaLambda", omega_L );
HDFWriteGroupAttribute(filename, "Header", "OmegaBaryon", omega_b );
HDFWriteGroupAttribute(filename, "Header", "HubbleParam", hubbleParam );
HDFWriteGroupAttribute(filename, "Header", "Flag_Sfr", 0 );
HDFWriteGroupAttribute(filename, "Header", "Flag_Cooling", 0 );
HDFWriteGroupAttribute(filename, "Header", "Flag_StellarAge", 0 );
HDFWriteGroupAttribute(filename, "Header", "Flag_Metals", 0 );
HDFWriteGroupAttribute(filename, "Header", "Flag_Feedback", 0 );
HDFWriteGroupAttribute(filename, "Header", "Flag_DoublePrecision", (int)doublePrec );
HDFWriteGroupAttribute(filename, "Header", "Music_levelmin", levelmin_ );
HDFWriteGroupAttribute(filename, "Header", "Music_levelmax", levelmax_ );
HDFWriteGroupAttribute(filename, "Header", "Music_levelcounts", levelcounts );
HDFWriteGroupAttribute(filename, "Header", "haveBaryons", (int)doBaryons );
HDFWriteGroupAttribute(filename, "Header", "longIDs", (int)useLongIDs );
HDFWriteGroupAttribute(filename, "Header", "suggested_pmgrid", pmgrid );
HDFWriteGroupAttribute(filename, "Header", "suggested_gridboost", gridboost );
HDFWriteGroupAttribute(filename, "Header", "suggested_highressoft", softening );
HDFWriteGroupAttribute(filename, "Header", "suggested_gas_Tinit", Tini );
}
// give config/parameter file hints
if( useLongIDs )
std::cout << " - Arepo: Wrote 64bit IDs, enable LONGIDS." << std::endl;
if( doublePrec )
std::cout << " - Arepo: Double precision ICs, set INPUT_IN_DOUBLEPRECISION." << std::endl;
if( NTYPES != 6 )
std::cout << " - Arepo: Using [" << NTYPES << "] particle types, set NTYPES to match." << std::endl;
if( doBaryons )
std::cout << " - Arepo: Wrote high-res gas (only), set REFINEMENT_HIGH_RES_GAS and GENERATE_GAS_IN_ICS with "
<< "SPLIT_PARTICLE_TYPE=" << pow(2,coarsePartType) << "." << std::endl;
if( levelmax_ != levelmin_ )
std::cout << " - Arepo: Have zoom type ICs, set PLACEHIGHRESREGION=" << pow(2,HIGHRES_DM_PARTTYPE)
<< " (suggest PMGRID=" << pmgrid << " with GRIDBOOST=" << gridboost << ")." << std::endl;
else
std::cout << " - Arepo: Have unigrid type ICs (suggest PMGRID=" << pmgrid << ")." << std::endl;
if( levelmax_ > levelmin_ + 1 )
std::cout << " - Arepo: More than one coarse DM mass using same type, set INDIVIDUAL_GRAVITY_SOFTENING="
<< pow(2,coarsePartType) << " (+" << pow(2,STAR_PARTTYPE) << " if including stars)." << std::endl;
if( doBaryons )
std::cout << " - Arepo: Set initial gas temperature to " << std::fixed << std::setprecision(3) << Tini << " K." << std::endl;
std::cout << " - Arepo: Suggest grav softening = " << std::setprecision(3) << softening << " for high res DM." << std::endl;
}
};
namespace{
output_plugin_creator_concrete< arepo_output_plugin > creator("arepo");
}
#endif // HAVE_HDF5

plugins/output_art.cc (deleted)

@@ -1,912 +0,0 @@
/*
output_art.cc - This file is part of MUSIC -
a code to generate multi-scale initial conditions
for cosmological simulations
Copyright (C) 2012 Jose Onorbe & Oliver Hahn
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <vector>
#include "output.hh"
template<typename T>
inline T bytereorder(T v )
{
T rval;
(reinterpret_cast<unsigned char*>(&rval))[3] = (reinterpret_cast<unsigned char*>(&v))[0];
(reinterpret_cast<unsigned char*>(&rval))[2] = (reinterpret_cast<unsigned char*>(&v))[1];
(reinterpret_cast<unsigned char*>(&rval))[1] = (reinterpret_cast<unsigned char*>(&v))[2];
(reinterpret_cast<unsigned char*>(&rval))[0] = (reinterpret_cast<unsigned char*>(&v))[3];
return rval;
}
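// Illustrative note: bytereorder reverses the byte order of any 4-byte
// scalar, e.g. 0x01020304 -> 0x04030201, converting between little- and
// big-endian representations; it backs the art_swap_endian option
// (default true) used below.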
template< typename T_store=float >
class art_output_plugin : public output_plugin
{
public:
bool do_baryons_;
bool swap_endianness_;
double omegab_, omegam_;
double gamma_;
double astart_;
double zstart_;
size_t npcdm_;
int hsize_;
protected:
enum iofields {
id_dm_pos, id_dm_vel, id_gas_pos, id_gas_vel
};
struct header
{
char head[45];
float aexpN; // current expansion factor
float aexp0; // initial expansion factor
float amplt; // Amplitude of density fluctuations
float astep; // Delta a -> time step.
// This value is also stored in pt.dat (binary 1 float)
// It is recalculated by art for the next steps so just a small value should work
int istep; // step (=0 in IC)
float partw; // mass of highest res particle.
float TINTG; //=0 in IC
float EKIN; //SUM 0.5 * m_i*(v_i**2) in code units
float EKIN1; //=0 in IC
float EKIN2; //=0 in IC
float AU0; //=0 in IC
float AEU0; //=0 in IC
int NROWC; // Number of particles in 1 dim (number of particles per page = NROW**2)
int NGRIDC; // Number of cells in 1 dim
int nspecies; // number of dm species
int Nseed; // random number used ( 0 for MUSIC? or set the random number used in the lowest level?)
float Om0; //Omega_m
float Oml0; //Omega_L
float hubble; //hubble constant h=H/100
float Wp5; //
float Ocurv; //Omega_k
float Omb0; // this parameter only appears in header in hydro runs
float wpart[10]; // extras[0-9] particle masses from high res to low res (normalized to low res particle)
// Mass of smallest particle=wpart[0]*Om0*2.746e+11*(Box/NGRID)**3 -> Msun/h
// Mass of largest particle=wpart[nspecies-1]*Om0*2.746e+11*(Box/NGRID)**3 -> Msun/h
int lpart[10]; // extras[10-19] number of particles from high res to low res cumulative!!!
//(i.e., lpart[0]=Nhigh res particles; lpart[1]=lpart[0]+N_this_level; etc) so lpart[nspecies-1]=N total
float extras[80]; //extras[20-99]
//extras[9]=iLblock ->0 in IC
//extras[10]=LevMin ->0 in IC
//extras[11]=LevSmall ->0 in IC
//extras[12]=LevLarge ->0 in IC
//extras[13]=Omegab ->0 in IC; fix it?
//extras[14]=sig8 ->0 in IC; fix it?
//extras[15]=Spslope ->0 in IC; fix it? Slope of the Power spectrum
//extras[16]=iDEswtch ->0 in IC; DE Flag=0:LCDM 1:w 2:RP 3:SUGRA
//extras[17]=DEw0 ->0 in IC; w0 for DE z=0
//extras[18]=DEwprime ->0 in IC; DE parameter
//extras[59]= 0 or 1; is used as switch for random numbers generators [do not apply in music use 0?]
//extras[60]= lux - level of luxury [do not apply in music use 0?]
//extras[79]=Lbox (Mpc/h)
};
struct ptf
{
float astep;
};
header header_;
ptf ptf_;
std::string fname;
size_t np_fine_gas_, np_fine_dm_, np_coarse_dm_;
size_t block_buf_size_;
size_t npartmax_;
double YHe_;
// helper class to read temp files
class pistream : public std::ifstream
{
public:
pistream (std::string fname, size_t npart )
: std::ifstream( fname.c_str(), std::ios::binary )
{
size_t blk;
if( !this->good() )
{
LOGERR("Could not open buffer file in ART output plug-in");
throw std::runtime_error("Could not open buffer file in ART output plug-in");
}
this->read( (char*)&blk, sizeof(size_t) );
if( blk != (size_t)(npart*sizeof(T_store)) )
{
LOGERR("Internal consistency error in ART output plug-in");
LOGERR("Expected %d bytes in temp file but found %d",npart*(unsigned)sizeof(T_store),blk);
throw std::runtime_error("Internal consistency error in ART output plug-in");
}
}
pistream ()
{
}
void open(std::string fname, size_t npart )
{
std::ifstream::open( fname.c_str(), std::ios::binary );
size_t blk;
if( !this->good() )
{
LOGERR("Could not open buffer file \'%s\' in ART output plug-in",fname.c_str());
throw std::runtime_error("Could not open buffer file in ART output plug-in");
}
this->read( (char*)&blk, sizeof(size_t) );
if( blk != (size_t)(npart*sizeof(T_store)) )
{
LOGERR("Internal consistency error in ART output plug-in");
LOGERR("Expected %d bytes in temp file but found %d",npart*(unsigned)sizeof(T_store),blk);
throw std::runtime_error("Internal consistency error in ART output plug-in");
}
}
};
// non-public member functions
void write_header_file( void ) //PMcrd.DAT
{
std::string fout;
if(do_baryons_)
fout = "/PMcrdIC.DAT";
else
fout = "/PMcrd.DAT";
std::string partfname = fname_ + fout;
std::ofstream ofs( partfname.c_str(), std::ios::trunc );
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
header this_header(header_);
//Should be 529 in a dm-only run and 533 in a baryon run,
//but struct alignment breaks writing it in one block, so it must be written field by field:
int blksize = hsize_;
if( swap_endianness_ )
{
LOGINFO("ART : swap_endianness option enabled");
blksize = bytereorder( blksize );
this_header.aexpN = bytereorder( this_header.aexpN );
this_header.aexp0 = bytereorder( this_header.aexp0 );
this_header.amplt = bytereorder( this_header.amplt );
this_header.astep = bytereorder( this_header.astep );
this_header.istep = bytereorder( this_header.istep );
this_header.partw = bytereorder( this_header.partw );
this_header.TINTG = bytereorder( this_header.TINTG );
this_header.EKIN = bytereorder( this_header.EKIN );
this_header.EKIN1 = bytereorder( this_header.EKIN1 );
this_header.EKIN2 = bytereorder( this_header.EKIN2 );
this_header.AU0 = bytereorder( this_header.AU0 );
this_header.AEU0 = bytereorder( this_header.AEU0 );
this_header.NROWC = bytereorder( this_header.NROWC );
this_header.NGRIDC = bytereorder( this_header.NGRIDC );
this_header.nspecies = bytereorder( this_header.nspecies );
this_header.Nseed = bytereorder( this_header.Nseed );
this_header.Om0 = bytereorder( this_header.Om0);
this_header.Oml0 = bytereorder( this_header.Oml0 );
this_header.hubble = bytereorder( this_header.hubble );
this_header.Wp5 = bytereorder( this_header.Wp5 );
this_header.Ocurv = bytereorder( this_header.Ocurv );
this_header.Omb0 = bytereorder( this_header.Omb0 );
for( int i=0; i<10; ++i )
{
this_header.wpart[i] = bytereorder( this_header.wpart[i] );
this_header.lpart[i] = bytereorder( this_header.lpart[i] );
}
for( int i=0; i<80; ++i )
{
this_header.extras[i] = bytereorder( this_header.extras[i] );
}
}
ofs.write( (char *)&blksize, sizeof(int) );
//ofs.write( (char *)&this_header,sizeof(header)); //Not working because struct aligment, so:
ofs.write( (char *)&this_header.head,sizeof(this_header.head));
ofs.write( (char *)&this_header.aexpN,sizeof(this_header.aexpN));
ofs.write( (char *)&this_header.aexp0,sizeof(this_header.aexp0));
ofs.write( (char *)&this_header.amplt,sizeof(this_header.amplt));
ofs.write( (char *)&this_header.astep,sizeof(this_header.astep));
ofs.write( (char *)&this_header.istep,sizeof(this_header.istep));
ofs.write( (char *)&this_header.partw,sizeof(this_header.partw));
ofs.write( (char *)&this_header.TINTG,sizeof(this_header.TINTG));
ofs.write( (char *)&this_header.EKIN,sizeof(this_header.EKIN));
ofs.write( (char *)&this_header.EKIN1,sizeof(this_header.EKIN1));
ofs.write( (char *)&this_header.EKIN2,sizeof(this_header.EKIN2));
ofs.write( (char *)&this_header.AEU0,sizeof(this_header.AEU0));
ofs.write( (char *)&this_header.AEU0,sizeof(this_header.AEU0));
ofs.write( (char *)&this_header.NROWC,sizeof(this_header.NROWC));
ofs.write( (char *)&this_header.NGRIDC,sizeof(this_header.NGRIDC));
ofs.write( (char *)&this_header.nspecies,sizeof(this_header.nspecies));
ofs.write( (char *)&this_header.Nseed,sizeof(this_header.Nseed));
ofs.write( (char *)&this_header.Om0,sizeof(this_header.Om0));
ofs.write( (char *)&this_header.Oml0,sizeof(this_header.Oml0));
ofs.write( (char *)&this_header.hubble,sizeof(this_header.hubble));
ofs.write( (char *)&this_header.Wp5,sizeof(this_header.Wp5));
ofs.write( (char *)&this_header.Ocurv,sizeof(this_header.Ocurv));
ofs.write( (char *)&this_header.wpart,sizeof(this_header.wpart));
ofs.write( (char *)&this_header.lpart,sizeof(this_header.lpart));
ofs.write( (char *)&this_header.extras,sizeof(this_header.extras));
ofs.write( (char *)&blksize, sizeof(int) );
ofs.close();
LOGINFO("ART : done writing header file.");
}
void write_pt_file( void ) //pt.dat
{
std::string partfname = fname_ + "/pt.dat";
std::ofstream ofs( partfname.c_str(), std::ios::trunc );
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
ptf this_ptf(ptf_);
int blksize = sizeof(ptf); //4
if( swap_endianness_ )
{
blksize = bytereorder( blksize );
this_ptf = bytereorder( this_ptf );
}
ofs.write( (char *)&blksize, sizeof(int) );
ofs.write( (char *)&this_ptf,sizeof(ptf));
ofs.write( (char *)&blksize, sizeof(int) );
ofs.close();
LOGINFO("ART : done writing pt file.");
}
void adjust_buf_endianness( T_store* buf )
{
if( swap_endianness_ )
{
for( size_t i=0; i<block_buf_size_; ++i )
buf[i] = bytereorder<T_store>( buf[i] );
}
}
/*
The direct format writes the particle data in pages. Each page of particles is read into a common block,
which has the structure: X(Npage),Y(Npage),Z(Npage),Vx(Npage),Vy(Npage),Vz(Npage).
There are NO Fortran size blocks before or after these blocks!!
The number of particles in each page (Npage) is Npage = Nrow**2; Npages = (N_particles -1)/NPAGE +1
so the last page can sometimes be tricky (zooms): N_in_last=N_particles -NPAGE*(Npages-1)
But keep in mind that ART expects all pages to be written in full regardless of the actual number of particles
you care about.
*/
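/*
Worked example (illustrative, hypothetical numbers): for NROW = 2^10 = 1024,
Npage = 1024**2 = 1048576. With N_particles = 3000000,
Npages = (3000000-1)/1048576 + 1 = 3 and
N_in_last = 3000000 - 2*1048576 = 902848,
so the remaining 145728 slots of the last page are zero-padded (see the
buffer clearing in the write loops below).
*/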
void assemble_DM_file( void ) //PMcrs0.DAT
{
// file name
std::string fout;
if(do_baryons_)
fout = "/PMcrs0IC.DAT";
else
fout = "/PMcrs0.DAT";
std::string partfname = fname_ + fout;
std::ofstream ofs( partfname.c_str(), std::ios::trunc );
// generate all temp file names
char fnx[256],fny[256],fnz[256],fnvx[256],fnvy[256],fnvz[256];
sprintf( fnx, "___ic_temp_%05d.bin", 100*id_dm_pos+0 );
sprintf( fny, "___ic_temp_%05d.bin", 100*id_dm_pos+1 );
sprintf( fnz, "___ic_temp_%05d.bin", 100*id_dm_pos+2 );
sprintf( fnvx, "___ic_temp_%05d.bin", 100*id_dm_vel+0 );
sprintf( fnvy, "___ic_temp_%05d.bin", 100*id_dm_vel+1 );
sprintf( fnvz, "___ic_temp_%05d.bin", 100*id_dm_vel+2 );
// create buffers for temporary data
T_store *tmp1, *tmp2, *tmp3, *tmp4, *tmp5, *tmp6;
tmp1 = new T_store[block_buf_size_];
tmp2 = new T_store[block_buf_size_];
tmp3 = new T_store[block_buf_size_];
tmp4 = new T_store[block_buf_size_];
tmp5 = new T_store[block_buf_size_];
tmp6 = new T_store[block_buf_size_];
// read in the data from the temporary files in slabs and write it to the output file
size_t npleft, n2read;
size_t npcdm = npcdm_;
LOGINFO("writing DM data to ART format file");
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
pistream ifs_x, ifs_y, ifs_z, ifs_vx, ifs_vy, ifs_vz;
ifs_x.open( fnx, npcdm );
ifs_y.open( fny, npcdm );
ifs_z.open( fnz, npcdm );
ifs_vx.open( fnvx, npcdm );
ifs_vy.open( fnvy, npcdm );
ifs_vz.open( fnvz, npcdm );
npleft = npcdm;
n2read = std::min(block_buf_size_,npleft);
while( n2read > 0 )
{
// To make sure the last page in zooms has 0s in non-relevant values.
// NOT MANDATORY. Can be commented out if it makes things slow,
// but I do not like the idea of writing data in the file
// that could be interpreted as real.
if(n2read<block_buf_size_)
{
for (int i = 0; i < int(block_buf_size_); i++)
{
tmp1[i]=0.0;tmp2[i]=0.0;tmp3[i]=0.0;tmp4[i]=0.0;
tmp5[i]=0.0;tmp6[i]=0.0;
}
}
ifs_x.read( reinterpret_cast<char*>(&tmp1[0]), n2read*sizeof(T_store) );
ifs_y.read( reinterpret_cast<char*>(&tmp2[0]), n2read*sizeof(T_store) );
ifs_z.read( reinterpret_cast<char*>(&tmp3[0]), n2read*sizeof(T_store) );
ifs_vx.read( reinterpret_cast<char*>(&tmp4[0]), n2read*sizeof(T_store) );
ifs_vy.read( reinterpret_cast<char*>(&tmp5[0]), n2read*sizeof(T_store) );
ifs_vz.read( reinterpret_cast<char*>(&tmp6[0]), n2read*sizeof(T_store) );
adjust_buf_endianness( tmp1 );
adjust_buf_endianness( tmp2 );
adjust_buf_endianness( tmp3 );
adjust_buf_endianness( tmp4 );
adjust_buf_endianness( tmp5 );
adjust_buf_endianness( tmp6 );
ofs.write( reinterpret_cast<char*>(&tmp1[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp2[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp3[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp4[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp5[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp6[0]), block_buf_size_*sizeof(T_store) );
npleft -= n2read;
n2read = std::min( block_buf_size_,npleft );
}
ifs_x.close();
ifs_y.close();
ifs_z.close();
ifs_vx.close();
ifs_vy.close();
ifs_vz.close();
ofs.close();
// clean up temp files
unlink(fnx);
unlink(fny);
unlink(fnz);
unlink(fnvx);
unlink(fnvy);
unlink(fnvz);
delete[] tmp1;
delete[] tmp2;
delete[] tmp3;
delete[] tmp4;
delete[] tmp5;
delete[] tmp6;
LOGINFO("ART : done writing DM file.");
}
/*
ART users currently create the baryon grid structure from the dark matter data file.
Therefore they have decided that the best way to implement baryons for ART in MUSIC was
by creating a file with the same dm format but using the baryon displacements and velocities.
From this file they will create the actual grid using their tools.
So here we just have to re-create the dark matter file format but using the baryon data.
*/
void assemble_gas_file( void ) //PMcrs0_GAS.DAT
{
// file name
std::string partfname = fname_ + "/PMcrs0_GAS.DAT";
std::ofstream ofs( partfname.c_str(), std::ios::trunc );
// generate all temp file names
char fnx[256],fny[256],fnz[256],fnvx[256],fnvy[256],fnvz[256];
sprintf( fnx, "___ic_temp_%05d.bin", 100*id_gas_pos+0 );
sprintf( fny, "___ic_temp_%05d.bin", 100*id_gas_pos+1 );
sprintf( fnz, "___ic_temp_%05d.bin", 100*id_gas_pos+2 );
sprintf( fnvx, "___ic_temp_%05d.bin", 100*id_gas_vel+0 );
sprintf( fnvy, "___ic_temp_%05d.bin", 100*id_gas_vel+1 );
sprintf( fnvz, "___ic_temp_%05d.bin", 100*id_gas_vel+2 );
// create buffers for temporary data
T_store *tmp1, *tmp2, *tmp3, *tmp4, *tmp5, *tmp6;
tmp1 = new T_store[block_buf_size_];
tmp2 = new T_store[block_buf_size_];
tmp3 = new T_store[block_buf_size_];
tmp4 = new T_store[block_buf_size_];
tmp5 = new T_store[block_buf_size_];
tmp6 = new T_store[block_buf_size_];
// read in the data from the temporary files in slabs and write it to the output file
size_t npleft, n2read;
size_t npcgas = npcdm_; // # of gas elements should be equal to # of dm elements
LOGINFO("writing gas data to ART format file");
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
pistream ifs_x, ifs_y, ifs_z, ifs_vx, ifs_vy, ifs_vz;
ifs_x.open( fnx, npcgas );
ifs_y.open( fny, npcgas );
ifs_z.open( fnz, npcgas );
ifs_vx.open( fnvx, npcgas );
ifs_vy.open( fnvy, npcgas );
ifs_vz.open( fnvz, npcgas );
npleft = npcgas;
n2read = std::min(block_buf_size_,npleft);
while( n2read > 0 )
{
// To make sure the last page in zooms has 0s in non-relevant values.
// NOT MANDATORY. Can be commented out if it makes things slow,
// but I do not like the idea of writing data in the file
// that could be interpreted as real.
if(n2read<block_buf_size_)
{
for (int i = 0; i < int(block_buf_size_); i++)
{
tmp1[i]=0.0;tmp2[i]=0.0;tmp3[i]=0.0;tmp4[i]=0.0;
tmp5[i]=0.0;tmp6[i]=0.0;
}
}
ifs_x.read( reinterpret_cast<char*>(&tmp1[0]), n2read*sizeof(T_store) );
ifs_y.read( reinterpret_cast<char*>(&tmp2[0]), n2read*sizeof(T_store) );
ifs_z.read( reinterpret_cast<char*>(&tmp3[0]), n2read*sizeof(T_store) );
ifs_vx.read( reinterpret_cast<char*>(&tmp4[0]), n2read*sizeof(T_store) );
ifs_vy.read( reinterpret_cast<char*>(&tmp5[0]), n2read*sizeof(T_store) );
ifs_vz.read( reinterpret_cast<char*>(&tmp6[0]), n2read*sizeof(T_store) );
adjust_buf_endianness( tmp1 );
adjust_buf_endianness( tmp2 );
adjust_buf_endianness( tmp3 );
adjust_buf_endianness( tmp4 );
adjust_buf_endianness( tmp5 );
adjust_buf_endianness( tmp6 );
ofs.write( reinterpret_cast<char*>(&tmp1[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp2[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp3[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp4[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp5[0]), block_buf_size_*sizeof(T_store) );
ofs.write( reinterpret_cast<char*>(&tmp6[0]), block_buf_size_*sizeof(T_store) );
npleft -= n2read;
n2read = std::min( block_buf_size_,npleft );
}
ifs_x.close();
ifs_y.close();
ifs_z.close();
ifs_vx.close();
ifs_vy.close();
ifs_vz.close();
ofs.close();
// clean up temp files
unlink(fnx);
unlink(fny);
unlink(fnz);
unlink(fnvx);
unlink(fnvy);
unlink(fnvz);
delete[] tmp1;
delete[] tmp2;
delete[] tmp3;
delete[] tmp4;
delete[] tmp5;
delete[] tmp6;
LOGINFO("ART : done writing gas file.");
// Temperature
const double Tcmb0 = 2.726;
const double h2 = header_.hubble*header_.hubble;
const double adec = 1.0/(160.*pow(omegab_*h2/0.022,2.0/5.0));
const double Tini = astart_<adec? Tcmb0/astart_ : Tcmb0/astart_/astart_*adec;
const double mu = (Tini>1.e4) ? 4.0/(8.-5.*YHe_) : 4.0/(1.+3.*(1.-YHe_));
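// Worked example (illustrative): for YHe_=0.248 and Tini < 1e4 K the gas is
// neutral, giving mu = 4/(1+3*(1-0.248)) ~ 1.23; the ionized branch would
// give mu = 4/(8-5*0.248) ~ 0.59 instead.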
LOGINFO("ART : set initial gas temperature to %.3f K (%.3f K/mu)",Tini, Tini/mu);
}
public:
explicit art_output_plugin ( config_file& cf )
: output_plugin( cf )
{
mkdir( fname_.c_str(), 0777 ); // ignore failure, e.g. if the directory already exists
do_baryons_ = cf.getValueSafe<bool>("setup","baryons",false);
// We need to say that we want to do SPH for baryons
// because if not MUSIC does not calculate/write gas positions
cf.insertValue("setup","do_SPH","yes");
// header size (alignment problem)
hsize_ = 529; // dm & hydro run
omegab_ = cf.getValueSafe<double>("cosmology","Omega_b",0.0);
omegam_ = cf.getValue<double>("cosmology","Omega_m");
zstart_ = cf.getValue<double>("setup","zstart");
astart_ = 1.0/(1.0+zstart_);
swap_endianness_ = cf.getValueSafe<bool>("output","art_swap_endian",true);
int levelmin = cf.getValue<unsigned>("setup","levelmin");
int levelmax = cf.getValue<unsigned>("setup","levelmax");
block_buf_size_ = (size_t) (pow(pow(2,levelmax),2)); //Npage=nrow^2; Number of particles in each page
YHe_ = cf.getValueSafe<double>("cosmology","YHe",0.248);
gamma_ = cf.getValueSafe<double>("cosmology","gamma",5.0/3.0);
// Set header
std::string thead;
thead=cf.getValueSafe<std::string>("output","header","ICs generated using MUSIC");
strcpy(header_.head,thead.c_str()); // text for the header; any easy way to add also the version?
std::string ws = " "; // Filling with blanks. Any better way?
for (int i=thead.size(); i<45;i++)
{
header_.head[i]=ws[0];
}
header_.aexpN = astart_;
header_.aexp0 = header_.aexpN;
header_.amplt = 0.0; // Amplitude of density fluctuations
header_.astep = cf.getValue<double>("output","astep"); // Seems that this must also be in the config file
ptf_.astep=header_.astep; // to write pt file
header_.istep = 0; // step (=0 in IC)
header_.partw = 0.0; // mass of highest res particle. SEE BELOW
header_.TINTG = 0; //=0 in IC
header_.EKIN = 0.0; //SUM 0.5 * m_i*(v_i**2) in code units. Seems that 0 is ok for ICs
header_.EKIN1 = 0; //=0 in IC
header_.EKIN2 = 0; //=0 in IC
header_.AU0 = 0; //=0 in IC
header_.AEU0 = 0; //=0 in IC
header_.NROWC = (int) pow(2,levelmax); // Number of particles in 1 dim (number of particles per page = NROW**2)
header_.NGRIDC = (int) pow(2,levelmin); // Number of cells in 1 dim
header_.nspecies = 0; // number of dm species
for( int ilevel=levelmax; ilevel>=(int)levelmin; --ilevel )
{
header_.nspecies+=1;
}
//header_.partw SEE BELOW
header_.Nseed = 0; // random number used ( 0 for MUSIC? or set the random number used in the lowest level?)
header_.Om0 = cf.getValue<double>("cosmology","Omega_m"); //Omega_m
header_.Oml0 = cf.getValue<double>("cosmology","Omega_L"); //Omega_L
header_.hubble = cf.getValue<double>("cosmology","H0")/100; //hubble constant h=H/100
header_.Wp5 = 0.0; // 0.0
header_.Ocurv = 1.0 - header_.Oml0 - header_.Om0; //Omega_k
header_.Omb0 = cf.getValue<double>("cosmology","Omega_b"); // this parameter only appears in header in hydro runs
for (int i=0;i<10;i++)
{
header_.wpart[i] = 0.0; // extras[0-9] part. masses from high res to low res (normalized to low res particle)
header_.lpart[i] = 0; // extras[10-19] # particles from high res to low res cumulative!!!
}
for (int i=0;i<header_.nspecies;i++)
{
header_.wpart[i] = 1.0/pow(8.0,(header_.nspecies-i-1)); //from high res to lo res // 8 should be changed for internal variable?
}
header_.partw = header_.wpart[0]; // mass of highest res particle.
for (int i=0;i<80;i++)
{
header_.extras[i] = 0.0; //extras[20-99]
}
header_.extras[13] = cf.getValueSafe<double>("cosmology","Omega_b",0.0);
header_.extras[14] = cf.getValue<double>("cosmology","sigma_8");
header_.extras[15] = cf.getValue<double>("cosmology","nspec"); //Slope of the Power spectrum
header_.extras[79] = cf.getValue<double>("setup","boxlength");
LOGINFO("ART : done header info.");
}
void write_dm_mass( const grid_hierarchy& gh )
{
//... write data for dark matter mass......
// This is not needed for ART
}
void write_dm_position( int coord, const grid_hierarchy& gh )
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
//... store all the meta data about the grid hierarchy in header variables
npcdm_ = nptot;
for (int i=0;i<header_.nspecies;i++)
{
header_.lpart[i] = gh.count_leaf_cells(gh.levelmax()-i, gh.levelmax()); //cumulative!!
}
// Now, let us write the dm particle info
std::vector<T_store> temp_data;
temp_data.reserve( block_buf_size_ );
//coordinates are in the range 1 - (NGRID+1)
// so scale factor is scaleX = Box/NGRID -> to Mpc/h (Box in Mpc/h)
double xfac = (double) header_.NGRIDC;
char temp_fname[256];
sprintf( temp_fname, "___ic_temp_%05d.bin", 100*id_dm_pos+coord );
std::ofstream ofs_temp( temp_fname, std::ios::binary|std::ios::trunc );
size_t blksize = sizeof(T_store)*nptot;
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
size_t nwritten = 0;
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = fmod( (xx[coord]+(*gh.get_grid(ilevel))(i,j,k)) + 1.0, 1.0 ) ;
xx[coord] = (xx[coord]*xfac)+1.0;
//xx[coord] = ((xx[coord]+(*gh.get_grid(ilevel))(i,j,k)));
if( temp_data.size() < block_buf_size_ )
temp_data.push_back( xx[coord] );
else
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*block_buf_size_ );
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back( xx[coord] );
}
}
if( temp_data.size() > 0 )
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*temp_data.size() );
nwritten += temp_data.size();
}
if( nwritten != nptot )
throw std::runtime_error("Internal consistency error while writing temporary file for positions");
//... dump to temporary file
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
if( ofs_temp.bad() )
throw std::runtime_error("I/O error while writing temporary file for positions");
ofs_temp.close();
}
void write_dm_velocity( int coord, const grid_hierarchy& gh )
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve( block_buf_size_ );
//In ART velocities are P = a_expansion*V_pec/(x_0H_0)
// where x_0 = comoving cell_size=Box/Ngrid;H_0 = Hubble at z=0
// so scale factor to physical km/s is convV= BoxV/AEXPN/NGRID
// (BoxV is Box*100; aexpn=current expansion factor)
//internal units of MUSIC: To km/s just multiply by Lbox
double vfac = (header_.aexpN*header_.NGRIDC)/(100.0);
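// Worked example (illustrative): MUSIC's internal velocities u become km/s
// when multiplied by Lbox, and x_0*H_0 = (Box/NGRID)*100 km/s (the h factors
// cancel), so P = a * (u*Box) / (Box*100/NGRID) = u * a*NGRIDC/100 = u * vfac;
// e.g. aexpN=0.01 and NGRIDC=256 give vfac = 0.0256.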
char temp_fname[256];
sprintf( temp_fname, "___ic_temp_%05d.bin", 100*id_dm_vel+coord );
std::ofstream ofs_temp( temp_fname, std::ios::binary|std::ios::trunc );
size_t blksize = sizeof(T_store)*nptot;
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
size_t nwritten = 0;
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
if( temp_data.size() < block_buf_size_ )
temp_data.push_back( (*gh.get_grid(ilevel))(i,j,k) * vfac );
else
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*block_buf_size_ );
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back( (*gh.get_grid(ilevel))(i,j,k) * vfac );
}
}
if( temp_data.size() > 0 )
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*temp_data.size() );
nwritten += temp_data.size();
}
if( nwritten != nptot )
throw std::runtime_error("Internal consistency error while writing temporary file for DM velocities");
//... dump to temporary file
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
if( ofs_temp.bad() )
throw std::runtime_error("I/O error while writing temporary file for DM velocities");
ofs_temp.close();
}
void write_dm_density( const grid_hierarchy& gh )
{
//... we don't care about DM density for art
}
void write_dm_potential( const grid_hierarchy& gh )
{ }
void write_gas_position( int coord, const grid_hierarchy& gh )
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve( block_buf_size_ );
//ART coordinates are in the range 1 - (NGRID+1)
double xfac = (double) header_.NGRIDC;
char temp_fname[256];
sprintf( temp_fname, "___ic_temp_%05d.bin", 100*id_gas_pos+coord );
std::ofstream ofs_temp( temp_fname, std::ios::binary|std::ios::trunc );
size_t blksize = sizeof(T_store)*nptot;
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
size_t nwritten = 0;
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = fmod( (xx[coord]+(*gh.get_grid(ilevel))(i,j,k)) + 1.0, 1.0 ) ;
xx[coord] = (xx[coord]*xfac)+1.0;
if( temp_data.size() < block_buf_size_ )
temp_data.push_back( xx[coord] );
else
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*block_buf_size_ );
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back( xx[coord] );
}
}
if( temp_data.size() > 0 )
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*temp_data.size() );
nwritten += temp_data.size();
}
if( nwritten != nptot )
throw std::runtime_error("Internal consistency error while writing temporary file for gas positions");
//... dump to temporary file
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
if( ofs_temp.bad() )
throw std::runtime_error("I/O error while writing temporary file for gas positions");
ofs_temp.close();
}
void write_gas_velocity( int coord, const grid_hierarchy& gh )
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve( block_buf_size_ );
//In ART velocities are P = a_expansion*V_pec/(x_0H_0)
// where x_0 = comoving cell_size=Box/Ngrid;H_0 = Hubble at z=0
// so scale factor to physical km/s is convV= BoxV/AEXPN/NGRID
// (BoxV is Box*100; aexpn=current expansion factor)
//internal units of MUSIC: To km/s just multiply by Lbox
double vfac = (header_.aexpN*header_.NGRIDC)/(100.0);
char temp_fname[256];
sprintf( temp_fname, "___ic_temp_%05d.bin", 100*id_gas_vel+coord );
std::ofstream ofs_temp( temp_fname, std::ios::binary|std::ios::trunc );
size_t blksize = sizeof(T_store)*nptot;
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
size_t nwritten = 0;
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( gh.is_in_mask(ilevel,i,j,k) && !gh.is_refined(ilevel,i,j,k) )
{
if( temp_data.size() < block_buf_size_ )
temp_data.push_back( (*gh.get_grid(ilevel))(i,j,k) * vfac );
else
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*block_buf_size_ );
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back( (*gh.get_grid(ilevel))(i,j,k) * vfac );
}
}
if( temp_data.size() > 0 )
{
ofs_temp.write( (char*)&temp_data[0], sizeof(T_store)*temp_data.size() );
nwritten += temp_data.size();
}
if( nwritten != nptot )
throw std::runtime_error("Internal consistency error while writing temporary file for gas velocities");
//... dump to temporary file
ofs_temp.write( (char *)&blksize, sizeof(size_t) );
if( ofs_temp.bad() )
throw std::runtime_error("I/O error while writing temporary file for gas velocities");
ofs_temp.close();
}
void write_gas_density( const grid_hierarchy& gh )
{ }
void write_gas_potential( const grid_hierarchy& gh )
{ }
void finalize( void )
{
this->write_header_file();
this->write_pt_file();
this->assemble_DM_file();
if(do_baryons_)
{
this->assemble_gas_file();
}
}
};
namespace{
output_plugin_creator_concrete<art_output_plugin<float> > creator("art");
}


@@ -1,634 +0,0 @@
/*
output_enzo.cc - This file is part of MUSIC -
a code to generate multi-scale initial conditions
for cosmological simulations
Copyright (C) 2010 Oliver Hahn
*/
#ifdef HAVE_HDF5
#include <sys/types.h>
#include <sys/stat.h>
#include "output.hh"
#include "HDF_IO.hh"
#define MAX_SLAB_SIZE 268435456 // = 256 MBytes
class enzo_output_plugin : public output_plugin
{
protected:
struct patch_header{
int component_rank;
size_t component_size;
std::vector<int> dimensions;
int rank;
std::vector<int> top_grid_dims;
std::vector<int> top_grid_end;
std::vector<int> top_grid_start;
};
struct sim_header{
std::vector<int> dimensions;
std::vector<int> offset;
float a_start;
float dx;
float h0;
float omega_b;
float omega_m;
float omega_v;
float vfact;
};
sim_header the_sim_header;
void write_sim_header( std::string fname, const sim_header& h )
{
HDFWriteGroupAttribute( fname, "/", "Dimensions", h.dimensions );
HDFWriteGroupAttribute( fname, "/", "Offset", h.offset );
HDFWriteGroupAttribute( fname, "/", "a_start", h.a_start );
HDFWriteGroupAttribute( fname, "/", "dx", h.dx );
HDFWriteGroupAttribute( fname, "/", "h0", h.h0 );
HDFWriteGroupAttribute( fname, "/", "omega_b", h.omega_b );
HDFWriteGroupAttribute( fname, "/", "omega_m", h.omega_m );
HDFWriteGroupAttribute( fname, "/", "omega_v", h.omega_v );
HDFWriteGroupAttribute( fname, "/", "vfact", h.vfact );
}
void write_patch_header( std::string fname, std::string dsetname, const patch_header& h )
{
HDFWriteDatasetAttribute( fname, dsetname, "Component_Rank", h.component_rank );
HDFWriteDatasetAttribute( fname, dsetname, "Component_Size", h.component_size );
HDFWriteDatasetAttribute( fname, dsetname, "Dimensions", h.dimensions );
HDFWriteDatasetAttribute( fname, dsetname, "Rank", h.rank );
HDFWriteDatasetAttribute( fname, dsetname, "TopGridDims", h.top_grid_dims );
HDFWriteDatasetAttribute( fname, dsetname, "TopGridEnd", h.top_grid_end );
HDFWriteDatasetAttribute( fname, dsetname, "TopGridStart", h.top_grid_start );
}
void dump_mask( const grid_hierarchy& gh )
{
char enzoname[256], filename[256];
std::string fieldname("RefinementMask");
for(unsigned ilevel=levelmin_; ilevel<=levelmax_; ++ilevel )
{
std::vector<int> ng, ng_fortran;
ng.push_back( gh.get_grid(ilevel)->size(0) );
ng.push_back( gh.get_grid(ilevel)->size(1) );
ng.push_back( gh.get_grid(ilevel)->size(2) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(2) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(1) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(0) );
//... need to copy data because we need to get rid of the ghost zones
//... write in slabs if data is more than MAX_SLAB_SIZE (default 256 MB)
//... full 3D block size
size_t all_data_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
//... write in slabs of MAX_SLAB_SIZE unless all_data_size is anyway smaller
size_t max_slab_size = std::min((size_t)MAX_SLAB_SIZE/sizeof(double), all_data_size );
//... but one slab has to be at least the size of one slice
max_slab_size = std::max(((size_t)ng[0] * (size_t)ng[1]), max_slab_size );
//... number of slices in one slab
size_t slices_in_slab = (size_t)((double)max_slab_size / ((size_t)ng[0] * (size_t)ng[1]));
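// e.g. (assumed numbers): a 512^3 grid has all_data_size = 512^3 ~ 1.34e8
// cells, and MAX_SLAB_SIZE/sizeof(double) = 3.36e7, so slices_in_slab =
// 3.36e7/(512*512) = 128 slices go into each hyperslab pass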
size_t nsz[3] = { ng[2], ng[1], ng[0] };
if( levelmin_ != levelmax_ )
sprintf( enzoname, "%s.%d", fieldname.c_str(), ilevel-levelmin_ );
else
sprintf( enzoname, "%s", fieldname.c_str() );
sprintf( filename, "%s/%s", fname_.c_str(), enzoname );
HDFCreateFile( filename );
write_sim_header( filename, the_sim_header );
//... create full array in file
HDFHyperslabWriter3Ds<int> *slab_writer = new HDFHyperslabWriter3Ds<int>( filename, enzoname, nsz );
//... create buffer
int *data_buf = new int[ slices_in_slab * (size_t)ng[0] * (size_t)ng[1] ];
//... write slice by slice
size_t slices_written = 0;
while( slices_written < (size_t)ng[2] )
{
slices_in_slab = std::min( (size_t)ng[2]-slices_written, slices_in_slab );
#pragma omp parallel for
for( int k=0; k<(int)slices_in_slab; ++k )
for( int j=0; j<ng[1]; ++j )
for( int i=0; i<ng[0]; ++i )
{
int mask_val = -1;
if( gh.is_in_mask(ilevel,i,j,k+slices_written) )
{
if( gh.is_refined(ilevel,i,j,k+slices_written) )
mask_val = 1;
else
mask_val = 0;
}
data_buf[ (size_t)(k*ng[1]+j)*(size_t)ng[0]+(size_t)i ] = mask_val;
}
size_t count[3], offset[3];
count[0] = slices_in_slab;
count[1] = ng[1];
count[2] = ng[0];
offset[0] = slices_written;
offset[1] = 0;
offset[2] = 0;
slab_writer->write_slab( data_buf, count, offset );
slices_written += slices_in_slab;
}
delete[] data_buf;
delete slab_writer;
//... header data for the patch
patch_header ph;
ph.component_rank = 1;
ph.component_size = (size_t)ng[0]*(size_t)ng[1]*(size_t)ng[2];
ph.dimensions = ng;
ph.rank = 3;
ph.top_grid_dims.assign(3, 1<<levelmin_);
//... offset_abs is in units of the current level cell size
double rfac = 1.0/(1<<(ilevel-levelmin_));
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 0)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 1)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 2)*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[0] + (int)(ng[0]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[1] + (int)(ng[1]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[2] + (int)(ng[2]*rfac) );
write_patch_header( filename, enzoname, ph );
}
}
void dump_grid_data( std::string fieldname, const grid_hierarchy& gh, double factor = 1.0, double add = 0.0 )
{
char enzoname[256], filename[256];
for(unsigned ilevel=levelmin_; ilevel<=levelmax_; ++ilevel )
{
std::vector<int> ng, ng_fortran;
ng.push_back( gh.get_grid(ilevel)->size(0) );
ng.push_back( gh.get_grid(ilevel)->size(1) );
ng.push_back( gh.get_grid(ilevel)->size(2) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(2) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(1) );
ng_fortran.push_back( gh.get_grid(ilevel)->size(0) );
//... need to copy data because we need to get rid of the ghost zones
//... write in slabs if data is more than MAX_SLAB_SIZE (default 256 MB)
//... full 3D block size
size_t all_data_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
//... write in slabs of MAX_SLAB_SIZE unless all_data_size is anyway smaller
size_t max_slab_size = std::min((size_t)MAX_SLAB_SIZE/sizeof(double), all_data_size );
//... but one slab has to be at least the size of one slice
max_slab_size = std::max(((size_t)ng[0] * (size_t)ng[1]), max_slab_size );
//... number of slices in one slab
size_t slices_in_slab = (size_t)((double)max_slab_size / ((size_t)ng[0] * (size_t)ng[1]));
size_t nsz[3] = { ng[2], ng[1], ng[0] };
if( levelmin_ != levelmax_ )
sprintf( enzoname, "%s.%d", fieldname.c_str(), ilevel-levelmin_ );
else
sprintf( enzoname, "%s", fieldname.c_str() );
sprintf( filename, "%s/%s", fname_.c_str(), enzoname );
HDFCreateFile( filename );
write_sim_header( filename, the_sim_header );
#ifdef SINGLE_PRECISION
//... create full array in file
HDFHyperslabWriter3Ds<float> *slab_writer = new HDFHyperslabWriter3Ds<float>( filename, enzoname, nsz );
//... create buffer
float *data_buf = new float[ slices_in_slab * (size_t)ng[0] * (size_t)ng[1] ];
#else
//... create full array in file
HDFHyperslabWriter3Ds<double> *slab_writer = new HDFHyperslabWriter3Ds<double>( filename, enzoname, nsz );
//... create buffer
double *data_buf = new double[ slices_in_slab * (size_t)ng[0] * (size_t)ng[1] ];
#endif
//... write slice by slice
size_t slices_written = 0;
while( slices_written < (size_t)ng[2] )
{
slices_in_slab = std::min( (size_t)ng[2]-slices_written, slices_in_slab );
#pragma omp parallel for
for( int k=0; k<(int)slices_in_slab; ++k )
for( int j=0; j<ng[1]; ++j )
for( int i=0; i<ng[0]; ++i )
data_buf[ (size_t)(k*ng[1]+j)*(size_t)ng[0]+(size_t)i ] =
(add+(*gh.get_grid(ilevel))(i,j,k+slices_written))*factor;
size_t count[3], offset[3];
count[0] = slices_in_slab;
count[1] = ng[1];
count[2] = ng[0];
offset[0] = slices_written;
offset[1] = 0;
offset[2] = 0;
slab_writer->write_slab( data_buf, count, offset );
slices_written += slices_in_slab;
}
//... free buffer
delete[] data_buf;
//... finalize writing and close dataset
delete slab_writer;
//... header data for the patch
patch_header ph;
ph.component_rank = 1;
ph.component_size = (size_t)ng[0]*(size_t)ng[1]*(size_t)ng[2];
ph.dimensions = ng;
ph.rank = 3;
ph.top_grid_dims.assign(3, 1<<levelmin_);
//... offset_abs is in units of the current level cell size
double rfac = 1.0/(1<<(ilevel-levelmin_));
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 0)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 1)*rfac) );
ph.top_grid_start.push_back( (int)(gh.offset_abs(ilevel, 2)*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[0] + (int)(ng[0]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[1] + (int)(ng[1]*rfac) );
ph.top_grid_end.push_back( ph.top_grid_start[2] + (int)(ng[2]*rfac) );
write_patch_header( filename, enzoname, ph );
}
}
public:
enzo_output_plugin( config_file& cf )
: output_plugin( cf )
{
if( mkdir( fname_.c_str(), 0777 ) )
{
perror( fname_.c_str() );
throw std::runtime_error("Error in enzo_output_plugin!");
}
bool bhave_hydro = cf_.getValue<bool>("setup","baryons");
bool align_top = cf.getValueSafe<bool>( "setup", "align_top", false );
if( !align_top )
LOGWARN("Old ENZO versions may require \'align_top=true\'!");
the_sim_header.dimensions.push_back( 1<<levelmin_ );
the_sim_header.dimensions.push_back( 1<<levelmin_ );
the_sim_header.dimensions.push_back( 1<<levelmin_ );
the_sim_header.offset.push_back( 0 );
the_sim_header.offset.push_back( 0 );
the_sim_header.offset.push_back( 0 );
the_sim_header.a_start = 1.0/(1.0+cf.getValue<double>("setup","zstart"));
the_sim_header.dx = cf.getValue<double>("setup","boxlength")/the_sim_header.dimensions[0]/(cf.getValue<double>("cosmology","H0")*0.01); // not sure?!?
the_sim_header.h0 = cf.getValue<double>("cosmology","H0")*0.01;
if( bhave_hydro )
the_sim_header.omega_b = cf.getValue<double>("cosmology","Omega_b");
else
the_sim_header.omega_b = 0.0;
the_sim_header.omega_m = cf.getValue<double>("cosmology","Omega_m");
the_sim_header.omega_v = cf.getValue<double>("cosmology","Omega_L");
the_sim_header.vfact = cf.getValue<double>("cosmology","vfact")*the_sim_header.h0; //.. need to multiply by h, ENZO wants this factor for non h-1 units
}
~enzo_output_plugin()
{ }
void write_dm_mass( const grid_hierarchy& gh )
{ /* do nothing, not needed */ }
void write_dm_density( const grid_hierarchy& gh )
{ /* write the parameter file data */
bool bhave_hydro = cf_.getValue<bool>("setup","baryons");
double refine_region_fraction = cf_.getValueSafe<double>( "output", "enzo_refine_region_fraction", 0.8 );
char filename[256];
unsigned nbase = (unsigned)pow(2,levelmin_);
// write out the refinement masks
dump_mask( gh );
// write out a parameter file
sprintf( filename, "%s/parameter_file.txt", fname_.c_str() );
std::ofstream ofs( filename, std::ios::trunc );
ofs
<< "# Relevant Section of Enzo Paramter File (NOT COMPLETE!) \n"
<< "ProblemType = 30 // cosmology simulation\n"
<< "TopGridRank = 3\n"
<< "TopGridDimensions = " << nbase << " " << nbase << " " << nbase << "\n"
<< "SelfGravity = 1 // gravity on\n"
<< "TopGridGravityBoundary = 0 // Periodic BC for gravity\n"
<< "LeftFaceBoundaryCondition = 3 3 3 // same for fluid\n"
<< "RightFaceBoundaryCondition = 3 3 3\n"
<< "RefineBy = 2\n"
<< "\n"
<< "#\n";
if( bhave_hydro )
ofs
<< "CosmologySimulationOmegaBaryonNow = " << the_sim_header.omega_b << "\n"
<< "CosmologySimulationOmegaCDMNow = " << the_sim_header.omega_m-the_sim_header.omega_b << "\n";
else
ofs
<< "CosmologySimulationOmegaBaryonNow = " << 0.0 << "\n"
<< "CosmologySimulationOmegaCDMNow = " << the_sim_header.omega_m << "\n";
if( bhave_hydro )
ofs
<< "CosmologySimulationDensityName = GridDensity\n"
<< "CosmologySimulationVelocity1Name = GridVelocities_x\n"
<< "CosmologySimulationVelocity2Name = GridVelocities_y\n"
<< "CosmologySimulationVelocity3Name = GridVelocities_z\n";
ofs
<< "CosmologySimulationCalculatePositions = 1\n"
<< "CosmologySimulationParticleVelocity1Name = ParticleVelocities_x\n"
<< "CosmologySimulationParticleVelocity2Name = ParticleVelocities_y\n"
<< "CosmologySimulationParticleVelocity3Name = ParticleVelocities_z\n"
<< "CosmologySimulationParticleDisplacement1Name = ParticleDisplacements_x\n"
<< "CosmologySimulationParticleDisplacement2Name = ParticleDisplacements_y\n"
<< "CosmologySimulationParticleDisplacement3Name = ParticleDisplacements_z\n"
<< "\n"
<< "#\n"
<< "# define cosmology parameters\n"
<< "#\n"
<< "ComovingCoordinates = 1 // Expansion ON\n"
<< "CosmologyOmegaMatterNow = " << the_sim_header.omega_m << "\n"
<< "CosmologyOmegaLambdaNow = " << the_sim_header.omega_v << "\n"
<< "CosmologyHubbleConstantNow = " << the_sim_header.h0 << " // in 100 km/s/Mpc\n"
<< "CosmologyComovingBoxSize = " << cf_.getValue<double>("setup","boxlength") << " // in Mpc/h\n"
<< "CosmologyMaxExpansionRate = 0.015 // maximum allowed delta(a)/a\n"
<< "CosmologyInitialRedshift = " << cf_.getValue<double>("setup","zstart") << " //\n"
<< "CosmologyFinalRedshift = 0 //\n"
<< "GravitationalConstant = 1 // this must be true for cosmology\n"
<< "#\n"
<< "#\n"
<< "ParallelRootGridIO = 1\n"
<< "ParallelParticleIO = 1\n"
<< "PartitionNestedGrids = 1\n"
<< "CosmologySimulationNumberOfInitialGrids = " << 1+levelmax_-levelmin_ << "\n";
int num_prec = 10;
if( levelmax_ > 15 )
num_prec = 17;
//... only for additionally refined grids
for( unsigned ilevel = 0; ilevel< levelmax_-levelmin_; ++ilevel )
{
double h = 1.0/(1<<(levelmin_+1+ilevel));
ofs
<< "CosmologySimulationGridDimension[" << 1+ilevel << "] = "
<< std::setw(16) << gh.size( levelmin_+ilevel+1, 0 ) << " "
<< std::setw(16) << gh.size( levelmin_+ilevel+1, 1 ) << " "
<< std::setw(16) << gh.size( levelmin_+ilevel+1, 2 ) << "\n"
<< "CosmologySimulationGridLeftEdge[" << 1+ilevel << "] = "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*gh.offset_abs(levelmin_+ilevel+1, 0) << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*gh.offset_abs(levelmin_+ilevel+1, 1) << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*gh.offset_abs(levelmin_+ilevel+1, 2) << "\n"
<< "CosmologySimulationGridRightEdge[" << 1+ilevel << "] = "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*(gh.offset_abs(levelmin_+ilevel+1, 0)+gh.size( levelmin_+ilevel+1, 0 )) << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*(gh.offset_abs(levelmin_+ilevel+1, 1)+gh.size( levelmin_+ilevel+1, 1 )) << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*(gh.offset_abs(levelmin_+ilevel+1, 2)+gh.size( levelmin_+ilevel+1, 2 )) << "\n"
<< "CosmologySimulationGridLevel[" << 1+ilevel << "] = " << 1+ilevel << "\n";
}
if( levelmin_ != levelmax_ )
{
double h = 1.0/(1<<levelmax_);
double cen[3],le[3],re[3];
for (int i=0;i<3;i++)
{
cen[i] = gh.offset_abs(levelmax_, i)+gh.size( levelmax_, i )/2;
le[i] = cen[i]-refine_region_fraction*gh.size( levelmax_,i)/2;
re[i] = le[i] +refine_region_fraction*gh.size( levelmax_, i);
}
ofs
<< "#\n"
<< "# region allowed for further refinement\n"
<< "#\n"
// << "RefineRegionAutoAdjust = 1\n"
<< "RefineRegionLeftEdge = "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*le[0] << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*le[1] << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*le[2] << "\n"
<< "RefineRegionRightEdge = "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*re[0] << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*re[1] << " "
<< std::setw(num_prec+6) << std::setprecision(num_prec) << h*re[2]<< "\n";
}
// determine density maximum and minimum location
real_t rhomax = -1e30, rhomin = 1e30;
double loc_rhomax[3] = {0.0,0.0,0.0}, loc_rhomin[3] = {0.0,0.0,0.0};
int lvl_rhomax = 0, lvl_rhomin = 0;
real_t rhomax_lm = -1e30, rhomin_lm = 1e30;
double loc_rhomax_lm[3] = {0.0,0.0,0.0}, loc_rhomin_lm[3] = {0.0,0.0,0.0};
for( int ilevel=gh.levelmax(); ilevel>=(int)gh.levelmin(); --ilevel )
for( unsigned i=0; i<gh.get_grid(ilevel)->size(0); ++i )
for( unsigned j=0; j<gh.get_grid(ilevel)->size(1); ++j )
for( unsigned k=0; k<gh.get_grid(ilevel)->size(2); ++k )
if( ! gh.is_refined(ilevel,i,j,k) )
{
real_t rho = (*gh.get_grid(ilevel))(i,j,k);
if( rho > rhomax )
{
rhomax = rho;
lvl_rhomax = ilevel;
gh.cell_pos(ilevel, i, j, k, loc_rhomax);
}
if( rho < rhomin )
{
rhomin = rho;
lvl_rhomin = ilevel;
gh.cell_pos(ilevel, i, j, k, loc_rhomin);
}
if( ilevel == (int)gh.levelmax() )
{
if( rho > rhomax_lm )
{
rhomax_lm = rho;
gh.cell_pos(ilevel, i, j, k, loc_rhomax_lm);
}
if( rho < rhomin_lm )
{
rhomin_lm = rho;
gh.cell_pos(ilevel, i, j, k, loc_rhomin_lm);
}
}
}
double h = 1.0/(1<<levelmin_);
double shift[3];
shift[0] = -(double)cf_.getValue<int>( "setup", "shift_x" )*h;
shift[1] = -(double)cf_.getValue<int>( "setup", "shift_y" )*h;
shift[2] = -(double)cf_.getValue<int>( "setup", "shift_z" )*h;
if( gh.levelmin() != gh.levelmax() )
{
LOGINFO("Global density extrema: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f) (level=%d)",rhomin,loc_rhomin[0],loc_rhomin[1],loc_rhomin[2],lvl_rhomin);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomin[0]+shift[0],loc_rhomin[1]+shift[1],loc_rhomin[2]+shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f) (level=%d)",rhomax,loc_rhomax[0],loc_rhomax[1],loc_rhomax[2],lvl_rhomax);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomax[0]+shift[0],loc_rhomax[1]+shift[1],loc_rhomax[2]+shift[2]);
LOGINFO("Density extrema on finest level: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f)",rhomin_lm,loc_rhomin_lm[0],loc_rhomin_lm[1],loc_rhomin_lm[2]);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomin_lm[0]+shift[0],loc_rhomin_lm[1]+shift[1],loc_rhomin_lm[2]+shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f)",rhomax_lm,loc_rhomax_lm[0],loc_rhomax_lm[1],loc_rhomax_lm[2]);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomax_lm[0]+shift[0],loc_rhomax_lm[1]+shift[1],loc_rhomax_lm[2]+shift[2]);
}else{
LOGINFO("Global density extrema: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f)",rhomin,loc_rhomin[0],loc_rhomin[1],loc_rhomin[2]);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomin[0]+shift[0],loc_rhomin[1]+shift[1],loc_rhomin[2]+shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f)",rhomax,loc_rhomax[0],loc_rhomax[1],loc_rhomax[2]);
LOGINFO(" shifted back at (%f,%f,%f)",loc_rhomax[0]+shift[0],loc_rhomax[1]+shift[1],loc_rhomax[2]+shift[2]);
}
}
void write_dm_velocity( int coord, const grid_hierarchy& gh )
{
char enzoname[256];
sprintf( enzoname, "ParticleVelocities_%c", (char)('x'+coord) );
double vunit = 1.0/(1.225e2*sqrt(the_sim_header.omega_m/the_sim_header.a_start));
dump_grid_data( enzoname, gh, vunit );
}
void write_dm_position( int coord, const grid_hierarchy& gh )
{
char enzoname[256];
sprintf( enzoname, "ParticleDisplacements_%c", (char)('x'+coord) );
dump_grid_data( enzoname, gh );
}
void write_dm_potential( const grid_hierarchy& gh )
{ }
void write_gas_potential( const grid_hierarchy& gh )
{ }
void write_gas_velocity( int coord, const grid_hierarchy& gh )
{
double vunit = 1.0/(1.225e2*sqrt(the_sim_header.omega_m/the_sim_header.a_start));
char enzoname[256];
sprintf( enzoname, "GridVelocities_%c", (char)('x'+coord) );
dump_grid_data( enzoname, gh, vunit );
}
void write_gas_position( int coord, const grid_hierarchy& gh )
{
/* do nothing, not needed */
}
void write_gas_density( const grid_hierarchy& gh )
{
char enzoname[256];
sprintf( enzoname, "GridDensity" );
dump_grid_data( enzoname, gh, the_sim_header.omega_b/the_sim_header.omega_m, 1.0 );
}
void finalize( void )
{ }
};
namespace{
output_plugin_creator_concrete<enzo_output_plugin> creator("enzo");
}
#endif

1990
random.cc

File diff suppressed because it is too large


809
src/plugins/output_arepo.cc Normal file

@@ -0,0 +1,809 @@
/*
* output_arepo.cc - This file is part of MUSIC -
* a code to generate multi-scale initial conditions
* for cosmological simulations
*
* Copyright (C) 2010 Oliver Hahn
*
* Plugin: Dylan Nelson (dnelson@cfa.harvard.edu)
*/
#ifdef HAVE_HDF5
#define GAS_PARTTYPE 0
#define HIGHRES_DM_PARTTYPE 1
#define COARSE_DM_DEFAULT_PARTTYPE 2
#define STAR_PARTTYPE 4
#define NTYPES 6
#include <sstream>
#include <string>
#include <algorithm>
#include "output.hh"
#include "HDF_IO.hh"
class arepo_output_plugin : public output_plugin
{
protected:
// header/config
std::vector<std::vector<unsigned int>> nPart;
std::vector<long long> nPartTotal;
std::vector<double> massTable;
double time, redshift, boxSize;
unsigned int numFiles, coarsePartType;
double omega0, omega_L, hubbleParam;
// configuration
double UnitLength_in_cm, UnitMass_in_g, UnitVelocity_in_cm_per_s;
double omega_b, rhoCrit;
double posFac, velFac;
long long nPartTotAllTypes;
bool doBaryons, useLongIDs, doublePrec;
size_t npfine, npart, npcoarse;
std::vector<size_t> levelcounts;
// parameter file hints
int pmgrid, gridboost;
float softening, Tini;
using output_plugin::cf_;
// Nx1 vector (e.g. masses,particleids)
template <typename T>
void writeHDF5_a(std::string fieldName, int partTypeNum, const std::vector<T> &data)
{
hid_t HDF_FileID, HDF_GroupID, HDF_DatasetID, HDF_DataspaceID, HDF_Type;
hsize_t HDF_Dims, offset = 0;
std::stringstream GrpName;
GrpName << "PartType" << partTypeNum;
for (unsigned i = 0; i < numFiles; i++)
{
std::string filename = fname_;
HDF_Dims = data.size();
// modify local filename and write size
if (numFiles > 1)
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
HDF_Dims = ceil(data.size() / numFiles);
if (i == numFiles - 1)
HDF_Dims = data.size() - offset;
}
HDF_FileID = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
HDF_GroupID = H5Gopen(HDF_FileID, GrpName.str().c_str());
HDF_Type = GetDataType<T>();
HDF_DataspaceID = H5Screate_simple(1, &HDF_Dims, NULL);
HDF_DatasetID = H5Dcreate(HDF_GroupID, fieldName.c_str(), HDF_Type, HDF_DataspaceID, H5P_DEFAULT);
// write and close
H5Dwrite(HDF_DatasetID, HDF_Type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data[offset]);
H5Dclose(HDF_DatasetID);
H5Sclose(HDF_DataspaceID);
H5Gclose(HDF_GroupID);
H5Fclose(HDF_FileID);
offset += HDF_Dims;
}
}
// Nx3 vector (e.g. pos,vel), where coord = index of the second dimension (written one at a time)
template <typename T>
void writeHDF5_b(std::string fieldName, int coord, int partTypeNum, std::vector<T> &data, bool readFlag = false)
{
hid_t HDF_FileID, HDF_GroupID, HDF_DatasetID, HDF_DataspaceID, HDF_Type;
hsize_t HDF_Dims[2], HDF_DimsMem[2], w_offset = 0;
std::stringstream GrpName;
GrpName << "PartType" << partTypeNum;
for (unsigned i = 0; i < numFiles; i++)
{
std::string filename = fname_;
HDF_Dims[0] = data.size();
// modify local filename and write size
if (numFiles > 1)
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
HDF_Dims[0] = ceil(data.size() / numFiles);
if (i == numFiles - 1)
HDF_Dims[0] = data.size() - w_offset;
}
HDF_FileID = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
HDF_GroupID = H5Gopen(HDF_FileID, GrpName.str().c_str());
HDF_Type = GetDataType<T>();
HDF_Dims[1] = 3;
// if dataset does not yet exist, create it (on first coord call)
if (!(H5Lexists(HDF_GroupID, fieldName.c_str(), H5P_DEFAULT)))
{
HDF_DataspaceID = H5Screate_simple(2, HDF_Dims, NULL);
HDF_DatasetID = H5Dcreate(HDF_GroupID, fieldName.c_str(), HDF_Type, HDF_DataspaceID, H5P_DEFAULT);
H5Sclose(HDF_DataspaceID);
H5Dclose(HDF_DatasetID);
}
// make memory space (just indicates the size/shape of data)
HDF_DimsMem[0] = HDF_Dims[0];
HDF_DimsMem[1] = 1;
hid_t HDF_MemoryspaceID = H5Screate_simple(2, HDF_DimsMem, NULL);
// open hyperslab
hsize_t count[2] = {1, 1}, stride[2] = {1, 1}, offset[2] = {0, 0};
offset[1] = coord; // set where in the second dimension to write
count[0] = HDF_Dims[0]; // set size in the first dimension (num particles of this type)
HDF_DatasetID = H5Dopen(HDF_GroupID, fieldName.c_str());
HDF_DataspaceID = H5Dget_space(HDF_DatasetID);
H5Sselect_hyperslab(HDF_DataspaceID, H5S_SELECT_SET, offset, stride, count, NULL);
// write (or read) and close
if (readFlag)
H5Dread(HDF_DatasetID, HDF_Type, HDF_MemoryspaceID, HDF_DataspaceID, H5P_DEFAULT,
&data[w_offset]);
else
H5Dwrite(HDF_DatasetID, HDF_Type, HDF_MemoryspaceID, HDF_DataspaceID, H5P_DEFAULT,
&data[w_offset]);
H5Dclose(HDF_DatasetID);
H5Gclose(HDF_GroupID);
H5Fclose(HDF_FileID);
w_offset += HDF_Dims[0];
}
}
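// usage sketch (hypothetical call): writeHDF5_b("Coordinates", 0, 1, xdata)
// creates /PartType1/Coordinates as an Nx3 dataset on the first call and
// fills column 0; subsequent calls with coord = 1, 2 fill the remaining
// columns through the hyperslab selection above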
// called from finalize()
void generateAndWriteIDs(void)
{
long long offset = 1; // don't use ID==0
nPartTotAllTypes = 0;
for (size_t i = 0; i < nPartTotal.size(); i++)
{
if (!nPartTotal[i])
continue;
nPartTotAllTypes += nPartTotal[i];
if (!useLongIDs)
{
std::vector<int> ids = std::vector<int>(nPartTotal[i]);
for (int j = 0; j < nPartTotal[i]; j++)
ids[j] = offset + j;
writeHDF5_a("ParticleIDs", i, ids);
}
else
{
std::vector<long long> ids = std::vector<long long>(nPartTotal[i]);
for (long long j = 0; j < nPartTotal[i]; j++)
ids[j] = offset + j;
writeHDF5_a("ParticleIDs", i, ids);
}
// make IDs of all particle types sequential (unique) = unnecessary, but consistent with gadget output format
offset += nPartTotal[i];
}
}
void countLeafCells(const grid_hierarchy &gh)
{
npfine = 0;
npart = 0;
npcoarse = 0;
npfine = gh.count_leaf_cells(gh.levelmax(), gh.levelmax());
npart = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
if (levelmax_ != levelmin_) // multimass
npcoarse = gh.count_leaf_cells(gh.levelmin(), gh.levelmax() - 1);
}
template <typename T>
void __write_dm_mass(const grid_hierarchy &gh)
{
countLeafCells(gh);
// fill levelcount for header
levelcounts = std::vector<size_t>(levelmax_ - levelmin_ + 1);
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
levelcounts[gh.levelmax() - ilevel] = gh.count_leaf_cells(ilevel, ilevel);
if (levelmax_ > levelmin_ + 1) // morethan2bnd
{
// DM particles will have variable masses
size_t count = 0;
std::vector<T> data(npcoarse);
for (int ilevel = gh.levelmax() - 1; ilevel >= (int)gh.levelmin(); --ilevel)
{
// baryon particles live only on finest grid, these particles here are total matter particles
T pmass = omega0 * rhoCrit * pow(boxSize * posFac, 3.0) / pow(2, 3 * ilevel);
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
data[count++] = pmass;
}
}
if (count != npcoarse)
throw std::runtime_error("Internal consistency error while writing masses");
writeHDF5_a("Masses", coarsePartType, data); // write DM
}
else
{
// DM particles will all have the same mass, just write to massTable
if (levelmax_ != levelmin_) // multimass
massTable[coarsePartType] = omega0 * rhoCrit * pow(boxSize * posFac, 3.0) / pow(2, 3 * levelmin_);
}
}
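// worked example (hypothetical numbers): omega0 = 0.3, boxSize*posFac = 1e5
// (100 Mpc/h in kpc/h) and ilevel = 7 (128^3 cells) give
// pmass = 0.3 * 2.775e-8 * (1e5)^3 / 2^21 ~ 3.97 (in 1e10 Msun/h),
// matching the familiar 2.775e11 * Omega_m * (L/N)^3 Msun/h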
template <typename T>
void __write_dm_position(int coord, const grid_hierarchy &gh)
{
countLeafCells(gh);
// update header
hsize_t offset_fine = 0, offset_coarse = 0;
for (unsigned i = 0; i < numFiles; i++)
{
hsize_t dims_fine = ceil(npfine / numFiles);
hsize_t dims_coarse = ceil(npcoarse / numFiles);
if (i == numFiles - 1)
{
dims_fine = npfine - offset_fine;
dims_coarse = npcoarse - offset_coarse;
}
nPart[i][HIGHRES_DM_PARTTYPE] = dims_fine;
nPart[i][coarsePartType] = dims_coarse;
offset_fine += dims_fine;
offset_coarse += dims_coarse;
}
nPartTotal[HIGHRES_DM_PARTTYPE] = npfine;
nPartTotal[coarsePartType] = npcoarse;
// FINE: collect displacements and convert to absolute coordinates with correct units
int ilevel = gh.levelmax();
std::vector<T> data(npfine);
size_t count = 0;
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i, j, k)) * boxSize;
xx[coord] = fmod(xx[coord] + boxSize, boxSize);
data[count++] = (T)(xx[coord] * posFac);
}
writeHDF5_b("Coordinates", coord, HIGHRES_DM_PARTTYPE, data); // write fine DM
if (count != npfine)
throw std::runtime_error("Internal consistency error while writing fine DM pos");
// COARSE: collect displacements and convert to absolute coordinates with correct units
if (levelmax_ != levelmin_) // multimass
{
data = std::vector<T>(npcoarse, 0.0);
count = 0;
for (int ilevel = gh.levelmax() - 1; ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i, j, k)) * boxSize;
if (!doBaryons) // if so, we will handle the mod in write_gas_position
xx[coord] = fmod(xx[coord] + boxSize, boxSize) * posFac;
data[count++] = (T)xx[coord];
}
if (count != npcoarse)
throw std::runtime_error("Internal consistency error while writing coarse DM pos");
writeHDF5_b("Coordinates", coord, coarsePartType, data); // write coarse DM
}
}
template <typename T>
void __write_dm_velocity(int coord, const grid_hierarchy &gh)
{
countLeafCells(gh);
// FINE: collect velocities and convert to correct units
int ilevel = gh.levelmax();
std::vector<T> data(npfine);
size_t count = 0;
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
data[count++] = (T)(*gh.get_grid(ilevel))(i, j, k) * velFac;
}
writeHDF5_b("Velocities", coord, HIGHRES_DM_PARTTYPE, data); // write fine DM
if (count != npfine)
throw std::runtime_error("Internal consistency error while writing fine DM pos");
// COARSE: collect velocities and convert to correct units
if (levelmax_ != levelmin_) // multimass
{
data = std::vector<T>(npcoarse, 0.0);
count = 0;
for (int ilevel = gh.levelmax() - 1; ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
data[count++] = (T)(*gh.get_grid(ilevel))(i, j, k) * velFac;
}
if (count != npcoarse)
throw std::runtime_error("Internal consistency error while writing coarse DM pos");
writeHDF5_b("Velocities", coord, coarsePartType, data); // write coarse DM
}
}
template <typename T>
void __write_gas_velocity(int coord, const grid_hierarchy &gh)
{
countLeafCells(gh);
std::vector<T> gas_data(npart); // read/write gas at all levels from the gh
size_t count = 0;
for (int ilevel = levelmax_; ilevel >= (int)levelmin_; --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
gas_data[count++] = (T)(*gh.get_grid(ilevel))(i, j, k) * velFac;
}
if (count != npart)
throw std::runtime_error("Internal consistency error while writing GAS pos");
// calculate modified DM velocities if: multimass and baryons present
if (doBaryons && npcoarse)
{
double facb = omega_b / omega0;
double facc = (omega0 - omega_b) / omega0;
std::vector<T> dm_data(npcoarse);
writeHDF5_b("Velocities", coord, coarsePartType, dm_data, true); // read coarse DM vels
// overwrite
for (size_t i = 0; i < npcoarse; i++)
dm_data[i] = facc * dm_data[i] + facb * gas_data[npfine + i];
writeHDF5_b("Velocities", coord, coarsePartType, dm_data); // overwrite coarse DM vels
} // dm_data deallocated
// restrict gas_data to fine only and request write
std::vector<T> data(gas_data.begin() + 0, gas_data.begin() + npfine);
std::vector<T>().swap(gas_data); // deallocate
writeHDF5_b("Velocities", coord, GAS_PARTTYPE, data); // write highres gas
}
template <typename T>
void __write_gas_position(int coord, const grid_hierarchy &gh)
{
countLeafCells(gh);
// update header (will actually write only gas at levelmax)
hsize_t offset = 0;
for (unsigned i = 0; i < numFiles; i++)
{
hsize_t dims = ceil(npfine / numFiles);
if (i == numFiles - 1)
dims = npfine - offset;
nPart[i][GAS_PARTTYPE] = dims;
offset += dims;
}
nPartTotal[GAS_PARTTYPE] = npfine;
std::vector<double> gas_data(npart); // read/write gas at all levels from the gh
size_t count = 0;
double h = 1.0 / (1ul << gh.levelmax());
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
// shift particle positions (this has to be done as the same shift
// is used when computing the convolution kernel for SPH baryons)
xx[coord] += 0.5 * h;
xx[coord] = (xx[coord] + (*gh.get_grid(ilevel))(i, j, k)) * boxSize;
gas_data[count++] = xx[coord];
}
if (count != npart)
throw std::runtime_error("Internal consistency error while writing coarse DM pos");
// calculate modified DM coordinates if: multimass and baryons present
if (doBaryons && npcoarse)
{
double facb = omega_b / omega0;
double facc = (omega0 - omega_b) / omega0;
std::vector<T> dm_data(npcoarse);
writeHDF5_b("Coordinates", coord, coarsePartType, dm_data, true); // read coarse DM vels
// overwrite
for (size_t i = 0; i < npcoarse; i++)
{
dm_data[i] = facc * dm_data[i] + facb * gas_data[npfine + i];
dm_data[i] = fmod(dm_data[i] + boxSize, boxSize) * posFac;
}
writeHDF5_b("Coordinates", coord, coarsePartType, dm_data); // overwrite coarse DM vels
}
// restrict gas_data to fine only and request write
//std::vector<float> data( gas_data.begin() + 0, gas_data.begin() + npfine );
std::vector<T> data(npfine);
for (size_t i = 0; i < npfine; i++)
data[i] = (T)(fmod(gas_data[i] + boxSize, boxSize) * posFac);
std::vector<double>().swap(gas_data); // deallocate
writeHDF5_b("Coordinates", coord, GAS_PARTTYPE, data); // write highres gas
}
public:
arepo_output_plugin(config_file &cf) : output_plugin(cf)
{
// ensure that everyone knows we want to do SPH, implies: bsph=1, bbshift=1, decic_baryons=1
// -> instead of just writing gas densities (which are here ignored), the gas displacements are also written
cf.insertValue("setup", "do_SPH", "yes");
// init header and config parameters
nPartTotal = std::vector<long long>(NTYPES, 0);
massTable = std::vector<double>(NTYPES, 0.0);
coarsePartType = cf.getValueSafe<unsigned>("output", "arepo_coarsetype", COARSE_DM_DEFAULT_PARTTYPE);
UnitLength_in_cm = cf.getValueSafe<double>("output", "arepo_unitlength", 3.085678e21); // 1.0 kpc
UnitMass_in_g = cf.getValueSafe<double>("output", "arepo_unitmass", 1.989e43); // 1.0e10 solar masses
UnitVelocity_in_cm_per_s = cf.getValueSafe<double>("output", "arepo_unitvel", 1e5); // 1 km/sec
omega0 = cf.getValue<double>("cosmology", "Omega_m");
omega_b = cf.getValue<double>("cosmology", "Omega_b");
omega_L = cf.getValue<double>("cosmology", "Omega_L");
redshift = cf.getValue<double>("setup", "zstart");
boxSize = cf.getValue<double>("setup", "boxlength");
doBaryons = cf.getValueSafe<bool>("setup", "baryons", false);
useLongIDs = cf.getValueSafe<bool>("output", "arepo_longids", false);
numFiles = cf.getValueSafe<unsigned>("output", "arepo_num_files", 1);
doublePrec = cf.getValueSafe<bool>("output", "arepo_doubleprec", 0);
for (unsigned i = 0; i < numFiles; i++)
nPart.push_back(std::vector<unsigned int>(NTYPES, 0));
// factors which multiply positions and velocities
time = 1.0 / (1.0 + redshift);
posFac = 3.085678e24 / UnitLength_in_cm; // MUSIC uses Mpc internally, i.e. posFac=1e3 for kpc output
velFac = (1.0f / sqrt(time)) * boxSize;
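// note (a sketch of the reasoning, assuming standard GADGET-style units):
// multiplying MUSIC-internal velocities by boxSize yields peculiar km/s,
// and the extra 1/sqrt(a) matches the Gadget/Arepo IC velocity
// convention u = v_pec/sqrt(a)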
// critical density
rhoCrit = 27.7519737e-9; // in h^2 1e10 M_sol / kpc^3
rhoCrit *= pow(UnitLength_in_cm / 3.085678e21, 3.0);
rhoCrit *= (1.989e43 / UnitMass_in_g);
// calculate PMGRID suggestion
pmgrid = pow(2, levelmin_) * 2; // unigrid
gridboost = 1;
if (levelmin_ != levelmax_)
{
double lxref[3], x0ref[3], x1ref[3];
double pmgrid_new;
the_region_generator->get_AABB(x0ref, x1ref, levelmax_); // generalized beyond box
for (int i = 0; i < 3; i++)
lxref[i] = x1ref[i] - x0ref[i];
// fraction box length of the zoom region
lxref[0] = pow((lxref[0] * lxref[1] * lxref[2]), 0.333);
pmgrid_new = pow(2, levelmax_) * 2; // to cover entire box at highest resolution
pmgrid_new *= lxref[0]; // only need to cover a fraction
if ((gridboost = round(pmgrid_new / pmgrid)) > 1)
gridboost = pow(2, ceil(log(gridboost) / log(2.0))); // round to nearest, higher power of 2
if (gridboost == 0)
gridboost = 1;
}
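// worked example (assumed numbers): levelmin_ = 7 gives pmgrid = 256; with
// levelmax_ = 11 and a zoom region spanning 1/4 of the box per dimension,
// pmgrid_new = 2^11 * 2 * 0.25 = 1024, so gridboost = round(1024/256) = 4
// (already a power of two)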
// calculate Tini for gas
hubbleParam = cf.getValue<double>("cosmology", "H0") / 100.0;
double astart = 1.0 / (1.0 + redshift);
double h2 = hubbleParam * hubbleParam;
double adec = 1.0 / (160.0 * pow(omega_b * h2 / 0.022, 2.0 / 5.0));
double Tcmb0 = 2.726;
Tini = astart < adec ? Tcmb0 / astart : Tcmb0 / astart / astart * adec;
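// worked example (assumed numbers): omega_b*h2 ~ 0.022 gives adec ~ 1/160 =
// 0.00625 (decoupling at z ~ 159); for zstart = 49, astart = 0.02 > adec, so
// Tini = 2.726/0.02^2 * 0.00625 ~ 42.6 K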
// calculate softening suggestion
softening = (boxSize * posFac) / pow(2, levelmax_) / 40.0;
// header and sanity checks
if (!doBaryons)
massTable[HIGHRES_DM_PARTTYPE] = omega0 * rhoCrit * pow(boxSize * posFac, 3.0) / pow(2, 3 * levelmax_);
else
massTable[HIGHRES_DM_PARTTYPE] = (omega0 - omega_b) * rhoCrit * pow(boxSize * posFac, 3.0) / pow(2, 3 * levelmax_);
if (coarsePartType == GAS_PARTTYPE || coarsePartType == HIGHRES_DM_PARTTYPE)
throw std::runtime_error("Error: Specified illegal Arepo particle type for coarse particles.");
if (coarsePartType == STAR_PARTTYPE)
LOGWARN("WARNING: Specified coarse particle type will collide with stars if USE_SFR enabled.");
// create file(s)
for (unsigned i = 0; i < numFiles; i++)
{
std::string filename = fname_;
if (numFiles > 1)
{
size_t pos = filename.find(".hdf5");
if (pos != filename.length() - 5)
throw std::runtime_error("Error: Unexpected output filename (doesn't end in .hdf5).");
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(pos, 5, s.str());
}
HDFCreateFile(filename);
// create particle type groups
std::stringstream GrpName;
GrpName << "PartType" << HIGHRES_DM_PARTTYPE;
HDFCreateGroup(filename, GrpName.str().c_str()); // highres or unigrid DM
if (doBaryons)
{
GrpName.str("");
GrpName << "PartType" << GAS_PARTTYPE;
HDFCreateGroup(filename, GrpName.str().c_str()); // gas
}
if (levelmax_ != levelmin_) // multimass
{
GrpName.str("");
GrpName << "PartType" << coarsePartType;
HDFCreateGroup(filename, GrpName.str().c_str()); // coarse DM
}
}
}
~arepo_output_plugin()
{
}
/* ------------------------------------------------------------------------------- */
void write_dm_mass(const grid_hierarchy &gh)
{
if (!doublePrec)
__write_dm_mass<float>(gh);
else
__write_dm_mass<double>(gh);
}
void write_dm_position(int coord, const grid_hierarchy &gh)
{
if (!doublePrec)
__write_dm_position<float>(coord, gh);
else
__write_dm_position<double>(coord, gh);
}
void write_dm_velocity(int coord, const grid_hierarchy &gh)
{
if (!doublePrec)
__write_dm_velocity<float>(coord, gh);
else
__write_dm_velocity<double>(coord, gh);
}
void write_dm_density(const grid_hierarchy &gh)
{ /* skip */
}
void write_dm_potential(const grid_hierarchy &gh)
{ /* skip */
}
/* ------------------------------------------------------------------------------- */
void write_gas_velocity(int coord, const grid_hierarchy &gh)
{
if (!doublePrec)
__write_gas_velocity<float>(coord, gh);
else
__write_gas_velocity<double>(coord, gh);
}
void write_gas_position(int coord, const grid_hierarchy &gh)
{
if (!doublePrec)
__write_gas_position<float>(coord, gh);
else
__write_gas_position<double>(coord, gh);
}
void write_gas_density(const grid_hierarchy &gh)
{
// if only saving highres gas, then all gas cells have the same initial mass
// do not write out densities as we write out displacements
if (doBaryons)
massTable[GAS_PARTTYPE] = omega_b * rhoCrit * pow(boxSize * posFac, 3.0) / pow(2, 3 * levelmax_);
}
void write_gas_potential(const grid_hierarchy &gh)
{ /* skip */
}
void finalize(void)
{
// generate and add contiguous IDs for each particle type we have written
generateAndWriteIDs();
std::vector<unsigned int> nPartTotalLW(nPartTotal.size());
std::vector<unsigned int> nPartTotalHW(nPartTotal.size());
for (size_t i = 0; i < nPartTotalHW.size(); i++)
{
nPartTotalHW[i] = (unsigned)((size_t)nPartTotal[i] >> 32);
nPartTotalLW[i] = (unsigned int)((size_t)nPartTotal[i]);
}
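// e.g. (hypothetical) nPartTotal[i] = 2^32 + 5 is stored as
// NumPart_Total = 5 with NumPart_Total_HighWord = 1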
// output particle counts
std::cout << " - Arepo : wrote " << nPartTotAllTypes << " particles..." << std::endl;
for (size_t i = 0; i < nPartTotal.size(); i++)
std::cout << " type [" << i << "] : " << std::setw(12) << nPartTotal[i] << std::endl;
std::cout << std::endl;
// write final header (some of these fields are required, others are extra info)
for (unsigned i = 0; i < numFiles; i++)
{
std::string filename = fname_;
if (numFiles > 1)
{
std::stringstream s;
s << "." << i << ".hdf5";
filename.replace(filename.find(".hdf5"), 5, s.str());
std::cout << " " << filename;
for (size_t j = 0; j < nPart[i].size(); j++)
std::cout << " " << std::setw(10) << nPart[i][j];
std::cout << std::endl;
}
HDFCreateGroup(filename, "Header");
HDFWriteGroupAttribute(filename, "Header", "NumPart_ThisFile", nPart[i]);
HDFWriteGroupAttribute(filename, "Header", "NumPart_Total", nPartTotalLW);
HDFWriteGroupAttribute(filename, "Header", "NumPart_Total_HighWord", nPartTotalHW);
HDFWriteGroupAttribute(filename, "Header", "MassTable", massTable);
HDFWriteGroupAttribute(filename, "Header", "BoxSize", boxSize);
HDFWriteGroupAttribute(filename, "Header", "NumFilesPerSnapshot", numFiles);
HDFWriteGroupAttribute(filename, "Header", "Time", time);
HDFWriteGroupAttribute(filename, "Header", "Redshift", redshift);
HDFWriteGroupAttribute(filename, "Header", "Omega0", omega0);
HDFWriteGroupAttribute(filename, "Header", "OmegaLambda", omega_L);
HDFWriteGroupAttribute(filename, "Header", "OmegaBaryon", omega_b);
HDFWriteGroupAttribute(filename, "Header", "HubbleParam", hubbleParam);
HDFWriteGroupAttribute(filename, "Header", "Flag_Sfr", 0);
HDFWriteGroupAttribute(filename, "Header", "Flag_Cooling", 0);
HDFWriteGroupAttribute(filename, "Header", "Flag_StellarAge", 0);
HDFWriteGroupAttribute(filename, "Header", "Flag_Metals", 0);
HDFWriteGroupAttribute(filename, "Header", "Flag_Feedback", 0);
HDFWriteGroupAttribute(filename, "Header", "Flag_DoublePrecision", (int)doublePrec);
HDFWriteGroupAttribute(filename, "Header", "Music_levelmin", levelmin_);
HDFWriteGroupAttribute(filename, "Header", "Music_levelmax", levelmax_);
HDFWriteGroupAttribute(filename, "Header", "Music_levelcounts", levelcounts);
HDFWriteGroupAttribute(filename, "Header", "haveBaryons", (int)doBaryons);
HDFWriteGroupAttribute(filename, "Header", "longIDs", (int)useLongIDs);
HDFWriteGroupAttribute(filename, "Header", "suggested_pmgrid", pmgrid);
HDFWriteGroupAttribute(filename, "Header", "suggested_gridboost", gridboost);
HDFWriteGroupAttribute(filename, "Header", "suggested_highressoft", softening);
HDFWriteGroupAttribute(filename, "Header", "suggested_gas_Tinit", Tini);
}
// give config/parameter file hints
if (useLongIDs)
std::cout << " - Arepo: Wrote 64bit IDs, enable LONGIDS." << std::endl;
if (doublePrec)
std::cout << " - Arepo: Double precision ICs, set INPUT_IN_DOUBLEPRECISION." << std::endl;
if (NTYPES != 6)
std::cout << " - Arepo: Using [" << NTYPES << "] particle types, set NTYPES to match." << std::endl;
if (doBaryons)
std::cout << " - Arepo: Wrote high-res gas (only), set REFINEMENT_HIGH_RES_GAS and GENERATE_GAS_IN_ICS with "
<< "SPLIT_PARTICLE_TYPE=" << pow(2, coarsePartType) << "." << std::endl;
if (levelmax_ != levelmin_)
std::cout << " - Arepo: Have zoom type ICs, set PLACEHIGHRESREGION=" << pow(2, HIGHRES_DM_PARTTYPE)
<< " (suggest PMGRID=" << pmgrid << " with GRIDBOOST=" << gridboost << ")." << std::endl;
else
std::cout << " - Arepo: Have unigrid type ICs (suggest PMGRID=" << pmgrid << ")." << std::endl;
if (levelmax_ > levelmin_ + 1)
std::cout << " - Arepo: More than one coarse DM mass using same type, set INDIVIDUAL_GRAVITY_SOFTENING="
<< pow(2, coarsePartType) << " (+" << pow(2, STAR_PARTTYPE) << " if including stars)." << std::endl;
if (doBaryons)
std::cout << " - Arepo: Set initial gas temperature to " << std::fixed << std::setprecision(3) << Tini << " K." << std::endl;
std::cout << " - Arepo: Suggest grav softening = " << std::setprecision(3) << softening << " for high res DM." << std::endl;
}
};
namespace
{
output_plugin_creator_concrete<arepo_output_plugin> creator("arepo");
}
#endif // HAVE_HDF5

903
src/plugins/output_art.cc Normal file

@@ -0,0 +1,903 @@
/*
output_art.cc - This file is part of MUSIC -
a code to generate multi-scale initial conditions
for cosmological simulations
Copyright (C) 2012 Jose Onorbe & Oliver Hahn
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <vector>
#include "output.hh"
template <typename T>
inline T bytereorder(T v)
{
T rval;
(reinterpret_cast<unsigned char *>(&rval))[3] = (reinterpret_cast<unsigned char *>(&v))[0];
(reinterpret_cast<unsigned char *>(&rval))[2] = (reinterpret_cast<unsigned char *>(&v))[1];
(reinterpret_cast<unsigned char *>(&rval))[1] = (reinterpret_cast<unsigned char *>(&v))[2];
(reinterpret_cast<unsigned char *>(&rval))[0] = (reinterpret_cast<unsigned char *>(&v))[3];
return rval;
}
template <typename T_store = float>
class art_output_plugin : public output_plugin
{
public:
bool do_baryons_;
bool swap_endianness_;
double omegab_, omegam_;
double gamma_;
double astart_;
double zstart_;
size_t npcdm_;
int hsize_;
protected:
enum iofields
{
id_dm_pos,
id_dm_vel,
id_gas_pos,
id_gas_vel
};
struct header
{
char head[45];
float aexpN; // current expansion factor
float aexp0; // initial expansion factor
float amplt; // Amplitude of density fluctuations
float astep; // Delta a -> time step.
// This value is also stored in pt.dat (binary 1 float)
// It is recalculated by art for the next steps so just a small value should work
int istep; // step (=0 in IC)
float partw; // mass of highest res particle.
float TINTG; //=0 in IC
float EKIN; //SUM 0.5 * m_i*(v_i**2) in code units
float EKIN1; //=0 in IC
float EKIN2; //=0 in IC
float AU0; //=0 in IC
float AEU0; //=0 in IC
int NROWC; // Number of particles in 1 dim (number of particles per page = NROW**2)
int NGRIDC; // Number of cells in 1 dim
int nspecies; // number of dm species
int Nseed; // random number used ( 0 for MUSIC? or set the random number used in the lowest level?)
float Om0; //Omega_m
float Oml0; //Omega_L
float hubble; //hubble constant h=H/100
float Wp5; //
float Ocurv; //Omega_k
float Omb0; // this parameter only appears in header in hydro runs
float wpart[10]; // extras[0-9] particle masses from high res to low res (normalized to low res particle)
// Mass of smallest particle=wpart[0]*Om0*2.746e+11*(Box/NGRID)**3 -> Msun/h
// Mass of largest particle=wpart[nspecies-1]*Om0*2.746e+11*(Box/NGRID)**3 -> Msun/h
int lpart[10]; // extras[10-19] number of particles from high res to low res cumulative!!!
//(i.e., lpart[0]=Nhigh res particles; lpart[1]=lpart[0]+N_this_level; etc) so lpart[nspecies-1]=N total
float extras[80]; //extras[20-99]
//extras[9]=iLblock ->0 in IC
//extras[10]=LevMin ->0 in IC
//extras[11]=LevSmall ->0 in IC
//extras[12]=LevLarge ->0 in IC
//extras[13]=Omegab ->0 in IC; fix it?
//extras[14]=sig8 ->0 in IC; fix it?
//extras[15]=Spslope ->0 in IC; fix it? Slope of the Power spectrum
//extras[16]=iDEswtch ->0 in IC; DE Flag=0:LCDM 1:w 2:RP 3:SUGRA
//extras[17]=DEw0 ->0 in IC; w0 for DE z=0
//extras[18]=DEwprime ->0 in IC; DE parameter
//extras[59]= 0 or 1; is used as switch for random numbers generators [do not apply in music use 0?]
//extras[60]= lux - level of luxury [do not apply in music use 0?]
//extras[79]=Lbox (Mpc/h)
};
struct ptf
{
float astep;
};
header header_;
ptf ptf_;
std::string fname;
size_t np_fine_gas_, np_fine_dm_, np_coarse_dm_;
size_t block_buf_size_;
size_t npartmax_;
double YHe_;
// helper class to read temp files
class pistream : public std::ifstream
{
public:
pistream(std::string fname, size_t npart)
: std::ifstream(fname.c_str(), std::ios::binary)
{
size_t blk;
if (!this->good())
{
LOGERR("Could not open buffer file in ART output plug-in");
throw std::runtime_error("Could not open buffer file in ART output plug-in");
}
this->read((char *)&blk, sizeof(size_t));
if (blk != (size_t)(npart * sizeof(T_store)))
{
LOGERR("Internal consistency error in ART output plug-in");
LOGERR("Expected %d bytes in temp file but found %d", npart * (unsigned)sizeof(T_store), blk);
throw std::runtime_error("Internal consistency error in ART output plug-in");
}
}
pistream()
{
}
void open(std::string fname, size_t npart)
{
std::ifstream::open(fname.c_str(), std::ios::binary);
size_t blk;
if (!this->good())
{
LOGERR("Could not open buffer file \'%s\' in ART output plug-in", fname.c_str());
throw std::runtime_error("Could not open buffer file in ART output plug-in");
}
this->read((char *)&blk, sizeof(size_t));
if (blk != (size_t)(npart * sizeof(T_store)))
{
LOGERR("Internal consistency error in ART output plug-in");
LOGERR("Expected %d bytes in temp file but found %d", npart * (unsigned)sizeof(T_store), blk);
throw std::runtime_error("Internal consistency error in ART output plug-in");
}
}
};
// non-public member functions
void write_header_file(void) //PMcrd.DAT
{
std::string fout;
if (do_baryons_)
fout = "/PMcrdIC.DAT";
else
fout = "/PMcrd.DAT";
std::string partfname = fname_ + fout;
std::ofstream ofs(partfname.c_str(), std::ios::trunc);
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
header this_header(header_);
//The header block size should be 529 in a dm-only run and 533 in a baryon run,
//but struct alignment breaks a single write, so it must be written field by field:
int blksize = hsize_;
if (swap_endianness_)
{
LOGINFO("ART : swap_endianness option enabled");
blksize = bytereorder(blksize);
this_header.aexpN = bytereorder(this_header.aexpN);
this_header.aexp0 = bytereorder(this_header.aexp0);
this_header.amplt = bytereorder(this_header.amplt);
this_header.astep = bytereorder(this_header.astep);
this_header.istep = bytereorder(this_header.istep);
this_header.partw = bytereorder(this_header.partw);
this_header.TINTG = bytereorder(this_header.TINTG);
this_header.EKIN = bytereorder(this_header.EKIN);
this_header.EKIN1 = bytereorder(this_header.EKIN1);
this_header.EKIN2 = bytereorder(this_header.EKIN2);
this_header.AU0 = bytereorder(this_header.AU0);
this_header.AEU0 = bytereorder(this_header.AEU0);
this_header.NROWC = bytereorder(this_header.NROWC);
this_header.NGRIDC = bytereorder(this_header.NGRIDC);
this_header.nspecies = bytereorder(this_header.nspecies);
this_header.Nseed = bytereorder(this_header.Nseed);
this_header.Om0 = bytereorder(this_header.Om0);
this_header.Oml0 = bytereorder(this_header.Oml0);
this_header.hubble = bytereorder(this_header.hubble);
this_header.Wp5 = bytereorder(this_header.Wp5);
this_header.Ocurv = bytereorder(this_header.Ocurv);
this_header.Omb0 = bytereorder(this_header.Omb0);
for (int i = 0; i < 10; ++i)
{
this_header.wpart[i] = bytereorder(this_header.wpart[i]);
this_header.lpart[i] = bytereorder(this_header.lpart[i]);
}
for (int i = 0; i < 80; ++i)
{
this_header.extras[i] = bytereorder(this_header.extras[i]);
}
}
ofs.write((char *)&blksize, sizeof(int));
//ofs.write( (char *)&this_header,sizeof(header)); //Not working because struct aligment, so:
ofs.write((char *)&this_header.head, sizeof(this_header.head));
ofs.write((char *)&this_header.aexpN, sizeof(this_header.aexpN));
ofs.write((char *)&this_header.aexp0, sizeof(this_header.aexp0));
ofs.write((char *)&this_header.amplt, sizeof(this_header.amplt));
ofs.write((char *)&this_header.astep, sizeof(this_header.astep));
ofs.write((char *)&this_header.istep, sizeof(this_header.istep));
ofs.write((char *)&this_header.partw, sizeof(this_header.partw));
ofs.write((char *)&this_header.TINTG, sizeof(this_header.TINTG));
ofs.write((char *)&this_header.EKIN, sizeof(this_header.EKIN));
ofs.write((char *)&this_header.EKIN1, sizeof(this_header.EKIN1));
ofs.write((char *)&this_header.EKIN2, sizeof(this_header.EKIN2));
ofs.write((char *)&this_header.AEU0, sizeof(this_header.AEU0));
ofs.write((char *)&this_header.AEU0, sizeof(this_header.AEU0));
ofs.write((char *)&this_header.NROWC, sizeof(this_header.NROWC));
ofs.write((char *)&this_header.NGRIDC, sizeof(this_header.NGRIDC));
ofs.write((char *)&this_header.nspecies, sizeof(this_header.nspecies));
ofs.write((char *)&this_header.Nseed, sizeof(this_header.Nseed));
ofs.write((char *)&this_header.Om0, sizeof(this_header.Om0));
ofs.write((char *)&this_header.Oml0, sizeof(this_header.Oml0));
ofs.write((char *)&this_header.hubble, sizeof(this_header.hubble));
ofs.write((char *)&this_header.Wp5, sizeof(this_header.Wp5));
ofs.write((char *)&this_header.Ocurv, sizeof(this_header.Ocurv));
ofs.write((char *)&this_header.wpart, sizeof(this_header.wpart));
ofs.write((char *)&this_header.lpart, sizeof(this_header.lpart));
ofs.write((char *)&this_header.extras, sizeof(this_header.extras));
ofs.write((char *)&blksize, sizeof(int));
ofs.close();
LOGINFO("ART : done writing header file.");
}
void write_pt_file(void) //pt.dat
{
std::string partfname = fname_ + "/pt.dat";
std::ofstream ofs(partfname.c_str(), std::ios::trunc);
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
ptf this_ptf(ptf_);
int blksize = sizeof(ptf); //4
if (swap_endianness_)
{
blksize = bytereorder(blksize);
this_ptf = bytereorder(this_ptf);
}
ofs.write((char *)&blksize, sizeof(int));
ofs.write((char *)&this_ptf, sizeof(ptf));
ofs.write((char *)&blksize, sizeof(int));
ofs.close();
LOGINFO("ART : done writing pt file.");
}
void adjust_buf_endianness(T_store *buf)
{
if (swap_endianness_)
{
for (size_t i = 0; i < block_buf_size_; ++i)
buf[i] = bytereorder<T_store>(buf[i]);
}
}
/*
The direct format writes the particle data in pages. Each page of particles is read into a common block,
which has the structure: X(Npage),Y(Npage),Z(Npage),Vx(Npage),Vy(Npage),Vz(Npage).
There are NO Fortran size blocks before or after these blocks!!
The number of particles in each page (Npage) is Npage = Nrow**2; Npages = (N_particles-1)/NPAGE + 1,
so the last page can be tricky (zooms): N_in_last = N_particles - NPAGE*(Npages-1).
But keep in mind that ART expects all pages to be written in full regardless of the actual number of particles
you care about.
*/
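// worked example (assumed numbers): NROW = 256 gives NPAGE = 256^2 = 65536
// particles per page; for N_particles = 1000000, Npages = (1000000-1)/65536 + 1
// = 16 and N_in_last = 1000000 - 65536*15 = 16960, with the remainder of the
// last page zero-padded (see assemble_DM_file below)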
void assemble_DM_file(void) //PMcrs0.DAT
{
// file name
std::string fout;
if (do_baryons_)
fout = "/PMcrs0IC.DAT";
else
fout = "/PMcrs0.DAT";
std::string partfname = fname_ + fout;
std::ofstream ofs(partfname.c_str(), std::ios::trunc);
// generate all temp file names
char fnx[256], fny[256], fnz[256], fnvx[256], fnvy[256], fnvz[256];
sprintf(fnx, "___ic_temp_%05d.bin", 100 * id_dm_pos + 0);
sprintf(fny, "___ic_temp_%05d.bin", 100 * id_dm_pos + 1);
sprintf(fnz, "___ic_temp_%05d.bin", 100 * id_dm_pos + 2);
sprintf(fnvx, "___ic_temp_%05d.bin", 100 * id_dm_vel + 0);
sprintf(fnvy, "___ic_temp_%05d.bin", 100 * id_dm_vel + 1);
sprintf(fnvz, "___ic_temp_%05d.bin", 100 * id_dm_vel + 2);
// create buffers for temporary data
T_store *tmp1, *tmp2, *tmp3, *tmp4, *tmp5, *tmp6;
tmp1 = new T_store[block_buf_size_];
tmp2 = new T_store[block_buf_size_];
tmp3 = new T_store[block_buf_size_];
tmp4 = new T_store[block_buf_size_];
tmp5 = new T_store[block_buf_size_];
tmp6 = new T_store[block_buf_size_];
// read in the data from the temporary files in slabs and write it to the output file
size_t npleft, n2read;
size_t npcdm = npcdm_;
LOGINFO("writing DM data to ART format file");
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
pistream ifs_x, ifs_y, ifs_z, ifs_vx, ifs_vy, ifs_vz;
ifs_x.open(fnx, npcdm);
ifs_y.open(fny, npcdm);
ifs_z.open(fnz, npcdm);
ifs_vx.open(fnvx, npcdm);
ifs_vy.open(fnvy, npcdm);
ifs_vz.open(fnvz, npcdm);
npleft = npcdm;
n2read = std::min(block_buf_size_, npleft);
while (n2read > 0)
{
// To make sure the last page in zooms has 0s in non-relevant values.
// NOT MANDATORY. Can be commented out if it makes things slow,
// but I do not like the idea of writing data to the file
// that could be interpreted as real.
if (n2read < block_buf_size_)
{
for (int i = 0; i < int(block_buf_size_); i++)
{
tmp1[i] = 0.0;
tmp2[i] = 0.0;
tmp3[i] = 0.0;
tmp4[i] = 0.0;
tmp5[i] = 0.0;
tmp6[i] = 0.0;
}
}
ifs_x.read(reinterpret_cast<char *>(&tmp1[0]), n2read * sizeof(T_store));
ifs_y.read(reinterpret_cast<char *>(&tmp2[0]), n2read * sizeof(T_store));
ifs_z.read(reinterpret_cast<char *>(&tmp3[0]), n2read * sizeof(T_store));
ifs_vx.read(reinterpret_cast<char *>(&tmp4[0]), n2read * sizeof(T_store));
ifs_vy.read(reinterpret_cast<char *>(&tmp5[0]), n2read * sizeof(T_store));
ifs_vz.read(reinterpret_cast<char *>(&tmp6[0]), n2read * sizeof(T_store));
adjust_buf_endianness(tmp1);
adjust_buf_endianness(tmp2);
adjust_buf_endianness(tmp3);
adjust_buf_endianness(tmp4);
adjust_buf_endianness(tmp5);
adjust_buf_endianness(tmp6);
ofs.write(reinterpret_cast<char *>(&tmp1[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp2[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp3[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp4[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp5[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp6[0]), block_buf_size_ * sizeof(T_store));
npleft -= n2read;
n2read = std::min(block_buf_size_, npleft);
}
ifs_x.close();
ifs_y.close();
ifs_z.close();
ifs_vx.close();
ifs_vy.close();
ifs_vz.close();
ofs.close();
// clean up temp files
unlink(fnx);
unlink(fny);
unlink(fnz);
unlink(fnvx);
unlink(fnvy);
unlink(fnvz);
delete[] tmp1;
delete[] tmp2;
delete[] tmp3;
delete[] tmp4;
delete[] tmp5;
delete[] tmp6;
LOGINFO("ART : done writing DM file.");
}
/*
ART users currently create the baryon grid structure from the dark matter data file.
Therefore they have decided that the best way to implement baryons for ART in MUSIC is
to create a file with the same dm format but containing the baryon displacements and velocities.
From this file they will create the actual grid using their tools.
So here we just have to re-create the dark matter file format, but using the baryon data.
*/
void assemble_gas_file(void) //PMcrs0_GAS.DAT
{
// file name
std::string partfname = fname_ + "/PMcrs0_GAS.DAT";
std::ofstream ofs(partfname.c_str(), std::ios::trunc);
// generate all temp file names
char fnx[256], fny[256], fnz[256], fnvx[256], fnvy[256], fnvz[256];
sprintf(fnx, "___ic_temp_%05d.bin", 100 * id_gas_pos + 0);
sprintf(fny, "___ic_temp_%05d.bin", 100 * id_gas_pos + 1);
sprintf(fnz, "___ic_temp_%05d.bin", 100 * id_gas_pos + 2);
sprintf(fnvx, "___ic_temp_%05d.bin", 100 * id_gas_vel + 0);
sprintf(fnvy, "___ic_temp_%05d.bin", 100 * id_gas_vel + 1);
sprintf(fnvz, "___ic_temp_%05d.bin", 100 * id_gas_vel + 2);
// create buffers for temporary data
T_store *tmp1, *tmp2, *tmp3, *tmp4, *tmp5, *tmp6;
tmp1 = new T_store[block_buf_size_];
tmp2 = new T_store[block_buf_size_];
tmp3 = new T_store[block_buf_size_];
tmp4 = new T_store[block_buf_size_];
tmp5 = new T_store[block_buf_size_];
tmp6 = new T_store[block_buf_size_];
// read in the data from the temporary files in slabs and write it to the output file
size_t npleft, n2read;
size_t npcgas = npcdm_; // # of gas elements must equal # of dm elements
LOGINFO("writing gas data to ART format file");
//ofs.open(fname_.c_str(), std::ios::binary|std::ios::trunc );
pistream ifs_x, ifs_y, ifs_z, ifs_vx, ifs_vy, ifs_vz;
ifs_x.open(fnx, npcgas);
ifs_y.open(fny, npcgas);
ifs_z.open(fnz, npcgas);
ifs_vx.open(fnvx, npcgas);
ifs_vy.open(fnvy, npcgas);
ifs_vz.open(fnvz, npcgas);
npleft = npcgas;
n2read = std::min(block_buf_size_, npleft);
while (n2read > 0)
{
// To make sure the last page in zooms has 0s in the non-relevant values.
// NOT MANDATORY. Can be commented out if it makes things slow,
// but I do not like the idea of writing data to the file
// that could be interpreted as real.
if (n2read < block_buf_size_)
{
for (int i = 0; i < int(block_buf_size_); i++)
{
tmp1[i] = 0.0;
tmp2[i] = 0.0;
tmp3[i] = 0.0;
tmp4[i] = 0.0;
tmp5[i] = 0.0;
tmp6[i] = 0.0;
}
}
ifs_x.read(reinterpret_cast<char *>(&tmp1[0]), n2read * sizeof(T_store));
ifs_y.read(reinterpret_cast<char *>(&tmp2[0]), n2read * sizeof(T_store));
ifs_z.read(reinterpret_cast<char *>(&tmp3[0]), n2read * sizeof(T_store));
ifs_vx.read(reinterpret_cast<char *>(&tmp4[0]), n2read * sizeof(T_store));
ifs_vy.read(reinterpret_cast<char *>(&tmp5[0]), n2read * sizeof(T_store));
ifs_vz.read(reinterpret_cast<char *>(&tmp6[0]), n2read * sizeof(T_store));
adjust_buf_endianness(tmp1);
adjust_buf_endianness(tmp2);
adjust_buf_endianness(tmp3);
adjust_buf_endianness(tmp4);
adjust_buf_endianness(tmp5);
adjust_buf_endianness(tmp6);
ofs.write(reinterpret_cast<char *>(&tmp1[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp2[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp3[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp4[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp5[0]), block_buf_size_ * sizeof(T_store));
ofs.write(reinterpret_cast<char *>(&tmp6[0]), block_buf_size_ * sizeof(T_store));
npleft -= n2read;
n2read = std::min(block_buf_size_, npleft);
}
ifs_x.close();
ifs_y.close();
ifs_z.close();
ifs_vx.close();
ifs_vy.close();
ifs_vz.close();
ofs.close();
// clean up temp files
unlink(fnx);
unlink(fny);
unlink(fnz);
unlink(fnvx);
unlink(fnvy);
unlink(fnvz);
delete[] tmp1;
delete[] tmp2;
delete[] tmp3;
delete[] tmp4;
delete[] tmp5;
delete[] tmp6;
LOGINFO("ART : done writing gas file.");
// Temperature
const double Tcmb0 = 2.726;
const double h2 = header_.hubble * header_.hubble;
const double adec = 1.0 / (160. * pow(omegab_ * h2 / 0.022, 2.0 / 5.0));
const double Tini = astart_ < adec ? Tcmb0 / astart_ : Tcmb0 / astart_ / astart_ * adec;
const double mu = (Tini > 1.e4) ? 4.0 / (8. - 5. * YHe_) : 4.0 / (1. + 3. * (1. - YHe_));
LOGINFO("ART : set initial gas temperature to %.3f K (%.3f K/mu)", Tini, Tini / mu);
}
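// Minimal sketch mirroring the temperature formula above (an editorial
// addition, for illustration only): the gas stays locked to T_CMB = Tcmb0/a
// until thermal decoupling at a_dec, after which it cools adiabatically as a^-2.
static double initial_gas_temperature(double astart, double omegab, double hubble)
{
const double Tcmb0 = 2.726;
const double adec = 1.0 / (160.0 * pow(omegab * hubble * hubble / 0.022, 2.0 / 5.0));
return (astart < adec) ? Tcmb0 / astart : Tcmb0 / (astart * astart) * adec;
}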
public:
explicit art_output_plugin(config_file &cf)
: output_plugin(cf)
{
// try to create the output directory; failure (e.g. if it already exists) is ignored
mkdir(fname_.c_str(), 0777);
do_baryons_ = cf.getValueSafe<bool>("setup", "baryons", false);
// We need to say that we want to do SPH for baryons
// because if not MUSIC does not calculate/write gas positions
cf.insertValue("setup", "do_SPH", "yes");
// header size (alignment problem)
hsize_ = 529; // dm & hydro run
omegab_ = cf.getValueSafe<double>("cosmology", "Omega_b", 0.0);
omegam_ = cf.getValue<double>("cosmology", "Omega_m");
zstart_ = cf.getValue<double>("setup", "zstart");
astart_ = 1.0 / (1.0 + zstart_);
swap_endianness_ = cf.getValueSafe<bool>("output", "art_swap_endian", true);
int levelmin = cf.getValue<unsigned>("setup", "levelmin");
int levelmax = cf.getValue<unsigned>("setup", "levelmax");
block_buf_size_ = (size_t)(pow(pow(2, levelmax), 2)); //Npage=nrow^2; Number of particles in each page
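// e.g. levelmax = 10 gives NROW = 1024 and Npage = 1024^2 = 1048576
// particles per page, i.e. 4 MiB per coordinate buffer for a 4-byte T_store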
YHe_ = cf.getValueSafe<double>("cosmology", "YHe", 0.248);
gamma_ = cf.getValueSafe<double>("cosmology", "gamma", 5.0 / 3.0);
// Set header
std::string thead;
thead = cf.getValueSafe<std::string>("output", "header", "ICs generated using MUSIC");
strcpy(header_.head, thead.c_str()); // text for the header; any easy way to also add the version?
// pad the remainder of the 45-character header text with blanks
for (size_t i = thead.size(); i < 45; i++)
header_.head[i] = ' ';
header_.aexpN = astart_;
header_.aexp0 = header_.aexpN;
header_.amplt = 0.0; // Amplitude of density fluctuations
header_.astep = cf.getValue<double>("output", "astep"); // Seems that this must also be in the config file
ptf_.astep = header_.astep; // to write pt file
header_.istep = 0; // step (=0 in IC)
header_.partw = 0.0; // mass of highest res particle. SEE BELOW
header_.TINTG = 0; //=0 in IC
header_.EKIN = 0.0; //SUM 0.5 * m_i*(v_i**2) in code units. Seems that 0 is ok for ICs
header_.EKIN1 = 0; //=0 in IC
header_.EKIN2 = 0; //=0 in IC
header_.AU0 = 0; //=0 in IC
header_.AEU0 = 0; //=0 in IC
header_.NROWC = (int)pow(2, levelmax); // Number of particles in 1 dim (number of particles per page = NROW**2)
header_.NGRIDC = (int)pow(2, levelmin); // Number of cells in 1 dim
header_.nspecies = levelmax - levelmin + 1; // number of dm species: one per refinement level
//header_.partw SEE BELOW
header_.Nseed = 0; // random number used ( 0 for MUSIC? or set the random number used in the lowest level?)
header_.Om0 = cf.getValue<double>("cosmology", "Omega_m"); //Omega_m
header_.Oml0 = cf.getValue<double>("cosmology", "Omega_L"); //Omega_L
header_.hubble = cf.getValue<double>("cosmology", "H0") / 100; //hubble constant h=H/100
header_.Wp5 = 0.0; // 0.0
header_.Ocurv = 1.0 - header_.Oml0 - header_.Om0; //
header_.Omb0 = cf.getValue<double>("cosmology", "Omega_b"); // this parameter only appears in the header in hydro runs
for (int i = 0; i < 10; i++)
{
header_.wpart[i] = 0.0; // extras[0-9] part. masses from high res to low res (normalized to low res particle)
header_.lpart[i] = 0; // extras[10-19] # particles from high res to low res cumulative!!!
}
for (int i = 0; i < header_.nspecies; i++)
{
header_.wpart[i] = 1.0 / pow(8.0, (header_.nspecies - i - 1)); // from high res to low res; should the 8 be an internal variable?
}
header_.partw = header_.wpart[0]; // mass of highest res particle.
for (int i = 0; i < 80; i++)
{
header_.extras[i] = 0.0; //extras[20-99]
}
header_.extras[13] = cf.getValueSafe<double>("cosmology", "Omega_b", 0.0);
header_.extras[14] = cf.getValue<double>("cosmology", "sigma_8");
header_.extras[15] = cf.getValue<double>("cosmology", "nspec"); //Slope of the Power spectrum
header_.extras[79] = cf.getValue<double>("setup", "boxlength");
LOGINFO("ART : done header info.");
}
void write_dm_mass(const grid_hierarchy &gh)
{
//... write data for dark matter mass......
// This is not needed for ART
}
void write_dm_position(int coord, const grid_hierarchy &gh)
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
//... store all the meta data about the grid hierarchy in header variables
npcdm_ = nptot;
for (int i = 0; i < header_.nspecies; i++)
{
header_.lpart[i] = gh.count_leaf_cells(gh.levelmax() - i, gh.levelmax()); //cumulative!!
}
// Now, let us write the dm particle info
std::vector<T_store> temp_data;
temp_data.reserve(block_buf_size_);
//coordinates are in the range 1 to (NGRID+1),
// so the scale factor is scaleX = Box/NGRID -> to Mpc/h (Box in Mpc/h)
double xfac = (double)header_.NGRIDC;
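// worked example: on a levelmin = 8 top grid (NGRIDC = 256), a particle at
// x = 0.25 in box units is written as 0.25 * 256 + 1 = 65.0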
char temp_fname[256];
sprintf(temp_fname, "___ic_temp_%05d.bin", 100 * id_dm_pos + coord);
std::ofstream ofs_temp(temp_fname, std::ios::binary | std::ios::trunc);
size_t blksize = sizeof(T_store) * nptot;
ofs_temp.write((char *)&blksize, sizeof(size_t));
size_t nwritten = 0;
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = fmod((xx[coord] + (*gh.get_grid(ilevel))(i, j, k)) + 1.0, 1.0);
xx[coord] = (xx[coord] * xfac) + 1.0;
//xx[coord] = ((xx[coord]+(*gh.get_grid(ilevel))(i,j,k)));
if (temp_data.size() < block_buf_size_)
temp_data.push_back(xx[coord]);
else
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * block_buf_size_);
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back(xx[coord]);
}
}
if (temp_data.size() > 0)
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * temp_data.size());
nwritten += temp_data.size();
}
if (nwritten != nptot)
throw std::runtime_error("Internal consistency error while writing temporary file for positions");
//... dump to temporary file
ofs_temp.write((char *)&blksize, sizeof(size_t));
if (ofs_temp.bad())
throw std::runtime_error("I/O error while writing temporary file for positions");
ofs_temp.close();
}
void write_dm_velocity(int coord, const grid_hierarchy &gh)
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve(block_buf_size_);
//In ART velocities are P = a_expansion*V_pec/(x_0H_0)
// where x_0 = comoving cell_size=Box/Ngrid;H_0 = Hubble at z=0
// so scale factor to physical km/s is convV= BoxV/AEXPN/NGRID
// (BoxV is Box*100; aexpn=current expansion factor)
//internal units of MUSIC: To km/s just multiply by Lbox
double vfac = (header_.aexpN * header_.NGRIDC) / (100.0);
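// consistency check: v[km/s] = v_music * Box, and P = v[km/s] / convV with
// convV = BoxV/(AEXPN*NGRID) = 100*Box/(aexpn*NGRID), so
// P = v_music * aexpn * NGRID / 100 -- exactly the vfac applied below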
char temp_fname[256];
sprintf(temp_fname, "___ic_temp_%05d.bin", 100 * id_dm_vel + coord);
std::ofstream ofs_temp(temp_fname, std::ios::binary | std::ios::trunc);
size_t blksize = sizeof(T_store) * nptot;
ofs_temp.write((char *)&blksize, sizeof(size_t));
size_t nwritten = 0;
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
if (temp_data.size() < block_buf_size_)
temp_data.push_back((*gh.get_grid(ilevel))(i, j, k) * vfac);
else
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * block_buf_size_);
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back((*gh.get_grid(ilevel))(i, j, k) * vfac);
}
}
if (temp_data.size() > 0)
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * temp_data.size());
nwritten += temp_data.size();
}
if (nwritten != nptot)
throw std::runtime_error("Internal consistency error while writing temporary file for DM velocities");
//... dump to temporary file
ofs_temp.write((char *)&blksize, sizeof(size_t));
if (ofs_temp.bad())
throw std::runtime_error("I/O error while writing temporary file for DM velocities");
ofs_temp.close();
}
void write_dm_density(const grid_hierarchy &gh)
{
//... we don't care about DM density for art
}
void write_dm_potential(const grid_hierarchy &gh)
{
}
void write_gas_position(int coord, const grid_hierarchy &gh)
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve(block_buf_size_);
//ART coordinates are in the range 1 to (NGRID+1)
double xfac = (double)header_.NGRIDC;
char temp_fname[256];
sprintf(temp_fname, "___ic_temp_%05d.bin", 100 * id_gas_pos + coord);
std::ofstream ofs_temp(temp_fname, std::ios::binary | std::ios::trunc);
size_t blksize = sizeof(T_store) * nptot;
ofs_temp.write((char *)&blksize, sizeof(size_t));
size_t nwritten = 0;
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
double xx[3];
gh.cell_pos(ilevel, i, j, k, xx);
xx[coord] = fmod((xx[coord] + (*gh.get_grid(ilevel))(i, j, k)) + 1.0, 1.0);
xx[coord] = (xx[coord] * xfac) + 1.0;
if (temp_data.size() < block_buf_size_)
temp_data.push_back(xx[coord]);
else
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * block_buf_size_);
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back(xx[coord]);
}
}
if (temp_data.size() > 0)
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * temp_data.size());
nwritten += temp_data.size();
}
if (nwritten != nptot)
throw std::runtime_error("Internal consistency error while writing temporary file for gas positions");
//... dump to temporary file
ofs_temp.write((char *)&blksize, sizeof(size_t));
if (ofs_temp.bad())
throw std::runtime_error("I/O error while writing temporary file for gas positions");
ofs_temp.close();
}
void write_gas_velocity(int coord, const grid_hierarchy &gh)
{
size_t nptot = gh.count_leaf_cells(gh.levelmin(), gh.levelmax());
std::vector<T_store> temp_data;
temp_data.reserve(block_buf_size_);
//In ART velocities are P = a_expansion*V_pec/(x_0H_0)
// where x_0 = comoving cell_size=Box/Ngrid;H_0 = Hubble at z=0
// so scale factor to physical km/s is convV= BoxV/AEXPN/NGRID
// (BoxV is Box*100; aexpn=current expansion factor)
//internal units of MUSIC: To km/s just multiply by Lbox
double vfac = (header_.aexpN * header_.NGRIDC) / (100.0);
char temp_fname[256];
sprintf(temp_fname, "___ic_temp_%05d.bin", 100 * id_gas_vel + coord);
std::ofstream ofs_temp(temp_fname, std::ios::binary | std::ios::trunc);
size_t blksize = sizeof(T_store) * nptot;
ofs_temp.write((char *)&blksize, sizeof(size_t));
size_t nwritten = 0;
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (gh.is_in_mask(ilevel, i, j, k) && !gh.is_refined(ilevel, i, j, k))
{
if (temp_data.size() < block_buf_size_)
temp_data.push_back((*gh.get_grid(ilevel))(i, j, k) * vfac);
else
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * block_buf_size_);
nwritten += block_buf_size_;
temp_data.clear();
temp_data.push_back((*gh.get_grid(ilevel))(i, j, k) * vfac);
}
}
if (temp_data.size() > 0)
{
ofs_temp.write((char *)&temp_data[0], sizeof(T_store) * temp_data.size());
nwritten += temp_data.size();
}
if (nwritten != nptot)
throw std::runtime_error("Internal consistency error while writing temporary file for gas velocities");
//... dump to temporary file
ofs_temp.write((char *)&blksize, sizeof(size_t));
if (ofs_temp.bad())
throw std::runtime_error("I/O error while writing temporary file for gas velocities");
ofs_temp.close();
}
void write_gas_density(const grid_hierarchy &gh)
{
}
void write_gas_potential(const grid_hierarchy &gh)
{
}
void finalize(void)
{
this->write_header_file();
this->write_pt_file();
this->assemble_DM_file();
if (do_baryons_)
{
this->assemble_gas_file();
}
}
};
namespace
{
output_plugin_creator_concrete<art_output_plugin<float>> creator("art");
}

617
src/plugins/output_enzo.cc Normal file

@ -0,0 +1,617 @@
/*
output_enzo.cc - This file is part of MUSIC -
a code to generate multi-scale initial conditions
for cosmological simulations
Copyright (C) 2010 Oliver Hahn
*/
#ifdef HAVE_HDF5
#include <sys/types.h>
#include <sys/stat.h>
#include "output.hh"
#include "HDF_IO.hh"
#define MAX_SLAB_SIZE 268435456 // = 256 MBytes
class enzo_output_plugin : public output_plugin
{
protected:
struct patch_header
{
int component_rank;
size_t component_size;
std::vector<int> dimensions;
int rank;
std::vector<int> top_grid_dims;
std::vector<int> top_grid_end;
std::vector<int> top_grid_start;
};
struct sim_header
{
std::vector<int> dimensions;
std::vector<int> offset;
float a_start;
float dx;
float h0;
float omega_b;
float omega_m;
float omega_v;
float vfact;
};
sim_header the_sim_header;
void write_sim_header(std::string fname, const sim_header &h)
{
HDFWriteGroupAttribute(fname, "/", "Dimensions", h.dimensions);
HDFWriteGroupAttribute(fname, "/", "Offset", h.offset);
HDFWriteGroupAttribute(fname, "/", "a_start", h.a_start);
HDFWriteGroupAttribute(fname, "/", "dx", h.dx);
HDFWriteGroupAttribute(fname, "/", "h0", h.h0);
HDFWriteGroupAttribute(fname, "/", "omega_b", h.omega_b);
HDFWriteGroupAttribute(fname, "/", "omega_m", h.omega_m);
HDFWriteGroupAttribute(fname, "/", "omega_v", h.omega_v);
HDFWriteGroupAttribute(fname, "/", "vfact", h.vfact);
}
void write_patch_header(std::string fname, std::string dsetname, const patch_header &h)
{
HDFWriteDatasetAttribute(fname, dsetname, "Component_Rank", h.component_rank);
HDFWriteDatasetAttribute(fname, dsetname, "Component_Size", h.component_size);
HDFWriteDatasetAttribute(fname, dsetname, "Dimensions", h.dimensions);
HDFWriteDatasetAttribute(fname, dsetname, "Rank", h.rank);
HDFWriteDatasetAttribute(fname, dsetname, "TopGridDims", h.top_grid_dims);
HDFWriteDatasetAttribute(fname, dsetname, "TopGridEnd", h.top_grid_end);
HDFWriteDatasetAttribute(fname, dsetname, "TopGridStart", h.top_grid_start);
}
void dump_mask(const grid_hierarchy &gh)
{
char enzoname[256], filename[512];
std::string fieldname("RefinementMask");
for (unsigned ilevel = levelmin_; ilevel <= levelmax_; ++ilevel)
{
std::vector<int> ng, ng_fortran;
ng.push_back(gh.get_grid(ilevel)->size(0));
ng.push_back(gh.get_grid(ilevel)->size(1));
ng.push_back(gh.get_grid(ilevel)->size(2));
ng_fortran.push_back(gh.get_grid(ilevel)->size(2));
ng_fortran.push_back(gh.get_grid(ilevel)->size(1));
ng_fortran.push_back(gh.get_grid(ilevel)->size(0));
//... need to copy data because we need to get rid of the ghost zones
//... write in slabs if data is more than MAX_SLAB_SIZE (default 256 MB)
//... full 3D block size
size_t all_data_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
//... write in slabs of MAX_SLAB_SIZE unless all_data_size is anyway smaller
size_t max_slab_size = std::min((size_t)MAX_SLAB_SIZE / sizeof(double), all_data_size);
//... but one slab has to be at least the size of one slice
max_slab_size = std::max(((size_t)ng[0] * (size_t)ng[1]), max_slab_size);
//... number of slices in one slab
size_t slices_in_slab = (size_t)((double)max_slab_size / ((size_t)ng[0] * (size_t)ng[1]));
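// e.g. for ng = 512^3: max_slab_size = min(MAX_SLAB_SIZE/sizeof(double), 512^3)
// = 2^25 values, so slices_in_slab = 2^25/(512*512) = 128 slices per slab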
size_t nsz[3] = {size_t(ng[2]), size_t(ng[1]), size_t(ng[0])};
if (levelmin_ != levelmax_)
sprintf(enzoname, "%s.%d", fieldname.c_str(), ilevel - levelmin_);
else
sprintf(enzoname, "%s", fieldname.c_str());
sprintf(filename, "%s/%s", fname_.c_str(), enzoname);
HDFCreateFile(filename);
write_sim_header(filename, the_sim_header);
//... create full array in file
HDFHyperslabWriter3Ds<int> *slab_writer = new HDFHyperslabWriter3Ds<int>(filename, enzoname, nsz);
//... create buffer
int *data_buf = new int[slices_in_slab * (size_t)ng[0] * (size_t)ng[1]];
//... write slice by slice
size_t slices_written = 0;
while (slices_written < (size_t)ng[2])
{
slices_in_slab = std::min((size_t)ng[2] - slices_written, slices_in_slab);
#pragma omp parallel for
for (int k = 0; k < (int)slices_in_slab; ++k)
for (int j = 0; j < ng[1]; ++j)
for (int i = 0; i < ng[0]; ++i)
{
int mask_val = -1;
if (gh.is_in_mask(ilevel, i, j, k + slices_written))
{
if (gh.is_refined(ilevel, i, j, k + slices_written))
mask_val = 1;
else
mask_val = 0;
}
data_buf[(size_t)(k * ng[1] + j) * (size_t)ng[0] + (size_t)i] = mask_val;
}
size_t count[3], offset[3];
count[0] = slices_in_slab;
count[1] = ng[1];
count[2] = ng[0];
offset[0] = slices_written;
offset[1] = 0;
offset[2] = 0;
slab_writer->write_slab(data_buf, count, offset);
slices_written += slices_in_slab;
}
delete[] data_buf;
delete slab_writer;
//... header data for the patch
patch_header ph;
ph.component_rank = 1;
ph.component_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
ph.dimensions = ng;
ph.rank = 3;
ph.top_grid_dims.assign(3, 1 << levelmin_);
//... offset_abs is in units of the current level cell size
double rfac = 1.0 / (1 << (ilevel - levelmin_));
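// e.g. one level above levelmin (ilevel - levelmin_ = 1): rfac = 0.5, so a
// subgrid offset of 64 fine cells maps to top-grid cell 32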
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 0) * rfac));
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 1) * rfac));
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 2) * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[0] + (int)(ng[0] * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[1] + (int)(ng[1] * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[2] + (int)(ng[2] * rfac));
write_patch_header(filename, enzoname, ph);
}
}
void dump_grid_data(std::string fieldname, const grid_hierarchy &gh, double factor = 1.0, double add = 0.0)
{
char enzoname[256], filename[512];
for (unsigned ilevel = levelmin_; ilevel <= levelmax_; ++ilevel)
{
std::vector<int> ng, ng_fortran;
ng.push_back(gh.get_grid(ilevel)->size(0));
ng.push_back(gh.get_grid(ilevel)->size(1));
ng.push_back(gh.get_grid(ilevel)->size(2));
ng_fortran.push_back(gh.get_grid(ilevel)->size(2));
ng_fortran.push_back(gh.get_grid(ilevel)->size(1));
ng_fortran.push_back(gh.get_grid(ilevel)->size(0));
//... need to copy data because we need to get rid of the ghost zones
//... write in slabs if data is more than MAX_SLAB_SIZE (default 256 MB)
//... full 3D block size
size_t all_data_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
//... write in slabs of MAX_SLAB_SIZE unless all_data_size is anyway smaller
size_t max_slab_size = std::min((size_t)MAX_SLAB_SIZE / sizeof(double), all_data_size);
//... but one slab has to be at least the size of one slice
max_slab_size = std::max(((size_t)ng[0] * (size_t)ng[1]), max_slab_size);
//... number of slices in one slab
size_t slices_in_slab = (size_t)((double)max_slab_size / ((size_t)ng[0] * (size_t)ng[1]));
size_t nsz[3] = {size_t(ng[2]), size_t(ng[1]), size_t(ng[0])};
if (levelmin_ != levelmax_)
sprintf(enzoname, "%s.%d", fieldname.c_str(), ilevel - levelmin_);
else
sprintf(enzoname, "%s", fieldname.c_str());
sprintf(filename, "%s/%s", fname_.c_str(), enzoname);
HDFCreateFile(filename);
write_sim_header(filename, the_sim_header);
#ifdef SINGLE_PRECISION
//... create full array in file
HDFHyperslabWriter3Ds<float> *slab_writer = new HDFHyperslabWriter3Ds<float>(filename, enzoname, nsz);
//... create buffer
float *data_buf = new float[slices_in_slab * (size_t)ng[0] * (size_t)ng[1]];
#else
//... create full array in file
HDFHyperslabWriter3Ds<double> *slab_writer = new HDFHyperslabWriter3Ds<double>(filename, enzoname, nsz);
//... create buffer
double *data_buf = new double[slices_in_slab * (size_t)ng[0] * (size_t)ng[1]];
#endif
//... write slice by slice
size_t slices_written = 0;
while (slices_written < (size_t)ng[2])
{
slices_in_slab = std::min((size_t)ng[2] - slices_written, slices_in_slab);
#pragma omp parallel for
for (int k = 0; k < (int)slices_in_slab; ++k)
for (int j = 0; j < ng[1]; ++j)
for (int i = 0; i < ng[0]; ++i)
data_buf[(size_t)(k * ng[1] + j) * (size_t)ng[0] + (size_t)i] =
(add + (*gh.get_grid(ilevel))(i, j, k + slices_written)) * factor;
size_t count[3], offset[3];
count[0] = slices_in_slab;
count[1] = ng[1];
count[2] = ng[0];
offset[0] = slices_written;
offset[1] = 0;
offset[2] = 0;
slab_writer->write_slab(data_buf, count, offset);
slices_written += slices_in_slab;
}
//... free buffer
delete[] data_buf;
//... finalize writing and close dataset
delete slab_writer;
//... header data for the patch
patch_header ph;
ph.component_rank = 1;
ph.component_size = (size_t)ng[0] * (size_t)ng[1] * (size_t)ng[2];
ph.dimensions = ng;
ph.rank = 3;
ph.top_grid_dims.assign(3, 1 << levelmin_);
//... offset_abs is in units of the current level cell size
double rfac = 1.0 / (1 << (ilevel - levelmin_));
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 0) * rfac));
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 1) * rfac));
ph.top_grid_start.push_back((int)(gh.offset_abs(ilevel, 2) * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[0] + (int)(ng[0] * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[1] + (int)(ng[1] * rfac));
ph.top_grid_end.push_back(ph.top_grid_start[2] + (int)(ng[2] * rfac));
write_patch_header(filename, enzoname, ph);
}
}
public:
enzo_output_plugin(config_file &cf)
: output_plugin(cf)
{
if (mkdir(fname_.c_str(), 0777))
{
perror(fname_.c_str());
throw std::runtime_error("Error in enzo_output_plugin!");
}
bool bhave_hydro = cf_.getValue<bool>("setup", "baryons");
bool align_top = cf.getValueSafe<bool>("setup", "align_top", false);
if (!align_top)
LOGWARN("Old ENZO versions may require \'align_top=true\'!");
the_sim_header.dimensions.push_back(1 << levelmin_);
the_sim_header.dimensions.push_back(1 << levelmin_);
the_sim_header.dimensions.push_back(1 << levelmin_);
the_sim_header.offset.push_back(0);
the_sim_header.offset.push_back(0);
the_sim_header.offset.push_back(0);
the_sim_header.a_start = 1.0 / (1.0 + cf.getValue<double>("setup", "zstart"));
the_sim_header.dx = cf.getValue<double>("setup", "boxlength") / the_sim_header.dimensions[0] / (cf.getValue<double>("cosmology", "H0") * 0.01); // not sure?!?
the_sim_header.h0 = cf.getValue<double>("cosmology", "H0") * 0.01;
if (bhave_hydro)
the_sim_header.omega_b = cf.getValue<double>("cosmology", "Omega_b");
else
the_sim_header.omega_b = 0.0;
the_sim_header.omega_m = cf.getValue<double>("cosmology", "Omega_m");
the_sim_header.omega_v = cf.getValue<double>("cosmology", "Omega_L");
the_sim_header.vfact = cf.getValue<double>("cosmology", "vfact") * the_sim_header.h0; //.. need to multiply by h; ENZO wants this factor for non-h^-1 units
}
~enzo_output_plugin()
{
}
void write_dm_mass(const grid_hierarchy &gh)
{ /* do nothing, not needed */
}
void write_dm_density(const grid_hierarchy &gh)
{ /* write the parameter file data */
bool bhave_hydro = cf_.getValue<bool>("setup", "baryons");
double refine_region_fraction = cf_.getValueSafe<double>("output", "enzo_refine_region_fraction", 0.8);
char filename[256];
unsigned nbase = (unsigned)pow(2, levelmin_);
// write out the refinement masks
dump_mask(gh);
// write out a parameter file
sprintf(filename, "%s/parameter_file.txt", fname_.c_str());
std::ofstream ofs(filename, std::ios::trunc);
ofs
<< "# Relevant Section of Enzo Paramter File (NOT COMPLETE!) \n"
<< "ProblemType = 30 // cosmology simulation\n"
<< "TopGridRank = 3\n"
<< "TopGridDimensions = " << nbase << " " << nbase << " " << nbase << "\n"
<< "SelfGravity = 1 // gravity on\n"
<< "TopGridGravityBoundary = 0 // Periodic BC for gravity\n"
<< "LeftFaceBoundaryCondition = 3 3 3 // same for fluid\n"
<< "RightFaceBoundaryCondition = 3 3 3\n"
<< "RefineBy = 2\n"
<< "\n"
<< "#\n";
if (bhave_hydro)
ofs
<< "CosmologySimulationOmegaBaryonNow = " << the_sim_header.omega_b << "\n"
<< "CosmologySimulationOmegaCDMNow = " << the_sim_header.omega_m - the_sim_header.omega_b << "\n";
else
ofs
<< "CosmologySimulationOmegaBaryonNow = " << 0.0 << "\n"
<< "CosmologySimulationOmegaCDMNow = " << the_sim_header.omega_m << "\n";
if (bhave_hydro)
ofs
<< "CosmologySimulationDensityName = GridDensity\n"
<< "CosmologySimulationVelocity1Name = GridVelocities_x\n"
<< "CosmologySimulationVelocity2Name = GridVelocities_y\n"
<< "CosmologySimulationVelocity3Name = GridVelocities_z\n";
ofs
<< "CosmologySimulationCalculatePositions = 1\n"
<< "CosmologySimulationParticleVelocity1Name = ParticleVelocities_x\n"
<< "CosmologySimulationParticleVelocity2Name = ParticleVelocities_y\n"
<< "CosmologySimulationParticleVelocity3Name = ParticleVelocities_z\n"
<< "CosmologySimulationParticleDisplacement1Name = ParticleDisplacements_x\n"
<< "CosmologySimulationParticleDisplacement2Name = ParticleDisplacements_y\n"
<< "CosmologySimulationParticleDisplacement3Name = ParticleDisplacements_z\n"
<< "\n"
<< "#\n"
<< "# define cosmology parameters\n"
<< "#\n"
<< "ComovingCoordinates = 1 // Expansion ON\n"
<< "CosmologyOmegaMatterNow = " << the_sim_header.omega_m << "\n"
<< "CosmologyOmegaLambdaNow = " << the_sim_header.omega_v << "\n"
<< "CosmologyHubbleConstantNow = " << the_sim_header.h0 << " // in 100 km/s/Mpc\n"
<< "CosmologyComovingBoxSize = " << cf_.getValue<double>("setup", "boxlength") << " // in Mpc/h\n"
<< "CosmologyMaxExpansionRate = 0.015 // maximum allowed delta(a)/a\n"
<< "CosmologyInitialRedshift = " << cf_.getValue<double>("setup", "zstart") << " //\n"
<< "CosmologyFinalRedshift = 0 //\n"
<< "GravitationalConstant = 1 // this must be true for cosmology\n"
<< "#\n"
<< "#\n"
<< "ParallelRootGridIO = 1\n"
<< "ParallelParticleIO = 1\n"
<< "PartitionNestedGrids = 1\n"
<< "CosmologySimulationNumberOfInitialGrids = " << 1 + levelmax_ - levelmin_ << "\n";
int num_prec = 10;
if (levelmax_ > 15)
num_prec = 17;
//... only for additionally refined grids
for (unsigned ilevel = 0; ilevel < levelmax_ - levelmin_; ++ilevel)
{
double h = 1.0 / (1 << (levelmin_ + 1 + ilevel));
ofs
<< "CosmologySimulationGridDimension[" << 1 + ilevel << "] = "
<< std::setw(16) << gh.size(levelmin_ + ilevel + 1, 0) << " "
<< std::setw(16) << gh.size(levelmin_ + ilevel + 1, 1) << " "
<< std::setw(16) << gh.size(levelmin_ + ilevel + 1, 2) << "\n"
<< "CosmologySimulationGridLeftEdge[" << 1 + ilevel << "] = "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * gh.offset_abs(levelmin_ + ilevel + 1, 0) << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * gh.offset_abs(levelmin_ + ilevel + 1, 1) << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * gh.offset_abs(levelmin_ + ilevel + 1, 2) << "\n"
<< "CosmologySimulationGridRightEdge[" << 1 + ilevel << "] = "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * (gh.offset_abs(levelmin_ + ilevel + 1, 0) + gh.size(levelmin_ + ilevel + 1, 0)) << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * (gh.offset_abs(levelmin_ + ilevel + 1, 1) + gh.size(levelmin_ + ilevel + 1, 1)) << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * (gh.offset_abs(levelmin_ + ilevel + 1, 2) + gh.size(levelmin_ + ilevel + 1, 2)) << "\n"
<< "CosmologySimulationGridLevel[" << 1 + ilevel << "] = " << 1 + ilevel << "\n";
}
if (levelmin_ != levelmax_)
{
double h = 1.0 / (1 << levelmax_);
double cen[3], le[3], re[3];
for (int i = 0; i < 3; i++)
{
cen[i] = gh.offset_abs(levelmax_, i) + gh.size(levelmax_, i) / 2;
le[i] = cen[i] - refine_region_fraction * gh.size(levelmax_, i) / 2;
re[i] = le[i] + refine_region_fraction * gh.size(levelmax_, i);
}
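// e.g. refine_region_fraction = 0.8 (the default) keeps the central 80% of
// the finest subgrid, in each dimension, available for further refinement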
ofs
<< "#\n"
<< "# region allowed for further refinement\n"
<< "#\n"
// << "RefineRegionAutoAdjust = 1\n"
<< "RefineRegionLeftEdge = "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * le[0] << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * le[1] << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * le[2] << "\n"
<< "RefineRegionRightEdge = "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * re[0] << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * re[1] << " "
<< std::setw(num_prec + 6) << std::setprecision(num_prec) << h * re[2] << "\n";
}
// determine density maximum and minimum location
real_t rhomax = -1e30, rhomin = 1e30;
double loc_rhomax[3] = {0.0, 0.0, 0.0}, loc_rhomin[3] = {0.0, 0.0, 0.0};
int lvl_rhomax = 0, lvl_rhomin = 0;
real_t rhomax_lm = -1e30, rhomin_lm = 1e30;
double loc_rhomax_lm[3] = {0.0, 0.0, 0.0}, loc_rhomin_lm[3] = {0.0, 0.0, 0.0};
for (int ilevel = gh.levelmax(); ilevel >= (int)gh.levelmin(); --ilevel)
for (unsigned i = 0; i < gh.get_grid(ilevel)->size(0); ++i)
for (unsigned j = 0; j < gh.get_grid(ilevel)->size(1); ++j)
for (unsigned k = 0; k < gh.get_grid(ilevel)->size(2); ++k)
if (!gh.is_refined(ilevel, i, j, k))
{
real_t rho = (*gh.get_grid(ilevel))(i, j, k);
if (rho > rhomax)
{
rhomax = rho;
lvl_rhomax = ilevel;
gh.cell_pos(ilevel, i, j, k, loc_rhomax);
}
if (rho < rhomin)
{
rhomin = rho;
lvl_rhomin = ilevel;
gh.cell_pos(ilevel, i, j, k, loc_rhomin);
}
if (ilevel == (int)gh.levelmax())
{
if (rho > rhomax_lm)
{
rhomax_lm = rho;
gh.cell_pos(ilevel, i, j, k, loc_rhomax_lm);
}
if (rho < rhomin_lm)
{
rhomin_lm = rho;
gh.cell_pos(ilevel, i, j, k, loc_rhomin_lm);
}
}
}
double h = 1.0 / (1 << levelmin_);
double shift[3];
shift[0] = -(double)cf_.getValue<int>("setup", "shift_x") * h;
shift[1] = -(double)cf_.getValue<int>("setup", "shift_y") * h;
shift[2] = -(double)cf_.getValue<int>("setup", "shift_z") * h;
if (gh.levelmin() != gh.levelmax())
{
LOGINFO("Global density extrema: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f) (level=%d)", rhomin, loc_rhomin[0], loc_rhomin[1], loc_rhomin[2], lvl_rhomin);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomin[0] + shift[0], loc_rhomin[1] + shift[1], loc_rhomin[2] + shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f) (level=%d)", rhomax, loc_rhomax[0], loc_rhomax[1], loc_rhomax[2], lvl_rhomax);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomax[0] + shift[0], loc_rhomax[1] + shift[1], loc_rhomax[2] + shift[2]);
LOGINFO("Density extrema on finest level: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f)", rhomin_lm, loc_rhomin_lm[0], loc_rhomin_lm[1], loc_rhomin_lm[2]);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomin_lm[0] + shift[0], loc_rhomin_lm[1] + shift[1], loc_rhomin_lm[2] + shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f)", rhomax_lm, loc_rhomax_lm[0], loc_rhomax_lm[1], loc_rhomax_lm[2]);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomax_lm[0] + shift[0], loc_rhomax_lm[1] + shift[1], loc_rhomax_lm[2] + shift[2]);
}
else
{
LOGINFO("Global density extrema: ");
LOGINFO(" minimum: delta=%f at (%f,%f,%f)", rhomin, loc_rhomin[0], loc_rhomin[1], loc_rhomin[2]);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomin[0] + shift[0], loc_rhomin[1] + shift[1], loc_rhomin[2] + shift[2]);
LOGINFO(" maximum: delta=%f at (%f,%f,%f)", rhomax, loc_rhomax[0], loc_rhomax[1], loc_rhomax[2]);
LOGINFO(" shifted back at (%f,%f,%f)", loc_rhomax[0] + shift[0], loc_rhomax[1] + shift[1], loc_rhomax[2] + shift[2]);
}
}
void write_dm_velocity(int coord, const grid_hierarchy &gh)
{
char enzoname[256];
sprintf(enzoname, "ParticleVelocities_%c", (char)('x' + coord));
double vunit = 1.0 / (1.225e2 * sqrt(the_sim_header.omega_m / the_sim_header.a_start));
dump_grid_data(enzoname, gh, vunit);
}
void write_dm_position(int coord, const grid_hierarchy &gh)
{
char enzoname[256];
sprintf(enzoname, "ParticleDisplacements_%c", (char)('x' + coord));
dump_grid_data(enzoname, gh);
}
void write_dm_potential(const grid_hierarchy &gh)
{
}
void write_gas_potential(const grid_hierarchy &gh)
{
}
void write_gas_velocity(int coord, const grid_hierarchy &gh)
{
double vunit = 1.0 / (1.225e2 * sqrt(the_sim_header.omega_m / the_sim_header.a_start));
char enzoname[256];
sprintf(enzoname, "GridVelocities_%c", (char)('x' + coord));
dump_grid_data(enzoname, gh, vunit);
}
void write_gas_position(int coord, const grid_hierarchy &gh)
{
/* do nothing, not needed */
}
void write_gas_density(const grid_hierarchy &gh)
{
char enzoname[256];
sprintf(enzoname, "GridDensity");
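// dump_grid_data then stores (1 + delta) * Omega_b/Omega_m, i.e. the baryon
// density in units of the total mean matter density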
dump_grid_data(enzoname, gh, the_sim_header.omega_b / the_sim_header.omega_m, 1.0);
}
void finalize(void)
{
}
};
namespace
{
output_plugin_creator_concrete<enzo_output_plugin> creator("enzo");
}
#endif

1963
src/random.cc Normal file

File diff suppressed because it is too large