Mirror of https://github.com/cosmo-sims/monofonIC.git

Merge branch 'constraint' into develop

Oliver Hahn 2020-03-01 17:41:44 +01:00
commit 8855c75f43
5 changed files with 502 additions and 32 deletions

View file

@@ -5,7 +5,6 @@ GridRes = 128
BoxLength = 125
# starting redshift
zstart = 49.0
#zstart = 19.0
# order of the LPT to be used (1,2 or 3)
LPTorder = 1
# also do baryon ICs?
@@ -14,54 +13,59 @@ DoBaryons = no
DoFixing = yes
# particle load, can be 'sc' (1x), 'bcc' (2x) or 'fcc' (4x) (increases number of particles by factor!)
ParticleLoad = sc
# Add a possible constraint field here:
#ConstraintFieldFile = initial_conditions.h5
#ConstraintFieldName = ic_white_noise
[cosmology]
transfer = CLASS
ztarget = 0.0
# transfer = eisenstein
transfer = CLASS
ztarget = 2.5
# transfer = eisenstein
# transfer = file_CAMB
# transfer_file = wmap5_transfer_out_z0.dat
Omega_m = 0.302
Omega_b = 0.045
Omega_L = 0.698
H0 = 70.3
sigma_8 = 0.811
nspec = 0.961
Omega_m = 0.302
Omega_b = 0.045
Omega_L = 0.698
H0 = 70.3
sigma_8 = 0.811
nspec = 0.961
# anisotropic large scale tidal field
#LSS_aniso_lx = +0.1
#LSS_aniso_ly = +0.1
#LSS_aniso_lz = -0.2
# LSS_aniso_lx = +0.1
# LSS_aniso_ly = +0.1
# LSS_aniso_lz = -0.2
[random]
generator = NGENIC
seed = 9001
generator = NGENIC
seed = 9001
[testing]
# enables diagnostic output
# can be 'none' (default), 'potentials_and_densities', 'velocity_displacement_symmetries', or 'convergence'
test = none
test = none
[execution]
NumThreads = 8
NumThreads = 8
[output]
fname_hdf5 = output_sch.hdf5
fbase_analysis = output
format = gadget2
filename = ics_gadget.dat
#UseLongids = false
#
#format = gadget_hdf5
#filename = ics_gadget.hdf5
# format = gadget2
# filename = ics_gadget.dat
# UseLongids = false
format = gadget_hdf5
filename = ics_gadget.hdf5
#format = generic
#filename = debug.hdf5
#generic_out_eulerian = yes
# format = AREPO
# filename = ics_arepo.hdf5
#format = grafic2
#filename = ics_ramses
#grafic_use_SPT = yes
# format = generic
# filename = debug.hdf5
# generic_out_eulerian = yes
# format = grafic2
# filename = ics_ramses
# grafic_use_SPT = yes
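
The two new [setup] keys above (ConstraintFieldFile / ConstraintFieldName) point at an HDF5 dataset holding a real-space white-noise cube whose large-scale modes get imprinted on the generated field. Below is a minimal sketch (not part of this commit) of producing such a file with the plain HDF5 C API; the 64^3 grid size and the seed are arbitrary assumptions, and only the cubic shape really matters since Read_from_HDF5 normalises the field to unit variance itself.

// Hypothetical helper (illustration only): write a white-noise cube
// "ic_white_noise" into "initial_conditions.h5" so that the
// ConstraintFieldFile / ConstraintFieldName options above can read it back.
#include <hdf5.h>
#include <random>
#include <vector>

int main()
{
    const hsize_t N = 64; // assumed coarse constraint grid, must be cubic
    std::vector<double> wn(N * N * N);
    std::mt19937_64 rng(9001);
    std::normal_distribution<double> gauss(0.0, 1.0);
    for (auto &x : wn) x = gauss(rng);

    hid_t file = H5Fcreate("initial_conditions.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hsize_t dims[3] = {N, N, N};
    hid_t space = H5Screate_simple(3, dims, nullptr);
    hid_t dset  = H5Dcreate2(file, "ic_white_noise", H5T_NATIVE_DOUBLE, space,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, wn.data());
    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}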

View file

@@ -738,6 +738,8 @@ public:
void Write_to_HDF5(std::string fname, std::string datasetname) const;
void Read_from_HDF5( std::string fname, std::string datasetname );
void Write_PowerSpectrum(std::string ofname);
void Compute_PowerSpectrum(std::vector<double> &bin_k, std::vector<double> &bin_P, std::vector<double> &bin_eP, std::vector<size_t> &bin_count);
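
The newly declared Compute_PowerSpectrum returns shell-averaged binned spectra; its definition is not part of this diff. Purely as an illustration of what such a binning does (not the repository's implementation, and omitting the bin_eP error estimate of the declared interface), here is a standalone sketch that averages |delta_k|^2 in linear k-shells for an N^3 complex field; grid size, box length and bin count are placeholder choices.

// Illustrative k-shell binning (assumed semantics, not the monofonIC code):
// given the complex Fourier modes of an N^3 field in a box of side L,
// accumulate |delta_k|^2 per linear k bin and return the per-bin mean.
#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

void bin_power(const std::vector<std::complex<double>> &dk, size_t N, double L,
               std::vector<double> &bin_k, std::vector<double> &bin_P,
               std::vector<size_t> &bin_count)
{
    const double pi    = std::acos(-1.0);
    const double kfund = 2.0 * pi / L;     // fundamental mode of the box
    const double knyq  = kfund * N / 2.0;  // Nyquist frequency
    const size_t nbins = N / 2;
    bin_k.assign(nbins, 0.0);
    bin_P.assign(nbins, 0.0);
    bin_count.assign(nbins, 0);

    // FFT index -> signed integer frequency
    auto freq = [&](size_t i) { return (i <= N / 2) ? double(i) : double(i) - double(N); };

    for (size_t i = 0; i < N; ++i)
        for (size_t j = 0; j < N; ++j)
            for (size_t k = 0; k < N; ++k)
            {
                const double kmag = kfund * std::sqrt(freq(i) * freq(i) + freq(j) * freq(j) + freq(k) * freq(k));
                if (kmag <= 0.0 || kmag >= knyq) continue;   // skip DC and beyond Nyquist
                const size_t b = size_t(kmag / knyq * nbins);
                bin_k[b] += kmag;
                bin_P[b] += std::norm(dk[(i * N + j) * N + k]);
                bin_count[b] += 1;
            }

    for (size_t b = 0; b < nbins; ++b)
        if (bin_count[b] > 0) { bin_k[b] /= bin_count[b]; bin_P[b] /= bin_count[b]; }
}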

View file

@@ -177,7 +177,8 @@ void Grid_FFT<data_t,bdistributed>::FourierTransformForward(bool do_transform)
{
double wtime = get_wtime();
csoca::dlog.Print("[FFT] Calling Grid_FFT::to_kspace (%lux%lux%lu)", sizes_[0], sizes_[1], sizes_[2]);
FFTW_API(execute)(plan_);
FFTW_API(execute)
(plan_);
this->ApplyNorm();
wtime = get_wtime() - wtime;
@@ -209,7 +210,8 @@ void Grid_FFT<data_t,bdistributed>::FourierTransformBackward(bool do_transform)
csoca::dlog.Print("[FFT] Calling Grid_FFT::to_rspace (%dx%dx%d)\n", sizes_[0], sizes_[1], sizes_[2]);
double wtime = get_wtime();
FFTW_API(execute)(iplan_);
FFTW_API(execute)
(iplan_);
this->ApplyNorm();
wtime = get_wtime() - wtime;
@@ -246,6 +248,157 @@ void create_hdf5(std::string Filename)
H5Fclose(HDF_FileID);
}
template <typename T>
hid_t hdf5_get_data_type(void)
{
if (typeid(T) == typeid(int))
return H5T_NATIVE_INT;
if (typeid(T) == typeid(unsigned))
return H5T_NATIVE_UINT;
if (typeid(T) == typeid(float))
return H5T_NATIVE_FLOAT;
if (typeid(T) == typeid(double))
return H5T_NATIVE_DOUBLE;
if (typeid(T) == typeid(long long))
return H5T_NATIVE_LLONG;
if (typeid(T) == typeid(unsigned long long))
return H5T_NATIVE_ULLONG;
if (typeid(T) == typeid(size_t))
return H5T_NATIVE_ULLONG;
std::cerr << " - Error: [HDF_IO] trying to evaluate unsupported type in GetDataType\n\n";
return -1;
}
template <typename data_t,bool bdistributed>
void Grid_FFT<data_t,bdistributed>::Read_from_HDF5(const std::string Filename, const std::string ObjName)
{
if( bdistributed ){
csoca::elog << "Attempt to read from HDF5 into MPI-distributed array. This is not supported yet!" << std::endl;
abort();
}
hid_t HDF_Type = hdf5_get_data_type<data_t>();
hid_t HDF_FileID = H5Fopen(Filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
//... save old error handler
herr_t (*old_func)(void *);
void *old_client_data;
H5Eget_auto(&old_func, &old_client_data);
//... turn off error handling by hdf5 library
H5Eset_auto(NULL, NULL);
//... probe dataset opening
hid_t HDF_DatasetID = H5Dopen(HDF_FileID, ObjName.c_str());
//... restore previous error handler
H5Eset_auto(old_func, old_client_data);
//... dataset did not exist or was empty
if (HDF_DatasetID < 0)
{
csoca::elog << "Dataset \'" << ObjName.c_str() << "\' does not exist or is empty." << std::endl;
H5Fclose(HDF_FileID);
abort();
}
//... get space associated with dataset and its extensions
hid_t HDF_DataspaceID = H5Dget_space(HDF_DatasetID);
int ndims = H5Sget_simple_extent_ndims(HDF_DataspaceID);
hsize_t dimsize[3];
H5Sget_simple_extent_dims(HDF_DataspaceID, dimsize, NULL);
hsize_t HDF_StorageSize = 1;
for (int i = 0; i < ndims; ++i)
HDF_StorageSize *= dimsize[i];
//... adjust the array size to hold the data
std::vector<data_t> Data;
Data.reserve(HDF_StorageSize);
Data.assign(HDF_StorageSize, (data_t)0);
if (Data.capacity() < HDF_StorageSize)
{
csoca::elog << "Not enough memory to store all data in HDFReadDataset!" << std::endl;
H5Sclose(HDF_DataspaceID);
H5Dclose(HDF_DatasetID);
H5Fclose(HDF_FileID);
abort();
}
//... read the dataset
H5Dread(HDF_DatasetID, HDF_Type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &Data[0]);
if (Data.size() != HDF_StorageSize)
{
csoca::elog << "Something went wrong while reading!" << std::endl;
H5Sclose(HDF_DataspaceID);
H5Dclose(HDF_DatasetID);
H5Fclose(HDF_FileID);
abort();
}
H5Sclose(HDF_DataspaceID);
H5Dclose(HDF_DatasetID);
H5Fclose(HDF_FileID);
assert( dimsize[0] == dimsize[1] && dimsize[0] == dimsize[2] );
csoca::ilog << "Read external constraint data of dimensions " << dimsize[0] << "**3." << std::endl;
for( size_t i=0; i<3; ++i ) this->n_[i] = dimsize[i];
this->space_ = rspace_id;
if (data_ != nullptr)
{
fftw_free(data_);
}
this->Setup();
//... copy data to internal array ...
double sum1{0.0}, sum2{0.0};
#pragma omp parallel for reduction(+:sum1,sum2)
for (size_t i = 0; i < size(0); ++i)
{
for (size_t j = 0; j < size(1); ++j)
{
for (size_t k = 0; k < size(2); ++k)
{
this->relem(i,j,k) = Data[ (i*size(1) + j)*size(2)+k ];
sum2 += std::real(this->relem(i,j,k)*this->relem(i,j,k));
sum1 += std::real(this->relem(i,j,k));
}
}
}
sum1 /= Data.size();
sum2 /= Data.size();
auto stdw = std::sqrt(sum2-sum1*sum1);
csoca::ilog << "Constraint field has <W>=" << sum1 << ", <W^2>-<W>^2=" << stdw << std::endl;
#pragma omp parallel for reduction(+:sum1,sum2)
for (size_t i = 0; i < size(0); ++i)
{
for (size_t j = 0; j < size(1); ++j)
{
for (size_t k = 0; k < size(2); ++k)
{
this->relem(i,j,k) /= stdw;
}
}
}
}
template <typename data_t,bool bdistributed>
void Grid_FFT<data_t,bdistributed>::Write_to_HDF5(std::string fname, std::string datasetname) const

View file

@@ -83,6 +83,10 @@ int Run( ConfigFile& the_config )
Omega[cosmo_species::baryon] = 0.0;
}
//--------------------------------------------------------------------------------------------------------
//! do constrained ICs?
const bool bAddConstrainedModes = the_config.ContainsKey("setup", "ConstraintFieldFile" );
//--------------------------------------------------------------------------------------------------------
//! add beyond box tidal field modes following Schmidt et al. (2018) [https://arxiv.org/abs/1803.03274]
bool bAddExternalTides = the_config.ContainsKey("cosmology", "LSS_aniso_lx")
@@ -180,8 +184,74 @@ int Run( ConfigFile& the_config )
csoca::ilog << "Generating white noise field...." << std::endl;
the_random_number_generator->Fill_Grid(wnoise);
wnoise.FourierTransformForward();
//--------------------------------------------------------------------
// Use externally specified large scale modes from constraints in case
//--------------------------------------------------------------------
if( bAddConstrainedModes ){
Grid_FFT<real_t,false> cwnoise({8,8,8}, {boxlen,boxlen,boxlen});
cwnoise.Read_from_HDF5( the_config.GetValue<std::string>("setup", "ConstraintFieldFile"),
the_config.GetValue<std::string>("setup", "ConstraintFieldName") );
cwnoise.FourierTransformForward();
size_t ngrid_c = cwnoise.size(0), ngrid_c_2 = ngrid_c/2;
// TODO: copy over modes
double rs1{0.0},rs2{0.0},is1{0.0},is2{0.0};
double nrs1{0.0},nrs2{0.0},nis1{0.0},nis2{0.0};
size_t count{0};
#pragma omp parallel for reduction(+:rs1,rs2,is1,is2,nrs1,nrs2,nis1,nis2,count)
for( size_t i=0; i<ngrid_c; ++i ){
size_t il = size_t(-1);
if( i<ngrid_c_2 && i<ngrid/2 ) il = i;
if( i>ngrid_c_2 && i+ngrid-ngrid_c>ngrid/2) il = ngrid-ngrid_c+i;
if( il == size_t(-1) ) continue;
if( il<size_t(wnoise.local_1_start_) || il>=size_t(wnoise.local_1_start_+wnoise.local_1_size_)) continue;
il -= wnoise.local_1_start_;
for( size_t j=0; j<ngrid_c; ++j ){
size_t jl = size_t(-1);
if( j<ngrid_c_2 && j<ngrid/2 ) jl = j;
if( j>ngrid_c_2 && j+ngrid-ngrid_c>ngrid/2 ) jl = ngrid-ngrid_c+j;
if( jl == size_t(-1) ) continue;
for( size_t k=0; k<ngrid_c/2+1; ++k ){
if( k>ngrid/2 ) continue;
size_t kl = k;
++count;
nrs1 += std::real(cwnoise.kelem(i,j,k));
nrs2 += std::real(cwnoise.kelem(i,j,k))*std::real(cwnoise.kelem(i,j,k));
nis1 += std::imag(cwnoise.kelem(i,j,k));
nis2 += std::imag(cwnoise.kelem(i,j,k))*std::imag(cwnoise.kelem(i,j,k));
rs1 += std::real(wnoise.kelem(il,jl,kl));
rs2 += std::real(wnoise.kelem(il,jl,kl))*std::real(wnoise.kelem(il,jl,kl));
is1 += std::imag(wnoise.kelem(il,jl,kl));
is2 += std::imag(wnoise.kelem(il,jl,kl))*std::imag(wnoise.kelem(il,jl,kl));
#if defined(USE_MPI)
wnoise.kelem(il,jl,kl) = cwnoise.kelem(j,i,k);
#else
wnoise.kelem(il,jl,kl) = cwnoise.kelem(i,j,k);
#endif
}
}
}
// csoca::ilog << " ... old field: re <w>=" << rs1/count << " <w^2>-<w>^2=" << rs2/count-rs1*rs1/count/count << std::endl;
// csoca::ilog << " ... old field: im <w>=" << is1/count << " <w^2>-<w>^2=" << is2/count-is1*is1/count/count << std::endl;
// csoca::ilog << " ... new field: re <w>=" << nrs1/count << " <w^2>-<w>^2=" << nrs2/count-nrs1*nrs1/count/count << std::endl;
// csoca::ilog << " ... new field: im <w>=" << nis1/count << " <w^2>-<w>^2=" << nis2/count-nis1*nis1/count/count << std::endl;
csoca::ilog << "White noise field large-scale modes overwritten with external field." << std::endl;
}
//--------------------------------------------------------------------
// Apply Normalisation factor and Angulo&Pontzen fixing or not
//--------------------------------------------------------------------
wnoise.apply_function_k( [&](auto wn){
if (bDoFixing)
wn = (std::abs(wn) != 0.0) ? wn / std::abs(wn) : wn;
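
The index arithmetic in this hunk maps each mode of the coarse constraint grid (ngrid_c^3) onto the matching mode of the simulation grid (ngrid^3): non-negative frequencies keep their index, negative frequencies are shifted by ngrid - ngrid_c, and anything on or beyond the coarse Nyquist plane is skipped (size_t(-1) is the sentinel). A tiny self-contained check of that mapping, with a made-up helper name and assumed sizes of 8^3 inside 32^3:

// Hypothetical illustration of the coarse->fine FFT index mapping used above.
// Returns SIZE_MAX for modes that must be skipped (coarse Nyquist plane or
// frequencies absent on the fine grid).
#include <cassert>
#include <cstddef>
#include <cstdint>

size_t map_coarse_to_fine(size_t i, size_t ngrid_c, size_t ngrid)
{
    const size_t ngrid_c_2 = ngrid_c / 2;
    if (i < ngrid_c_2 && i < ngrid / 2) return i;              // positive frequency
    if (i > ngrid_c_2 && i + ngrid - ngrid_c > ngrid / 2)      // negative frequency
        return ngrid - ngrid_c + i;
    return SIZE_MAX;                                           // skip (e.g. coarse Nyquist)
}

int main()
{
    // assumed sizes: 8^3 constraint grid embedded in a 32^3 simulation grid
    assert(map_coarse_to_fine(1, 8, 32) == 1);        // +1 mode keeps index 1
    assert(map_coarse_to_fine(7, 8, 32) == 31);       // -1 mode moves to ngrid-1
    assert(map_coarse_to_fine(4, 8, 32) == SIZE_MAX); // coarse Nyquist plane dropped
    return 0;
}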

src/plugins/output_arepo.cc Normal file
View file

@@ -0,0 +1,241 @@
#ifdef USE_HDF5
#include <unistd.h> // for unlink
#include <output_plugin.hh>
#include "HDF_IO.hh"
template <typename T>
std::vector<T> from_6array(const T *a)
{
return std::vector<T>{{a[0], a[1], a[2], a[3], a[4], a[5]}};
}
template <typename T>
std::vector<T> from_value(const T a)
{
return std::vector<T>{{a}};
}
template <typename write_real_t>
class gadget_hdf5_output_plugin : public output_plugin
{
struct header_t
{
unsigned npart[6];
double mass[6];
double time;
double redshift;
int flag_sfr;
int flag_feedback;
unsigned int npartTotal[6];
int flag_cooling;
int num_files;
double BoxSize;
double Omega0;
double OmegaLambda;
double HubbleParam;
int flag_stellarage;
int flag_metals;
unsigned int npartTotalHighWord[6];
int flag_entropy_instead_u;
int flag_doubleprecision;
};
protected:
int num_files_, num_simultaneous_writers_;
header_t header_;
real_t lunit_, vunit_;
bool blongids_;
std::string this_fname_;
double Tini_;
unsigned pmgrid_;
unsigned gridboost_;
int doublePrec_;
int doBaryons_;
double softening_;
public:
//! constructor
explicit gadget_hdf5_output_plugin(ConfigFile &cf)
: output_plugin(cf, "GADGET-HDF5")
{
num_files_ = 1;
#ifdef USE_MPI
// use as many output files as we have MPI tasks
MPI_Comm_size(MPI_COMM_WORLD, &num_files_);
#endif
real_t astart = 1.0 / (1.0 + cf_.GetValue<double>("setup", "zstart"));
lunit_ = cf_.GetValue<double>("setup", "BoxLength");
vunit_ = lunit_ / std::sqrt(astart);
blongids_ = cf_.GetValueSafe<bool>("output", "UseLongids", false);
num_simultaneous_writers_ = cf_.GetValueSafe<int>("output", "NumSimWriters", num_files_);
for (int i = 0; i < 6; ++i)
{
header_.npart[i] = 0;
header_.npartTotal[i] = 0;
header_.npartTotalHighWord[i] = 0;
header_.mass[i] = 0.0;
}
header_.time = astart;
header_.redshift = 1.0 / astart - 1.0;
header_.flag_sfr = 0;
header_.flag_feedback = 0;
header_.flag_cooling = 0;
header_.num_files = num_files_;
header_.BoxSize = lunit_;
header_.Omega0 = cf_.GetValue<double>("cosmology", "Omega_m");
header_.OmegaLambda = cf_.GetValue<double>("cosmology", "Omega_L");
header_.HubbleParam = cf_.GetValue<double>("cosmology", "H0") / 100.0;
header_.flag_stellarage = 0;
header_.flag_metals = 0;
header_.flag_entropy_instead_u = 0;
header_.flag_doubleprecision = (typeid(write_real_t) == typeid(double)) ? true : false;
// initial gas temperature
double Tcmb0 = 2.726;
double Omegab = cf_.GetValue<double>("cosmology", "Omega_b");
double h = cf_.GetValue<double>("cosmology", "H0") / 100.0, h2 = h*h;
double adec = 1.0 / (160.0 * pow(Omegab * h2 / 0.022, 2.0 / 5.0));
Tini_ = astart < adec ? Tcmb0 / astart : Tcmb0 / astart / astart * adec;
// suggested PM res
pmgrid_ = 2*cf_.GetValue<double>("setup", "GridRes");
gridboost_ = 1;
softening_ = cf_.GetValue<double>("setup", "BoxLength")/pmgrid_/20;
doBaryons_ = cf_.GetValue<bool>("setup", "DoBaryons");
#if !defined(USE_SINGLEPRECISION)
doublePrec_ = 1;
#else
doublePrec_ = 0;
#endif
this_fname_ = fname_;
#ifdef USE_MPI
int thisrank = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &thisrank);
if (num_files_ > 1)
this_fname_ += "." + std::to_string(thisrank);
#endif
unlink(this_fname_.c_str());
HDFCreateFile(this_fname_);
}
// use destructor to write header post factum
~gadget_hdf5_output_plugin()
{
HDFCreateGroup(this_fname_, "Header");
HDFWriteGroupAttribute(this_fname_, "Header", "NumPart_ThisFile", from_6array<unsigned>(header_.npart));
HDFWriteGroupAttribute(this_fname_, "Header", "MassTable", from_6array<double>(header_.mass));
HDFWriteGroupAttribute(this_fname_, "Header", "Time", from_value<double>(header_.time));
HDFWriteGroupAttribute(this_fname_, "Header", "Redshift", from_value<double>(header_.redshift));
HDFWriteGroupAttribute(this_fname_, "Header", "NumPart_Total", from_6array<unsigned>(header_.npartTotal));
HDFWriteGroupAttribute(this_fname_, "Header", "NumPart_Total_HighWord", from_6array<unsigned>(header_.npartTotalHighWord));
HDFWriteGroupAttribute(this_fname_, "Header", "NumFilesPerSnapshot", from_value<int>(header_.num_files));
HDFWriteGroupAttribute(this_fname_, "Header", "BoxSize", from_value<double>(header_.BoxSize));
HDFWriteGroupAttribute(this_fname_, "Header", "Omega0", from_value<double>(header_.Omega0));
HDFWriteGroupAttribute(this_fname_, "Header", "OmegaLambda", from_value<double>(header_.OmegaLambda));
HDFWriteGroupAttribute(this_fname_, "Header", "HubbleParam", from_value<double>(header_.HubbleParam));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_Sfr", from_value<int>(0));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_Cooling", from_value<int>(0));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_StellarAge", from_value<int>(0));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_Metals", from_value<int>(0));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_Feedback", from_value<int>(0));
HDFWriteGroupAttribute(this_fname_, "Header", "Flag_DoublePrecision", (int)doublePrec_);
// HDFWriteGroupAttribute(this_fname_, "Header", "Music_levelmin", levelmin_);
// HDFWriteGroupAttribute(this_fname_, "Header", "Music_levelmax", levelmax_);
// HDFWriteGroupAttribute(this_fname_, "Header", "Music_levelcounts", levelcounts);
HDFWriteGroupAttribute(this_fname_, "Header", "haveBaryons", from_value<int>((int)doBaryons_));
HDFWriteGroupAttribute(this_fname_, "Header", "longIDs", from_value<int>((int)blongids_));
HDFWriteGroupAttribute(this_fname_, "Header", "suggested_pmgrid", from_value<int>(pmgrid_));
HDFWriteGroupAttribute(this_fname_, "Header", "suggested_gridboost", from_value<int>(gridboost_));
HDFWriteGroupAttribute(this_fname_, "Header", "suggested_highressoft", from_value<double>(softening_));
HDFWriteGroupAttribute(this_fname_, "Header", "suggested_gas_Tinit", from_value<double>(Tini_));
csoca::ilog << "Wrote" << std::endl;
}
output_type write_species_as(const cosmo_species &) const { return output_type::particles; }
real_t position_unit() const { return lunit_; }
real_t velocity_unit() const { return vunit_; }
bool has_64bit_reals() const
{
if (typeid(write_real_t) == typeid(double))
return true;
return false;
}
bool has_64bit_ids() const
{
if (blongids_)
return true;
return false;
}
int get_species_idx(const cosmo_species &s) const
{
switch (s)
{
case cosmo_species::dm:
return 1;
case cosmo_species::baryon:
return 2;
case cosmo_species::neutrino:
return 3;
}
return -1;
}
void write_particle_data(const particle::container &pc, const cosmo_species &s, double Omega_species)
{
int sid = get_species_idx(s);
assert(sid != -1);
header_.npart[sid] = (pc.get_local_num_particles());
header_.npartTotal[sid] = (uint32_t)(pc.get_global_num_particles());
header_.npartTotalHighWord[sid] = (uint32_t)((pc.get_global_num_particles()) >> 32);
double rhoc = 27.7519737; // in h^2 1e10 M_sol / Mpc^3
double boxmass = Omega_species * rhoc * std::pow(header_.BoxSize, 3);
header_.mass[sid] = boxmass / pc.get_global_num_particles();
HDFCreateGroup(this_fname_, std::string("PartType") + std::to_string(sid));
//... write positions and velocities.....
if (this->has_64bit_reals())
{
HDFWriteDatasetVector(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions64_);
HDFWriteDatasetVector(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities64_);
}
else
{
HDFWriteDatasetVector(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/Coordinates"), pc.positions32_);
HDFWriteDatasetVector(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/Velocities"), pc.velocities32_);
}
//... write ids.....
if (this->has_64bit_ids())
HDFWriteDataset(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids64_);
else
HDFWriteDataset(this_fname_, std::string("PartType") + std::to_string(sid) + std::string("/ParticleIDs"), pc.ids32_);
// std::cout << ">>>A> " << header_.npart[sid] << std::endl;
}
};
namespace
{
#if !defined(USE_SINGLEPRECISION)
output_plugin_creator_concrete<gadget_hdf5_output_plugin<double>> creator1("AREPO");
#else
output_plugin_creator_concrete<gadget_hdf5_output_plugin<float>> creator1("AREPO");
#endif
} // namespace
#endif
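
For reference, a standalone check (not part of the plugin) of the suggested_gas_Tinit value the destructor writes, replaying the constructor's formula with the example configuration above (Omega_b = 0.045, H0 = 70.3, zstart = 49):

// Quick numerical check of the Tini_ formula above, using the example
// configuration values; expected output is roughly adec ~ 6.2e-3, Tini ~ 42 K.
#include <cmath>
#include <cstdio>

int main()
{
    const double Tcmb0 = 2.726, Omegab = 0.045, h = 0.703, zstart = 49.0;
    const double astart = 1.0 / (1.0 + zstart);
    const double adec = 1.0 / (160.0 * std::pow(Omegab * h * h / 0.022, 2.0 / 5.0));
    const double Tini = astart < adec ? Tcmb0 / astart : Tcmb0 / astart / astart * adec;
    std::printf("adec = %.4g, Tini = %.3g K\n", adec, Tini);
    return 0;
}

To select this writer, set format = AREPO (and e.g. filename = ics_arepo.hdf5) in the [output] section, as the commented-out lines in the example configuration already hint.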