mirror of https://github.com/glatterf42/sims_python_files.git synced 2024-09-18 15:03:45 +02:00

first commit

glatterf42 2022-04-13 13:20:32 +02:00
commit 764c1358d1
20 changed files with 1647 additions and 0 deletions

47
check_ics_swift.py Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 11:16:06 2022
@author: ben
"""
import h5py
from pathlib import Path
# directory = Path(r"/home/ben/sims/swiftsim/examples/agora/")
# directory = Path(r"/home/ben/Pictures/Gadget4/vsc4_256/")
directory = Path(r'/home/ben/sims/music-panphasia/test/')
file = h5py.File(directory / "agora_128.hdf5", "r")
# file = h5py.File(directory / "output_0001.hdf5", "r")
# file = h5py.File(directory / "cosmo_ics_gadget_256_pbh_vsc.hdf5", "r")
for key in file.keys():
print(key) #for finding all header entries, which are:
Header = file["Header"]
#ICs_parameters = file["ICs_parameters"]
PartType1 = file["PartType1"]
Units = file["Units"]
# PartType2 = file['PartType2']
print(list(Header.attrs.items()))
print(PartType1.keys())
# print(PartType2.keys())
print(PartType1['Masses'][0:10])
print(PartType1['Velocities'][0:10])
# print(PartType2['Masses'][0:10])
# print(list(Units.attrs.items()))
# use list(Header.attrs.keys()) to list all attributes contained in the Header and .attrs.values() to see their values
# even otherwise empty things like ICs_parameters can contain such attributes
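A quick way to see everything such an IC file contains, beyond the hand-picked groups above, is to let h5py walk the whole file. A minimal sketch using only standard h5py calls (dump_hdf5_structure is a hypothetical helper name):

def dump_hdf5_structure(hdf5_file):
    # print every group/dataset with its attributes; even "empty" groups can carry attributes
    def visitor(name, obj):
        print(name, dict(obj.attrs))
        if isinstance(obj, h5py.Dataset):
            print("    shape:", obj.shape, "dtype:", obj.dtype)
    print("/", dict(hdf5_file.attrs))
    hdf5_file.visititems(visitor)

# dump_hdf5_structure(file)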

192
create_dir_and_write_params_g4.py Normal file

@@ -0,0 +1,192 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 10:54:50 2022
@author: ben
"""
from pathlib import Path
from directories import g4_output_basedir
from sys import argv
def create_dir_and_write_params_g4(Nres: int, Lbox: float, waveform: str, output_basedir: Path):
# create directory if it doesn't exist already
outputdir = output_basedir / f"{waveform}_{Nres}_{Lbox:.0f}"
if outputdir.exists():
print(outputdir, "already exists. Skipping...")
else:
print("creating", outputdir)
outputdir.mkdir()
# write param file there:
param_script = f"""
%---- Relevant files
InitCondFile \t \t /gpfs/data/fs71636/fglatter/PBH_EFD/monofonic_tests/output/{waveform}_{Nres}_{Lbox:.0f}/ics_{waveform}_{Nres}_{Lbox:.0f}
OutputDir \t \t /gpfs/data/fs71636/fglatter/PBH_EFD/monofonic_tests/output/{waveform}_{Nres}_{Lbox:.0f}
SnapshotFileBase \t snapshot
OutputListFilename \t /gpfs/data/fs71636/fglatter/PBH_EFD/monofonic_tests/outputs.txt
%---- File formats
ICFormat \t 3
SnapFormat \t 3
%---- CPU-time limits
TimeLimitCPU \t \t 86400 \t % 24h, in seconds
CpuTimeBetRestartFile \t 7200 \t % 2h, in seconds
%---- Memory allocation
MaxMemSize \t 1800 \t % in MByte
%---- Characteristics of run
TimeBegin \t 0.02 \t % Begin of the simulation, z=49
TimeMax \t 1.0 \t % End of the simulation, z=0
%---- Basic code options that set the type of simulation
ComovingIntegrationOn \t 1
%---- Cosmological parameters #update according to monofonic output
Omega0 \t \t \t \t 0.3099
OmegaLambda \t \t \t \t 0.690021
OmegaBaryon \t \t \t \t 0.0488911
HubbleParam \t \t \t \t 0.67742
Hubble \t \t \t \t 100.0
BoxSize \t \t \t \t {Lbox}
#
# MeanPBHScatteringCrossSection \t 1.73189e-13
#
# AveragePBHMass \t \t \t 3.086978e-10
#
# SigmaOverM \t \t \t \t 5.6103e-4
%---- Output frequency and output parameters
OutputListOn \t \t \t 1
TimeBetSnapshot \t \t \t 0.0
TimeOfFirstSnapshot \t \t 0.0
TimeBetStatistics \t \t 0.01
NumFilesPerSnapshot \t \t 1
MaxFilesWithConcurrentIO \t 1
%---- Accuracy of time integration
ErrTolIntAccuracy \t \t 0.025
CourantFac \t \t \t 0.7
MaxSizeTimestep \t \t 0.025
MinSizeTimestep \t \t 0.0
%---- Tree algorithm, force accuracy, domain update frequency
TypeOfOpeningCriterion \t \t 1
ErrTolTheta \t \t \t \t 0.5
ErrTolThetaMax \t \t \t 1.0
ErrTolForceAcc \t \t \t 0.002
TopNodeFactor \t \t \t \t 3.0
ActivePartFracForNewDomainDecomp \t 0.01
ActivePartFracForPMinsteadOfEwald \t 0.05
%---- Initial density estimate
DesNumNgb \t \t 64
MaxNumNgbDeviation \t 1
%---- Subfind parameters
DesLinkNgb \t \t 20
%---- System of units
UnitLength_in_cm \t \t 3.086578e24 \t ; Mpc/h
UnitMass_in_g \t \t \t 1.989e43 \t ; 1.0e10 Msun/h
UnitVelocity_in_cm_per_s \t 1e5 \t ; 1km/s
GravityConstantInternal \t 0
%---- Gravitational softening length
SofteningComovingClass0 \t {Lbox / Nres / 30}
SofteningMaxPhysClass0 \t {Lbox / Nres / 30}
SofteningClassOfPartType0 \t 0
SofteningClassOfPartType1 \t 0
SofteningClassOfPartType2 \t 0
SofteningClassOfPartType3 \t 0
SofteningClassOfPartType4 \t 0
SofteningClassOfPartType5 \t 0
%---- SPH
ArtBulkViscConst \t 1.0
MinEgySpec \t \t 0
InitGasTemp \t \t 10
"""
with (outputdir / f"{waveform}_{Nres}_{Lbox:.0f}_param.txt").open("w") as f:
f.write(param_script)
if __name__ == "__main__":
create_dir_and_write_params_g4(
Nres=int(argv[1]),
Lbox=float(argv[2]),
waveform=argv[3],
output_basedir=g4_output_basedir
)
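The softening lengths written above are Lbox / Nres / 30, i.e. one thirtieth of the mean inter-particle spacing. A quick check of the magnitude with hypothetical example values (assuming the parameter file's internal length unit, Mpc/h):

Nres, Lbox = 256, 30.0            # hypothetical example values
mean_spacing = Lbox / Nres        # ~0.117 Mpc/h between particles
softening = Lbox / Nres / 30      # ~0.0039 Mpc/h, the value written into the param file
print(f"spacing {mean_spacing:.4f}, softening {softening:.5f}")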

171
create_dir_and_write_params_swift.py Normal file

@@ -0,0 +1,171 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 13:43:40 2022
@author: ben
"""
from pathlib import Path
from directories import swift_output_basedir, swift_mon_test_basedir
from sys import argv
def create_dir_and_write_params_swift(Nres: int, Lbox: float, waveform: str, output_basedir: Path):
# create directory if it doesn't exist already
outputdir = output_basedir / f"{waveform}_{Nres}_{Lbox:.0f}"
if outputdir.exists():
print(outputdir, "already exists. Skipping...")
else:
print("creating", outputdir)
outputdir.mkdir()
# write param file there:
param_script = f"""
# Define some meta data about the simulation.
MetaData:
run_name: mon_test_{waveform}_{Nres}_{Lbox:.0f}
# Define the system of units to use internally.
InternalUnitSystem:
UnitMass_in_cgs: 1.98848e43 # 10^10 M_sun in grams
UnitLength_in_cgs: 3.08567758e24 # 1 Mpc in centimeters
UnitVelocity_in_cgs: 1e5 # 1 km/s in centimeters per second
UnitCurrent_in_cgs: 1 # 1 Ampere
UnitTemp_in_cgs: 1 # 1 Kelvin
# Values of some physical constants
#PhysicalConstants:
# G: 6.67408e-8 # (Optional) Overwrite the value of Newton's constant used internally by the code.
# Cosmological parameters # update according to monofonic output
Cosmology:
h: 0.67742 # Reduced Hubble constant
a_begin: 0.02 # Initial scale-factor of the simulation, z = 49
a_end: 1.0 # Final scale factor of the simulation, z = 0
Omega_cdm: 0.2610089 # CDM density parameter, new: Omega_m = _cdm + _b (w/o neutrinos)
Omega_lambda: 0.690021 # Dark-energy density parameter
Omega_b: 0.0488911 # Baryon density parameter
# Parameters for the self-gravity scheme
Gravity:
mesh_side_length: {Nres * 2}# Number of cells along each axis for the periodic gravity mesh (must be even).
eta: 0.025 # Constant dimensionless multiplier for time integration.
  MAC: adaptive # Choice of multipole acceptance criterion: 'adaptive' OR 'geometric'.
epsilon_fmm: 0.001 # Tolerance parameter for the adaptive multipole acceptance criterion.
  theta_cr: 0.7 # Opening angle for the purely geometric criterion.
# use_tree_below_softening: 0 # (Optional) Can the gravity code use the multipole interactions below the softening scale?
# allow_truncation_in_MAC: 0 # (Optional) Can the Multipole acceptance criterion use the truncated force estimator?
comoving_DM_softening: {Lbox / Nres / 30} # Comoving Plummer-equivalent softening length for DM particles (in internal units).
max_physical_DM_softening: {Lbox / Nres / 30} # Maximal Plummer-equivalent softening length in physical coordinates for DM particles (in internal units). #should work b/c Lbox is given in Mpc=internal unit
rebuild_frequency: 0.01 # (Optional) Frequency of the gravity-tree rebuild in units of the number of g-particles (this is the default value).
a_smooth: 1.25 # (Optional) Smoothing scale in top-level cell sizes to smooth the long-range forces over (this is the default value).
r_cut_max: 4.5 # (Optional) Cut-off in number of top-level cells beyond which no FMM forces are computed (this is the default value).
r_cut_min: 0.1 # (Optional) Cut-off in number of top-level cells below which no truncation of FMM forces are performed (this is the default value).
# FOF group finding parameters
FOF:
basename: fof_output # Filename for the FOF outputs (Unused when FoF is only run to seed BHs).
scale_factor_first: 0.91 # Scale-factor of first FoF black hole seeding calls (needed for cosmological runs).
delta_time: 1.005 # Time between consecutive FoF black hole seeding calls.
min_group_size: 256 # The minimum no. of particles required for a group.
linking_length_ratio: 0.2 # Linking length in units of the main inter-particle separation.
seed_black_holes_enabled: 0 # Enable (1) or disable (0) seeding of black holes in FoF groups
  dump_catalogue_when_seeding: 0 # Enable (1) or disable (0) dumping a group catalogue when running FOF for seeding purposes.
absolute_linking_length: -1. # (Optional) Absolute linking length (in internal units). When not set to -1, this will overwrite the linking length computed from 'linking_length_ratio'.
group_id_default: 2147483647 # (Optional) Sets the group ID of particles in groups below the minimum size. Defaults to 2^31 - 1 if unspecified. Has to be positive.
group_id_offset: 1 # (Optional) Sets the offset of group ID labeling. Defaults to 1 if unspecified.
# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
TimeIntegration:
dt_min: 1e-6 # The minimal time-step size of the simulation (in internal units).
dt_max: 0.025 # The maximal time-step size of the simulation (in internal units).
# Parameters governing the snapshots
Snapshots:
basename: output # Common part of the name of output files.
# subdir: dir # (Optional) Sub-directory in which to write the snapshots. Defaults to "" (i.e. the directory where SWIFT is run).
scale_factor_first: 0.1 # (Optional) Scale-factor of the first snapshot if cosmological time-integration.
delta_time: 0.01 # Time difference between consecutive outputs (in internal units)
# invoke_stf: 0 # (Optional) Call VELOCIraptor every time a snapshot is written irrespective of the VELOCIraptor output strategy.
invoke_fof: 1 # (Optional) Call FOF every time a snapshot is written
# compression: 0 # (Optional) Set the level of GZIP compression of the HDF5 datasets [0-9]. 0 does no compression. The lossless compression is applied to *all* the fields.
# distributed: 0 # (Optional) When running over MPI, should each rank write a partial snapshot or do we want a single file? 1 implies one file per MPI rank.
# UnitMass_in_cgs: 1 # (Optional) Unit system for the outputs (Grams)
# UnitLength_in_cgs: 1 # (Optional) Unit system for the outputs (Centimeters)
# UnitVelocity_in_cgs: 1 # (Optional) Unit system for the outputs (Centimeters per second)
# UnitCurrent_in_cgs: 1 # (Optional) Unit system for the outputs (Amperes)
# UnitTemp_in_cgs: 1 # (Optional) Unit system for the outputs (Kelvin)
output_list_on: 1 # (Optional) Enable the output list
output_list: {swift_mon_test_basedir}/snap_times.txt # (Optional) File containing the output times (see documentation in "Parameter File" section)
# select_output_on: 0 # (Optional) Enable the output selection behaviour
# select_output: selectoutput.yml # (Optional) File containing information to select outputs with (see documentation in the "Output Selection" section)
# Parameters governing the conserved quantities statistics
Statistics:
delta_time: 1.1 # Time between statistics output
scale_factor_first: 0.1 # (Optional) Scale-factor of the first statistics dump if cosmological time-integration.
energy_file_name: statistics # (Optional) File name for statistics output
timestep_file_name: timesteps # (Optional) File name for timing information output. Note: No underscores "_" allowed in file name
output_list_on: 1 # (Optional) Enable the output list
output_list: {swift_mon_test_basedir}/snap_times.txt # (Optional) File containing the output times (see documentation in "Parameter File" section)
# Parameters related to the initial conditions
InitialConditions:
file_name: {swift_output_basedir}/{waveform}_{Nres}_{Lbox:.0f}/ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5 # The file to read
periodic: 1 # Are we running with periodic ICs?
cleanup_h_factors: 0 # (Optional) Clean up the h-factors used in the ICs (e.g. in Gadget files).
cleanup_velocity_factors: 0 # (Optional) Clean up the scale-factors used in the definition of the velocity variable in the ICs (e.g. in Gadget files).
cleanup_smoothing_lengths: 0 # (Optional) Clean the values of the smoothing lengths that are read in to remove stupid values. Set to 1 to activate.
smoothing_length_scaling: 1. # (Optional) A scaling factor to apply to all smoothing lengths in the ICs.
replicate: 1 # (Optional) Replicate all particles along each axis a given integer number of times. Default 1.
remap_ids: 0 # (Optional) Remap all the particle IDs to the range [1, NumPart].
metadata_group_name: ICs_parameters # (Optional) Copy this HDF5 group from the initial conditions file to all snapshots, if found
# Parameters controlling restarts
Restarts:
enable: 1 # (Optional) whether to enable dumping restarts at fixed intervals.
save: 0 # (Optional) whether to save copies of the previous set of restart files (named .prev)
# onexit: 0 # (Optional) whether to dump restarts on exit (*needs enable*)
subdir: restart # (Optional) name of subdirectory for restart files.
basename: restart # (Optional) prefix used in naming restart files.
delta_hours: 2.0 # (Optional) decimal hours between dumps of restart files.
stop_steps: 100 # (Optional) how many steps to process before checking if the <subdir>/stop file exists. When present the application will attempt to exit early, dumping restart files first.
max_run_time: 24.0 # (optional) Maximal wall-clock time in hours. The application will exit when this limit is reached.
# resubmit_on_exit: 0 # (Optional) whether to run a command when exiting after the time limit has been reached.
# resubmit_command: ./resub.sh # (Optional) Command to run when time limit is reached. Compulsory if resubmit_on_exit is switched on. Note potentially unsafe.
# Parameters governing domain decomposition
# DomainDecomposition:
# initial_type: memory # (Optional) The initial decomposition strategy: "grid",
# # "region", "memory", or "vectorized".
# initial_grid: [10,10,10] # (Optional) Grid sizes if the "grid" strategy is chosen.
# synchronous: 0 # (Optional) Use synchronous MPI requests to redistribute, uses less system memory, but slower.
# repartition_type: fullcosts # (Optional) The re-decomposition strategy, one of:
# # "none", "fullcosts", "edgecosts", "memory" or
# # "timecosts".
# trigger: 0.05 # (Optional) Fractional (<1) CPU time difference between MPI ranks required to trigger a
# # new decomposition, or number of steps (>1) between decompositions
# minfrac: 0.9 # (Optional) Fractional of all particles that should be updated in previous step when
# # using CPU time trigger
# usemetis: 0 # Use serial METIS when ParMETIS is also available.
# adaptive: 1 # Use adaptive repartition when ParMETIS is available, otherwise simple refinement.
# itr: 100 # When adaptive defines the ratio of inter node communication time to data redistribution time, in the range 0.00001 to 10000000.0.
# # Lower values give less data movement during redistributions, at the cost of global balance which may require more communication.
# use_fixed_costs: 0 # If 1 then use any compiled in fixed costs for
# # task weights in first repartition, if 0 only use task timings, if > 1 only use
# # fixed costs, unless none are available
"""
with (outputdir / f"{waveform}_{Nres}_{Lbox:.0f}_param.yml").open("w") as f:
f.write(param_script)
if __name__ == "__main__":
create_dir_and_write_params_swift(
Nres=int(argv[1]),
Lbox=float(argv[2]),
waveform=argv[3],
output_basedir=swift_output_basedir
)
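Unlike the Gadget-4 file above, SWIFT takes Omega_cdm rather than the total matter density, so the template subtracts the baryons from Omega_m (as the comment next to Omega_cdm notes). A one-line sanity check with the numbers used here:

Omega_m, Omega_b = 0.3099, 0.0488911
assert abs((Omega_m - Omega_b) - 0.2610089) < 1e-7  # the Omega_cdm value written above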

@@ -0,0 +1,187 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 15:03:28 2021
@author: Ben Melville
"""
import h5py
from pathlib import Path
#import matplotlib.pyplot as plt
from scipy.spatial import KDTree #for nearest neighbour search
import numpy as np
import numba
@numba.njit()
def determine_desired_range(offset, minimum, upper_limit_bottom, lower_limit_top, maximum):
a = minimum
b = maximum
if offset < 0:
a = lower_limit_top
elif offset > 0:
b = upper_limit_bottom
return a,b
@numba.njit()
def find_coordinates_to_move(minimum, maximum, ratio, x_offset, y_offset, z_offset, move_candidates):
coordinates_to_move = []
x_start, x_end = determine_desired_range(x_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
y_start, y_end = determine_desired_range(y_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
z_start, z_end = determine_desired_range(z_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
for point in move_candidates:
if x_start <= point[0] <= x_end and y_start <= point[1] <= y_end and z_start <= point[2] <= z_end:
coordinates_to_move.append(point)
return coordinates_to_move
directory = Path(r".\256_pbh_fast_10sigma")
# total_mass = 232209.09 #retrieved from visualisation.py
# #Choose one of the following Nres values:
# Nres = 64
# #Nres = 128
# mass_per_particle = total_mass / (Nres**3)
# file = h5py.File(directory / "fof_subhalo_tab_038.hdf5", "r") #for whole duration
# file = h5py.File(directory / "fof_subhalo_tab_018.hdf5", "r") #for small boxes
file = h5py.File(directory / "fof_subhalo_tab_016.hdf5", "r") #for e5 boxes 256
# for key in file.keys():
# print(key)
configuration = file["Config"] #appears to be empty
header = file["Header"] #appears to be empty
ids = file["IDs"] #appears to be empty
parameters = file["Parameters"] #appears to be empty
group = file["Group"] #contains ['GroupAscale', 'GroupFirstSub', 'GroupLen', 'GroupLenType', 'GroupMass', 'GroupMassType', 'GroupNsubs', 'GroupOffsetType', 'GroupPos', 'GroupVel', 'Group_M_Crit200', 'Group_M_Crit500', 'Group_M_Mean200', 'Group_M_TopHat200', 'Group_R_Crit200', 'Group_R_Crit500', 'Group_R_Mean200', 'Group_R_TopHat200']
subhalo = file["Subhalo"] #contains ['SubhaloCM', 'SubhaloGroupNr', 'SubhaloHalfmassRad', 'SubhaloHalfmassRadType', 'SubhaloIDMostbound', 'SubhaloLen', 'SubhaloLenType', 'SubhaloMass', 'SubhaloMassType', 'SubhaloOffsetType', 'SubhaloParentRank', 'SubhaloPos', 'SubhaloRankInGr', 'SubhaloSpin', 'SubhaloVel', 'SubhaloVelDisp', 'SubhaloVmax', 'SubhaloVmaxRad']
# file = h5py.File(str(directory)+r"\subhalo_snapshot_038.hdf5", "r")
# configuration = file["Config"] #appears to be empty
# header = file["Header"] #appears to be empty
# parameters = file["Parameters"] #appears to be empty
# parttype1 = file["PartType1"] #contains ['Coordinates', 'ParticleIDs', 'SubfindDensity', 'SubfindHsml', 'SubfindVelDisp', 'Velocities']
group_positions = group["GroupPos"][:]
group_radii = group["Group_R_Crit200"][:]
group_masses = group["Group_M_Crit200"][:]
# file = h5py.File(directory / "subhalo_snapshot_038.hdf5", "r") #for whole duration
# file = h5py.File(directory / "snapshot_018.hdf5", "r") #for small boxes
file = h5py.File(directory / "snapshot_016.hdf5", "r") #for e5 boxes 256
# parttype1 = file["PartType1"] #contains ['Coordinates', 'ParticleIDs', 'SubfindDensity', 'SubfindHsml', 'SubfindVelDisp', 'Velocities']
# original_coordinates = file["PartType1"]["Coordinates"][:] #for cdm particles
parttype1 = file["PartType0"] #contains ['Coordinates', 'ParticleIDs', 'SubfindDensity', 'SubfindHsml', 'SubfindVelDisp', 'Velocities']
original_coordinates = file["PartType0"]["Coordinates"][:] #for pbh particles
# boundaries = [30., 30., 30.] #BoxLength in MonofonIC is 30.
# boundaries = [2.77048, 2.77048, 2.77048] #BoxLength for small boxes depends on Nres, 2.77048 for 64, 5.54096 for 128.
boundaries = [2.36438, 2.36438, 2.36438] #BoxLength for e5 boxes depends on Nres, 2.36438 for 256, 4.72876 for 512.
offsets = [-1, 0, 1]
coordinates = original_coordinates[:]
number_of_time_that_points_have_been_found = 0
#assumes cube form and 0.1 as desired ratio to move
minimum = 0.0
maximum = max(boundaries)
ratio = 0.1
box_length = maximum - minimum
range_to_move = 0.1 * box_length
upper_limit_bottom = minimum + range_to_move
lower_limit_top = maximum - range_to_move
print("Find candidates to move...")
@numba.njit()
def find_move_candidates():
move_candidates = []
for point in original_coordinates:
        if (
            minimum <= point[0] <= upper_limit_bottom or
            lower_limit_top <= point[0] <= maximum or
            minimum <= point[1] <= upper_limit_bottom or
            lower_limit_top <= point[1] <= maximum or
            minimum <= point[2] <= upper_limit_bottom or
            lower_limit_top <= point[2] <= maximum
        ):
move_candidates.append(point)
return move_candidates
move_candidates = find_move_candidates()
print("...done.")
for x in offsets:
for y in offsets:
for z in offsets:
if (x, y, z) == (0, 0, 0):
continue
moved_coordinates = find_coordinates_to_move(minimum, maximum, ratio, x, y, z, move_candidates)
            moved_coordinates = np.array(moved_coordinates)
            moved_coordinates += np.array([x * boundaries[0], y * boundaries[1], z * boundaries[2]])
coordinates = np.vstack((coordinates, moved_coordinates))
number_of_time_that_points_have_been_found += 1
print("Points found: " + str(number_of_time_that_points_have_been_found) + "/26...")
# assert coordinates.shape[0] == original_coordinates.shape[0] * 3 ** 3 #check that the new space has the shape we want it to have
print("Building 3d-Tree for all particles...")
tree = KDTree(coordinates)
print("...done.")
print("Searching group members...")
group_member_indices = tree.query_ball_point(group_positions, group_radii, workers=6)
assert len(group_member_indices) == len(group_positions)
print("...found.")
print("Calculating radial bins and saving data...")
for i in range(len(group_positions)):
group_center = group_positions[i]
group_radius = group_radii[i]
group_mass = group_masses[i]
if group_radius == 0 or group_mass == 0:
continue #Not quite sure how this can happen, but apparently there are groups with a radius or mass of zero (and they are not necessarily the same).
radial_bins = np.arange(0, group_radius + group_radius/10, group_radius/10)
members = [coordinates[j] for j in group_member_indices[i]]
members_in_bins = [] #should better be called distances now
for member in members:
distance = np.linalg.norm(group_center - member)
members_in_bins.append(distance)
#The following already sorts into bins, but that's not ideal for changing bins to e.g. logarithmic values.
# for r in range(len(radial_bins)-1): #-1 because no value is going to be above radial_bins[10] anyway
# if radial_bins[r] <= distance < radial_bins[r + 1]:
# members_in_bins.append(radial_bins[r] + group_radius/20)
assert len(members) == len(members_in_bins)
group_ID = [i+1] * len(members)
group_radius_list = [group_radius] * len(members)
group_mass_list = [group_mass] * len(members)
all_data = np.column_stack((group_ID, group_radius_list, group_mass_list, members_in_bins))
np.savetxt(directory/"density_profiles_16"/f"group{i+1}.csv", all_data, delimiter=",", fmt="%.3f", header="ID,R200,M200,Bin")
print("...done.")

23
directories.py Normal file
@@ -0,0 +1,23 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:08:54 2022
@author: ben
"""
from pathlib import Path
g4_basedir = Path("/gpfs/data/fs71636/fglatter/PBH_EFD/monofonic_tests/")
g4_output_basedir = Path("/gpfs/data/fs71636/fglatter/PBH_EFD/monofonic_tests/output/")
swift_basedir = Path("/gpfs/data/fs71636/fglatter/swiftsim/")
swift_mon_test_basedir = Path("/gpfs/data/fs71636/fglatter/swiftsim/monofonic_tests/")
swift_output_basedir = Path("/gpfs/data/fs71636/fglatter/swiftsim/monofonic_tests/output/")
swift_tool_basedir = Path("/gpfs/data/fs71636/fglatter/swiftsim/tools/")
python_base = Path("/gpfs/data/fs71636/fglatter/python_files/")
monofonic_dir = Path("/gpfs/data/fs71636/fglatter/monofonic-experimental")
template_file = monofonic_dir / "example.conf"
# monofonic_binary = monofonic_dir / "build" / "monofonIC"

64
get_mass_zoom_halo.py Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 11:16:06 2022
@author: ben
"""
import h5py
from pathlib import Path
import numpy as np
directory = Path(r"/home/ben/sims/swiftsim/examples/zoom_tests/")
snap_number = 7
fof_file = h5py.File(directory / f"fof_output_000{snap_number}.hdf5", "r")
file = h5py.File(directory / f'output_000{snap_number}.hdf5', 'r')
PartType1 = file["PartType1"]
highres_groupids = PartType1['FOFGroupIDs'][:]
total_highres_particles = len(highres_groupids)
unique_groups, particle_count = np.unique(highres_groupids, return_counts=True)
print(f'Group IDs: {unique_groups}, Number in them: {particle_count}')
# all particles belonging to no group have group id 2147483647
# for key in fof_file.keys():
# print(key) #for finding all header entries, which are:
groups = fof_file['Groups']
# for key in groups.keys():
# print(key)
# centres = groups['Centres'][:]
groupids = groups['GroupIDs'][:]
masses = groups['Masses'][:]
number_of_members = groups['Sizes'][:]
table_width = 4
separate_unique_counter = 0
for i in range(len(groupids)-1):
if np.isin(groupids[i], unique_groups):
highres_members = particle_count[separate_unique_counter]
contamination = (1 - highres_members / number_of_members[i])
print(f'Group: {groupids[i]:{table_width}} | Mass: {masses[i]:{table_width + 1}.{table_width}} | Highres Members: {highres_members:{table_width}} | Total Members: {number_of_members[i]:{table_width}} | Contamination: {contamination * 100:.{table_width}}%\n')
separate_unique_counter += 1
# for i in range(10):
# print(f'Group: {groupids[i]} | Mass: {masses[i]} | Size: {sizes[i]} | Centre: {centres[i]}\n')
# print(list(parameters.attrs.items()))
# use list(Header.attrs.keys()) to list all attributes contained in the Header and .attrs.values() to see their values
# even otherwise empty things like ICs_parameters can contain such attributes

60
prepare_simulations.py Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:59:03 2022
@author: ben
"""
from sys import argv
from write_monofonic_conf import generate_config
from pathlib import Path
from create_dir_and_write_params_g4 import create_dir_and_write_params_g4
from directories import g4_output_basedir
from write_job_script import write_job_script
def main(Nres: int, Lbox: float, waveforms: list):
for form in waveforms:
#write monofonic conf from template file
generate_config(
Nres = Nres,
Lbox = Lbox,
outputdir = Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{form}/"),
waveform = form
)
#create directory in g4_output_basedir and write param file for G4 there
create_dir_and_write_params_g4(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_basedir = g4_output_basedir
)
#write job scripts
write_job_script(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_dir = g4_output_basedir / f"{form}_{Nres}_{Lbox:.0f}"
)
if __name__ == "__main__":
if argv[3] == "all":
waveforms = ["DB2", "DB4", "DB6", "DB8", "DB10", "shannon"]
else:
waveforms = argv[3:len(argv)]
assert len(waveforms) <= 6
Nres = int(argv[1])
Lbox = float(argv[2])
main(
Nres=Nres,
Lbox=Lbox,
waveforms=waveforms
)
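The script is driven purely by the command line: Nres, Lbox, then either "all" or up to six explicit waveform names. Calling main() directly is equivalent; a hypothetical example:

# same as: python3 prepare_simulations.py 256 30 DB2 DB8 shannon
main(Nres=256, Lbox=30.0, waveforms=["DB2", "DB8", "shannon"])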

60
prepare_simulations_g4.py Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:59:03 2022
@author: ben
"""
from sys import argv
from write_monofonic_conf import generate_config
from pathlib import Path
from create_dir_and_write_params_g4 import create_dir_and_write_params_g4
from directories import g4_output_basedir
from write_job_script import write_job_script
def main(Nres: int, Lbox: float, waveforms: list):
for form in waveforms:
#write monofonic conf from template file
generate_config(
Nres = Nres,
Lbox = Lbox,
outputdir = Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{form}/"),
waveform = form
)
#create directory in g4_output_basedir and write param file for G4 there
create_dir_and_write_params_g4(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_basedir = g4_output_basedir
)
#write job scripts
write_job_script(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_dir = g4_output_basedir / f"{form}_{Nres}_{Lbox:.0f}"
)
if __name__ == "__main__":
if argv[3] == "all":
waveforms = ["DB2", "DB4", "DB6", "DB8", "DB10", "shannon"]
else:
waveforms = argv[3:len(argv)]
assert len(waveforms) <= 6
Nres = int(argv[1])
Lbox = float(argv[2])
main(
Nres=Nres,
Lbox=Lbox,
waveforms=waveforms
)

60
prepare_simulations_swift.py Normal file

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:59:03 2022
@author: ben
"""
from sys import argv
from write_monofonic_conf import generate_config
from pathlib import Path
from create_dir_and_write_params_swift import create_dir_and_write_params_swift
from directories import swift_output_basedir
from write_job_script_swift import write_job_script
def main(Nres: int, Lbox: float, waveforms: list):
for form in waveforms:
#write monofonic conf from template file
generate_config(
Nres = Nres,
Lbox = Lbox,
outputdir = Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{form}/"),
waveform = form
)
#create directory in swift_output_basedir and write param file for swift there
create_dir_and_write_params_swift(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_basedir = swift_output_basedir
)
#write job scripts
write_job_script(
Nres = Nres,
Lbox = Lbox,
waveform = form,
output_dir = swift_output_basedir / f"{form}_{Nres}_{Lbox:.0f}"
)
if __name__ == "__main__":
if argv[3] == "all":
waveforms = ["DB2", "DB4", "DB6", "DB8", "DB10", "shannon"]
else:
waveforms = argv[3:len(argv)]
assert len(waveforms) <= 6
Nres = int(argv[1])
Lbox = float(argv[2])
main(
Nres=Nres,
Lbox=Lbox,
waveforms=waveforms
)

28
rename_and_move_ics.py Normal file
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 17:25:58 2022
@author: ben
"""
import shutil
from pathlib import Path
from sys import argv
def rename_and_move_ics(n_tasks: int, source_dir: Path, dest_dir: Path, waveform: str, Nres: int, Lbox: float):
for i in range(n_tasks):
source_file = source_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5.{i}"
dest_file = dest_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.{i}.hdf5"
shutil.move(source_file, dest_file)
if __name__ == "__main__":
rename_and_move_ics(
n_tasks = int(argv[1]),
source_dir = Path(argv[2]),
dest_dir = Path(argv[3]),
waveform = str(argv[4]),
Nres = int(argv[5]),
Lbox = float(argv[6])
)
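For example (hypothetical values), with n_tasks = 4, waveform DB2, Nres 256 and Lbox 30 this moves ics_DB2_256_30.hdf5.0 through ics_DB2_256_30.hdf5.3 out of the monofonIC build directory and renames them to ics_DB2_256_30.0.hdf5 through ics_DB2_256_30.3.hdf5 in the destination directory, i.e. the <base>.<N>.hdf5 naming scheme Gadget-4 expects for multi-file HDF5 ICs.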

28
rename_and_move_ics_g4.py Normal file
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 17:25:58 2022
@author: ben
"""
import shutil
from pathlib import Path
from sys import argv
def rename_and_move_ics(n_tasks: int, source_dir: Path, dest_dir: Path, waveform: str, Nres: int, Lbox: float):
for i in range(n_tasks):
source_file = source_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5.{i}"
dest_file = dest_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.{i}.hdf5"
shutil.move(source_file, dest_file)
if __name__ == "__main__":
rename_and_move_ics(
n_tasks = int(argv[1]),
source_dir = Path(argv[2]),
dest_dir = Path(argv[3]),
waveform = str(argv[4]),
Nres = int(argv[5]),
Lbox = float(argv[6])
)

28
rename_and_move_ics_swift.py Normal file

@@ -0,0 +1,28 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 17:25:58 2022
@author: ben
"""
import shutil
from pathlib import Path
from sys import argv
def rename_and_move_ics(n_tasks: int, source_dir: Path, dest_dir: Path, waveform: str, Nres: int, Lbox: float):
for i in range(n_tasks):
source_file = source_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5"
dest_file = dest_dir / f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5"
shutil.move(source_file, dest_file)
if __name__ == "__main__":
rename_and_move_ics(
n_tasks = int(argv[1]),
source_dir = Path(argv[2]),
dest_dir = Path(argv[3]),
waveform = str(argv[4]),
Nres = int(argv[5]),
Lbox = float(argv[6])
)

177
visualisation.py Normal file
@@ -0,0 +1,177 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 19:11:42 2021
@author: Ben Melville
"""
import time
from pathlib import Path
from sys import getsizeof
import h5py
import numpy as np
import scipy.spatial
"""
Use this section to find out how many particles there are. The second part retrieves the total mass of cdm or pbh particles, which is all we need because we only have one type of particle in every simulation.
"""
# paths = [Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\64_cdm"), Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\64_pbh"), Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\128_cdm"), Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\128_pbh")]
cdm_paths = [Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\64_cdm"), Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\128_cdm")]
pbh_paths = [Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\64_pbh"), Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\128_pbh")]
# for path in pbh_paths:
# file = h5py.File(str(path)+"\snapshot_000.hdf5", "r")
# names = file["PartType0"]["ParticleIDs"][:]
# print("There are "+str(len(names))+" particles.")
# for path in cdm_paths:
# file = h5py.File(str(path)+"\snapshot_000.hdf5", "r")
# names = file["PartType1"]["ParticleIDs"][:]
# print("There are "+str(len(names))+" particles.")
# for path in pbh_paths:
# filename = str(path)+"\energy.txt"
# file = np.loadtxt(filename)
# print("File: "+filename+" Total mass: "+str(file[0][22])+"\n")
# for path in cdm_paths:
# filename = str(path)+"\energy.txt"
# file = np.loadtxt(filename)
# print("File: "+filename+" Total mass: "+str(file[0][23])+"\n")
#For Standard DM-L50-N128, there are 2097152 particles, i.e for Nres=128. For Nres=64, there are 262144 particles.
#For our cosmos-sims, the total amount of mass is always the same: 232209.09 (units unclear!)
"""
Use this section to extract the desired data from every snapshot. Start with
a random selection of particles to enable faster testing with ParaView. Raw
String needed to read spaces; filename.with_suffix(".csv").name works thanks to
Lukas.
"""
#indices = np.random.choice(np.arange(2097152), 100000, replace=False) #for Nres=128
#indices = np.random.choice(np.arange(262144), 100000, replace=False) #for Nres=64
# directory = Path(r"C:\Users\Ben Melville\Documents\Studium\MSc Thesis\cosmo_sims\64_cdm")
directory = Path(".")
total_mass = 232209.09 #retrieved from above
Nres = 64 #choose one of this or the following:
#Nres = 128
mass_per_particle = total_mass / (Nres**3)
for filename in sorted(directory.glob("snapshot_*.hdf5")):
print(filename)
file = h5py.File(str(filename), "r")
coordinates = file["PartType1"]["Coordinates"][:] #for cdm particles
names = file["PartType1"]["ParticleIDs"][:]
velocities = file["PartType1"]["Velocities"][:]
print("building 3d-Tree")
tree = scipy.spatial.KDTree(coordinates)
print(getsizeof(tree)/1024,"KB")
print("tree built")
# coordinates = file["PartType0"]["Coordinates"][:] #for pbh particles
# names = file["PartType0"]["ParticleIDs"][:]
# velocities = file["PartType0"]["Velocities"][:]
absolute_velo = np.sqrt(np.sum(velocities**2, axis=1))
print(len(names))
print("searching neighbours")
a = time.perf_counter_ns()
closest_neighbours, indices = tree.query([coordinates], k=40, workers=6)
# shape of closest_neighbours: (1, 262144, 40)
b = time.perf_counter_ns()
print("found neighbours")
print(f"took {(b - a) / 1000 / 1000:.2f} ms")
closest_neighbours = closest_neighbours[0] # to (262144, 40)
densities = 40 * mass_per_particle / np.mean(closest_neighbours, axis=1) ** 3
print(closest_neighbours.shape)
print(densities)
print(densities.shape)
all_data = np.append(coordinates, velocities, axis=1)
all_data = np.column_stack((all_data, absolute_velo, names, densities))
sorted_index = np.argsort(all_data[::,7], kind="stable")
all_data = all_data[sorted_index, :]
# np.savetxt("out_"+filename.with_suffix(".csv").name, all_data[indices], delimiter=",", fmt="%.3f", header="x,y,z,vx,vy,vz,v,name") #if indices are needed
np.savetxt(directory/f"out_{filename.stem}.csv", all_data, delimiter=",", fmt="%.3f", header="x,y,z,vx,vy,vz,v,name,density")
print(all_data.shape)
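The density written out above, 40 * mass_per_particle / mean(distances)**3, is only a rough proxy. A more conventional k-nearest-neighbour estimate divides the enclosed mass by the volume of the sphere reaching the k-th neighbour; a sketch of that alternative (not what the script saves), reusing the arrays already computed in the loop:

k = 40
r_k = closest_neighbours[:, -1]  # scipy returns neighbour distances sorted, so the last column is the k-th
densities_knn = k * mass_per_particle / (4.0 / 3.0 * np.pi * r_k ** 3)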
"""
This section is the old version of reading in just one file. It shows some
underlying structure and gives the option of using all data.
"""
# file = h5py.File("snapshot_000.hdf5", "r")
# configuration = file["Config"]
# header = file["Header"]
# parameters = file["Parameters"]
# data_b = file["PartType0"] #according to Oliver's program, Type_0 are the baryons and Type_1 the cdms
# coordinates_b = data_b["Coordinates"][:]
# names_b = data_b["ParticleIDs"][:]
# velocities_b = data_b["Velocities"][:]
# data_c = file["PartType1"] #all keys used here: ['Coordinates', 'ParticleIDs', 'Velocities']
# coordinates_c = data_c["Coordinates"][:]
# names_c = data_c["ParticleIDs"][:]
# velocities_c = data_c["Velocities"][:]
#print(configuration)
#print("------------")
#print(header)
#print("------------")
#print(parameters)
#print("------------")
#print(data_b) #keys contained in data_b: ['Coordinates', 'Density', 'InternalEnergy', 'ParticleIDs', 'SmoothingLength', 'Velocities']
"""
absolute_velo = np.sqrt(np.sum(velocities**2, axis=1))
all_data = np.append(coordinates, velocities, axis=1)
all_data = np.column_stack((all_data, absolute_velo, names))
sorted_index = np.argsort(all_data[::, 7], kind="stable")
all_data = all_data[sorted_index, :]
np.savetxt("out.csv", all_data[indices], delimiter=",", fmt="%.3f", header="x,y,z,vx,vy,vz,v,name")
#np.savetxt("out_all.csv", all_data, delimiter=",", fmt="%.3f", header="x,y,z,vx,vy,vz,v,name")
print(all_data.shape)
"""
"""
For the plane wave test, we don't need 3-D plots, we just want to plot z-coordinates as x vs z-velocities as y.
Output times: [0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
"""
"""
file = h5py.File("snapshot_000.hdf5", "r")
data_b = file["PartType0"] #according to Oliver's program, Type_0 are the baryons and Type_1 the cdms
z_b = data_b["Coordinates"][:,2]
names_b = data_b["ParticleIDs"][:]
vz_b = data_b["Velocities"][:,2]
data_c = file["PartType1"]
z_c = data_c["Coordinates"][:,2]
names_c = data_c["ParticleIDs"][:]
vz_c = data_c["Velocities"][:,2]
plt.figure()
plt.plot( z_b, vz_b, '.', label='type b' )
plt.plot( z_c, vz_c, '.', label='type c' )
plt.legend()
plt.show()
"""

213
visualisation_swift.py Normal file
@@ -0,0 +1,213 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import vtk
import sys
import time
from pathlib import Path
from sys import getsizeof
import h5py
import numba
import numpy as np
import scipy.spatial
@numba.njit()
def determine_desired_range(offset, minimum, upper_limit_bottom, lower_limit_top, maximum):
a = minimum
b = maximum
if offset < 0:
a = lower_limit_top
elif offset > 0:
b = upper_limit_bottom
return a, b
@numba.njit()
def find_coordinates_to_move(minimum, maximum, ratio, x_offset, y_offset, z_offset, move_candidates):
coordinates_to_move = []
x_start, x_end = determine_desired_range(x_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
y_start, y_end = determine_desired_range(y_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
z_start, z_end = determine_desired_range(z_offset, minimum, upper_limit_bottom, lower_limit_top, maximum)
for particle in move_candidates:
point = particle[0:3]
if x_start <= point[0] <= x_end and y_start <= point[1] <= y_end and z_start <= point[2] <= z_end:
coordinates_to_move.append(particle)
return coordinates_to_move
directory = Path(r"/home/ben/sims/swiftsim/examples/zoom_tests/")
# file = h5py.File(directory / "auriga6_halo7_8_9.hdf5", "r")
for filename in sorted(directory.glob("output_*.hdf5")):
print(filename)
file = h5py.File(str(filename), "r")
Header = file['Header']
highres_coordinates = file["PartType1"]["Coordinates"][:] # for cdm particles
highres_names = file["PartType1"]["ParticleIDs"][:]
highres_velocities = file["PartType1"]["Velocities"][:]
highres_masses = file['PartType1']['Masses'][:]
highres_absolute_velo = np.sqrt(np.sum(highres_velocities ** 2, axis=1))
lowres_coordinates = file["PartType2"]["Coordinates"][:] # for cdm particles
lowres_names = file["PartType2"]["ParticleIDs"][:]
lowres_velocities = file["PartType2"]["Velocities"][:]
lowres_masses = file['PartType2']['Masses'][:]
lowres_absolute_velo = np.sqrt(np.sum(lowres_velocities ** 2, axis=1))
original_coordinates = np.concatenate((highres_coordinates, lowres_coordinates))
# if "auriga" in str(filename):
# original_coordinates /= 1000
print(original_coordinates.mean())
print(original_coordinates.min())
print(original_coordinates.max())
# print(file['Header'])
# print(list(file['Units'].attrs.items()))
# exit()
names = np.concatenate((highres_names, lowres_names))
velocities = np.concatenate((highres_velocities, lowres_velocities))
masses = np.concatenate((highres_masses, lowres_masses))
absolute_velo = np.concatenate((highres_absolute_velo, lowres_absolute_velo))
# for bla in [original_coordinates, names, velocities, masses, absolute_velo]:
# print(bla.shape)
original_data = np.vstack([
original_coordinates[::, 0],
original_coordinates[::, 1],
original_coordinates[::, 2],
names,
velocities[::, 0],
velocities[::, 1],
velocities[::, 2],
masses,
absolute_velo,
]).T
print(original_data.shape)
assert (original_coordinates == original_data[::, 0:3]).all()
boundaries = Header.attrs['BoxSize'] # BoxLength for e5 boxes depends on Nres, 2.36438 for 256, 4.72876 for 512.
print(boundaries)
if not boundaries.shape:
boundaries = np.array([boundaries] * 3)
offsets = [-1, 0, 1]
transformed_data = original_data[:]
number_of_time_that_points_have_been_found = 0
# assumes cube form and 0.1 as desired ratio to move
minimum = 0.0
maximum = max(boundaries)
ratio = 0.1
box_length = maximum - minimum
range_to_move = 0.1 * box_length
upper_limit_bottom = minimum + range_to_move
lower_limit_top = maximum - range_to_move
print("Find candidates to move...")
@numba.njit()
def find_move_candidates():
move_candidates = []
print("finding move candidates")
for particle in original_data:
point = particle[0:3]
if (
minimum <= point[0] <= upper_limit_bottom or
lower_limit_top <= point[0] <= maximum or
minimum <= point[1] <= upper_limit_bottom or
lower_limit_top <= point[1] <= maximum or
minimum <= point[2] <= upper_limit_bottom or
lower_limit_top <= point[2] <= maximum
):
move_candidates.append(particle)
# print(point)
return move_candidates
move_candidates = find_move_candidates()
move_candidates = np.array(move_candidates)
print("...done.")
for x in offsets:
for y in offsets:
for z in offsets:
if (x, y, z) == (0, 0, 0):
continue
moved_coordinates = find_coordinates_to_move(minimum, maximum, ratio, x, y, z, move_candidates)
# print(moved_coordinates)
moved_coordinates = np.array(moved_coordinates)
# if not moved_coordinates:
# print(f"nothing moved in {(x,y,z)}")
# continue
moved_coordinates[::, 0] += x * boundaries[0]
moved_coordinates[::, 1] += y * boundaries[1]
moved_coordinates[::, 2] += z * boundaries[2]
transformed_data = np.vstack((transformed_data, moved_coordinates))
number_of_time_that_points_have_been_found += 1
print(f"Points found: {number_of_time_that_points_have_been_found}/26...")
# assert coordinates.shape[0] == original_coordinates.shape[0] * 3 ** 3 #check that the new space has the shape we want it to have
num_nearest_neighbors = 40
print("Building 3d-Tree for all particles...")
coordinates = transformed_data[::, 0:3]
print(coordinates.shape)
tree = scipy.spatial.KDTree(coordinates)
print(getsizeof(tree) / 1024, "KB")
print("...done.")
print("Searching neighbours...")
a = time.perf_counter_ns()
distances, indices = tree.query([coordinates], k=num_nearest_neighbors, workers=6)
# shape of closest_neighbours: (1, xxxx, 40)
b = time.perf_counter_ns()
print("...found neighbours.")
print(f"took {(b - a) / 1000 / 1000:.2f} ms")
distances = distances[0] # to (xxxx, 40)
indices = indices[0] # to (xxxx, 40)
print(distances.shape)
print(indices.shape)
print(indices)
mass_array = []
print("fetching masses")
for subindices in indices: # subindices is (40)
# can maybe be optimized to remove loop
masses = transformed_data[subindices, 7]
mass_array.append(masses)
mass_array = np.array(mass_array)
print("finished fetching masses")
# print(closest_neighbours, indices)
# print(indices)
# densities = num_nearest_neighbors * mass_per_particle / np.mean(closest_neighbours, axis=1) ** 3
total_masses = np.sum(mass_array, axis=1)
densities = total_masses / np.mean(distances, axis=1) ** 3
alt_densities = total_masses / np.max(distances, axis=1) ** 3
# print(closest_neighbours.shape)
print(densities)
print(densities.shape)
all_data = np.column_stack([list(range(densities.shape[0])), transformed_data, densities, alt_densities])
print(all_data.shape)
print(original_data.shape[0])
export_data = all_data[:original_data.shape[0]]
print(export_data.shape)
# all_data = np.append(coordinates, velocities, axis=1)
# all_data = np.column_stack((all_data, absolute_velo, names, densities))
# sorted_index = np.argsort(all_data[::, 7], kind="stable")
# all_data = all_data[sorted_index, :]
# np.savetxt("out_"+filename.with_suffix(".csv").name, all_data[indices], delimiter=",", fmt="%.3f", header="x,y,z,vx,vy,vz,v,name") #if indices are needed
np.savetxt(directory / f"out_{filename.with_suffix('.csv').name}",
export_data,
delimiter=",",
fmt="%.3f",
header="num,x,y,z,name,vx,vy,vz,masse,v,density,density_alt")
file.close()
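The whole replication block above exists only to make the neighbour search periodic: particles within 10 % of the box edge are copied into the 26 neighbouring image boxes before the (non-periodic) tree is built. Recent SciPy versions can wrap coordinates internally instead, via the boxsize argument of KDTree, which would make the copies unnecessary; a sketch, assuming all coordinates already lie inside [0, BoxSize):

from scipy.spatial import KDTree

# periodic tree directly on the original particles (no 26-image copies needed)
periodic_tree = KDTree(original_data[:, 0:3], boxsize=boundaries)
distances, indices = periodic_tree.query(original_data[:, 0:3], k=num_nearest_neighbors, workers=6)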

58
write_job_script.py Normal file
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:14:20 2022
@author: ben
Currently for 256 particles
"""
from pathlib import Path
from sys import argv
from directories import g4_basedir, g4_output_basedir, python_base
n_tasks = 48 # used 8 for 128, use 48 for 256.
def write_job_script(Nres: int, Lbox: float, waveform: str, output_dir: Path):
output_dir_string = str(output_dir)
# replaced line #SBATCH --mem={2 * n_tasks}G with --exclusive for now to get all the memory
# deleted --ntasks-per-node={n_tasks} and --ntasks-per-core=1
script = f"""#!/bin/bash
#SBATCH --mail-type=ALL
#SBATCH --mail-user=a01611430@unet.univie.ac.at
#SBATCH --time=24:00:00
#SBATCH --nodes=1 #equal to -N 1
#SBATCH --exclusive
#SBATCH --job-name MON_{Nres}
#SBATCH -o mon_{waveform}_{Nres}.out
module load gcc/9.1.0-gcc-4.8.5-mj7s6dg
module load openmpi/3.1.4-gcc-9.1.0-fdssbx5
module load hdf5/1.10.5-gcc-9.1.0-rolgskh
module load fftw/3.3.8-gcc-9.1.0-2kyouz7
module load gsl/2.5-gcc-9.1.0-ucmpak4
module load cmake/3.17.3-gcc-9.1.0-tsjr5x6
module load python/3.9.4-gcc-9.1.0-l7amfu6
cd $DATA/monofonic_exp_{waveform}/build
mpiexec -np {n_tasks} ./monofonIC ../{waveform}_{Nres}_{Lbox:.0f}.conf
python3 {python_base}/rename_and_move_ics.py {n_tasks} $DATA/monofonic_exp_{waveform}/build {output_dir_string} {waveform} {Nres} {Lbox}
mpiexec -np {n_tasks} {g4_basedir}/Gadget4 {output_dir}/{waveform}_{Nres}_{Lbox:.0f}_param.txt
"""
with (output_dir / "job.sh").open("w") as f:
f.write(script)
if __name__ == "__main__":
write_job_script(
Nres = int(argv[1]),
Lbox = float(argv[2]),
output_dir = g4_output_basedir / f"{argv[3]}_{argv[1]}_{argv[2]}"
)

58
write_job_script_g4.py Normal file
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:14:20 2022
@author: ben
Currently for 256 particles
"""
from pathlib import Path
from sys import argv
from directories import g4_basedir, g4_output_basedir, python_base
n_tasks = 48 # used 8 for 128, use 48 for 256.
def write_job_script(Nres: int, Lbox: float, waveform: str, output_dir: Path):
output_dir_string = str(output_dir)
# replaced line #SBATCH --mem={2 * n_tasks}G with --exclusive for now to get all the memory
# deleted --ntasks-per-node={n_tasks} and --ntasks-per-core=1
script = f"""#!/bin/bash
#SBATCH --mail-type=ALL
#SBATCH --mail-user=a01611430@unet.univie.ac.at
#SBATCH --time=24:00:00
#SBATCH --nodes=1 #equal to -N 1
#SBATCH --exclusive
#SBATCH --job-name MON_{Nres}
#SBATCH -o mon_{waveform}_{Nres}.out
module load gcc/9.1.0-gcc-4.8.5-mj7s6dg
module load openmpi/3.1.4-gcc-9.1.0-fdssbx5
module load hdf5/1.10.5-gcc-9.1.0-rolgskh
module load fftw/3.3.8-gcc-9.1.0-2kyouz7
module load gsl/2.5-gcc-9.1.0-ucmpak4
module load cmake/3.17.3-gcc-9.1.0-tsjr5x6
module load python/3.9.4-gcc-9.1.0-l7amfu6
cd $DATA/monofonic_exp_{waveform}/build
mpiexec -np {n_tasks} ./monofonIC ../g4_{waveform}_{Nres}_{Lbox:.0f}.conf
python3 {python_base}/rename_and_move_ics.py {n_tasks} $DATA/monofonic_exp_{waveform}/build {output_dir_string} {waveform} {Nres} {Lbox}
mpiexec -np {n_tasks} {g4_basedir}/Gadget4 {output_dir}/{waveform}_{Nres}_{Lbox:.0f}_param.txt
"""
with (output_dir / "job.sh").open("w") as f:
f.write(script)
if __name__ == "__main__":
write_job_script(
Nres = int(argv[1]),
Lbox = float(argv[2]),
output_dir = g4_output_basedir / f"{argv[3]}_{argv[1]}_{argv[2]}"
)

66
write_job_script_swift.py Normal file
@@ -0,0 +1,66 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 22 16:14:20 2022
@author: ben
Currently for 256 particles
"""
from pathlib import Path
from sys import argv
from directories import swift_basedir, swift_output_basedir, swift_mon_test_basedir, swift_tool_basedir, python_base
n_tasks = 48 # used 8 for 128, use 48 for 256.
def write_job_script(Nres: int, Lbox: float, waveform: str, output_dir: Path):
output_dir_string = str(output_dir)
# replaced line #SBATCH --mem={2 * n_tasks}G with --exclusive for now to get all the memory
# deleted --ntasks-per-core=1
script = f"""#!/bin/bash
#SBATCH --mail-type=ALL
#SBATCH --mail-user=a01611430@unet.univie.ac.at
#SBATCH --time=04:00:00
#SBATCH --nodes=1 #equal to -N 1
#SBATCH --ntasks-per-node={n_tasks}
#SBATCH --exclusive
#SBATCH --job-name MON_{Nres}
#SBATCH -o mon_{waveform}_{Nres}.out
module load gcc/9.1.0-gcc-4.8.5-mj7s6dg
module load openmpi/3.1.4-gcc-9.1.0-fdssbx5
module load gsl/2.5-gcc-9.1.0-ucmpak4
module load fftw/3.3.8-gcc-9.1.0-2kyouz7
module load libtool/2.4.6-gcc-9.1.0-vkpnfol
module load hdf5/1.10.5-gcc-9.1.0-rolgskh
module load metis/5.1.0-gcc-9.1.0-gvmpssi
module load python/3.9.4-gcc-9.1.0-l7amfu6
source {python_base}/swift_venv/bin/activate
cd $DATA/monofonic_exp_{waveform}/build
mpiexec -np 2 ./monofonIC ../swift_{waveform}_{Nres}_{Lbox:.0f}.conf
python3 {python_base}/rename_and_move_ics_swift.py 1 $DATA/monofonic_exp_{waveform}/build {output_dir_string} {waveform} {Nres} {Lbox}
cd $DATA/swiftsim/monofonic_tests/output/{waveform}_{Nres}_{Lbox:.0f}
{swift_basedir}/examples/swift --cosmology --self-gravity --fof --threads=$SLURM_NPROCS {output_dir}/{waveform}_{Nres}_{Lbox:.0f}_param.yml
"""
with (output_dir / "job.sh").open("w") as f:
f.write(script)
if __name__ == "__main__":
write_job_script(
Nres = int(argv[1]),
Lbox = float(argv[2]),
output_dir = swift_output_basedir / f"{argv[3]}_{argv[1]}_{argv[2]}"
)
#shouldn't be necessary anymore:
# python3 {swift_tool_basedir}/combine_ics.py {output_dir}/ics_{waveform}_{Nres}_{Lbox:.0f}.0.hdf5 {output_dir}/ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5 4

41
write_monofonic_conf.py Normal file
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 10:29:11 2022
@author: ben
"""
from configparser import ConfigParser
from pathlib import Path
from sys import argv
template_file = Path("/gpfs/data/fs71636/fglatter/monofonic/monofonic/example.conf")
def generate_config(Nres: int, Lbox: float, outputdir: Path, waveform: str, print_config=False):
config = ConfigParser(inline_comment_prefixes="#")
config.optionxform = str
with template_file.open() as f:
config.read_file(f)
config.set("setup", "GridRes", str(Nres))
config.set("setup", "BoxLength", str(Lbox))
config.set("setup", "zstart", str(49.0))
config.set("output", "format", "gadget_hdf5")
config.set("output", "filename", f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5")
with (outputdir / f"{waveform}_{Nres}_{Lbox:.0f}.conf").open("w") as outputfile:
config.write(outputfile)
# just for also printing the output
if print_config:
with (outputdir / f"{waveform}_{Nres}_{Lbox:.0f}.conf").open("r") as outputfile:
print(outputfile.read())
if __name__ == '__main__':
generate_config(
Nres=int(argv[1]),
Lbox=float(argv[2]),
outputdir=Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{argv[3]}/"),
waveform=argv[3],
print_config=True
)

@@ -0,0 +1,42 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 10:29:11 2022
@author: ben
"""
from configparser import ConfigParser
from pathlib import Path
from sys import argv
template_file = Path("/gpfs/data/fs71636/fglatter/monofonic/monofonic/example.conf")
def generate_config(Nres: int, Lbox: float, outputdir: Path, waveform: str, print_config=False):
config = ConfigParser(inline_comment_prefixes="#")
config.optionxform = str
with template_file.open() as f:
config.read_file(f)
config.set("setup", "GridRes", str(Nres))
config.set("setup", "BoxLength", str(Lbox))
config.set("setup", "zstart", str(49.0))
config.set("output", "format", "gadget_hdf5")
config.set("output", "filename", f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5")
config.set("random", "generator", "MUSIC2")
with (outputdir / f"g4_{waveform}_{Nres}_{Lbox:.0f}.conf").open("w") as outputfile:
config.write(outputfile)
# just for also printing the output
if print_config:
with (outputdir / f"g4_{waveform}_{Nres}_{Lbox:.0f}.conf").open("r") as outputfile:
print(outputfile.read())
if __name__ == '__main__':
generate_config(
Nres=int(argv[1]),
Lbox=float(argv[2]),
outputdir=Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{argv[3]}/"),
waveform=argv[3],
print_config=True
)

@@ -0,0 +1,44 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 10:29:11 2022
@author: ben
"""
from configparser import ConfigParser
from pathlib import Path
from sys import argv
template_file = Path("/gpfs/data/fs71636/fglatter/monofonic/monofonic/example.conf")
def generate_config(Nres: int, Lbox: float, outputdir: Path, waveform: str, print_config=False):
config = ConfigParser(inline_comment_prefixes="#")
config.optionxform = str
with template_file.open() as f:
config.read_file(f)
config.set("setup", "GridRes", str(Nres))
config.set("setup", "BoxLength", f"{Lbox * 0.67742}")
config.set("setup", "zstart", str(49.0))
config.set("output", "format", "SWIFT")
config.set("output", "filename", f"ics_{waveform}_{Nres}_{Lbox:.0f}.hdf5")
config.set("output", "UseLongids", "true")
config.set("random", "generator", "MUSIC2")
config.set('execution', 'NumThreads', str(24))
with (outputdir / f"swift_{waveform}_{Nres}_{Lbox:.0f}.conf").open("w") as outputfile:
config.write(outputfile)
# just for also printing the output
if print_config:
with (outputdir / f"swift_{waveform}_{Nres}_{Lbox:.0f}.conf").open("r") as outputfile:
print(outputfile.read())
if __name__ == '__main__':
generate_config(
Nres=int(argv[1]),
Lbox=float(argv[2]),
outputdir=Path(f"/gpfs/data/fs71636/fglatter/monofonic_exp_{argv[3]}/"),
waveform=argv[3],
print_config=True
)