Mirror of https://github.com/glatterf42/sims_python_files.git
Synced 2024-09-09 04:33:45 +02:00
Introduce new files for Auriga evaluation and ID remapping between resolutions
This commit is contained in:
parent 2ba0971a3c
commit 805f2caf84
5 changed files with 556 additions and 0 deletions
combine_ics.py (new file, 298 lines)
@@ -0,0 +1,298 @@
#!/usr/bin/env python
"""
Usage:
    combine_ics.py input_file.0.hdf5 merged_file.hdf5 gzip_level

This file combines Gadget-2 type 2 (i.e. hdf5) initial condition files
into a single file that can be digested by SWIFT.
This has mainly been tested for DM-only (parttype1) files but also works
smoothly for ICs including gas. The special case of a mass-table for
the DM particles is handled. No unit conversions are applied and
scale-factors are left unchanged, but note that this adapted version
removes the h-factors from the box size, the coordinates and the
mass-table DM mass (it divides them by HubbleParam).
The script applies some compression and checksum filters to the output
to save disk space.
The last argument `gzip_level` is used to specify the level of compression
to apply to all the fields in the file. Use 0 to disable compression.
The default value is `4`.

This file is adapted from a part of SWIFT.
Copyright (C) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)

All Rights Reserved.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import sys
import h5py as h5
import numpy as np

# Store the compression level
gzip_level = 4
if len(sys.argv) > 3:
    gzip_level = int(sys.argv[3])

# First, we need to collect some information from the master file
main_file_name = str(sys.argv[1])[:-7]
print("Merging snapshot files with name", main_file_name)
master_file_name = main_file_name + ".0.hdf5"
print("Reading master information from", master_file_name)
master_file = h5.File(master_file_name, "r")
grp_header = master_file["/Header"]

num_files = grp_header.attrs["NumFilesPerSnapshot"]
tot_num_parts = grp_header.attrs["NumPart_Total"]
tot_num_parts_high_word = grp_header.attrs["NumPart_Total_HighWord"]
box_size = grp_header.attrs["BoxSize"]
time = grp_header.attrs["Time"]
hubble_param = grp_header.attrs["HubbleParam"]
omega0 = grp_header.attrs["Omega0"]
omegaLambda = grp_header.attrs["OmegaLambda"]

# Combine the low- and high-words
tot_num_parts = tot_num_parts.astype(np.int64)
for i in range(6):
    tot_num_parts[i] += np.int64(tot_num_parts_high_word[i]) << 32
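
# Illustration (added, not part of the original script): Gadget stores each
# 64-bit particle count as two 32-bit words, so total = low_word + (high_word << 32),
# e.g. low_word = 705032704 and high_word = 2 give
# 705032704 + (2 << 32) = 9294967296 particles.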

tot_num_parts_swift = np.copy(tot_num_parts)
tot_num_parts_swift[2] += tot_num_parts_swift[3]
tot_num_parts_swift[3] = 0
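
# Note (added): the merged file does not get a PartType3 group. The type-3
# particles (usually a coarser boundary DM species in Gadget zoom ICs) are
# folded into PartType2: their count is added above and their data are copied
# into /PartType2 further below.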

# Some basic information
print("Reading", tot_num_parts, "particles from", num_files, "files.")

# Check whether there is a mass table
DM_mass = 0.0
mtable = grp_header.attrs.get("MassTable")
if mtable is not None:
    DM_mass = grp_header.attrs["MassTable"][1] / hubble_param
if DM_mass != 0.0:
    print("DM mass set to", DM_mass, "from the header mass table.")
else:
    print("Reading DM mass from the particles.")


# Create the empty file
output_file_name = sys.argv[2]
output_file = h5.File(output_file_name, "w-")


# Header
grp = output_file.create_group("/Header")
grp.attrs["NumFilesPerSnapshot"] = 1
grp.attrs["NumPart_Total"] = tot_num_parts_swift
grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
grp.attrs["NumPart_ThisFile"] = tot_num_parts_swift
grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
grp.attrs["BoxSize"] = box_size / hubble_param
grp.attrs["Flag_Entropy_ICs"] = 0
grp.attrs["Time"] = time
grp.attrs["HubbleParam"] = hubble_param
grp.attrs["Omega0"] = omega0
grp.attrs["OmegaLambda"] = omegaLambda


# Create the particle groups
if tot_num_parts[0] > 0:
    grp0 = output_file.create_group("/PartType0")
if tot_num_parts[1] > 0:
    grp1 = output_file.create_group("/PartType1")
if tot_num_parts_swift[2] > 0:
    grp2 = output_file.create_group("/PartType2")
if tot_num_parts[4] > 0:
    grp4 = output_file.create_group("/PartType4")
if tot_num_parts[5] > 0:
    grp5 = output_file.create_group("/PartType5")


# Helper function to create the datasets we need
def create_set(grp, name, size, dim, dtype):
    if dim == 1:
        grp.create_dataset(
            name,
            (size,),
            dtype=dtype,
            chunks=True,
            compression="gzip",
            compression_opts=gzip_level,
            shuffle=True,
            fletcher32=True,
            maxshape=(size,),
        )
    else:
        grp.create_dataset(
            name,
            (size, dim),
            dtype=dtype,
            chunks=True,
            compression="gzip",
            compression_opts=gzip_level,
            shuffle=True,
            fletcher32=True,
            maxshape=(size, dim),
        )
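
# Usage example (added for illustration): create_set(grp1, "Coordinates", n, 3, "d")
# allocates an (n, 3) float64 dataset with gzip compression, byte shuffling and
# Fletcher-32 checksums; dim == 1 gives a flat (n,) dataset instead.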

# Create the required datasets
if tot_num_parts[0] > 0:
    create_set(grp0, "Coordinates", tot_num_parts[0], 3, "d")
    create_set(grp0, "Velocities", tot_num_parts[0], 3, "f")
    create_set(grp0, "Masses", tot_num_parts[0], 1, "f")
    create_set(grp0, "ParticleIDs", tot_num_parts[0], 1, "l")
    create_set(grp0, "InternalEnergy", tot_num_parts[0], 1, "f")
    create_set(grp0, "SmoothingLength", tot_num_parts[0], 1, "f")
    create_set(grp0, "Potential", tot_num_parts[0], 1, "f")

if tot_num_parts[1] > 0:
    create_set(grp1, "Coordinates", tot_num_parts[1], 3, "d")
    create_set(grp1, "Velocities", tot_num_parts[1], 3, "f")
    create_set(grp1, "Masses", tot_num_parts[1], 1, "f")
    create_set(grp1, "ParticleIDs", tot_num_parts[1], 1, "l")
    create_set(grp1, "Potential", tot_num_parts[1], 1, "f")

if tot_num_parts_swift[2] > 0:
    create_set(grp2, "Coordinates", tot_num_parts_swift[2], 3, "d")
    create_set(grp2, "Velocities", tot_num_parts_swift[2], 3, "f")
    create_set(grp2, "Masses", tot_num_parts_swift[2], 1, "f")
    create_set(grp2, "ParticleIDs", tot_num_parts_swift[2], 1, "l")
    create_set(grp2, "Potential", tot_num_parts_swift[2], 1, "f")

if tot_num_parts[4] > 0:
    create_set(grp4, "Coordinates", tot_num_parts[4], 3, "d")
    create_set(grp4, "Velocities", tot_num_parts[4], 3, "f")
    create_set(grp4, "Masses", tot_num_parts[4], 1, "f")
    create_set(grp4, "ParticleIDs", tot_num_parts[4], 1, "l")
    create_set(grp4, "Potential", tot_num_parts[4], 1, "f")

if tot_num_parts[5] > 0:
    create_set(grp5, "Coordinates", tot_num_parts[5], 3, "d")
    create_set(grp5, "Velocities", tot_num_parts[5], 3, "f")
    create_set(grp5, "Masses", tot_num_parts[5], 1, "f")
    create_set(grp5, "ParticleIDs", tot_num_parts[5], 1, "l")
    create_set(grp5, "Potential", tot_num_parts[5], 1, "f")

# Heavy-lifting ahead. Leave a last message.
print("Datasets created in output file")


# Special case of the non-zero mass table
if DM_mass != 0.0:
    masses = np.ones(tot_num_parts[1], dtype=float) * DM_mass
    grp1["Masses"][:] = masses


# Cumulative number of particles read/written
cumul_parts = [0, 0, 0, 0, 0, 0]

# Loop over all the files that are part of the snapshots
for f in range(num_files):

    file_name = main_file_name + "." + str(f) + ".hdf5"
    file = h5.File(file_name, "r")
    file_header = file["/Header"]
    num_parts = file_header.attrs["NumPart_ThisFile"]

    print(
        "Copying data from file",
        f,
        "/",
        num_files,
        ": num_parts = [",
        num_parts[0],
        num_parts[1],
        num_parts[2],
        num_parts[3],
        num_parts[4],
        num_parts[5],
        "]",
    )
    sys.stdout.flush()

    # Helper function to copy data
    def copy_grp(name_new, name_old, ptype, correct_h):
        full_name_new = "/PartType" + str(ptype) + "/" + name_new
        full_name_old = "/PartType" + str(ptype) + "/" + name_old
        if correct_h:
            output_file[full_name_new][cumul_parts[ptype] : cumul_parts[ptype] + num_parts[ptype]] = file[full_name_old] / hubble_param
        else:
            output_file[full_name_new][cumul_parts[ptype] : cumul_parts[ptype] + num_parts[ptype]] = file[full_name_old]

    def copy_grp_same_name(name, ptype, correct_h):
        copy_grp(name, name, ptype, correct_h)

    def copy_grp_pt3(name, correct_h):
        full_name_new = "/PartType2/" + name
        full_name_old = "/PartType3/" + name
        if correct_h:
            output_file[full_name_new][cumul_parts[2] : cumul_parts[2] + num_parts[3]] = file[full_name_old] / hubble_param
        else:
            output_file[full_name_new][cumul_parts[2] : cumul_parts[2] + num_parts[3]] = file[full_name_old]

    if num_parts[0] > 0:
        copy_grp_same_name("Coordinates", 0, True)
        copy_grp_same_name("Velocities", 0, False)
        copy_grp_same_name("Masses", 0, False)
        copy_grp_same_name("ParticleIDs", 0, False)
        copy_grp_same_name("InternalEnergy", 0, False)
        copy_grp_same_name("SmoothingLength", 0, False)
        # Careful: I don't actually know the units of the Potential, so I don't
        # know if it should get the h-correction as well.
        copy_grp_same_name("Potential", 0, False)

    if num_parts[1] > 0:
        copy_grp_same_name("Coordinates", 1, True)
        copy_grp_same_name("Velocities", 1, False)
        copy_grp_same_name("ParticleIDs", 1, False)
        if DM_mass == 0.0:  # Do not overwrite values if there was a mass table
            copy_grp_same_name("Masses", 1, False)
        copy_grp_same_name("Potential", 1, False)

    if num_parts[2] > 0:
        copy_grp_same_name("Coordinates", 2, True)
        copy_grp_same_name("Velocities", 2, False)
        copy_grp_same_name("ParticleIDs", 2, False)
        copy_grp_same_name("Masses", 2, False)
        copy_grp_same_name("Potential", 2, False)

    # Need to update the type-2 counter already here, so the type-3 particles
    # get appended in the correct place.
    cumul_parts[2] += num_parts[2]

    if num_parts[3] > 0:
        copy_grp_pt3("Coordinates", True)
        copy_grp_pt3("Velocities", False)
        copy_grp_pt3("ParticleIDs", False)
        copy_grp_pt3("Masses", False)
        copy_grp_pt3("Potential", False)

    if num_parts[4] > 0:
        copy_grp_same_name("Coordinates", 4, True)
        copy_grp_same_name("Velocities", 4, False)
        copy_grp_same_name("Masses", 4, False)
        copy_grp_same_name("ParticleIDs", 4, False)
        copy_grp_same_name("Potential", 4, False)

    if num_parts[5] > 0:
        copy_grp_same_name("Coordinates", 5, True)
        copy_grp_same_name("Velocities", 5, False)
        copy_grp_same_name("Masses", 5, False)
        copy_grp_same_name("ParticleIDs", 5, False)
        copy_grp_same_name("Potential", 5, False)

    cumul_parts[0] += num_parts[0]
    cumul_parts[1] += num_parts[1]
    cumul_parts[2] += num_parts[3]  # Need to adjust for the added type-3 particles
    cumul_parts[4] += num_parts[4]
    cumul_parts[5] += num_parts[5]
    file.close()

print("All done! SWIFT is waiting.")

hist2d_auriga6.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 11:55:06 2022

@author: ben
"""

import h5py
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

directory = Path(r"/home/ben/sims/swiftsim/examples/zoom_tests/auriga6_halo7_8_10")
Nres = 128  # 233.45 for arj
Lbox = 100

snap_number = 7  # 7 for our tests, 0 for arj

fof_file = h5py.File(directory / f"fof_output_000{snap_number}.hdf5", "r")
file = h5py.File(directory / f'output_000{snap_number}.hdf5', 'r')

PartType1 = file["PartType1"]
highres_groupids = PartType1['FOFGroupIDs'][:]
highres_coordinates = np.array(PartType1['Coordinates'][:])
highres_masses = PartType1['Masses'][:]

total_highres_particles = len(highres_groupids)
unique_groups, particle_count = np.unique(highres_groupids, return_counts=True)
# print(f'Group IDs: {unique_groups}, Number in them: {particle_count}')

# All particles belonging to no group have group id 2147483647.
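# Added note: 2147483647 is the largest signed 32-bit integer (np.iinfo(np.int32).max),
# so the grouped particles could be selected with a mask like
#     in_group = highres_groupids != np.iinfo(np.int32).max
# (this mask is not used below; the script keeps all high-res particles).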

highres_x, highres_y, highres_z = highres_coordinates[:, 0], highres_coordinates[:, 1], highres_coordinates[:, 2]

groups = fof_file['Groups']

# for key in groups.keys():
#     print(key)

centres = groups['Centres'][:]
groupids = groups['GroupIDs'][:]
masses = groups['Masses'][:]
number_of_members = groups['Sizes'][:]

# table_width = 4

# separate_unique_counter = 0
# # for i in range(len(groupids)-1):
# for i in range(11):
#     if np.isin(groupids[i], unique_groups):
#         highres_members = particle_count[separate_unique_counter]
#         contamination = (1 - highres_members / number_of_members[i])
#         print(f'Group: {groupids[i]:{table_width}} | Mass: {masses[i]:{table_width + 1}.{table_width}} | Highres Members: {highres_members:{table_width + 1}} | Total Members: {number_of_members[i]:{table_width}} | Contamination: {contamination * 100:.{table_width}}%\n')
#         separate_unique_counter += 1

# The smallest group ID found among the high-res particles is taken as the main
# group; group IDs appear to start at 1, hence the -1 when indexing the group arrays.
main_group_origin = centres[np.min(unique_groups) - 1]
main_group_mass = masses[np.min(unique_groups) - 1]
origin_x, origin_y, origin_z = main_group_origin[0], main_group_origin[1], main_group_origin[2]
print(np.mean(highres_masses))

plt.title('Histogram of Auriga 6 Centre Levelmax 10')
plt.xlabel('x [Mpc]')
plt.ylabel('y [Mpc]')
# Select a slab of +- 0.5 Mpc in z around the group centre.
i = np.logical_and(highres_z > origin_z - 0.5, highres_z < origin_z + 0.5)
plt.hist2d(
    x=highres_x[i],
    y=highres_y[i],
    bins=256,
    range=[[origin_x - 0.5, origin_x + 0.5], [origin_y - 0.5, origin_y + 0.5]],
    weights=highres_masses[i],
    norm=LogNorm(),
)
plt.colorbar()
plt.show()

plot_cumulative_mass_profiles.py (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 10:50:06 2022

@author: ben
"""

import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path

directories = [
    (Path(r'/home/ben/sims/swiftsim/examples/zoom_tests/auriga6_halo7_8_9'), 'Levelmax 9'),
    (Path(r'/home/ben/sims/swiftsim/examples/zoom_tests/auriga6_halo7_8_10'), 'Levelmax 10'),
    (Path(r'/home/ben/sims/swiftsim/examples/zoom_tests/auriga6_halo_arj'), 'ARJ'),
]
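
# Each directory is expected to contain a cumulative_mass_profile.csv written by
# prepare_cumulative_mass_profiles.py, e.g. (values purely illustrative):
#     # Radii[R/R_group],Masses[1e10Msun]
#     0.010,0.004
#     0.012,0.007
#     ...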

for (directory, label) in directories:
    filename = directory / 'cumulative_mass_profile.csv'
    # unpack=True returns the two CSV columns (radii, masses) as separate arrays
    radii, masses = np.loadtxt(filename, dtype=float, delimiter=',', skiprows=1, unpack=True)
    plt.loglog(radii, masses, label=label)

# This could be extended to plot a resolution indicator for the different sims,
# but the group_radius would need to be saved and read in somehow.
# Nres = [128, 128, 233.45]
# Lbox = [100, 100, 100]
# group_radius = []

# for i in range(len(Nres)):
#     softening = Lbox[i] / Nres[i] / 30
#     plt.axvline(4 * softening / group_radius, linestyle='--', color='grey')

plt.xlabel(r'R / R$_\mathrm{group}$')
plt.ylabel(r'M [$10^{10} \mathrm{M}_\odot$]')
plt.title('Cumulative Mass Profile Auriga 6')
plt.legend()
plt.show()

prepare_cumulative_mass_profiles.py (new file, 123 lines)
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 8 11:16:06 2022

@author: ben
"""

import h5py
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt


def V(r):
    # Volume of a sphere of radius r
    return 4 * np.pi * r**3 / 3

directory = Path(r"/home/ben/sims/swiftsim/examples/zoom_tests/auriga6_halo7_8_9")
Nres = 128  # 233.45 for arj
Lbox = 100

snap_number = 7  # 7 for our tests, 0 for arj

fof_file = h5py.File(directory / f"fof_output_000{snap_number}.hdf5", "r")
file = h5py.File(directory / f'output_000{snap_number}.hdf5', 'r')

PartType1 = file["PartType1"]
highres_groupids = PartType1['FOFGroupIDs'][:]
highres_coordinates = PartType1['Coordinates'][:]
highres_masses = PartType1['Masses'][:]

total_highres_particles = len(highres_groupids)
unique_groups, particle_count = np.unique(highres_groupids, return_counts=True)
print(f'Group IDs: {unique_groups}, Number in them: {particle_count}')

# All particles belonging to no group have group id 2147483647.

# for key in fof_file.keys():
#     print(key)  # for finding all top-level entries in the FOF file

groups = fof_file['Groups']

# for key in groups.keys():
#     print(key)

centres = groups['Centres'][:]
groupids = groups['GroupIDs'][:]
masses = groups['Masses'][:]
number_of_members = groups['Sizes'][:]

table_width = 4

separate_unique_counter = 0
# for i in range(len(groupids)-1):
for i in range(11):
    if np.isin(groupids[i], unique_groups):
        highres_members = particle_count[separate_unique_counter]
        contamination = (1 - highres_members / number_of_members[i])
        print(f'Group: {groupids[i]:{table_width}} | Mass: {masses[i]:{table_width + 1}.{table_width}} | Highres Members: {highres_members:{table_width + 1}} | Total Members: {number_of_members[i]:{table_width}} | Contamination: {contamination * 100:.{table_width}}%\n')
        separate_unique_counter += 1

main_group_origin = centres[np.min(unique_groups) - 1]
main_group_mass = masses[np.min(unique_groups) - 1]
distances = []
# masses = []

for j in range(len(highres_groupids)):
    if highres_groupids[j] == np.min(unique_groups):
        distance = np.linalg.norm(main_group_origin - highres_coordinates[j])
        # mass = highres_masses[j]
        distances.append(distance)
        # masses.append(mass)

group_radius = np.max(np.array(distances))
normalised_distances = np.array(distances) / group_radius
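
# Vectorised alternative (added sketch, not used by the script): the member
# selection and distances above could also be computed as
#     in_main = highres_groupids == np.min(unique_groups)
#     distances = np.linalg.norm(highres_coordinates[in_main] - main_group_origin, axis=1)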

num_bins = 30
log_radial_bins = np.geomspace(0.01, 2, num_bins)

# Careful: this assumes that all highres particles have the same mass and that
# it's exclusively them in the main group, so keep an eye on its contamination
# and adjust accordingly!
particle_mass = highres_masses[0]

counts_in_radial_bins = []
for k in range(len(log_radial_bins) - 1):
    count = 0
    for member_distance in normalised_distances:
        if log_radial_bins[k] <= member_distance < log_radial_bins[k + 1]:
            count += 1
    # volume = V(log_radial_bins[k + 1] * group_radius) - V(log_radial_bins[k] * group_radius)
    # count /= volume
    counts_in_radial_bins.append(count)
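
# Equivalent one-liner (added sketch): np.histogram would give essentially the same
# counts (its last bin is closed on the right, unlike the loop above):
#     counts_in_radial_bins, _ = np.histogram(normalised_distances, bins=log_radial_bins)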

plot_log_radial_bins = log_radial_bins[0:len(log_radial_bins) - 1]  # there is one fewer bin than there are bin edges
masses_in_radial_bins = np.array(counts_in_radial_bins) * particle_mass

cumulative_mass_profile = np.cumsum(masses_in_radial_bins)

softening = Lbox / Nres / 30
# plt.axvline(4 * softening / group_radius, linestyle='--', color='grey')

save_data = np.column_stack((plot_log_radial_bins, cumulative_mass_profile))
np.savetxt(
    directory / 'cumulative_mass_profile.csv',
    save_data,
    delimiter=',',
    fmt='%.3f',
    header='Radii[R/R_group],Masses[1e10Msun]',
)


# Cumulative density profile -> should be practically the same at large masses;
# best plotted directly on top of each other.
# Look at plt.hist2d in x and y, each within the group centre +- 0.5 Mpc, throwing
# everything else away; the structure should be directly recognisable and comparable.
# Still needs to be divided by mass (or just count particles?).


# for i in range(10):
#     print(f'Group: {groupids[i]} | Mass: {masses[i]} | Size: {sizes[i]} | Centre: {centres[i]}\n')


# print(list(parameters.attrs.items()))


# Use list(Header.attrs.keys()) to list all attributes contained in the Header and
# .attrs.values() to see their values; even otherwise empty things like
# ICs_parameters can contain such attributes.

remap_particle_IDs.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import numpy as np

test_particle = np.array([127, 0, 1])
# maximum_test = np.array([127, 127, 127])  # this works, Nres - 1 is the maximum for (i, j, k)

Nres_1 = 128
Nres_2 = 256

# Lattice IDs are encoded as ID = (i * Nres + j) * Nres + k for indices (i, j, k).
particle_ID_1 = ((test_particle[0] * Nres_1) + test_particle[1]) * Nres_1 + test_particle[2]
particle_ID_2 = ((test_particle[0] * Nres_2) + test_particle[1]) * Nres_2 + test_particle[2]

print(particle_ID_1, particle_ID_2)


def get_highres_ID_from_lowres(particle_ID: int, Nres_low: int, Nres_high: int):
    # Decode (i, j, k) from the low-resolution ID, then re-encode on the high-res grid.
    particle_k = particle_ID % Nres_low
    particle_j = ((particle_ID - particle_k) // Nres_low) % Nres_low
    particle_i = ((particle_ID - particle_k) // Nres_low - particle_j) // Nres_low
    highres_ID = ((particle_i * Nres_high) + particle_j) * Nres_high + particle_k
    return int(highres_ID)


def get_lowres_ID_from_highres(particle_ID: int, Nres_low: int, Nres_high: int):
    # Decode (i, j, k) from the high-resolution ID, then re-encode on the low-res grid.
    particle_k = particle_ID % Nres_high
    particle_j = ((particle_ID - particle_k) // Nres_high) % Nres_high
    particle_i = ((particle_ID - particle_k) // Nres_high - particle_j) // Nres_high
    lowres_ID = ((particle_i * Nres_low) + particle_j) * Nres_low + particle_k
    return int(lowres_ID)

particle_ID_1_converted = get_highres_ID_from_lowres(particle_ID_1, Nres_1, Nres_2)
particle_ID_1_converted_converted = get_lowres_ID_from_highres(particle_ID_1_converted, Nres_1, Nres_2)

print(particle_ID_1, particle_ID_1_converted, particle_ID_1_converted_converted)
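
# Worked example (added for illustration): for (i, j, k) = (127, 0, 1) the low-res ID
# on a 128^3 grid is (127 * 128 + 0) * 128 + 1 = 2080769 and the corresponding
# high-res ID on a 256^3 grid is (127 * 256 + 0) * 256 + 1 = 8323073; mapping back
# recovers 2080769. A quick sanity check could be:
#     assert get_lowres_ID_from_highres(
#         get_highres_ID_from_lowres(2080769, 128, 256), 128, 256
#     ) == 2080769
# Note that this assumes the IC generator really assigns IDs with this (i, j, k)
# encoding and zero-based indices.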