mirror of
https://github.com/glatterf42/sims_python_files.git
synced 2024-09-19 16:13:45 +02:00
Extract methods
This commit should not change any runtime behavior: it only reorders existing code and extracts parts of it into methods.
This commit is contained in:
parent
05e6be9921
commit
5da3f1ad26
1 changed file with 130 additions and 124 deletions
|
@ -41,21 +41,34 @@ def find_coordinates_to_move(minimum, maximum, ratio, x_offset, y_offset, z_offs
|
||||||
return coordinates_to_move
|
return coordinates_to_move
|
||||||
|
|
||||||
|
|
||||||
# file = h5py.File(directory / "auriga6_halo7_8_9.hdf5", "r")
|
@numba.njit()
def find_move_candidates(original_data, minimum, maximum, lower_limit_top, upper_limit_bottom):
    """Collect particles lying in the outer shell of the box.

    A particle qualifies when any of its three coordinates falls either in
    the lower band [minimum, upper_limit_bottom] or in the upper band
    [lower_limit_top, maximum].

    Parameters are the particle array (rows: x, y, z, ...) followed by the
    box extremes and the two inner band limits.

    Returns a list of the qualifying particle rows.
    """
    candidates = []
    print("finding move candidates")
    for row in original_data:
        pos = row[0:3]
        # Check each axis against the low band near `minimum` ...
        in_low_band = (
            minimum <= pos[0] <= upper_limit_bottom
            or minimum <= pos[1] <= upper_limit_bottom
            or minimum <= pos[2] <= upper_limit_bottom
        )
        # ... and against the high band near `maximum`.
        in_high_band = (
            lower_limit_top <= pos[0] <= maximum
            or lower_limit_top <= pos[1] <= maximum
            or lower_limit_top <= pos[2] <= maximum
        )
        if in_low_band or in_high_band:
            candidates.append(row)
            # print(pos)
    return candidates
|
||||||
|
|
||||||
for filename in sys.argv[1:]:
|
|
||||||
filename = Path(filename)
|
def read_file(filename):
|
||||||
print(filename)
|
|
||||||
file = h5py.File(str(filename), "r")
|
file = h5py.File(str(filename), "r")
|
||||||
Header = file['Header']
|
Header = file['Header']
|
||||||
|
|
||||||
highres_coordinates = file["PartType1"]["Coordinates"][:] # for cdm particles
|
highres_coordinates = file["PartType1"]["Coordinates"][:] # for cdm particles
|
||||||
highres_names = file["PartType1"]["ParticleIDs"][:]
|
highres_names = file["PartType1"]["ParticleIDs"][:]
|
||||||
highres_velocities = file["PartType1"]["Velocities"][:]
|
highres_velocities = file["PartType1"]["Velocities"][:]
|
||||||
highres_masses = file['PartType1']['Masses'][:]
|
highres_masses = file['PartType1']['Masses'][:]
|
||||||
highres_group_ids = file['PartType1']['FOFGroupIDs'][:]
|
highres_group_ids = file['PartType1']['FOFGroupIDs'][:]
|
||||||
highres_absolute_velo = np.sqrt(np.sum(highres_velocities ** 2, axis=1))
|
highres_absolute_velo = np.sqrt(np.sum(highres_velocities ** 2, axis=1))
|
||||||
|
|
||||||
if "PartType2" in file:
|
if "PartType2" in file:
|
||||||
lowres_coordinates = file["PartType2"]["Coordinates"][:] # for cdm particles
|
lowres_coordinates = file["PartType2"]["Coordinates"][:] # for cdm particles
|
||||||
lowres_names = file["PartType2"]["ParticleIDs"][:]
|
lowres_names = file["PartType2"]["ParticleIDs"][:]
|
||||||
|
@ -77,7 +90,7 @@ for filename in sys.argv[1:]:
|
||||||
masses = highres_masses
|
masses = highres_masses
|
||||||
group_ids = highres_group_ids
|
group_ids = highres_group_ids
|
||||||
absolute_velo = highres_absolute_velo
|
absolute_velo = highres_absolute_velo
|
||||||
|
file.close()
|
||||||
# if "auriga" in str(filename):
|
# if "auriga" in str(filename):
|
||||||
# original_coordinates /= 1000
|
# original_coordinates /= 1000
|
||||||
# print(original_coordinates.mean())
|
# print(original_coordinates.mean())
|
||||||
|
@ -103,8 +116,18 @@ for filename in sys.argv[1:]:
|
||||||
]).T
|
]).T
|
||||||
print(original_data.shape)
|
print(original_data.shape)
|
||||||
assert (original_coordinates == original_data[::, 0:3]).all()
|
assert (original_coordinates == original_data[::, 0:3]).all()
|
||||||
|
return Header, highres_names, original_data
|
||||||
|
|
||||||
boundaries = Header.attrs['BoxSize'] # BoxLength for e5 boxes depends on Nres, 2.36438 for 256, 4.72876 for 512.
|
|
||||||
|
# file = h5py.File(directory / "auriga6_halo7_8_9.hdf5", "r")
|
||||||
|
def main():
|
||||||
|
for filename in sys.argv[1:]:
|
||||||
|
filename = Path(filename)
|
||||||
|
print(filename)
|
||||||
|
Header, highres_names, original_data = read_file(filename)
|
||||||
|
|
||||||
|
boundaries = Header.attrs[
|
||||||
|
'BoxSize'] # BoxLength for e5 boxes depends on Nres, 2.36438 for 256, 4.72876 for 512.
|
||||||
print(boundaries, len(highres_names))
|
print(boundaries, len(highres_names))
|
||||||
if not boundaries.shape:
|
if not boundaries.shape:
|
||||||
boundaries = np.array([boundaries] * 3)
|
boundaries = np.array([boundaries] * 3)
|
||||||
|
@ -123,27 +146,7 @@ for filename in sys.argv[1:]:
|
||||||
|
|
||||||
print("Find candidates to move...")
|
print("Find candidates to move...")
|
||||||
|
|
||||||
|
move_candidates = find_move_candidates(original_data, minimum, maximum, lower_limit_top, upper_limit_bottom)
|
||||||
@numba.njit()
|
|
||||||
def find_move_candidates():
|
|
||||||
move_candidates = []
|
|
||||||
print("finding move candidates")
|
|
||||||
for particle in original_data:
|
|
||||||
point = particle[0:3]
|
|
||||||
if (
|
|
||||||
minimum <= point[0] <= upper_limit_bottom or
|
|
||||||
lower_limit_top <= point[0] <= maximum or
|
|
||||||
minimum <= point[1] <= upper_limit_bottom or
|
|
||||||
lower_limit_top <= point[1] <= maximum or
|
|
||||||
minimum <= point[2] <= upper_limit_bottom or
|
|
||||||
lower_limit_top <= point[2] <= maximum
|
|
||||||
):
|
|
||||||
move_candidates.append(particle)
|
|
||||||
# print(point)
|
|
||||||
return move_candidates
|
|
||||||
|
|
||||||
|
|
||||||
move_candidates = find_move_candidates()
|
|
||||||
move_candidates = np.array(move_candidates)
|
move_candidates = np.array(move_candidates)
|
||||||
|
|
||||||
print("...done.")
|
print("...done.")
|
||||||
|
@ -223,4 +226,7 @@ for filename in sys.argv[1:]:
|
||||||
delimiter=",",
|
delimiter=",",
|
||||||
fmt="%.3f",
|
fmt="%.3f",
|
||||||
header="num,x,y,z,name,vx,vy,vz,masse,groupid,v,density,density_alt")
|
header="num,x,y,z,name,vx,vy,vz,masse,groupid,v,density,density_alt")
|
||||||
file.close()
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|
Loading…
Reference in a new issue