summarize.py
# Author: M. Reichert
# Date  : 25.09.2024
import numpy as np
import h5py
import sys
import os
import optparse
import logging
import re
from tqdm import tqdm
from src_files.wreader import wreader
from src_files.template_class import template
from src_files.nucleus_multiple_class import nucleus_multiple
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt


#--- define options ----------------------------------------------------------
p = optparse.OptionParser()
p.add_option("-i", "--input", action="store", dest="rundir", default='.',
             help="Simulation directory to summarize (default: current directory)")
p.add_option("-o", "--output", action="store", dest="outdir", default='./summary.hdf5',
             help="Output path (default: ./summary.hdf5)")
p.add_option("-b", "--buf", action="store", dest="buffersize", default='500',
             help="Buffer size before writing to the file (default: 500)")
p.add_option("-f", "--force", action="store_true", dest="force", default=False,
             help="Force overwrite of the output file (default: False)")
p.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
             help="Enable verbose output. If enabled, output is written to 'debug.log' (default: False)")
p.add_option("--time_file", action="store", dest="time_file", default=None,
             help="File containing a time grid in seconds to map the data to (default: None)")
p.add_option("--time_final", action="store", dest="time_final", default=None,
             help="Final time for the time grid. Only used if '--time_file' "
                  "is not given (default: Read from template file)")
p.add_option("--time_initial", action="store", dest="time_initial", default=None,
             help="Initial time for the time grid. Only used if '--time_file' "
                  "is not given (default: 1e-5)")
p.add_option("--time_number", action="store", dest="time_number", default=None,
             help="Number of time steps for the time grid. Only used if '--time_file' "
                  "is not given (default: 200)")
p.add_option("--sunet_path", action="store", dest="sunet_path", default=None,
             help="Path to the sunet file (default: Read from template)")
p.add_option("--disable_mainout", action="store_true", dest="disable_mainout", default=False,
             help="Disable the summary of the mainout output (default: False)")
p.add_option("--disable_energy", action="store_true", dest="disable_energy", default=False,
             help="Disable the summary of the energy output (default: False)")
p.add_option("--disable_timescales", action="store_true", dest="disable_timescales", default=False,
             help="Disable the summary of the timescales output (default: False)")
p.add_option("--disable_tracked_nuclei", action="store_true", dest="disable_tracked_nuclei", default=False,
             help="Disable the summary of the tracked_nuclei output (default: False)")
p.add_option("--disable_nuloss", action="store_true", dest="disable_nuloss", default=False,
             help="Disable the summary of the nuloss output (default: False)")
p.add_option("--disable_snapshots", action="store_true", dest="disable_snapshots", default=False,
             help="Disable the summary of the snapshots output (default: False)")
p.add_option("-r", "--recursive", action="store_true", dest="recursive", default=False,
             help="Give a folder that contains subfolders with runs inside (default: False)")
p.set_usage("""

  Usage:   ./summarize.py -i <rundir>
  Example: ./summarize.py -i runs/test""")


#--- parse options -----------------------------------------------------------
(options,args) = p.parse_args()
run_path = options.rundir


# Number of runs to buffer in memory before flushing to the output file
buffsize = int(options.buffersize)


# Verbose mode or not?
if options.verbose:
    # Set up a logger to trace the progress
    logging.basicConfig(
        format="%(asctime)s - %(levelname)-10s - %(message)s",
        style="%",
        datefmt="%Y-%m-%d %H:%M",
        level=logging.DEBUG,
    )
    file_handler = logging.FileHandler("debug.log", mode="w", encoding="utf-8")
    logger = logging.getLogger(__name__)
    # Use the same style for the file handler
    file_handler.setFormatter(logging.Formatter(
        fmt="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M",
        style="%",
    ))
    logger.addHandler(file_handler)
else:
    # Set up a logger that only reports errors
    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger(__name__)

# Disable the logging in the terminal
logger.propagate = False
# Announce the start
logger.info(f"Started summarizing run at {run_path}.")


if options.recursive:
    # Find all folders in the run_path
    folders = [f for f in os.listdir(run_path) if os.path.isdir(os.path.join(run_path, f))]
    # Do it recursively
    logger.info(f"Recursively going through {len(folders)} folders.")
    # Create a folder at the output path
    fold = options.outdir.replace(".hdf5","")
    basepath = run_path
    if not os.path.exists(fold):
        os.makedirs(fold)
    else:
        # Raise an exception if the folder already exists and force is not set
        if not options.force:
            logger.error(f"Folder {fold} already exists. Exiting.")
            raise ValueError(f"Folder {fold} already exists. Exiting.")
        else:
            logger.warning(f"Folder {fold} already exists. Overwriting it.")
            os.system(f"rm -r {fold}")
            os.makedirs(fold)

# Loop over runs (if recursive)
looping = True

while looping:

    if options.recursive:
        # Get the next folder
        try:
            output_file = os.path.join(fold, folders[0]+".hdf5")
            run_path = os.path.join(basepath, folders.pop(0))
        except IndexError:
            looping = False
            break
    else:
        # Single run: use the given output path
        output_file = options.outdir

    # Get a list of all directories in the run_path. Ignore the "network_data" directory
    dirs = [d for d in os.listdir(run_path) if os.path.isdir(os.path.join(run_path, d)) and d != "network_data"]

    # Announce what was found
    logger.info(f"Found {len(dirs)} directories in {run_path}.")


    # Check if the file already exists
    if os.path.exists(output_file) and not options.force:
        logger.error(f"Output file {output_file} already exists. Exiting.")
        raise ValueError(f"Output file {output_file} already exists. Either delete it or use the -f option to overwrite.")
    elif os.path.exists(output_file) and options.force:
        logger.warning(f"Output file {output_file} already exists. Overwriting it.")
    f_hdf = h5py.File(output_file, 'w')


    # Check one run first to see which outputs exist
    # Find a run that didn't crash
    found = False
    for d in dirs:
        data = wreader(os.path.join(run_path, d))
        if not data.is_crashed:
            logger.info(f"Found {d} to look up the output.")
            found = True
            break
    if not found:
        # Raise an error if all runs have crashed
        if options.recursive:
            logger.warning("Detected folder where all runs have crashed!")
            continue
        logger.error("All runs have crashed!")
        raise ValueError("All runs have crashed!")

    # Get the template name (ends with .par)
    template_name = [f for f in os.listdir(os.path.join(run_path, d)) if f.endswith('.par')][0]
    t = template(os.path.join(run_path, d, template_name))

    # Check if the sunet is given in the template
    if options.sunet_path is not None:
        net_source = options.sunet_path
    else:
        if ("net_source" not in t.entries):
            # Raise an error if the net_source is not given
            raise ValueError("net_source not given in the template file.")
        else:
            # Get the net_source
            net_source = t["net_source"]

    # Read the net_source file
    logger.info(f"Using sunet file from {net_source}.")
    nuclei = np.loadtxt(net_source,dtype=str)
    nuclei_data = nucleus_multiple(nuclei)


    # Create the time grid
    if options.time_file is not None:
        logger.info(f"Using time grid from {options.time_file}.")
        mainout_time = np.loadtxt(options.time_file, dtype=float, unpack=True)
    else:
        if options.time_final is not None:
            final_time = float(options.time_final)
        else:
            # Generous fallback (~1 Gyr in seconds)
            final_time = 3.15e16
        # Make some reasonable time grid
        # Check if a termination criterion is given in the template
        if "termination_criterion" in t.entries:
            if t["termination_criterion"] == "1":
                if "final_time" in t.entries:
                    final_time = float(t["final_time"])

        if options.time_initial is not None:
            initial_time = float(options.time_initial)
        else:
            initial_time = 1e-5

        if options.time_number is not None:
            time_number = int(options.time_number)
        else:
            time_number = 200

        mainout_time = np.logspace(np.log10(initial_time), np.log10(final_time), time_number)
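
    # Every run is later interpolated onto this common time grid, so all runs
    # in the summary file share a single time axis.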


    # Possible entries in the data
    possible_entries = []
    if not options.disable_mainout:
        possible_entries.append("mainout")
    else:
        logger.info("Ignoring mainout output.")
    if not options.disable_energy:
        possible_entries.append("energy")
    else:
        logger.info("Ignoring energy output.")
    if not options.disable_timescales:
        possible_entries.append("timescales")
    else:
        logger.info("Ignoring timescales output.")
    if not options.disable_tracked_nuclei:
        possible_entries.append("tracked_nuclei")
    else:
        logger.info("Ignoring tracked_nuclei output.")
    if not options.disable_nuloss:
        possible_entries.append("nuloss")
    else:
        logger.info("Ignoring nuloss output.")


    entry_dict = {}
    for entry in possible_entries:
        # Check existence
        if data.check_existence(entry) != 0:
            entry_dict[entry] = {}
            for key in data[entry].keys():
                # Ignore the iteration, time, and nucleus metadata keys
                if ((key == "iteration") or (key == "time") or (key == "A") or (key == "Z") or (key == "N")
                        or (key == "names") or (key == "latex_names")):
                    continue
                # Ignore temperature, density, and radius for nuloss
                if (entry == "nuloss") and ((key == "temp") or (key == "dens") or (key == "rad")):
                    continue
                # Check the shape of the data and allocate a matching buffer
                if data[entry][key].ndim == 1:
                    entry_dict[entry][key] = np.zeros((len(mainout_time),buffsize))
                elif data[entry][key].ndim == 2:
                    entry_dict[entry][key] = np.zeros((len(mainout_time),data[entry][key].shape[1],buffsize))
                else:
                    raise ValueError(f"Invalid shape of {entry}/{key} in the data.")

            # Write the time grid already
            f_hdf[entry+"/time"] = mainout_time

            # Announce the detected entry
            logger.info(f"Found {entry} in the data.")

            # Check if it is the tracked nuclei and put A, Z, and N
            # if entry == "tracked_nuclei":
            #     f_hdf[entry+"/A"] = data[entry]["A"]
            #     f_hdf[entry+"/Z"] = data[entry]["Z"]
            #     f_hdf[entry+"/N"] = data[entry]["N"]
            #     f_hdf[entry+"/names"] = data[entry]["names"]


    # Take care of snapshots, check if they are custom or not
    if (data.check_existence("snapshot") != 0) and (not options.disable_snapshots):
        if ("custom_snapshots" in t.entries) or ("h_custom_snapshots" in t.entries):
            # Check if either ascii or hdf5 custom snapshots are given
            summarize_snapshots = False
            if ("custom_snapshots" in t.entries):
                summarize_snapshots = (t["custom_snapshots"].strip().lower() == "yes")
                # Debug statement
                if (t["custom_snapshots"].strip().lower() == "yes"):
                    logger.info("Found custom snapshots in ascii format.")
            if ("h_custom_snapshots" in t.entries):
                summarize_snapshots = (summarize_snapshots or (t["h_custom_snapshots"].strip().lower() == "yes"))
                # Debug statement
                if (t["h_custom_snapshots"].strip().lower() == "yes"):
                    logger.info("Found custom snapshots in hdf5 format.")

            # Read the times of the custom snapshots
            if summarize_snapshots:
                if "snapshot_file" not in t.entries:
                    raise ValueError("Invalid template file. snapshot_file not given in the template file.")
                snapshot_time = np.loadtxt(os.path.join(t["snapshot_file"]),dtype=float)
                # Convert from days to seconds
                snapshot_time *= 24*3600
                # Write the times already
                f_hdf["snapshots/time"] = snapshot_time
                # Write the A and Z data to the hdf5 file
                f_hdf["snapshots/A"] = nuclei_data.A
                f_hdf["snapshots/Z"] = nuclei_data.Z
                f_hdf["snapshots/N"] = nuclei_data.N
                # Create an array to buffer the data
                snapshot_data = np.zeros((len(nuclei),len(snapshot_time),buffsize))
                logger.info("Summarizing custom snapshots as well.")
        else:
            summarize_snapshots = False
    else:
        summarize_snapshots = False
        if not options.disable_snapshots:
            logger.info("Ignoring snapshots output.")


    # The final abundances (finab) should always be present
    finab_data_Y = np.zeros((len(nuclei),buffsize))
    finab_data_X = np.zeros((len(nuclei),buffsize))
    # Write the A and Z data to the hdf5 file already; this is the same for
    # all runs
    f_hdf["finab/A"] = nuclei_data.A
    f_hdf["finab/Z"] = nuclei_data.Z
    f_hdf["finab/N"] = nuclei_data.N

    # Prepare for efficient mapping of (A, Z) pairs to network indices
    dtype_nuclei = np.dtype([('A', int), ('Z', int)])
    nuclei_struct = np.array(list(zip(nuclei_data.A.astype(int), nuclei_data.Z.astype(int))), dtype=dtype_nuclei)
    # Sort the structured nuclei array and keep the sorting indices
    nuclei_sorted_idx = np.argsort(nuclei_struct)
    sorted_nuclei_struct = nuclei_struct[nuclei_sorted_idx]
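    # How the lookup below works (illustrative, hypothetical values):
    #   sunet order:       nuclei_struct = [(1,1), (4,2), (12,6), ...]
    #   a run's (A,Z) rows: e.g. [(12,6), (4,2)]
    # np.searchsorted on the sorted structured array gives the positions in the
    # sorted order; nuclei_sorted_idx then maps them back to the original sunet
    # order, so each (A, Z) row lands in its correct slot.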

    # Arrays that will hold the names and numeric ids of the runs
    run_names = np.zeros(buffsize,dtype="S100")
    run_ids = np.zeros(buffsize,dtype=int)
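
    # Buffering scheme: results of up to `buffsize` runs are collected in the
    # in-memory arrays above and flushed to resizable HDF5 datasets whenever a
    # buffer is full; whatever remains in the buffers is written after the loop.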

    # Loop over all directories
    ind = -1
    for counter, d in enumerate(tqdm(dirs)):
        # Load the data of this run
        data = wreader(os.path.join(run_path, d),silent=True)

        if data.is_crashed:
            logger.warning(f"Run {d} crashed. Skipping it.")
            continue

        # Increase the index
        ind += 1

        #### Finab ####
        ###############
        # Put the data into finab_data at the correct A and Z positions.
        # A is contained in data.finab["A"] and Z in data.finab["Z"]; these have to
        # match nuclei_data.A and nuclei_data.Z. All of them are 1D arrays.
        # Alternative (slower) way of getting the indices where a match occurs:
        # indices = [(np.where((nuclei_data.A.astype(int) == A) & (nuclei_data.Z.astype(int) == Z))[0][0])
        #            for A, Z in zip(data.finab["A"].astype(int), data.finab["Z"].astype(int))]

        # Check if the finab buffer is full and write it to the hdf5 file
        if ind % buffsize == 0 and ind != 0:
            # Check if the dataset is already created and if not create it
            if "finab/Y" not in f_hdf:
                f_hdf.create_dataset("finab/Y", (len(nuclei),ind+1), maxshape=(len(nuclei),None))
                f_hdf.create_dataset("finab/X", (len(nuclei),ind+1), maxshape=(len(nuclei),None))
            # If necessary, extend the dataset
            if ind > buffsize:
                f_hdf["finab/Y"].resize((len(nuclei),ind+1))
                f_hdf["finab/X"].resize((len(nuclei),ind+1))
            # Write the buffered data to the hdf5 file
            f_hdf["finab/Y"][:,ind-buffsize:ind] = finab_data_Y
            f_hdf["finab/X"][:,ind-buffsize:ind] = finab_data_X

        # Convert the finab data to a structured array
        finab_struct = np.array(list(zip(data.finab["A"].astype(int), data.finab["Z"].astype(int))), dtype=dtype_nuclei)
        # Use np.searchsorted to find the matching indices
        matching_idx = np.searchsorted(sorted_nuclei_struct, finab_struct)
        # Recover the original indices from the sorted order
        indices = nuclei_sorted_idx[matching_idx]

        # Reset the buffer column first
        finab_data_Y[:,ind % buffsize] = 0
        finab_data_X[:,ind % buffsize] = 0
        finab_data_Y[indices,ind % buffsize] = data.finab["Y"][:]
        finab_data_X[indices,ind % buffsize] = data.finab["X"][:]

        #### Run name ####
        ##################

        # Check if the run_names buffer is full and write it to the hdf5 file
        if ind % buffsize == 0 and ind != 0:
            # Check if the dataset is already created and if not create it
            if "run_names" not in f_hdf:
                f_hdf.create_dataset("run_names", (ind+1,), maxshape=(None,),dtype="S100")
            # If necessary, extend the dataset
            if ind > buffsize:
                f_hdf["run_names"].resize((ind+1,))
            # Write the buffered data to the hdf5 file
            f_hdf["run_names"][ind-buffsize:ind] = run_names

        # Save the run name
        run_names[ind % buffsize] = d


        # Extract a numeric run id from the run name (last number in the name, -1 if none)
        # Check if the run_ids buffer is full and write it to the hdf5 file
        if ind % buffsize == 0 and ind != 0:
            # Check if the dataset is already created and if not create it
            if "run_ids" not in f_hdf:
                f_hdf.create_dataset("run_ids", (ind+1,), maxshape=(None,),dtype=int)
            # If necessary, extend the dataset
            if ind > buffsize:
                f_hdf["run_ids"].resize((ind+1,))
            # Write the buffered data to the hdf5 file
            f_hdf["run_ids"][ind-buffsize:ind] = run_ids

        try:
            run_ids[ind % buffsize] = int(re.findall(r'\d+', d)[-1])
        except Exception:
            run_ids[ind % buffsize] = -1

        #### Custom snapshots ####
        ##########################

        if summarize_snapshots:
            # Get the times of this run's snapshots
            snapstime = data.snapshot_time

            # Get the indices of the entries that agree with snapshot_time
            indexes = np.searchsorted(snapstime, snapshot_time)

            # In case snapstime is shorter than snapshot_time, build a mask of the missing times
            if (len(snapstime) < len(snapshot_time)):
                mask = np.zeros(len(snapshot_time),dtype=bool)
                # Mark the times that deviate by more than 1e-5
                mask[np.min(np.abs(snapstime - snapshot_time[:,np.newaxis]),axis=1) > 1e-5] = True
            else:
                mask = None
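            # Runs that stopped before reaching all requested snapshot times get
            # NaN at the missing times (the mask is applied further below).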


            # Convert the snapshot composition to a structured array
            finab_struct = np.array(list(zip(data.A.astype(int), data.Z.astype(int))), dtype=dtype_nuclei)
            # Use np.searchsorted to find the matching indices
            matching_idx = np.searchsorted(sorted_nuclei_struct, finab_struct)
            # Recover the original indices from the sorted order
            indices_nuclei = nuclei_sorted_idx[matching_idx]

            # Check if the snapshot buffer is full and write it to the hdf5 file
            if ind % buffsize == 0 and ind != 0:
                # Check if the datasets are already created and if not create them
                if "snapshots/Y" not in f_hdf:
                    f_hdf.create_dataset("snapshots/Y", (len(nuclei),len(snapshot_time),ind+1), maxshape=(len(nuclei),len(snapshot_time),None))
                    f_hdf.create_dataset("snapshots/X", (len(nuclei),len(snapshot_time),ind+1), maxshape=(len(nuclei),len(snapshot_time),None))
                # If necessary, extend the datasets
                if ind > buffsize:
                    f_hdf["snapshots/Y"].resize((len(nuclei),len(snapshot_time),ind+1))
                    f_hdf["snapshots/X"].resize((len(nuclei),len(snapshot_time),ind+1))
                # Write the buffered data to the hdf5 file
                f_hdf["snapshots/Y"][:,:,ind-buffsize:ind] = snapshot_data
                f_hdf["snapshots/X"][:,:,ind-buffsize:ind] = snapshot_data*nuclei_data.A[:,np.newaxis,np.newaxis]

            # Store the abundances of this run
            snapshot_data[:,:,ind % buffsize] = 0
            snapshot_data[indices_nuclei,:,ind % buffsize] = data.Y[indexes][:, :].T

            # Set entries to nan if necessary (e.g., if the run does not contain a time at the beginning or end)
            if mask is not None:
                snapshot_data[:,mask,ind % buffsize] = np.nan



        #### Other entries ####
        #######################

        for entry in entry_dict.keys():
            # Check if the buffers are full and write them to the hdf5 file
            if ind % buffsize == 0 and ind != 0:
                for key in entry_dict[entry].keys():
                    # Check the dimensions
                    dim = entry_dict[entry][key].ndim
                    # Check if the dataset is already created and if not create it
                    if dim == 2:
                        if entry+"/"+key not in f_hdf:
                            f_hdf.create_dataset(entry+"/"+key, (len(mainout_time),ind+1), maxshape=(len(mainout_time),None))
                        # If necessary, extend the dataset
                        if ind > buffsize:
                            f_hdf[entry+"/"+key].resize((len(mainout_time),ind+1))
                        # Write the buffered data to the hdf5 file
                        f_hdf[entry+"/"+key][:,ind-buffsize:ind] = entry_dict[entry][key]
                    elif dim == 3:
                        if entry+"/"+key not in f_hdf:
                            f_hdf.create_dataset(entry+"/"+key, (len(mainout_time),data[entry][key].shape[1],ind+1), maxshape=(len(mainout_time),data[entry][key].shape[1],None))
                        # If necessary, extend the dataset
                        if ind > buffsize:
                            f_hdf[entry+"/"+key].resize((len(mainout_time),data[entry][key].shape[1],ind+1))
                        # Write the buffered data to the hdf5 file
                        f_hdf[entry+"/"+key][:,:,ind-buffsize:ind] = entry_dict[entry][key]

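            # Interpolate this run's output onto the common mainout_time grid;
            # times outside the run's own time range become NaN (fill_value).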
            # Put this run's data into the buffer
            for key in entry_dict[entry].keys():
                value = interp1d(data[entry]["time"],data[entry][key],
                                 bounds_error = False, fill_value = np.nan, axis = 0)(mainout_time)
                if entry_dict[entry][key].ndim == 2:
                    entry_dict[entry][key][:,ind % buffsize] = value
                elif entry_dict[entry][key].ndim == 3:
                    entry_dict[entry][key][:,:,ind % buffsize] = value



    # Announce that the loop is done
    logger.info("Finished looping over all directories, writing the last data to the hdf5 file.")


    # Write the remaining buffered data to the hdf5 file
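    # Only the part of each buffer that was not flushed inside the loop is
    # written below; the datasets are created here first in case fewer than
    # buffsize runs were processed.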

    #### Finab ####
    ###############

    if "finab/Y" not in f_hdf:
        f_hdf.create_dataset("finab/Y", (len(nuclei),ind+1), maxshape=(len(nuclei),None))
        f_hdf.create_dataset("finab/X", (len(nuclei),ind+1), maxshape=(len(nuclei),None))
    else:
        f_hdf["finab/Y"].resize((len(nuclei),ind+1))
        f_hdf["finab/X"].resize((len(nuclei),ind+1))
    # Write the missing entries
    if ind>buffsize:
        f_hdf["finab/Y"][:,ind-buffsize+1:ind+1] = finab_data_Y[:,:buffsize]
        f_hdf["finab/X"][:,ind-buffsize+1:ind+1] = finab_data_X[:,:buffsize]
    else:
        f_hdf["finab/Y"][:,:ind+1] = finab_data_Y[:,:ind+1]
        f_hdf["finab/X"][:,:ind+1] = finab_data_X[:,:ind+1]


    #### Run name ####
    ##################

    if "run_names" not in f_hdf:
        f_hdf.create_dataset("run_names", (ind+1,), maxshape=(None,),dtype="S100")
    else:
        f_hdf["run_names"].resize((ind+1,))
    # Write the missing entries
    if ind>buffsize:
        f_hdf["run_names"][ind-buffsize+1:ind+1] = run_names[:buffsize]
    else:
        f_hdf["run_names"][:ind+1] = run_names[:ind+1]


    if "run_ids" not in f_hdf:
        f_hdf.create_dataset("run_ids", (ind+1,), maxshape=(None,),dtype=int)
    else:
        f_hdf["run_ids"].resize((ind+1,))
    # Write the missing entries
    if ind>buffsize:
        f_hdf["run_ids"][ind-buffsize+1:ind+1] = run_ids[:buffsize]
    else:
        f_hdf["run_ids"][:ind+1] = run_ids[:ind+1]


    #### Custom snapshots ####
    ##########################

    if summarize_snapshots:
        if "snapshots/Y" not in f_hdf:
            f_hdf.create_dataset("snapshots/Y", (len(nuclei),len(snapshot_time),ind+1), maxshape=(len(nuclei),len(snapshot_time),None))
            f_hdf.create_dataset("snapshots/X", (len(nuclei),len(snapshot_time),ind+1), maxshape=(len(nuclei),len(snapshot_time),None))
        else:
            f_hdf["snapshots/Y"].resize((len(nuclei),len(snapshot_time),ind+1))
            f_hdf["snapshots/X"].resize((len(nuclei),len(snapshot_time),ind+1))
        # Write the missing entries
        if ind>buffsize:
            f_hdf["snapshots/Y"][:,:,ind-buffsize+1:ind+1] = snapshot_data[:,:,:buffsize]
            f_hdf["snapshots/X"][:,:,ind-buffsize+1:ind+1] = snapshot_data[:,:,:buffsize]*nuclei_data.A[:,np.newaxis,np.newaxis]
        else:
            f_hdf["snapshots/Y"][:,:,:ind+1] = snapshot_data[:,:,:ind+1]
            f_hdf["snapshots/X"][:,:,:ind+1] = snapshot_data[:,:,:ind+1]*nuclei_data.A[:,np.newaxis,np.newaxis]


    #### Other entries ####
    #######################

    for entry in entry_dict.keys():
        for key in entry_dict[entry].keys():
            # Check the dimensions
            dim = entry_dict[entry][key].ndim

            if dim == 2:
                if entry+"/"+key not in f_hdf:
                    f_hdf.create_dataset(entry+"/"+key, (len(mainout_time),ind+1), maxshape=(len(mainout_time),None))
                else:
                    f_hdf[entry+"/"+key].resize((len(mainout_time),ind+1))
                # Write the missing entries
                if ind>buffsize:
                    f_hdf[entry+"/"+key][:,ind-buffsize+1:ind+1] = entry_dict[entry][key][:,:buffsize]
                else:
                    f_hdf[entry+"/"+key][:,:ind+1] = entry_dict[entry][key][:,:ind+1]
            elif dim == 3:
                if entry+"/"+key not in f_hdf:
                    f_hdf.create_dataset(entry+"/"+key, (len(mainout_time),data[entry][key].shape[1],ind+1), maxshape=(len(mainout_time),data[entry][key].shape[1],None))
                else:
                    f_hdf[entry+"/"+key].resize((len(mainout_time),data[entry][key].shape[1],ind+1))
                # Write the missing entries
                if ind>buffsize:
                    f_hdf[entry+"/"+key][:,:,ind-buffsize+1:ind+1] = entry_dict[entry][key][:,:,:buffsize]
                else:
                    f_hdf[entry+"/"+key][:,:,:ind+1] = entry_dict[entry][key][:,:,:ind+1]



    # Announce that this run is done
    logger.info(f"Finished summarizing run at {run_path}.")

    # Close the hdf5 file
    f_hdf.close()

    if not options.recursive:
        looping = False

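
# A minimal sketch of how the resulting summary file could be read back with
# h5py (illustrative only; the dataset names follow the groups created above):
#
#   import h5py
#   import numpy as np
#
#   with h5py.File("summary.hdf5", "r") as f:
#       A     = f["finab/A"][:]        # mass numbers of the network nuclei
#       Z     = f["finab/Z"][:]        # proton numbers
#       X     = f["finab/X"][:, :]     # final mass fractions, shape (nuclei, runs)
#       names = f["run_names"][:]      # run directory names (byte strings)
#   # e.g., mean final mass fraction per nucleus over all runs:
#   # X_mean = np.nanmean(X, axis=1)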