import os
from collections import defaultdict

import numpy as np

from yt.frontends.chombo.io import parse_orion_sinks
from yt.funcs import mylog
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import BaseIOHandler


def _remove_raw(all_fields, raw_fields):
    """Return ``all_fields`` minus every entry of ``raw_fields``.

    Order is not preserved (filtering goes through a set), matching the
    historical behavior relied upon by ``_read_fluid_selection``.
    """
    centered_fields = set(all_fields)
    for raw in raw_fields:
        centered_fields.discard(raw)
    return list(centered_fields)


class IOHandlerBoxlib(BaseIOHandler):
    """I/O handler for native BoxLib/AMReX plotfile data."""

    _dataset_type = "boxlib_native"

    def __init__(self, ds, *args, **kwargs):
        super().__init__(ds)

    def _read_fluid_selection(self, chunks, selector, fields, size):
        """Read the selected cells of ``fields`` from disk.

        Parameters
        ----------
        chunks : iterable
            Chunks of grids to read; consumed once and listed.
        selector :
            Geometric selector applied per grid via ``g.select``.
        fields : list of (ftype, fname)
            Only the "boxlib" and "raw" field types are supported.
        size : int
            Total number of selected cells, used to preallocate output.

        Returns
        -------
        dict
            Maps each field tuple to a float64 array of the selected
            values; nodal "raw" fields get shape ``(size, num_nodes)``.

        Raises
        ------
        NotImplementedError
            If any requested field type is not "boxlib" or "raw".
        """
        chunks = list(chunks)
        if any(not (ftype == "boxlib" or ftype == "raw") for ftype, fname in fields):
            raise NotImplementedError
        rv = {}
        raw_fields = []
        for field in fields:
            if field[0] == "raw":
                # Nodal "raw" fields store one value per node the cell touches.
                nodal_flag = self.ds.nodal_flags[field[1]]
                num_nodes = 2 ** sum(nodal_flag)
                rv[field] = np.empty((size, num_nodes), dtype="float64")
                raw_fields.append(field)
            else:
                rv[field] = np.empty(size, dtype="float64")
        centered_fields = _remove_raw(fields, raw_fields)
        ng = sum(len(c.objs) for c in chunks)
        mylog.debug(
            "Reading %s cells of %s fields in %s grids",
            size,
            [f2 for f1, f2 in fields],
            ng,
        )
        ind = 0
        for chunk in chunks:
            data = self._read_chunk_data(chunk, centered_fields)
            for g in chunk.objs:
                for field in fields:
                    if field in centered_fields:
                        ds = data[g.id].pop(field)
                    else:
                        ds = self._read_raw_field(g, field)
                    nd = g.select(selector, ds, rv[field], ind)
                # Every field yields the same count for a grid, so the count
                # from the last field advances the shared output cursor.
                ind += nd
                data.pop(g.id)
        return rv

    def _read_raw_field(self, grid, field):
        """Read one raw (possibly nodal) field for a single grid.

        Ghost zones stored on disk are stripped before returning.
        """
        field_name = field[1]
        base_dir = self.ds.index.raw_file

        nghost = self.ds.index.raw_field_nghost[field_name]
        box_list = self.ds.index.raw_field_map[field_name][0]
        fn_list = self.ds.index.raw_field_map[field_name][1]
        offset_list = self.ds.index.raw_field_map[field_name][2]

        lev = grid.Level
        filename = base_dir + "Level_%d/" % lev + fn_list[grid.id]
        offset = offset_list[grid.id]
        box = box_list[grid.id]

        # On-disk extent includes the ghost zones on both sides.
        lo = box[0] - nghost
        hi = box[1] + nghost
        shape = hi - lo + 1
        with open(filename, "rb") as f:
            f.seek(offset)
            f.readline()  # always skip the first line
            # np.product was removed in NumPy 2.0; np.prod is the same thing.
            arr = np.fromfile(f, "float64", np.prod(shape))
            arr = arr.reshape(shape, order="F")
        # Strip ghost zones on each dimension that has any.
        return arr[
            tuple(
                slice(None) if (nghost[dim] == 0) else slice(nghost[dim], -nghost[dim])
                for dim in range(self.ds.dimensionality)
            )
        ]

    def _read_chunk_data(self, chunk, fields):
        """Read all cell-centered ``fields`` for every grid in ``chunk``.

        Grids are grouped by on-disk file and sorted by offset so the
        access pattern stays sequential.  Returns a nested dict of the
        form ``{grid.id: {field: ndarray}}``.
        """
        data = {}
        grids_by_file = defaultdict(list)
        if len(chunk.objs) == 0:
            return data
        for g in chunk.objs:
            if g.filename is None:
                continue
            grids_by_file[g.filename].append(g)
        dtype = self.ds.index._dtype
        bpr = dtype.itemsize
        for filename in grids_by_file:
            grids = grids_by_file[filename]
            grids.sort(key=lambda a: a._offset)
            # Context manager guarantees the handle is closed; the previous
            # bare open() leaked one descriptor per file per chunk read.
            with open(filename, "rb") as f:
                for grid in grids:
                    data[grid.id] = {}
                    local_offset = grid._get_offset(f) - f.tell()
                    count = grid.ActiveDimensions.prod()
                    size = count * bpr
                    for field in self.ds.index.field_order:
                        if field in fields:
                            # We read it ...
                            f.seek(local_offset, os.SEEK_CUR)
                            v = np.fromfile(f, dtype=dtype, count=count)
                            v = v.reshape(grid.ActiveDimensions, order="F")
                            data[grid.id][field] = v
                            local_offset = 0
                        else:
                            # Accumulate a skip instead of seeking per field.
                            local_offset += size
        return data

    def _read_particle_coords(self, chunks, ptf):
        # selector=None makes _read_particle_fields yield raw coordinates.
        yield from self._read_particle_fields(chunks, ptf, None)

    def _read_particle_fields(self, chunks, ptf, selector):
        """Yield particle data for the fields in ``ptf``.

        With ``selector is None`` (called from ``_read_particle_coords``)
        this yields ``(ptype, (x, y, z))`` per grid; otherwise it yields
        ``((ptype, field), values)`` for the particles the selector keeps.
        """
        for chunk in chunks:  # These should be organized by grid filename
            for g in chunk.objs:
                for ptype, field_list in sorted(ptf.items()):
                    npart = g._pdata[ptype]["NumberOfParticles"]
                    if npart == 0:
                        continue

                    fn = g._pdata[ptype]["particle_filename"]
                    offset = g._pdata[ptype]["offset"]
                    pheader = self.ds.index.particle_headers[ptype]

                    with open(fn, "rb") as f:
                        # read in the position fields for selection; the
                        # integer block precedes the real block on disk.
                        f.seek(offset + pheader.particle_int_dtype.itemsize * npart)
                        rdata = np.fromfile(
                            f, pheader.real_type, pheader.num_real * npart
                        )
                        x = np.asarray(rdata[0 :: pheader.num_real], dtype=np.float64)
                        y = np.asarray(rdata[1 :: pheader.num_real], dtype=np.float64)
                        if g.ds.dimensionality == 2:
                            # 2D datasets have no stored z; place particles at
                            # the vertical midpoint of the grid.
                            z = np.ones_like(y)
                            z *= 0.5 * (g.LeftEdge[2] + g.RightEdge[2])
                        else:
                            z = np.asarray(
                                rdata[2 :: pheader.num_real], dtype=np.float64
                            )

                        if selector is None:
                            # This only ever happens if the call is made from
                            # _read_particle_coords.
                            yield ptype, (x, y, z)
                            continue
                        mask = selector.select_points(x, y, z, 0.0)
                        if mask is None:
                            continue
                        # These name lists are identical for every requested
                        # field, so build them once per particle type instead
                        # of once per field.
                        int_fnames = [fname for _, fname in pheader.known_int_fields]
                        real_fnames = [fname for _, fname in pheader.known_real_fields]
                        for field in field_list:
                            # handle the case that this is an integer field
                            if field in int_fnames:
                                ind = int_fnames.index(field)
                                f.seek(offset)
                                idata = np.fromfile(
                                    f, pheader.int_type, pheader.num_int * npart
                                )
                                data = np.asarray(
                                    idata[ind :: pheader.num_int], dtype=np.float64
                                )
                                yield (ptype, field), data[mask].flatten()

                            # handle case that this is a real field
                            if field in real_fnames:
                                ind = real_fnames.index(field)
                                data = np.asarray(
                                    rdata[ind :: pheader.num_real], dtype=np.float64
                                )
                                yield (ptype, field), data[mask].flatten()


class IOHandlerOrion(IOHandlerBoxlib):
    """Orion-native handler: BoxLib grids plus text-file star/sink particles."""

    _dataset_type = "orion_native"

    # Lazily-resolved path of the particle text file.
    _particle_filename = None

    @property
    def particle_filename(self):
        """Path to the particle file, preferring StarParticles over
        SinkParticles; resolved once and cached."""
        if self._particle_filename is None:
            fn = self.ds.output_dir + "/StarParticles"
            if not os.path.exists(fn):
                fn = self.ds.output_dir + "/SinkParticles"
            self._particle_filename = fn
        return self._particle_filename

    # Lazily-parsed mapping of particle field name -> column index.
    _particle_field_index = None

    @property
    def particle_field_index(self):
        """Field-name -> column-index map, parsed from the particle file on
        first access and cached (the sentinel exists for exactly this)."""
        if self._particle_field_index is None:
            self._particle_field_index = parse_orion_sinks(self.particle_filename)
        return self._particle_field_index

    def _read_particle_selection(self, chunks, selector, fields):
        """Read particle ``fields`` for every grid picked by ``selector``.

        Returns a dict mapping each (ftype, fname) to a 1D float array.
        """
        rv = {}
        chunks = list(chunks)

        if isinstance(selector, GridSelector):
            # Fast path: the selector targets exactly one grid.
            if not (len(chunks) == len(chunks[0].objs) == 1):
                raise RuntimeError

            grid = chunks[0].objs[0]

            for ftype, fname in fields:
                rv[ftype, fname] = self._read_particles(grid, fname)

            return rv

        rv = {f: np.array([]) for f in fields}
        for chunk in chunks:
            for grid in chunk.objs:
                for ftype, fname in fields:
                    data = self._read_particles(grid, fname)
                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
        return rv

    def _read_particles(self, grid, field):
        """
        parses the Orion Star Particle text files

        The file's lines are read once and cached on the handler; each
        grid indexes into the cache via its recorded line numbers.
        """
        if grid.NumberOfParticles == 0:
            return np.array([])

        # Populate the line cache on first use.  (The old try/except
        # AttributeError around the whole loop also swallowed genuine
        # AttributeErrors raised while parsing.)
        lines = getattr(self, "_cached_lines", None)
        if lines is None:
            with open(self.particle_filename) as f:
                lines = f.readlines()
            self._cached_lines = lines

        column = self.particle_field_index[field]
        particles = [
            float(lines[num].strip().split(" ")[column])
            for num in grid._particle_line_numbers
        ]
        return np.array(particles)