import os
import weakref

import numpy as np

from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
from yt.funcs import mylog, sglob
from yt.geometry.geometry_handler import YTDataChunk
from yt.geometry.grid_geometry_handler import GridIndex
from yt.utilities.chemical_formulas import compute_mu
from yt.utilities.decompose import decompose_array, get_psize
from yt.utilities.lib.misc_utilities import get_box_grids_level

from .fields import AthenaFieldInfo


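# The VTK files are opened in binary mode, so header tokens come back as
# bytes. chk23/str23 are small str/bytes shims (left over from the Python 2/3
# transition) so that tokens can be compared and reported either way, e.g.
# chk23("SCALARS") == b"SCALARS" and str23(b"density") == "density".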
def chk23(strin):
    return strin.encode("utf-8")


def str23(strin):
    if isinstance(strin, list):
        return [s.decode("utf-8") for s in strin]
    else:
        return strin.decode("utf-8")


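# A SCALARS/VECTORS keyword can apparently land mid-line (presumably when no
# newline precedes it after a binary table); check_readline trims such lines
# so that they start at the keyword.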
def check_readline(fl):
    line = fl.readline()
    chk = chk23("SCALARS")
    if chk in line and not line.startswith(chk):
        line = line[line.find(chk) :]
    chk = chk23("VECTORS")
    if chk in line and not line.startswith(chk):
        line = line[line.find(chk) :]
    return line


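# check_break flags lines that mark the end of the header / start of the data
# tables (or an empty line at EOF), so header-parsing loops know when to stop.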
def check_break(line):
    splitup = line.strip().split()
    do_break = chk23("SCALAR") in splitup
    do_break = (chk23("VECTOR") in splitup) | do_break
    do_break = (chk23("TABLE") in splitup) | do_break
    do_break = (len(line) == 0) | do_break
    return do_break


def _get_convert(fname):
    def _conv(data):
        return data.convert(fname)

    return _conv


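# Each grid records where its data lives on disk (file_offset) and how much
# of it to read (read_dims); with nprocs > 1, several virtual grids share a
# single file and differ only in those two attributes.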
class AthenaGrid(AMRGridPatch):
    _id_offset = 0

    def __init__(self, id, index, level, start, dimensions, file_offset, read_dims):
        gname = index.grid_filenames[id]
        AMRGridPatch.__init__(self, id, filename=gname, index=index)
        self.filename = gname
        self.Parent = []
        self.Children = []
        self.Level = level
        self.start_index = start.copy()
        self.stop_index = self.start_index + dimensions
        self.ActiveDimensions = dimensions.copy()
        self.file_offset = file_offset
        self.read_dims = read_dims

    def _setup_dx(self):
        # So first we figure out what the index is. We don't assume
        # that dx=dy=dz, at least here. We probably do elsewhere.
        id = self.id - self._id_offset
        if len(self.Parent) > 0:
            self.dds = self.Parent[0].dds / self.ds.refine_by
        else:
            LE, RE = self.index.grid_left_edge[id, :], self.index.grid_right_edge[id, :]
            self.dds = self.ds.arr((RE - LE) / self.ActiveDimensions, "code_length")
        if self.ds.dimensionality < 2:
            self.dds[1] = 1.0
        if self.ds.dimensionality < 3:
            self.dds[2] = 1.0
        self.field_data["dx"], self.field_data["dy"], self.field_data["dz"] = self.dds

    def __repr__(self):
        return "AthenaGrid_%04i (%s)" % (self.id, self.ActiveDimensions)


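# parse_line reads one header line into the ``grid`` dict. For example, a
# (hypothetical) header line b"DIMENSIONS 65 65 65" leaves
# grid["dimensions"] == array([65, 65, 65]); a SCALARS/VECTORS line sets
# grid["read_field"] and grid["read_type"], which the callers below use as
# their loop-exit condition.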
def parse_line(line, grid):
    # grid is a dictionary
    splitup = line.strip().split()
    if chk23("vtk") in splitup:
        grid["vtk_version"] = str23(splitup[-1])
    elif chk23("time=") in splitup:
        time_index = splitup.index(chk23("time="))
        grid["time"] = float(str23(splitup[time_index + 1]).rstrip(","))
        grid["level"] = int(str23(splitup[time_index + 3]).rstrip(","))
        grid["domain"] = int(str23(splitup[time_index + 5]).rstrip(","))
    elif chk23("DIMENSIONS") in splitup:
        grid["dimensions"] = np.array(str23(splitup[-3:])).astype("int")
    elif chk23("ORIGIN") in splitup:
        grid["left_edge"] = np.array(str23(splitup[-3:])).astype("float64")
    elif chk23("SPACING") in splitup:
        grid["dds"] = np.array(str23(splitup[-3:])).astype("float64")
    elif chk23("CELL_DATA") in splitup or chk23("POINT_DATA") in splitup:
        grid["ncells"] = int(str23(splitup[-1]))
    elif chk23("SCALARS") in splitup:
        field = str23(splitup[1])
        grid["read_field"] = field
        grid["read_type"] = "scalar"
    elif chk23("VECTORS") in splitup:
        field = str23(splitup[1])
        grid["read_field"] = field
        grid["read_type"] = "vector"
    elif chk23("time") in splitup:
        time_index = splitup.index(chk23("time"))
        grid["time"] = float(str23(splitup[time_index + 1]))


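# The hierarchy is built by re-reading the VTK headers: once to map field
# names to byte offsets (_detect_output_fields) and once per grid file to
# recover the grid geometry (_parse_index).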
class AthenaHierarchy(GridIndex):

    grid = AthenaGrid
    _dataset_type = "athena"
    _data_file = None

    def __init__(self, ds, dataset_type="athena"):
        self.dataset = weakref.proxy(ds)
        self.directory = os.path.dirname(self.dataset.filename)
        self.dataset_type = dataset_type
        # for now, the index file is the dataset!
        self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
        self._fhandle = open(self.index_filename, "rb")
        GridIndex.__init__(self, ds, dataset_type)

        self._fhandle.close()

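    # Scan the header once, recording for each field a (kind, offset, dtype)
    # triple; the offset is measured in bytes from the start of the data
    # tables so the IO handler can seek directly to a field's values.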
    def _detect_output_fields(self):
        field_map = {}
        f = open(self.index_filename, "rb")
        line = check_readline(f)
        chkwhile = chk23("")
        while line != chkwhile:
            splitup = line.strip().split()
            chkd = chk23("DIMENSIONS")
            chkc = chk23("CELL_DATA")
            chkp = chk23("POINT_DATA")
            if chkd in splitup:
                field = str23(splitup[-3:])
                grid_dims = np.array(field).astype("int")
                line = check_readline(f)
            elif chkc in splitup or chkp in splitup:
                grid_ncells = int(str23(splitup[-1]))
                line = check_readline(f)
                if np.prod(grid_dims) != grid_ncells:
                    grid_dims -= 1
                    grid_dims[grid_dims == 0] = 1
                if np.prod(grid_dims) != grid_ncells:
                    mylog.error(
                        "product of dimensions %i not equal to number of cells %i",
                        np.prod(grid_dims),
                        grid_ncells,
                    )
                    raise TypeError
                break
            else:
                line = check_readline(f)
        read_table = False
        read_table_offset = f.tell()
        while line != chkwhile:
            splitup = line.strip().split()
            chks = chk23("SCALARS")
            chkv = chk23("VECTORS")
            if chks in line and chks not in splitup:
                splitup = str23(line[line.find(chks) :].strip().split())
            if chkv in line and chkv not in splitup:
                splitup = str23(line[line.find(chkv) :].strip().split())
            if chks in splitup:
                field = ("athena", str23(splitup[1]))
                dtype = str23(splitup[-1]).lower()
                if not read_table:
                    line = check_readline(f)  # Read the lookup table line
                    read_table = True
                field_map[field] = ("scalar", f.tell() - read_table_offset, dtype)
                read_table = False
            elif chkv in splitup:
                field = str23(splitup[1])
                dtype = str23(splitup[-1]).lower()
                for ax in "xyz":
                    field_map[("athena", f"{field}_{ax}")] = (
                        "vector",
                        f.tell() - read_table_offset,
                        dtype,
                    )
            line = check_readline(f)

        f.close()

        self.field_list = list(field_map.keys())
        self._field_map = field_map

    def _count_grids(self):
        self.num_grids = self.dataset.nvtk * self.dataset.nprocs

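    # _parse_index re-reads every grid file's header to recover each grid's
    # left edge, cell spacing, and dimensions, then converts the edges into
    # integer offsets from the domain left edge.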
    def _parse_index(self):
        f = open(self.index_filename, "rb")
        grid = {}
        grid["read_field"] = None
        grid["read_type"] = None
        line = f.readline()
        while grid["read_field"] is None:
            parse_line(line, grid)
            if check_break(line):
                break
            line = f.readline()
        f.close()

        # It seems some datasets have a mismatch between ncells and
        # the actual grid dimensions.
        if np.prod(grid["dimensions"]) != grid["ncells"]:
            grid["dimensions"] -= 1
            grid["dimensions"][grid["dimensions"] == 0] = 1
        if np.prod(grid["dimensions"]) != grid["ncells"]:
            mylog.error(
                "product of dimensions %i not equal to number of cells %i",
                np.prod(grid["dimensions"]),
                grid["ncells"],
            )
            raise TypeError

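        # Athena MPI runs write one file per rank under id0/, id1/, ..., with
        # SMR levels in lev*/ subdirectories. The slicing below assumes names
        # like "basename.NNNN.vtk" (and "basename-idM.NNNN.vtk" for rank
        # M > 0): dname[-9:] is the ".NNNN.vtk" suffix and, for a file under
        # id0/, dname[4:-9] is the bare basename. The globs reassemble the
        # full grid list starting from the single file we were handed.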
        # Need to determine how many grids: self.num_grids
        dataset_dir = os.path.dirname(self.index_filename)
        dname = os.path.split(self.index_filename)[-1]
        if dataset_dir.endswith("id0"):
            dname = "id0/" + dname
            dataset_dir = dataset_dir[:-3]

        gridlistread = sglob(
            os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}")
        )
        gridlistread.insert(0, self.index_filename)
        if "id0" in dname:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}")
            )
        else:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}")
            )
        ndots = dname.count(".")
        gridlistread = [
            fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots
        ]
        self.num_grids = len(gridlistread)
        dxs = []
        levels = np.zeros(self.num_grids, dtype="int32")
        glis = np.empty((self.num_grids, 3), dtype="float64")
        gdds = np.empty((self.num_grids, 3), dtype="float64")
        gdims = np.ones_like(glis)
        j = 0
        self.grid_filenames = gridlistread
        while j < (self.num_grids):
            f = open(gridlistread[j], "rb")
            gridread = {}
            gridread["read_field"] = None
            gridread["read_type"] = None
            line = f.readline()
            while gridread["read_field"] is None:
                parse_line(line, gridread)
                splitup = line.strip().split()
                if chk23("X_COORDINATES") in splitup:
                    gridread["left_edge"] = np.zeros(3)
                    gridread["dds"] = np.zeros(3)
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][0] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][0] = v[1] - v[0]
                if chk23("Y_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][1] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][1] = v[1] - v[0]
                if chk23("Z_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][2] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][2] = v[1] - v[0]
                if check_break(line):
                    break
                line = f.readline()
            f.close()
            levels[j] = gridread.get("level", 0)
            glis[j, 0] = gridread["left_edge"][0]
            glis[j, 1] = gridread["left_edge"][1]
            glis[j, 2] = gridread["left_edge"][2]
            # It seems some datasets have a mismatch between ncells and
            # the actual grid dimensions.
            if np.prod(gridread["dimensions"]) != gridread["ncells"]:
                gridread["dimensions"] -= 1
                gridread["dimensions"][gridread["dimensions"] == 0] = 1
            if np.prod(gridread["dimensions"]) != gridread["ncells"]:
                mylog.error(
                    "product of dimensions %i not equal to number of cells %i",
                    np.prod(gridread["dimensions"]),
                    gridread["ncells"],
                )
                raise TypeError
            gdims[j, 0] = gridread["dimensions"][0]
            gdims[j, 1] = gridread["dimensions"][1]
            gdims[j, 2] = gridread["dimensions"][2]
            # Setting dds=1 for non-active dimensions in 1D/2D datasets
            gridread["dds"][gridread["dimensions"] == 1] = 1.0
            gdds[j, :] = gridread["dds"]

            j = j + 1

        gres = glis + gdims * gdds
        # Now we convert the glis, which were left edges (floats), to integer
        # indices measured from the domain left edge, and correct the domain
        # extent now that the full set of grid edges is known.
        glis = np.round(
            (glis - self.dataset.domain_left_edge.ndarray_view()) / gdds
        ).astype("int")
        new_dre = np.max(gres, axis=0)
        dre_units = self.dataset.domain_right_edge.uq
        self.dataset.domain_right_edge = np.round(new_dre, decimals=12) * dre_units
        self.dataset.domain_width = (
            self.dataset.domain_right_edge - self.dataset.domain_left_edge
        )
        self.dataset.domain_center = 0.5 * (
            self.dataset.domain_left_edge + self.dataset.domain_right_edge
        )
        self.dataset.domain_dimensions = np.round(
            self.dataset.domain_width / gdds[0]
        ).astype("int")

        if self.dataset.dimensionality <= 2:
            self.dataset.domain_dimensions[2] = 1
        if self.dataset.dimensionality == 1:
            self.dataset.domain_dimensions[1] = 1

        dle = self.dataset.domain_left_edge
        dre = self.dataset.domain_right_edge
        dx_root = (
            self.dataset.domain_right_edge - self.dataset.domain_left_edge
        ) / self.dataset.domain_dimensions

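        # With nprocs > 1, each physical grid is split into nprocs virtual
        # grids so IO over a single monolithic file can be parallelized.
        # get_psize factors nprocs into a 3D block layout and decompose_array
        # returns per-block edges, shapes, and slices, e.g. (hypothetical
        # values):
        #   psize = get_psize(np.array([64, 64, 64]), 4)  # -> e.g. [1, 1, 4]
        #   gle, gre, shapes, slices = decompose_array(dims, psize, bbox)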
        if self.dataset.nprocs > 1:
            gle_all = []
            gre_all = []
            shapes_all = []
            levels_all = []
            new_gridfilenames = []
            file_offsets = []
            read_dims = []
            for i in range(levels.shape[0]):
                dx = dx_root / self.dataset.refine_by ** (levels[i])
                gle_orig = self.ds.arr(
                    np.round(dle + dx * glis[i], decimals=12), "code_length"
                )
                gre_orig = self.ds.arr(
                    np.round(gle_orig + dx * gdims[i], decimals=12), "code_length"
                )
                bbox = np.array([[le, re] for le, re in zip(gle_orig, gre_orig)])
                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
                gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
                gle_all += gle
                gre_all += gre
                shapes_all += shapes
                levels_all += [levels[i]] * self.dataset.nprocs
                new_gridfilenames += [self.grid_filenames[i]] * self.dataset.nprocs
                file_offsets += [
                    [slc[0].start, slc[1].start, slc[2].start] for slc in slices
                ]
                read_dims += [
                    np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int64")
                    for shape in shapes
                ]
            self.num_grids *= self.dataset.nprocs
            self.grids = np.empty(self.num_grids, dtype="object")
            self.grid_filenames = new_gridfilenames
            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
            self.grid_dimensions = np.array(
                [shape for shape in shapes_all], dtype="int32"
            )
            gdds = (self.grid_right_edge - self.grid_left_edge) / self.grid_dimensions
            glis = np.round(
                (self.grid_left_edge - self.ds.domain_left_edge) / gdds
            ).astype("int")
            for i in range(self.num_grids):
                self.grids[i] = self.grid(
                    i,
                    self,
                    levels_all[i],
                    glis[i],
                    shapes_all[i],
                    file_offsets[i],
                    read_dims[i],
                )
        else:
            self.grids = np.empty(self.num_grids, dtype="object")
            for i in range(levels.shape[0]):
                self.grids[i] = self.grid(
                    i, self, levels[i], glis[i], gdims[i], [0] * 3, gdims[i]
                )
                dx = dx_root / self.dataset.refine_by ** (levels[i])
                dxs.append(dx)

            dx = self.ds.arr(dxs, "code_length")
            self.grid_left_edge = self.ds.arr(
                np.round(dle + dx * glis, decimals=12), "code_length"
            )
            self.grid_dimensions = gdims.astype("int32")
            self.grid_right_edge = self.ds.arr(
                np.round(self.grid_left_edge + dx * self.grid_dimensions, decimals=12),
                "code_length",
            )
        if self.dataset.dimensionality <= 2:
            self.grid_right_edge[:, 2] = dre[2]
        if self.dataset.dimensionality == 1:
            self.grid_right_edge[:, 1:] = dre[1:]
        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype="int64")

    def _populate_grid_objects(self):
        for g in self.grids:
            g._prepare_grid()
            g._setup_dx()
        self._reconstruct_parent_child()
        self.max_level = self.grid_levels.max()

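    # Athena VTK output carries no explicit parent/child links, so they are
    # rebuilt geometrically: a grid's children are the grids one level finer
    # whose bounding boxes overlap it (get_box_grids_level fills ``mask``).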
    def _reconstruct_parent_child(self):
        mask = np.empty(len(self.grids), dtype="int32")
        mylog.debug("First pass; identifying child grids")
        for i, grid in enumerate(self.grids):
            get_box_grids_level(
                self.grid_left_edge[i, :],
                self.grid_right_edge[i, :],
                self.grid_levels[i] + 1,
                self.grid_left_edge,
                self.grid_right_edge,
                self.grid_levels,
                mask,
            )
            grid.Children = [
                g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1
            ]
        mylog.debug("Second pass; identifying parents")
        for grid in self.grids:  # Second pass
            for child in grid.Children:
                child.Parent.append(grid)

    def _get_grid_children(self, grid):
        mask = np.zeros(self.num_grids, dtype="bool")
        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
        mask[grid_ind] = True
        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]

    def _chunk_io(self, dobj, cache=True, local_only=False):
        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        for subset in gobjs:
            yield YTDataChunk(
                dobj, "io", [subset], self._count_selection(dobj, [subset]), cache=cache
            )


class AthenaDataset(Dataset):
    _index_class = AthenaHierarchy
    _field_info_class = AthenaFieldInfo
    _dataset_type = "athena"

    def __init__(
        self,
        filename,
        dataset_type="athena",
        storage_filename=None,
        parameters=None,
        units_override=None,
        nprocs=1,
        unit_system="cgs",
        default_species_fields=None,
    ):
        self.fluid_types += ("athena",)
        self.nprocs = nprocs
        if parameters is None:
            parameters = {}
        self.specified_parameters = parameters.copy()
        if units_override is None:
            units_override = {}
        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
            default_species_fields=default_species_fields,
        )
        self.filename = filename
        if storage_filename is None:
            storage_filename = f"{filename.split('/')[-1]}.yt"
        self.storage_filename = storage_filename
        self.backup_filename = self.filename[:-4] + "_backup.gdf"
        # Unfortunately we now have to mandate that the index gets
        # instantiated so that we can make sure we have the correct left
        # and right domain edges.
        self.index

    def _set_code_unit_attributes(self):
        """
        Generates the conversion to various physical units based on the
        parameter file.
        """
        if "length_unit" not in self.units_override:
            self.no_cgs_equiv_length = True
        for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
            # We set these to cgs for now, but they may have been overridden
            if getattr(self, unit + "_unit", None) is not None:
                continue
            mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
            setattr(self, f"{unit}_unit", self.quan(1.0, cgs))
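        # In Gaussian units, B^2 / (8*pi) is an energy density, so the
        # magnetic unit follows as sqrt(4*pi * mass / (time^2 * length)).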
        self.magnetic_unit = np.sqrt(
            4 * np.pi * self.mass_unit / (self.time_unit**2 * self.length_unit)
        )
        self.magnetic_unit.convert_to_units("gauss")
        self.velocity_unit = self.length_unit / self.time_unit

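    # The "parameter file" is simply the first VTK file: the domain geometry
    # is inferred from its header. The right edge is set provisionally to
    # -left_edge here and corrected in _parse_index once all grids are known.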
    def _parse_parameter_file(self):
        self._handle = open(self.parameter_filename, "rb")
        # Read the start of a grid to get simulation parameters.
        grid = {}
        grid["read_field"] = None
        line = self._handle.readline()
        while grid["read_field"] is None:
            parse_line(line, grid)
            splitup = line.strip().split()
            if chk23("X_COORDINATES") in splitup:
                grid["left_edge"] = np.zeros(3)
                grid["dds"] = np.zeros(3)
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][0] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][0] = v[1] - v[0]
            if chk23("Y_COORDINATES") in splitup:
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][1] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][1] = v[1] - v[0]
            if chk23("Z_COORDINATES") in splitup:
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][2] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][2] = v[1] - v[0]
            if check_break(line):
                break
            line = self._handle.readline()

        self.domain_left_edge = grid["left_edge"]
        mylog.info(
            "Temporarily setting domain_right_edge = -domain_left_edge. "
            "This will be corrected automatically if it is not the case."
        )
        self.domain_right_edge = -self.domain_left_edge
        self.domain_width = self.domain_right_edge - self.domain_left_edge
        self.domain_dimensions = np.round(self.domain_width / grid["dds"]).astype(
            "int32"
        )
        # Athena SMR always refines by a factor of 2.
        self.refine_by = 2
        dimensionality = 3
        if grid["dimensions"][2] == 1:
            dimensionality = 2
        if grid["dimensions"][1] == 1:
            dimensionality = 1
        if dimensionality <= 2:
            self.domain_dimensions[2] = np.int32(1)
        if dimensionality == 1:
            self.domain_dimensions[1] = np.int32(1)
        if dimensionality != 3 and self.nprocs > 1:
            raise RuntimeError("Virtual grids are only supported for 3D outputs!")
        self.dimensionality = dimensionality
        self.current_time = grid["time"]
        self.unique_identifier = self.parameter_filename.__hash__()
        self.cosmological_simulation = False
        self.num_ghost_zones = 0
        self.field_ordering = "fortran"
        self.boundary_conditions = [1] * 6
        self._periodicity = tuple(
            self.specified_parameters.get("periodicity", (True, True, True))
        )
        if "gamma" in self.specified_parameters:
            self.gamma = float(self.specified_parameters["gamma"])
        else:
            self.gamma = 5.0 / 3.0
        dataset_dir = os.path.dirname(self.parameter_filename)
        dname = os.path.split(self.parameter_filename)[-1]
        if dataset_dir.endswith("id0"):
            dname = "id0/" + dname
            dataset_dir = dataset_dir[:-3]

        gridlistread = sglob(
            os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}")
        )
        if "id0" in dname:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}")
            )
        else:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}")
            )
        ndots = dname.count(".")
        gridlistread = [
            fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots
        ]
        self.nvtk = len(gridlistread) + 1

        self.current_redshift = 0.0
        self.omega_lambda = 0.0
        self.omega_matter = 0.0
        self.hubble_constant = 0.0
        self.cosmological_simulation = 0
        self.parameters["Time"] = self.current_time  # Hardcode time conversion for now.
        # Hardcode for now until field staggering is supported.
        self.parameters["HydroMethod"] = 0
        if "gamma" in self.specified_parameters:
            self.parameters["Gamma"] = self.specified_parameters["gamma"]
        else:
            self.parameters["Gamma"] = 5.0 / 3.0
        self.geometry = self.specified_parameters.get("geometry", "cartesian")
        self._handle.close()
        self.mu = self.specified_parameters.get(
            "mu", compute_mu(self.default_species_fields)
        )

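    # Validation is deliberately loose: any filename containing "vtk" is
    # accepted, since legacy Athena VTK files carry no distinguishing magic
    # bytes; the try/except guards against non-string filename arguments.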
    @classmethod
    def _is_valid(cls, filename, *args, **kwargs):
        try:
            if "vtk" in filename:
                return True
        except Exception:
            pass
        return False

    @property
    def _skip_cache(self):
        return True

    def __str__(self):
        return self.basename.rsplit(".", 1)[0]