# -*- coding: utf-8 -*-

########################################################################
#
# License: BSD
# Created: October 14, 2002
# Author: Francesc Alted - faltet@pytables.com
#
# $Id$
#
########################################################################

"""Here is defined the Leaf class."""

import warnings
import math

import numpy

from .flavor import (check_flavor, internal_flavor,
                     alias_map as flavor_alias_map)
from .node import Node
from .filters import Filters
from .utils import byteorders, lazyattr, SizeType
from .exceptions import PerformanceWarning
from . import utilsextension


def csformula(expected_mb):
    """Return the fitted chunksize for expected_mb."""

    # For a basesize of 8 KB, this will return:
    # 8 KB for datasets <= 1 MB
    # 1 MB for datasets >= 10 TB
    basesize = 8 * 1024   # 8 KB is a good minimum
    return basesize * int(2**math.log10(expected_mb))
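
# A worked example of the formula above (illustrative only): for an
# expected dataset size of 100 MB, log10(100) == 2, so the chunksize
# comes out as 8 KB * int(2**2) == 32 KB:
#
#   >>> csformula(100)
#   32768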


def limit_es(expected_mb):
    """Protect against creating too small or too large chunks."""

    if expected_mb < 1:        # < 1 MB
        expected_mb = 1
    elif expected_mb > 10**7:  # > 10 TB
        expected_mb = 10**7
    return expected_mb


def calc_chunksize(expected_mb):
    """Compute the optimum HDF5 chunksize for I/O purposes.

    Rationale: HDF5 takes the data in bunches of chunksize length to
    write them on disk.  A B-tree in memory is used to map structures
    on disk.  The more chunks that are allocated for a dataset, the
    larger the B-tree.  Large B-trees take memory and cause file
    storage overhead as well as more disk I/O and higher contention
    for the metadata cache.  You have to balance between memory and
    I/O overhead (small B-trees) and time to access data (big
    B-trees).

    The tuning of the chunksize parameter affects the performance and
    the memory consumed.  This is based on my own experiments and, as
    always, your mileage may vary.

    """

    expected_mb = limit_es(expected_mb)
    zone = int(math.log10(expected_mb))
    expected_mb = 10**zone
    chunksize = csformula(expected_mb)
    # XXX: Multiplying by 8 seems optimal for sequential access
    return chunksize * 8
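
# A minimal sketch of how the three functions above combine
# (illustrative only).  For a dataset expected to take ~100 MB, the
# zone is 10**2 MB, csformula() yields 32 KB, and the final x8 factor
# gives a 256 KB chunk:
#
#   >>> calc_chunksize(100)
#   262144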


class Leaf(Node):
    """Abstract base class for all PyTables leaves.

    A leaf is a node (see the Node class in :class:`Node`) which hangs
    from a group (see the Group class in :class:`Group`) but, unlike a
    group, it cannot have any further children below it (i.e. it is an
    end node).

    This definition includes all nodes which contain actual data
    (datasets handled by the Table - see :ref:`TableClassDescr`, Array -
    see :ref:`ArrayClassDescr`, CArray - see :ref:`CArrayClassDescr`,
    EArray - see :ref:`EArrayClassDescr`, and VLArray -
    see :ref:`VLArrayClassDescr` classes) and unsupported nodes (the
    UnImplemented class - see :ref:`UnImplementedClassDescr`); all of
    these classes do in fact inherit from Leaf.

    .. rubric:: Leaf attributes

    These instance variables are provided in addition to those in Node
    (see :ref:`NodeClassDescr`):

    .. attribute:: byteorder

        The byte ordering of the leaf data *on disk*.  It will be
        either ``little`` or ``big``.

    .. attribute:: dtype

        The NumPy dtype that most closely matches this leaf type.

    .. attribute:: extdim

        The index of the enlargeable dimension (-1 if none).

    .. attribute:: nrows

        The length of the main dimension of the leaf data.

    .. attribute:: nrowsinbuf

        The number of rows that fit in internal input buffers.

        You can change this to fine-tune the speed or memory
        requirements of your application.

    .. attribute:: shape

        The shape of data in the leaf.

    """

    # Properties
    # ~~~~~~~~~~

    # Node property aliases
    # `````````````````````
    # These are a little hard to override, but so are properties.
    attrs = Node._v_attrs
    """The associated AttributeSet instance - see
    :ref:`AttributeSetClassDescr` (This is an easier-to-write alias of
    :attr:`Node._v_attrs`)."""
    title = Node._v_title
    """A description for this node
    (This is an easier-to-write alias of :attr:`Node._v_title`)."""

    # Read-only node property aliases
    # ```````````````````````````````
    @property
    def name(self):
        """The name of this node in its parent group (This is an
        easier-to-write alias of :attr:`Node._v_name`)."""
        return self._v_name

    @property
    def chunkshape(self):
        """The HDF5 chunk size for chunked leaves (a tuple).

        This is read-only because you cannot change the chunk size of a
        leaf once it has been created.
        """
        return getattr(self, '_v_chunkshape', None)

    @property
    def object_id(self):
        """A node identifier, which may change from run to run.
        (This is an easier-to-write alias of :attr:`Node._v_objectid`).

        .. versionchanged:: 3.0
           The *objectID* property has been renamed to *object_id*.

        """
        return self._v_objectid

    @property
    def ndim(self):
        """The number of dimensions of the leaf data.

        .. versionadded:: 2.4

        """
        return len(self.shape)

    # Lazy read-only attributes
    # `````````````````````````
    @lazyattr
    def filters(self):
        """Filter properties for this leaf.

        See Also
        --------
        Filters

        """

        return Filters._from_leaf(self)

    @property
    def track_times(self):
        """Whether timestamps for the leaf are recorded.

        If the leaf is not a dataset, this will fail with HDF5ExtError.

        The ``track_times`` dataset creation property does not seem to
        survive closing and reopening as of HDF5 1.8.17.  Currently, it
        may be more accurate to test whether the ctime for the dataset
        is 0::

            track_times = (leaf._get_obj_timestamps().ctime == 0)
        """
        return self._get_obj_track_times()

    # Other properties
    # ````````````````

    @property
    def maindim(self):
        """The dimension along which iterators work.

        Its value is 0 (i.e. the first dimension) when the dataset is
        not extendable, and self.extdim (where available) for
        extendable ones.
        """

        if self.extdim < 0:
            return 0  # choose the first dimension
        return self.extdim

    @property
    def flavor(self):
        """The type of data object read from this leaf.

        It can be either 'numpy' or 'python'.

        You can (and are encouraged to) use this property to get, set
        and delete the FLAVOR HDF5 attribute of the leaf.  When the
        leaf has no such attribute, the default flavor is used.
        """

        return self._flavor

    @flavor.setter
    def flavor(self, flavor):
        self._v_file._check_writable()
        check_flavor(flavor)
        self._v_attrs.FLAVOR = self._flavor = flavor  # logs the change

    @flavor.deleter
    def flavor(self):
        del self._v_attrs.FLAVOR
        self._flavor = internal_flavor
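
    # Illustrative usage of the property above (``leaf`` stands for any
    # open leaf instance; the name is hypothetical):
    #
    #   >>> leaf.flavor = 'python'  # read data as Python lists/tuples
    #   >>> del leaf.flavor         # revert to the default flavor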

    @property
    def size_on_disk(self):
        """
        The size of this leaf's data in bytes as it is stored on disk.
        If the data is compressed, this shows the compressed size.  In
        the case of uncompressed, chunked data, this may be slightly
        larger than the amount of data, due to partially filled chunks.
        """
        return self._get_storage_size()

    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, parentnode, name,
                 new=False, filters=None,
                 byteorder=None, _log=True,
                 track_times=True):
        self._v_new = new
        """Is this the first time the node has been created?"""
        self.nrowsinbuf = None
        """
        The number of rows that fit in internal input buffers.

        You can change this to fine-tune the speed or memory
        requirements of your application.
        """
        self._flavor = None
        """Private storage for the `flavor` property."""

        if new:
            # Get filter properties from parent group if not given.
            if filters is None:
                filters = parentnode._v_filters
            self.__dict__['filters'] = filters  # bypass the property

            if byteorder not in (None, 'little', 'big'):
                raise ValueError(
                    "the byteorder can only take 'little' or 'big' "
                    "values and you passed: %s" % byteorder)
            self.byteorder = byteorder
            """The byte ordering of the leaf data *on disk*."""

        self._want_track_times = track_times

        # Existing filters need not be read since `filters`
        # is a lazy property that automatically handles their loading.

        super(Leaf, self).__init__(parentnode, name, _log)

    def __len__(self):
        """Return the length of the main dimension of the leaf data.

        Please note that this may raise an OverflowError on 32-bit
        platforms for datasets having more than 2**31-1 rows.  This is
        a limitation of Python that you can work around by using the
        nrows or shape attributes.

        """

        return self.nrows

    def __str__(self):
        """The string representation for this object is its pathname in
        the HDF5 object tree plus some additional metainfo."""

        # Get this class name
        classname = self.__class__.__name__
        # The title
        title = self._v_title
        # The filters
        filters = ""
        if self.filters.fletcher32:
            filters += ", fletcher32"
        if self.filters.complevel:
            if self.filters.shuffle:
                filters += ", shuffle"
            if self.filters.bitshuffle:
                filters += ", bitshuffle"
            filters += ", %s(%s)" % (self.filters.complib,
                                     self.filters.complevel)
        return "%s (%s%s%s) %r" % \
               (self._v_pathname, classname, self.shape, filters, title)

    # Private methods
    # ~~~~~~~~~~~~~~~
    def _g_post_init_hook(self):
        """Code to be run after node creation and before creation logging.

        This method gets or sets the flavor of the leaf.

        """

        super(Leaf, self)._g_post_init_hook()
        if self._v_new:  # set flavor of new node
            if self._flavor is None:
                self._flavor = internal_flavor
            else:  # flavor set at creation time, do not log
                if self._v_file.params['PYTABLES_SYS_ATTRS']:
                    self._v_attrs._g__setattr('FLAVOR', self._flavor)
        else:  # get flavor of existing node (if any)
            if self._v_file.params['PYTABLES_SYS_ATTRS']:
                flavor = getattr(self._v_attrs, 'FLAVOR', internal_flavor)
                self._flavor = flavor_alias_map.get(flavor, flavor)
            else:
                self._flavor = internal_flavor

    def _calc_chunkshape(self, expectedrows, rowsize, itemsize):
        """Calculate the shape for the HDF5 chunk."""

        # In case of a scalar shape, return the unit chunksize
        if self.shape == ():
            return (SizeType(1),)

        # Compute the chunksize
        MB = 1024 * 1024
        expected_mb = (expectedrows * rowsize) // MB
        chunksize = calc_chunksize(expected_mb)

        maindim = self.maindim
        # Compute the chunknitems
        chunknitems = chunksize // itemsize
        # Safeguard against itemsizes being extremely large
        if chunknitems == 0:
            chunknitems = 1
        chunkshape = list(self.shape)
        # Check whether trimming the main dimension is enough
        chunkshape[maindim] = 1
        newchunknitems = numpy.prod(chunkshape, dtype=SizeType)
        if newchunknitems <= chunknitems:
            chunkshape[maindim] = chunknitems // newchunknitems
        else:
            # No, so start trimming other dimensions as well
            for j in range(len(chunkshape)):
                # Check whether trimming this dimension is enough
                chunkshape[j] = 1
                newchunknitems = numpy.prod(chunkshape, dtype=SizeType)
                if newchunknitems <= chunknitems:
                    chunkshape[j] = chunknitems // newchunknitems
                    break
            else:
                # Oops, we ran out of the loop without a break
                # Set the last dimension to chunknitems
                chunkshape[-1] = chunknitems

        return tuple(SizeType(s) for s in chunkshape)
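
    # A worked example of the algorithm above (illustrative only): for
    # a hypothetical enlargeable leaf of shape (0, 100, 100) with main
    # dimension 0, itemsize 8 (so rowsize 80000) and 10**5 expected
    # rows, expected_mb == 8 * 10**9 // 2**20 == 7629, so
    # calc_chunksize(7629) == 524288 and chunknitems == 65536.  Setting
    # the main dimension to 1 leaves 100 * 100 == 10000 items, which
    # fits, so the main dimension becomes 65536 // 10000 == 6 and the
    # resulting chunkshape is (6, 100, 100).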

    def _calc_nrowsinbuf(self):
        """Calculate the number of rows that fit in a PyTables buffer."""

        params = self._v_file.params
        # Compute the nrowsinbuf
        rowsize = self.rowsize
        buffersize = params['IO_BUFFER_SIZE']
        if rowsize != 0:
            nrowsinbuf = buffersize // rowsize
        else:
            nrowsinbuf = 1

        # Safeguard against row sizes being extremely large
        if nrowsinbuf == 0:
            nrowsinbuf = 1
            # If rowsize is too large, issue a Performance warning
            maxrowsize = params['BUFFER_TIMES'] * buffersize
            if rowsize > maxrowsize:
                warnings.warn("""\
The Leaf ``%s`` is exceeding the maximum recommended rowsize (%d bytes);
be ready to see PyTables asking for *lots* of memory and possibly slow
I/O.  You may want to reduce the rowsize by trimming the value of
dimensions that are orthogonal (and preferably close) to the *main*
dimension of this leaf.  Alternatively, in case you have specified a
very small/large chunksize, you may want to increase/decrease it."""
                              % (self._v_pathname, maxrowsize),
                              PerformanceWarning)
        return nrowsinbuf
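
    # A worked example (illustrative only, and assuming an
    # IO_BUFFER_SIZE of 1 MB): for a table with a rowsize of 800 bytes,
    # nrowsinbuf would be 1048576 // 800 == 1310 rows per buffer.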

    # This method is appropriate for calls to __getitem__ methods
    def _process_range(self, start, stop, step, dim=None, warn_negstep=True):
        if dim is None:
            nrows = self.nrows  # self.shape[self.maindim]
        else:
            nrows = self.shape[dim]

        if warn_negstep and step and step < 0:
            raise ValueError("slice step cannot be negative")

        return slice(start, stop, step).indices(int(nrows))
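
    # The heavy lifting above is done by the standard slice.indices()
    # protocol, which clips the bounds to the dataset length and
    # resolves negative indices.  For instance, for a leaf with 100
    # rows (illustrative only):
    #
    #   >>> slice(10, None, 2).indices(100)
    #   (10, 100, 2)
    #   >>> slice(-5, None, None).indices(100)
    #   (95, 100, 1)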

    # This method is appropriate for calls to read() methods
    def _process_range_read(self, start, stop, step, warn_negstep=True):
        nrows = self.nrows
        if start is not None and stop is None and step is None:
            # Protection against start greater than available records
            # nrows == 0 is a special case for empty objects
            if nrows > 0 and start >= nrows:
                raise IndexError("start of range (%s) is greater than "
                                 "number of rows (%s)" % (start, nrows))
            step = 1
            if start == -1:  # corner case
                stop = nrows
            else:
                stop = start + 1
        # Finally, get the correct values (over the main dimension)
        start, stop, step = self._process_range(start, stop, step,
                                                warn_negstep=warn_negstep)
        return (start, stop, step)
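
    # An example of the start-only semantics above (illustrative only):
    # ``leaf.read(start=5)`` reads just row 5, since the range is
    # normalized to (5, 6, 1), while ``leaf.read(start=-1)`` resolves
    # to the last row of the dataset.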

    def _g_copy(self, newparent, newname, recursive, _log=True, **kwargs):
        # Compute default arguments.
        start = kwargs.pop('start', None)
        stop = kwargs.pop('stop', None)
        step = kwargs.pop('step', None)
        title = kwargs.pop('title', self._v_title)
        filters = kwargs.pop('filters', self.filters)
        chunkshape = kwargs.pop('chunkshape', self.chunkshape)
        copyuserattrs = kwargs.pop('copyuserattrs', True)
        stats = kwargs.pop('stats', None)
        if chunkshape == 'keep':
            chunkshape = self.chunkshape  # Keep the original chunkshape
        elif chunkshape == 'auto':
            chunkshape = None             # Will recompute chunkshape

        # Fix arguments with explicit None values for backwards compatibility.
        if title is None:
            title = self._v_title
        if filters is None:
            filters = self.filters

        # Create a copy of the object.
        (new_node, bytes) = self._g_copy_with_stats(
            newparent, newname, start, stop, step,
            title, filters, chunkshape, _log, **kwargs)

        # Copy user attributes if requested (or the flavor at least).
        if copyuserattrs:
            self._v_attrs._g_copy(new_node._v_attrs, copyclass=True)
        elif 'FLAVOR' in self._v_attrs:
            if self._v_file.params['PYTABLES_SYS_ATTRS']:
                new_node._v_attrs._g__setattr('FLAVOR', self._flavor)
        new_node._flavor = self._flavor  # update cached value

        # Update statistics if needed.
        if stats is not None:
            stats['leaves'] += 1
            stats['bytes'] += bytes

        return new_node

    def _g_fix_byteorder_data(self, data, dbyteorder):
        """Fix the byteorder of data passed in constructors."""
        dbyteorder = byteorders[dbyteorder]
        # If self.byteorder has not been passed as an argument of
        # the constructor, then set it to the same value as data.
        if self.byteorder is None:
            self.byteorder = dbyteorder
        # Do an additional in-place byteswap of data if the in-memory
        # byteorder doesn't match the on-disk one.  This is the only
        # place where we have to do the conversion manually.  In all
        # the other cases, HDF5 will be responsible for doing the
        # byteswap properly.
        if dbyteorder in ['little', 'big']:
            if dbyteorder != self.byteorder:
                # if data is not writeable, do a copy first
                if not data.flags.writeable:
                    data = data.copy()
                data.byteswap(True)
        else:
            # Fix the byteorder again, no matter which byteorder the
            # user specified in the constructor.
            self.byteorder = "irrelevant"
        return data
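
    # The in-place byteswap above swaps the data bytes but leaves the
    # dtype byteorder untouched, which is what is wanted here since
    # ``self.byteorder`` tracks the on-disk ordering separately.  For
    # instance, on a little-endian machine (illustrative only):
    #
    #   >>> a = numpy.array([1, 2, 3], dtype='<i4')
    #   >>> a.byteswap(True)  # now holds big-endian bytes
    #   array([16777216, 33554432, 50331648], dtype=int32)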

    def _point_selection(self, key):
        """Perform a point-wise selection.

        `key` can be any of the following items:

        * A boolean array with the same shape as self.  Those positions
          with True values will signal the coordinates to be returned.

        * A numpy array (or list or tuple) with the point coordinates.
          This has to be a two-dimensional array of size
          len(self.shape) by num_elements containing a list of
          zero-based values specifying the coordinates in the dataset
          of the selected elements.  The order of the element
          coordinates in the array specifies the order in which the
          array elements are iterated through when I/O is performed.
          Duplicate coordinate locations are not checked for.

        Return the coordinates array.  If this is not possible, raise a
        `TypeError` so that the next selection method can be tried out.

        This is useful for any `Leaf` instance implementing a
        point-wise selection.

        """

        if type(key) in (list, tuple):
            if isinstance(key, tuple) and len(key) > len(self.shape):
                raise IndexError("Invalid index or slice: %r" % (key,))
            # Try to convert key to a numpy array.  If not possible,
            # a TypeError will be issued (to be caught later on).
            try:
                key = numpy.array(key)
            except ValueError:
                raise TypeError("Invalid index or slice: %r" % (key,))
        elif not isinstance(key, numpy.ndarray):
            raise TypeError("Invalid index or slice: %r" % (key,))

        # Protection against empty keys
        if len(key) == 0:
            return numpy.array([], dtype="i8")

        if key.dtype.kind == 'b':
            if not key.shape == self.shape:
                raise IndexError(
                    "Boolean indexing array has incompatible shape")
            # Get the True coordinates (64-bit indices!)
            coords = numpy.asarray(key.nonzero(), dtype='i8')
            coords = numpy.transpose(coords)
        elif key.dtype.kind == 'i' or key.dtype.kind == 'u':
            if len(key.shape) > 2:
                raise IndexError(
                    "Coordinate indexing array has incompatible shape")
            elif len(key.shape) == 2:
                if key.shape[0] != len(self.shape):
                    raise IndexError(
                        "Coordinate indexing array has incompatible shape")
                coords = numpy.asarray(key, dtype="i8")
                coords = numpy.transpose(coords)
            else:
                # For 1-dimensional datasets
                coords = numpy.asarray(key, dtype="i8")

            # handle negative indices
            idx = coords < 0
            coords[idx] = (coords + self.shape)[idx]

            # bounds check
            if numpy.any(coords < 0) or numpy.any(coords >= self.shape):
                raise IndexError("Index out of bounds")
        else:
            raise TypeError("Only integer coordinates allowed.")
        # We absolutely need a contiguous array
        if not coords.flags.contiguous:
            coords = coords.copy()
        return coords
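
    # Illustrative examples of the two accepted key forms above, for a
    # hypothetical 2-d leaf of shape (3, 3); both select the diagonal:
    #
    #   >>> leaf._point_selection(numpy.eye(3, dtype=bool))
    #   array([[0, 0],
    #          [1, 1],
    #          [2, 2]])
    #   >>> leaf._point_selection([(0, 1, 2), (0, 1, 2)])  # same points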

    # Public methods
    # ~~~~~~~~~~~~~~
    # Tree manipulation
    # `````````````````
    def remove(self):
        """Remove this node from the hierarchy.

        This method has the behavior described
        in :meth:`Node._f_remove`.  Please note that there is no
        recursive flag since leaves do not have child nodes.

        """

        self._f_remove(False)

    def rename(self, newname):
        """Rename this node in place.

        This method has the behavior described in :meth:`Node._f_rename`.

        """

        self._f_rename(newname)

    def move(self, newparent=None, newname=None,
             overwrite=False, createparents=False):
        """Move or rename this node.

        This method has the behavior described in :meth:`Node._f_move`.

        """

        self._f_move(newparent, newname, overwrite, createparents)

    def copy(self, newparent=None, newname=None,
             overwrite=False, createparents=False, **kwargs):
        """Copy this node and return the new one.

        This method has the behavior described in :meth:`Node._f_copy`.
        Please note that there is no recursive flag since leaves do not
        have child nodes.

        .. warning::

            Note that unknown parameters passed to this method will be
            ignored, so you may want to double-check the spelling of
            these (i.e. if you write them incorrectly, they will most
            probably be ignored).

        Parameters
        ----------
        title
            The new title for the destination.  If omitted or None, the
            original title is used.
        filters : Filters
            Specifying this parameter overrides the original filter
            properties in the source node.  If specified, it must be an
            instance of the Filters class (see :ref:`FiltersClassDescr`).
            The default is to copy the filter properties from the source
            node.
        copyuserattrs
            You can prevent the user attributes from being copied by
            setting this parameter to False.  The default is to copy
            them.
        start, stop, step : int
            Specify the range of rows to be copied; the default is to
            copy all the rows.
        stats
            This argument may be used to collect statistics on the copy
            process.  When used, it should be a dictionary with keys
            'groups', 'leaves' and 'bytes' having a numeric value.
            Their values will be incremented to reflect the number of
            groups, leaves and bytes, respectively, that have been
            copied during the operation.
        chunkshape
            The chunkshape of the new leaf.  It supports a couple of
            special values.  A value of 'keep' means that the chunkshape
            will be the same as in the original leaf (this is the
            default).  A value of 'auto' means that a new shape will be
            computed automatically in order to ensure best performance
            when accessing the dataset through the main dimension.  Any
            other value should be an integer or a tuple matching the
            dimensions of the leaf.

        """

        return self._f_copy(
            newparent, newname, overwrite, createparents, **kwargs)
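
    # Illustrative usage (``h5file`` and the node names are
    # hypothetical):
    #
    #   >>> new_arr = h5file.root.array1.copy(h5file.root, 'array2',
    #   ...                                   stop=100, chunkshape='auto')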

    def truncate(self, size):
        """Truncate the main dimension to be size rows.

        If the main dimension previously was larger than this size, the
        extra data is lost.  If the main dimension previously was
        shorter, it is extended, and the extended part is filled with
        the default values.

        The truncation operation can only be applied to *enlargeable*
        datasets; otherwise a TypeError will be raised.

        """

        # Non-enlargeable arrays (Array, CArray) cannot be truncated
        if self.extdim < 0:
            raise TypeError("non-enlargeable datasets cannot be truncated")
        self._g_truncate(size)
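
    # Illustrative usage (``earray`` is a hypothetical enlargeable
    # array with 1000 rows):
    #
    #   >>> earray.truncate(500)   # drop the last 500 rows
    #   >>> earray.truncate(1000)  # grow back, filled with defaults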

    def isvisible(self):
        """Is this node visible?

        This method has the behavior described in
        :meth:`Node._f_isvisible`.

        """

        return self._f_isvisible()

    # Attribute handling
    # ``````````````````
    def get_attr(self, name):
        """Get a PyTables attribute from this node.

        This method has the behavior described in :meth:`Node._f_getattr`.

        """

        return self._f_getattr(name)

    def set_attr(self, name, value):
        """Set a PyTables attribute for this node.

        This method has the behavior described in :meth:`Node._f_setattr`.

        """

        self._f_setattr(name, value)

    def del_attr(self, name):
        """Delete a PyTables attribute from this node.

        This method has the behavior described in :meth:`Node._f_delattr`.

        """

        self._f_delattr(name)

    # Data handling
    # `````````````
    def flush(self):
        """Flush pending data to disk.

        Saves whatever remaining buffered data to disk.  It also
        releases I/O buffers, so if you are filling many datasets in
        the same PyTables session, please call flush() regularly so as
        to help PyTables to keep memory requirements low.

        """

        self._g_flush()

    def _f_close(self, flush=True):
        """Close this node in the tree.

        This method has the behavior described in :meth:`Node._f_close`.
        Besides that, the optional argument flush tells whether to flush
        pending data to disk or not before closing.

        """

        if not self._v_isopen:
            return  # the node is already closed or not initialized

        # Only do a flush in case the leaf has an IO buffer.  The
        # internal buffers of HDF5 will be flushed afterwards during the
        # self._g_close() call.  Avoiding an unnecessary flush()
        # operation accelerates the closing for the unbuffered leaves.
        if flush and hasattr(self, "_v_iobuf"):
            self.flush()

        # Close the dataset and release resources
        self._g_close()

        # Close myself as a node.
        super(Leaf, self)._f_close()

    def close(self, flush=True):
        """Close this node in the tree.

        This method is completely equivalent to :meth:`Leaf._f_close`.

        """

        self._f_close(flush)


## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End: