1"""
2Reducer using memory mapping for numpy arrays
3"""
4# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>
5# Copyright: 2017, Thomas Moreau
6# License: BSD 3 clause
7
8from mmap import mmap
9import errno
10import os
11import stat
12import threading
13import atexit
14import tempfile
15import time
16import warnings
17import weakref
18from uuid import uuid4
19from multiprocessing import util
20
21from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
22
23try:
24    WindowsError
25except NameError:
26    WindowsError = type(None)
27
28try:
29    import numpy as np
30    from numpy.lib.stride_tricks import as_strided
31except ImportError:
32    np = None
33
34from .numpy_pickle import dump, load, load_temporary_memmap
35from .backports import make_memmap
36from .disk import delete_folder
37from .externals.loky.backend import resource_tracker
38
# Some systems have a ramdisk mounted by default; we can use it instead of
# /tmp as the default folder to dump big arrays to share with subprocesses.
41SYSTEM_SHARED_MEM_FS = '/dev/shm'
42
43# Minimal number of bytes available on SYSTEM_SHARED_MEM_FS to consider using
44# it as the default folder to dump big arrays to share with subprocesses.
45SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
46
47# Folder and file permissions to chmod temporary files generated by the
48# memmapping pool. Only the owner of the Python process can access the
49# temporary files and folder.
50FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
51FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
52
53# Set used in joblib workers, referencing the filenames of temporary memmaps
54# created by joblib to speed up data communication. In child processes, we add
55# a finalizer to these memmaps that sends a maybe_unlink call to the
56# resource_tracker, in order to free main memory as fast as possible.
57JOBLIB_MMAPS = set()
58
59
def _log_and_unlink(filename):
    """Ask the resource_tracker to decref (and maybe unlink) a memmap file."""
    from .externals.loky.backend.resource_tracker import _resource_tracker

    # Build the debug message first, then emit it in a single call.
    message = (
        "[FINALIZER CALL] object mapping to {} about to be deleted,"
        " decrementing the refcount of the file (pid: {})".format(
            os.path.basename(filename), os.getpid())
    )
    util.debug(message)
    _resource_tracker.maybe_unlink(filename, "file")
67
68
def add_maybe_unlink_finalizer(memmap):
    """Attach a GC finalizer to ``memmap`` that decrefs its backing file."""
    debug_msg = (
        "[FINALIZER ADD] adding finalizer to {} (id {}, filename {}, pid  {})"
        "".format(type(memmap), id(memmap),
                  os.path.basename(memmap.filename), os.getpid())
    )
    util.debug(debug_msg)
    # When the memmap object is garbage collected, notify the
    # resource_tracker so the backing file can be unlinked early.
    weakref.finalize(memmap, _log_and_unlink, memmap.filename)
75
76
def unlink_file(filename):
    """Wrapper around os.unlink with a retry mechanism.

    The retry mechanism has been implemented primarily to overcome a race
    condition happening during the finalizer of a np.memmap: when a process
    holding the last reference to a mmap-backed np.memmap/np.array is about to
    delete this array (and close the reference), it sends a maybe_unlink
    request to the resource_tracker. This request can be processed faster than
    it takes for the last reference of the memmap to be closed, yielding (on
    Windows) a PermissionError in the resource_tracker loop.
    """
    max_attempts = 10
    attempt = 0
    while True:
        attempt += 1
        try:
            os.unlink(filename)
            return
        except PermissionError:
            util.debug(
                '[ResourceTracker] tried to unlink {}, got '
                'PermissionError'.format(filename)
            )
            # Give up after the last attempt; otherwise wait a little and
            # retry, giving the memmap holder time to release the handle.
            if attempt == max_attempts:
                raise
            time.sleep(.2)
102
103
# Register unlink_file as the cleanup callback that the resource_tracker
# invokes for tracked resources of type "file" (temporary memmap files).
resource_tracker._CLEANUP_FUNCS['file'] = unlink_file
105
106
class _WeakArrayKeyMap:
    """A variant of weakref.WeakKeyDictionary for unhashable numpy arrays.

    This datastructure will be used with numpy arrays as obj keys, therefore we
    do not use the __get__ / __set__ methods to avoid any conflict with the
    numpy fancy indexing syntax.
    """

    def __init__(self):
        # Maps id(obj) -> (weakref to obj, stored value).
        self._data = {}

    def get(self, obj):
        # Look up by id(); the weakref check guards against the (unlikely)
        # case where the id has been recycled by a new object before the
        # on_destroy callback removed the stale entry.
        ref, val = self._data[id(obj)]
        if ref() is not obj:
            # In case of race condition with on_destroy: could never be
            # triggered by the joblib tests with CPython.
            raise KeyError(obj)
        return val

    def set(self, obj, value):
        key = id(obj)
        try:
            ref, _ = self._data[key]
            if ref() is not obj:
                # In case of race condition with on_destroy: could never be
                # triggered by the joblib tests with CPython.
                # NOTE: this raise is intentionally caught by the except
                # clause below, so a stale entry gets replaced with a fresh
                # weakref to the new object.
                raise KeyError(obj)
        except KeyError:
            # Insert the new entry in the mapping along with a weakref
            # callback to automatically delete the entry from the mapping
            # as soon as the object used as key is garbage collected.
            def on_destroy(_):
                del self._data[key]
            ref = weakref.ref(obj, on_destroy)
        self._data[key] = ref, value

    def __getstate__(self):
        # This map holds weakrefs keyed on object ids, both of which are
        # only meaningful within the current process.
        raise PicklingError("_WeakArrayKeyMap is not pickleable")
145
146
147###############################################################################
148# Support for efficient transient pickling of numpy data structures
149
150
151def _get_backing_memmap(a):
152    """Recursively look up the original np.memmap instance base if any."""
153    b = getattr(a, 'base', None)
154    if b is None:
155        # TODO: check scipy sparse datastructure if scipy is installed
156        # a nor its descendants do not have a memmap base
157        return None
158
159    elif isinstance(b, mmap):
160        # a is already a real memmap instance.
161        return a
162
163    else:
164        # Recursive exploration of the base ancestry
165        return _get_backing_memmap(b)
166
167
168def _get_temp_dir(pool_folder_name, temp_folder=None):
169    """Get the full path to a subfolder inside the temporary folder.
170
171    Parameters
172    ----------
173    pool_folder_name : str
174        Sub-folder name used for the serialization of a pool instance.
175
176    temp_folder: str, optional
177        Folder to be used by the pool for memmapping large arrays
178        for sharing memory with worker processes. If None, this will try in
179        order:
180
181        - a folder pointed by the JOBLIB_TEMP_FOLDER environment
182          variable,
183        - /dev/shm if the folder exists and is writable: this is a
184          RAMdisk filesystem available by default on modern Linux
185          distributions,
186        - the default system temporary folder that can be
187          overridden with TMP, TMPDIR or TEMP environment
188          variables, typically /tmp under Unix operating systems.
189
190    Returns
191    -------
192    pool_folder : str
193       full path to the temporary folder
194    use_shared_mem : bool
195       whether the temporary folder is written to the system shared memory
196       folder or some other temporary folder.
197    """
198    use_shared_mem = False
199    if temp_folder is None:
200        temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
201    if temp_folder is None:
202        if os.path.exists(SYSTEM_SHARED_MEM_FS):
203            try:
204                shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
205                available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
206                if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
207                    # Try to see if we have write access to the shared mem
208                    # folder only if it is reasonably large (that is 2GB or
209                    # more).
210                    temp_folder = SYSTEM_SHARED_MEM_FS
211                    pool_folder = os.path.join(temp_folder, pool_folder_name)
212                    if not os.path.exists(pool_folder):
213                        os.makedirs(pool_folder)
214                    use_shared_mem = True
215            except (IOError, OSError):
216                # Missing rights in the /dev/shm partition, fallback to regular
217                # temp folder.
218                temp_folder = None
219    if temp_folder is None:
220        # Fallback to the default tmp folder, typically /tmp
221        temp_folder = tempfile.gettempdir()
222    temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
223    pool_folder = os.path.join(temp_folder, pool_folder_name)
224    return pool_folder, use_shared_mem
225
226
def has_shareable_memory(a):
    """Return True if a is backed by some mmap buffer directly or not."""
    backing = _get_backing_memmap(a)
    return backing is not None
230
231
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
                         total_buffer_len, unlink_on_gc_collect):
    """Reconstruct an array view on a memory mapped file."""
    # Never re-open with 'w+' on unpickling: that would zero out the data
    # that was already dumped to the file.
    effective_mode = 'r+' if mode == 'w+' else mode

    if strides is None:
        # Contiguous case: map the requested shape directly.
        return make_memmap(
            filename, dtype=dtype, shape=shape, mode=effective_mode,
            offset=offset, order=order,
            unlink_on_gc_collect=unlink_on_gc_collect
        )

    # Non-contiguous case: map the whole enclosing buffer first, then carve
    # out the non-contiguous view with the stride-tricks API.
    enclosing_buffer = make_memmap(
        filename, dtype=dtype, shape=total_buffer_len, offset=offset,
        mode=effective_mode, order=order,
        unlink_on_gc_collect=unlink_on_gc_collect
    )
    return as_strided(enclosing_buffer, shape=shape, strides=strides)
253
254
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.

    Returns a ``(callable, args)`` pair compatible with pickle's reduce
    protocol, reconstructing ``a`` as a (possibly strided) view on the
    memmapped file via ``_strided_from_memmap``.
    """
    # offset that comes from the striding differences between a and m
    util.debug('[MEMMAP REDUCE] reducing a memmap-backed array '
               '(shape, {}, pid: {})'.format(a.shape, os.getpid()))
    a_start, a_end = np.byte_bounds(a)
    m_start = np.byte_bounds(m)[0]
    offset = a_start - m_start

    # offset from the backing memmap
    offset += m.offset

    if m.flags['F_CONTIGUOUS']:
        order = 'F'
    else:
        # The backing memmap buffer is necessarily contiguous hence C if not
        # Fortran
        order = 'C'

    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # If the array is a contiguous view, no need to pass the strides
        strides = None
        total_buffer_len = None
    else:
        # Compute the total number of items to map from which the strided
        # view will be extracted.
        strides = a.strides
        total_buffer_len = (a_end - a_start) // a.itemsize

    # The final False disables unlink_on_gc_collect in the reconstructed
    # memmap: the reconstructing side must not unlink the backing file when
    # its memmap is garbage collected.
    return (_strided_from_memmap,
            (m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
             total_buffer_len, False))
292
293
def reduce_array_memmap_backward(a):
    """reduce a np.array or a np.memmap from a child process"""
    m = _get_backing_memmap(a)
    backed_by_external_memmap = (
        isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS
    )
    if not backed_by_external_memmap:
        # a is either a regular (not memmap-backed) numpy array, or an array
        # backed by a shared temporary file created by joblib. In the latter
        # case, in order to limit the lifespan of these temporary files, we
        # serialize the memmap as a regular numpy array, and decref the
        # file backing the memmap (done implicitly in a previously registered
        # finalizer, see ``unlink_on_gc_collect`` for more details)
        return (
            loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), )
        )
    # a is backed by a memmapped file that joblib does not manage:
    # reconstruct a on the other side by re-opening that same file.
    return _reduce_memmap_backed(a, m)
311
312
class ArrayMemmapForwardReducer(object):
    """Reducer callable to dump large arrays to memmap files.

    Parameters
    ----------
    max_nbytes: int
        Threshold to trigger memmapping of large arrays to files created
        in a folder.
    temp_folder_resolver: callable
        A callable in charge of resolving a temporary folder name where files
        for backing memmapped arrays are created.
    mmap_mode: 'r', 'r+' or 'c'
        Mode for the created memmap datastructure. See the documentation of
        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
        automatically to avoid zeroing the data on unpickling.
    verbose: int, optional, 0 by default
        If verbose > 0, memmap creations are logged.
        If verbose > 1, both memmap creations, reuse and array pickling are
        logged.
    prewarm: bool or "auto", optional, True by default.
        Force a read on newly memmapped array to make sure that OS pre-caches
        its memory. This can be useful to avoid concurrent disk access when
        the same data array is passed to different worker processes. If
        "auto", prewarming is enabled only when the temporary folder does not
        live on the shared-memory filesystem.
    """

    def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode,
                 unlink_on_gc_collect, verbose=0, prewarm=True):
        self._max_nbytes = max_nbytes
        self._temp_folder_resolver = temp_folder_resolver
        self._mmap_mode = mmap_mode
        self.verbose = int(verbose)
        if prewarm == "auto":
            # Prewarming is only useful when the memmap files live on a real
            # disk: on a shared memory filesystem there is no disk access to
            # warm up.
            self._prewarm = not self._temp_folder.startswith(
                SYSTEM_SHARED_MEM_FS
            )
        else:
            self._prewarm = prewarm
        # Note: do not re-assign self._prewarm here. A stray unconditional
        # ``self._prewarm = prewarm`` used to clobber the boolean resolved
        # by the "auto" branch above, leaving the truthy string "auto" as
        # the flag and making the auto-detection dead code.
        self._memmaped_arrays = _WeakArrayKeyMap()
        self._temporary_memmaped_filenames = set()
        self._unlink_on_gc_collect = unlink_on_gc_collect

    @property
    def _temp_folder(self):
        # Resolved lazily so the folder can depend on the currently active
        # context of the resolver.
        return self._temp_folder_resolver()

    def __reduce__(self):
        # The ArrayMemmapForwardReducer is passed to the children processes: it
        # needs to be pickled but the _WeakArrayKeyMap need to be skipped as
        # it's only guaranteed to be consistent with the parent process memory
        # garbage collection.
        # Although this reducer is pickled, it is not needed in its destination
        # process (child processes), as we only use this reducer to send
        # memmaps from the parent process to the children processes. For this
        # reason, we can afford skipping the resolver, (which would otherwise
        # be unpicklable), and pass it as None instead.
        args = (self._max_nbytes, None, self._mmap_mode,
                self._unlink_on_gc_collect)
        kwargs = {
            'verbose': self.verbose,
            'prewarm': self._prewarm,
        }
        return ArrayMemmapForwardReducer, args, kwargs

    def __call__(self, a):
        m = _get_backing_memmap(a)
        if m is not None and isinstance(m, np.memmap):
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)

        if (not a.dtype.hasobject and self._max_nbytes is not None and
                a.nbytes > self._max_nbytes):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                # The folder already existing is fine; anything else (e.g.
                # permission issues) is re-raised.
                if e.errno != errno.EEXIST:
                    raise e

            try:
                basename = self._memmaped_arrays.get(a)
            except KeyError:
                # Generate a new unique random filename. The process and thread
                # ids are only useful for debugging purpose and to make it
                # easier to cleanup orphaned files in case of hard process
                # kill (e.g. by "kill -9" or segfault).
                basename = "{}-{}-{}.pkl".format(
                    os.getpid(), id(threading.current_thread()), uuid4().hex)
                self._memmaped_arrays.set(a, basename)
            filename = os.path.join(self._temp_folder, basename)

            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once

            is_new_memmap = filename not in self._temporary_memmaped_filenames

            # add the memmap to the list of temporary memmaps created by joblib
            self._temporary_memmaped_filenames.add(filename)

            if self._unlink_on_gc_collect:
                # Bump reference count of the memmap by 1 to account for
                # shared usage of the memmap by a child process. The
                # corresponding decref call will be executed upon calling
                # resource_tracker.maybe_unlink, registered as a finalizer in
                # the child.
                # the incref/decref calls here are only possible when the child
                # and the parent share the same resource_tracker. It is not the
                # case for the multiprocessing backend, but it does not matter
                # because unlinking a memmap from a child process is only
                # useful to control the memory usage of long-lasting child
                # processes, while the multiprocessing-based pools terminate
                # their workers at the end of a map() call.
                resource_tracker.register(filename, "file")

            if is_new_memmap:
                # Incref each temporary memmap created by joblib one extra
                # time.  This means that these memmaps will only be deleted
                # once an extra maybe_unlink() is called, which is done once
                # all the jobs have completed (or been canceled) in the
                # Parallel._terminate_backend() method.
                resource_tracker.register(filename, "file")

            if not os.path.exists(filename):
                util.debug(
                    "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
                    "creating a new memmap at {}".format(
                        a.shape, a.dtype, filename))
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)

                if self._prewarm:
                    # Warm up the data by accessing it. This operation ensures
                    # that the disk access required to create the memmapping
                    # file are performed in the reducing process and avoids
                    # concurrent memmap creation in multiple children
                    # processes.
                    load(filename, mmap_mode=self._mmap_mode).max()

            else:
                util.debug(
                    "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
                    "reusing memmap file: {}".format(
                        a.shape, a.dtype, os.path.basename(filename)))

            # The worker process will use joblib.load to memmap the data
            return (
                (load_temporary_memmap, (filename, self._mmap_mode,
                                         self._unlink_on_gc_collect))
            )
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            util.debug(
                '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
                ' dtype={}).'.format(a.shape, a.dtype))
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
471
472
def get_memmapping_reducers(
        forward_reducers=None, backward_reducers=None,
        temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0,
        prewarm=False, unlink_on_gc_collect=True, **kwargs):
    """Construct a pair of memmapping reducer linked to a tmpdir.

    This function manage the creation and the clean up of the temporary folders
    underlying the memory maps and should be use to get the reducers necessary
    to construct joblib pool or executor.
    """
    forward_reducers = {} if forward_reducers is None else forward_reducers
    backward_reducers = {} if backward_reducers is None else backward_reducers

    if np is not None:
        # Forward direction (parent -> child): one smart reducer instance
        # detects memmap backed arrays and is also able to dump large
        # in-memory arrays (over the max_nbytes threshold) to memmap files.
        reduce_ndarray = ArrayMemmapForwardReducer(
            max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect,
            verbose, prewarm=prewarm)
        for array_type in (np.ndarray, np.memmap):
            forward_reducers[array_type] = reduce_ndarray
            # Backward direction (child -> parent): always pickle plain
            # in-memory arrays rather than dumping them as memmaps, to avoid
            # confusing the caller and making it tricky to collect the
            # temporary folder.
            backward_reducers[array_type] = reduce_array_memmap_backward

    return forward_reducers, backward_reducers
506
507
class TemporaryResourcesManager(object):
    """Stateful object able to manage temporary folder and pickles

    It exposes:
    - a per-context folder name resolving API that memmap-based reducers will
      rely on to know where to pickle the temporary memmaps
    - a temporary file/folder management API that internally uses the
      resource_tracker.
    """

    def __init__(self, temp_folder_root=None, context_id=None):
        self._current_temp_folder = None
        # Root folder under which per-context subfolders are created; None
        # defers the choice to _get_temp_dir's detection logic.
        self._temp_folder_root = temp_folder_root
        self._use_shared_mem = None
        # Maps context_id -> full path of that context's temporary subfolder.
        self._cached_temp_folders = dict()
        # Unique id of this manager instance, mixed into folder names (see
        # register_new_context for the rationale).
        self._id = uuid4().hex
        # Maps context_id -> atexit finalizer in charge of folder cleanup.
        self._finalizers = {}
        if context_id is None:
            # It would be safer to not assign a default context id (less silent
            # bugs), but doing this while maintaining backward compatibility
            # with the previous, context-unaware version get_memmaping_executor
            # exposes too many low-level details.
            context_id = uuid4().hex
        self.set_current_context(context_id)

    def set_current_context(self, context_id):
        """Make ``context_id`` the active context, registering it if new."""
        self._current_context_id = context_id
        self.register_new_context(context_id)

    def register_new_context(self, context_id):
        """Compute and cache a folder name for ``context_id`` (no FS write)."""
        # Prepare a sub-folder name specific to a context (usually a unique id
        # generated by each instance of the Parallel class). Do not create in
        # advance to spare FS write access if no array is to be dumped).
        if context_id in self._cached_temp_folders:
            return
        else:
            # During its lifecycle, one Parallel object can have several
            # executors associated to it (for instance, if a loky worker raises
            # an exception, joblib shutdowns the executor and instantly
            # recreates a new one before raising the error - see
            # ``ensure_ready``.  Because we don't want two executors tied to
            # the same Parallel object (and thus the same context id) to
            # register/use/delete the same folder, we also add an id specific
            # to the current Manager (and thus specific to its associated
            # executor) to the folder name.
            new_folder_name = (
                "joblib_memmapping_folder_{}_{}_{}".format(
                    os.getpid(), self._id, context_id)
            )
            new_folder_path, _ = _get_temp_dir(
                new_folder_name, self._temp_folder_root
            )
            self.register_folder_finalizer(new_folder_path, context_id)
            self._cached_temp_folders[context_id] = new_folder_path

    def resolve_temp_folder_name(self):
        """Return a folder name specific to the currently activated context"""
        return self._cached_temp_folders[self._current_context_id]

    def _unregister_context(self, context_id=None):
        """Forget a context's folder: drop tracker and atexit registrations.

        With ``context_id=None``, unregister every known context.
        """
        if context_id is None:
            # Iterate over a snapshot of the keys because the recursive calls
            # below delete entries from _cached_temp_folders.
            for context_id in list(self._cached_temp_folders):
                self._unregister_context(context_id)
        else:
            temp_folder = self._cached_temp_folders[context_id]
            finalizer = self._finalizers[context_id]

            resource_tracker.unregister(temp_folder, "folder")
            atexit.unregister(finalizer)

            self._cached_temp_folders.pop(context_id)
            self._finalizers.pop(context_id)

    # resource management API

    def register_folder_finalizer(self, pool_subfolder, context_id):
        """Track ``pool_subfolder`` and register its atexit cleanup."""
        # Register the garbage collector at program exit in case caller forgets
        # to call terminate explicitly: note we do not pass any reference to
        # ensure that this callback won't prevent garbage collection of
        # parallel instance and related file handler resources such as POSIX
        # semaphores and pipes
        pool_module_name = whichmodule(delete_folder, 'delete_folder')
        resource_tracker.register(pool_subfolder, "folder")

        def _cleanup():
            # In some cases the Python runtime seems to set delete_folder to
            # None just before exiting when accessing the delete_folder
            # function from the closure namespace. So instead we reimport
            # the delete_folder function explicitly.
            # https://github.com/joblib/joblib/issues/328
            # We cannot just use from 'joblib.pool import delete_folder'
            # because joblib should only use relative imports to allow
            # easy vendoring.
            delete_folder = __import__(
                pool_module_name, fromlist=['delete_folder']).delete_folder
            try:
                delete_folder(pool_subfolder, allow_non_empty=True)
                resource_tracker.unregister(pool_subfolder, "folder")
            except OSError:
                warnings.warn("Failed to delete temporary folder: {}"
                              .format(pool_subfolder))

        self._finalizers[context_id] = atexit.register(_cleanup)

    def _unlink_temporary_resources(self, context_id=None):
        """Unlink temporary resources created by a process-based pool"""
        if context_id is None:
            # iterate over a copy of the cache keys because
            # unlink_temporary_resources further deletes an entry in this
            # cache
            for context_id in self._cached_temp_folders.copy():
                self._unlink_temporary_resources(context_id)
        else:
            temp_folder = self._cached_temp_folders[context_id]
            if os.path.exists(temp_folder):
                # Decref every memmap file of this context through the
                # resource_tracker, then try to remove the (now empty) folder.
                for filename in os.listdir(temp_folder):
                    resource_tracker.maybe_unlink(
                        os.path.join(temp_folder, filename), "file"
                    )
                self._try_delete_folder(
                    allow_non_empty=False, context_id=context_id
                )

    def _unregister_temporary_resources(self, context_id=None):
        """Unregister temporary resources created by a process-based pool"""
        if context_id is None:
            # Unlike _unlink_temporary_resources, this does not delete cache
            # entries, so a plain iteration over the keys is safe here.
            for context_id in self._cached_temp_folders:
                self._unregister_temporary_resources(context_id)
        else:
            temp_folder = self._cached_temp_folders[context_id]
            if os.path.exists(temp_folder):
                for filename in os.listdir(temp_folder):
                    resource_tracker.unregister(
                        os.path.join(temp_folder, filename), "file"
                    )

    def _try_delete_folder(self, allow_non_empty, context_id=None):
        """Best-effort deletion of a context's folder (all contexts if None)."""
        if context_id is None:
            # ditto
            for context_id in self._cached_temp_folders.copy():
                self._try_delete_folder(
                    allow_non_empty=allow_non_empty, context_id=context_id
                )
        else:
            temp_folder = self._cached_temp_folders[context_id]
            try:
                delete_folder(
                    temp_folder, allow_non_empty=allow_non_empty
                )
                # Now that this folder is deleted, we can forget about it
                self._unregister_context(context_id)

            except OSError:
                # Temporary folder cannot be deleted right now. No need to
                # handle it though, as this folder will be cleaned up by an
                # atexit finalizer registered by the memmapping_reducer.
                pass
665