1# -*- coding: utf-8 -*-
2"""
3    werkzeug.contrib.cache
4    ~~~~~~~~~~~~~~~~~~~~~~
5
6    The main problem with dynamic Web sites is, well, they're dynamic.  Each
7    time a user requests a page, the webserver executes a lot of code, queries
8    the database, renders templates until the visitor gets the page he sees.
9
10    This is a lot more expensive than just loading a file from the file system
11    and sending it to the visitor.
12
    For most Web applications, this overhead isn't a big deal, but once it
    does become one, you will be glad to have a cache system in place.
15
16    How Caching Works
17    =================
18
19    Caching is pretty simple.  Basically you have a cache object lurking around
20    somewhere that is connected to a remote cache or the file system or
21    something else.  When the request comes in you check if the current page
22    is already in the cache and if so, you're returning it from the cache.
23    Otherwise you generate the page and put it into the cache. (Or a fragment
24    of the page, you don't have to cache the full thing)
25
26    Here is a simple example of how to cache a sidebar for 5 minutes::
27
28        def get_sidebar(user):
29            identifier = 'sidebar_for/user%d' % user.id
30            value = cache.get(identifier)
31            if value is not None:
32                return value
33            value = generate_sidebar_for(user=user)
34            cache.set(identifier, value, timeout=60 * 5)
35            return value
36
37    Creating a Cache Object
38    =======================
39
40    To create a cache object you just import the cache system of your choice
41    from the cache module and instantiate it.  Then you can start working
42    with that object:
43
44    >>> from werkzeug.contrib.cache import SimpleCache
45    >>> c = SimpleCache()
46    >>> c.set("foo", "value")
47    >>> c.get("foo")
48    'value'
49    >>> c.get("missing") is None
50    True
51
52    Please keep in mind that you have to create the cache and put it somewhere
53    you have access to it (either as a module global you can import or you just
54    put it into your WSGI application).
55
56    :copyright: 2007 Pallets
57    :license: BSD-3-Clause
58"""
59import errno
60import os
61import platform
62import re
63import tempfile
64import warnings
65from hashlib import md5
66from time import time
67
68from .._compat import integer_types
69from .._compat import iteritems
70from .._compat import string_types
71from .._compat import text_type
72from .._compat import to_native
73from ..posixemulation import rename
74
75try:
76    import cPickle as pickle
77except ImportError:  # pragma: no cover
78    import pickle
79
# Emit a deprecation warning whenever this module is imported: the cache
# backends were extracted into the standalone "cachelib" package, and this
# module is scheduled for removal.  stacklevel=2 points the warning at the
# importing module rather than at this file.
warnings.warn(
    "'werkzeug.contrib.cache' is deprecated as of version 0.15 and will"
    " be removed in version 1.0. It has moved to https://github.com"
    "/pallets/cachelib.",
    DeprecationWarning,
    stacklevel=2,
)
87
88
89def _items(mappingorseq):
90    """Wrapper for efficient iteration over mappings represented by dicts
91    or sequences::
92
93        >>> for k, v in _items((i, i*i) for i in xrange(5)):
94        ...    assert k*k == v
95
96        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
97        ...    assert k*k == v
98
99    """
100    if hasattr(mappingorseq, "items"):
101        return iteritems(mappingorseq)
102    return mappingorseq
103
104
class BaseCache(object):
    """Baseclass for the cache systems.  All the cache systems implement this
    API or a superset of it.

    :param default_timeout: the default timeout (in seconds) that is used if
                            no timeout is specified on :meth:`set`. A timeout
                            of 0 indicates that the cache never expires.
    """

    def __init__(self, default_timeout=300):
        self.default_timeout = default_timeout

    def _normalize_timeout(self, timeout):
        # Fall back to the instance-wide default when no explicit timeout is
        # given.  Subclasses extend this to translate the relative timeout
        # into whatever representation their backend expects.
        if timeout is None:
            timeout = self.default_timeout
        return timeout

    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        return None

    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        return True

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key an item in the list is created::

            foo, bar = cache.get_many("foo", "bar")

        Has the same error handling as :meth:`get`.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return [self.get(k) for k in keys]

    def get_dict(self, *keys):
        """Like :meth:`get_many` but return a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(zip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout=None):
        """Add a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        return True

    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        return True

    def set_many(self, mapping, timeout=None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Whether all given keys have been set.
        :rtype: boolean
        """
        rv = True
        for key, value in _items(mapping):
            # Keep going on failure so the remaining keys are still written;
            # the aggregate result reports whether everything succeeded.
            if not self.set(key, value, timeout):
                rv = False
        return rv

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        :returns: Whether all given keys have been deleted.
        :rtype: boolean
        """
        return all(self.delete(key) for key in keys)

    def has(self, key):
        """Checks if a key exists in the cache without returning it. This is a
        cheap operation that bypasses loading the actual data on the backend.

        This method is optional and may not be implemented on all caches.

        :param key: the key to check
        """
        # Previously the "%s" placeholder was never filled in, so the error
        # message literally contained "%s" instead of the class name.
        raise NotImplementedError(
            "%s doesn't have an efficient implementation of `has`. That "
            "means it is impossible to check whether a key exists without "
            "fully loading the key's data. Consider using `self.get` "
            "explicitly if you don't care about performance."
            % self.__class__.__name__
        )

    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return True

    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        :returns: The new value or ``None`` for backend errors.
        """
        # A missing key reads as None, so it is treated as 0 and the key is
        # initialized with just the delta.
        value = (self.get(key) or 0) + delta
        return value if self.set(key, value) else None

    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to decrement.
        :param delta: the delta to subtract.
        :returns: The new value or `None` for backend errors.
        """
        value = (self.get(key) or 0) - delta
        return value if self.set(key, value) else None
270
271
class NullCache(BaseCache):
    """A cache backend that never stores anything.

    Handy for unit tests or for switching caching off without touching
    calling code: every operation inherited from :class:`BaseCache` is
    already a successful no-op, and :meth:`has` reports that no key is
    ever present.

    :param default_timeout: a dummy parameter that is ignored but exists
                            for API compatibility with other caches.
    """

    def has(self, key):
        # Nothing is ever stored, so no key can exist.
        return False
281
282
class SimpleCache(BaseCache):
    """Simple memory cache for single process environments.  This class exists
    mainly for the development server and is not 100% thread safe.  It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.

    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    """

    def __init__(self, threshold=500, default_timeout=300):
        BaseCache.__init__(self, default_timeout)
        # Maps key -> (expiry timestamp, pickled value).  An expiry of 0
        # means the entry never expires.
        self._cache = {}
        self._threshold = threshold

    def _prune(self):
        # Once the cache grows past the threshold, drop every expired entry
        # plus every third entry regardless of expiry, so the cache shrinks
        # even when nothing has expired yet.
        if len(self._cache) > self._threshold:
            now = time()
            toremove = []
            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
                if (expires != 0 and expires <= now) or idx % 3 == 0:
                    toremove.append(key)
            for key in toremove:
                self._cache.pop(key, None)

    def _normalize_timeout(self, timeout):
        # Translate the relative timeout into an absolute expiry timestamp;
        # 0 is kept as-is and means "never expire".
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = time() + timeout
        return timeout

    def get(self, key):
        try:
            expires, value = self._cache[key]
            if expires == 0 or expires > time():
                return pickle.loads(value)
        except (KeyError, pickle.PickleError):
            return None

    def set(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        return True

    def add(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        if key in self._cache:
            return False
        # setdefault keeps an existing value in the unlikely event another
        # thread inserted the key between the check above and this call.
        self._cache.setdefault(key, item)
        return True

    def delete(self, key):
        return self._cache.pop(key, None) is not None

    def has(self, key):
        try:
            expires, value = self._cache[key]
            return expires == 0 or expires > time()
        except KeyError:
            return False

    def clear(self):
        """Remove all entries from the in-memory store.

        The previous implementation aliased ``dict.clear`` directly, which
        returned ``None`` and therefore violated the documented boolean
        contract of :meth:`BaseCache.clear`.

        :returns: ``True``, since clearing a dict cannot fail.
        :rtype: boolean
        """
        self._cache.clear()
        return True
350
351
#: Validator for memcached keys bound to the compiled pattern's ``match``:
#: accepts 1-250 characters none of which fall in the byte range
#: 0x00-0x21 (control characters, space, "!") or equal 0xff.  Returns a
#: truthy match object for usable keys and ``None`` otherwise.
_test_memcached_key = re.compile(r"[^\x00-\x21\xff]{1,250}$").match
353
354
class MemcachedCache(BaseCache):
    """A cache that uses memcached as backend.

    The first argument can either be an object that resembles the API of a
    :class:`memcache.Client` or a tuple/list of server addresses. In the
    event that a tuple/list is passed, Werkzeug tries to import the best
    available memcache library.

    This cache looks into the following packages/modules to find bindings for
    memcached:

        - ``pylibmc``
        - ``google.appengine.api.memcache``
        - ``memcache``
        - ``libmc``

    Implementation notes:  This cache backend works around some limitations in
    memcached to simplify the interface.  For example unicode keys are encoded
    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
    the keys in the same format as passed.  Furthermore all get methods
    silently ignore key errors to not cause problems when untrusted user data
    is passed to the get methods which is often the case in web applications.

    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys.  This makes it
                       possible to use the same memcached server for different
                       applications.  Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """

    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ["127.0.0.1:11211"]
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError("no memcache module found")
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_native(key_prefix)

    def _normalize_key(self, key):
        # Encode unicode keys to a native utf-8 string and prepend the
        # configured prefix, producing the key actually sent to memcached.
        key = to_native(key, "utf-8")
        if self.key_prefix:
            key = self.key_prefix + key
        return key

    def _normalize_timeout(self, timeout):
        # memcached treats values > 30 days as absolute Unix timestamps, so
        # positive timeouts are converted to an absolute expiry time.
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = int(time()) + timeout
        return timeout

    def get(self, key):
        key = self._normalize_key(key)
        # memcached doesn't support keys longer than that.  Because often
        # checks for so long keys can occur because it's tested from user
        # submitted data etc we fail silently for getting.
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        # Build a mapping from the normalized (encoded/prefixed) key sent to
        # memcached back to the key the caller passed in, so the result dict
        # uses the caller's key format.
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if not isinstance(key, str):
                have_encoded_keys = True
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        _keys = list(key_mapping)
        d = rv = self._client.get_multi(_keys)
        if have_encoded_keys or self.key_prefix:
            # Translate the backend's keys back to the caller's originals.
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        if len(rv) < len(keys):
            # Keys that were invalid or missing on the server still appear
            # in the result, mapped to ``None``.
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)

    def set(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)

    def get_many(self, *keys):
        # Delegate to get_dict so a single multi-get round trip is used,
        # then restore positional order.
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=None):
        new_mapping = {}
        for key, value in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value

        timeout = self._normalize_timeout(timeout)
        # set_multi returns the list of keys that failed to be stored.
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return not failed_keys

    def delete(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.delete(key)

    def delete_many(self, *keys):
        # Invalid keys are silently dropped instead of raising, matching the
        # lenient behavior of the get methods.
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)

    def has(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            # Appending an empty string is a cheap existence probe: it only
            # succeeds when the key is already stored.
            return self._client.append(key, "")
        return False

    def clear(self):
        return self._client.flush_all()

    def inc(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.incr(key, delta)

    def dec(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client.  Used by the constructor."""
        # Try the candidate libraries in order of preference and return a
        # client from the first one that imports; ``None`` if none do.
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)

        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            # The App Engine client is pre-configured; it takes no servers.
            return memcache.Client()

        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)

        try:
            import libmc
        except ImportError:
            pass
        else:
            return libmc.Client(servers)
529
530
# Backwards compatibility: the former App Engine-specific cache class was
# folded into MemcachedCache (which can use google.appengine.api.memcache);
# keep the old name importable.
GAEMemcachedCache = MemcachedCache
533
534
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting address of the Redis
    server or an object resembling an instance of a redis.Redis class.

    Note: Python Redis API already takes care of encoding unicode strings on
    the fly.

    .. versionadded:: 0.7

    .. versionadded:: 0.8
       `key_prefix` was added.

    .. versionchanged:: 0.8
       This cache backend now properly serializes objects.

    .. versionchanged:: 0.8.3
       This cache backend now supports password authentication.

    .. versionchanged:: 0.10
        ``**kwargs`` is now passed to the redis object.

    :param host: address of the Redis server or an object which API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which Redis server listens for connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on Redis Server to connect.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: A prefix that should be added to all keys.

    Any additional keyword arguments will be passed to ``redis.Redis``.
    """

    def __init__(
        self,
        host="localhost",
        port=6379,
        password=None,
        db=0,
        default_timeout=300,
        key_prefix=None,
        **kwargs
    ):
        BaseCache.__init__(self, default_timeout)
        if host is None:
            raise ValueError("RedisCache host parameter may not be None")
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError("no redis module found")
            if kwargs.get("decode_responses", None):
                # The (de)serialization below operates on raw bytes.
                raise ValueError("decode_responses is not supported by RedisCache.")
            self._client = redis.Redis(
                host=host, port=port, password=password, db=db, **kwargs
            )
        else:
            # ``host`` is an already configured client object.
            self._client = host
        self.key_prefix = key_prefix or ""

    def _normalize_timeout(self, timeout):
        # -1 is used internally as the marker for "no expiry".
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout == 0:
            timeout = -1
        return timeout

    def dump_object(self, value):
        """Dumps an object into a string for redis.  By default it serializes
        integers as regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode("ascii")
        # The "!" sentinel marks pickled payloads for load_object.
        return b"!" + pickle.dumps(value)

    def load_object(self, value):
        """The reversal of :meth:`dump_object`.  This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b"!"):
            try:
                return pickle.loads(value[1:])
            except pickle.PickleError:
                return None
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization.  Still support that.
            return value

    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))

    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return [self.load_object(x) for x in self._client.mget(keys)]

    def set(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        if timeout == -1:
            # Persist the key without an expiry.
            result = self._client.set(name=self.key_prefix + key, value=dump)
        else:
            result = self._client.setex(
                name=self.key_prefix + key, value=dump, time=timeout
            )
        return result

    def add(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        created = self._client.setnx(name=self.key_prefix + key, value=dump)
        # Only attach an expiry when the key was actually created and a
        # finite timeout was requested.  The previous implementation always
        # called EXPIRE, and Redis deletes a key when given a negative TTL,
        # so ``add(..., timeout=0)`` ("never expire") destroyed the key it
        # had just created.
        if created and timeout != -1:
            self._client.expire(name=self.key_prefix + key, time=timeout)
        return created

    def set_many(self, mapping, timeout=None):
        timeout = self._normalize_timeout(timeout)
        # Use transaction=False to batch without calling redis MULTI
        # which is not supported by twemproxy
        pipe = self._client.pipeline(transaction=False)

        for key, value in _items(mapping):
            dump = self.dump_object(value)
            if timeout == -1:
                pipe.set(name=self.key_prefix + key, value=dump)
            else:
                pipe.setex(name=self.key_prefix + key, value=dump, time=timeout)
        return pipe.execute()

    def delete(self, key):
        return self._client.delete(self.key_prefix + key)

    def delete_many(self, *keys):
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return self._client.delete(*keys)

    def has(self, key):
        return self._client.exists(self.key_prefix + key)

    def clear(self):
        status = False
        if self.key_prefix:
            # Only remove this application's keys; KEYS + DELETE is O(n)
            # but preserves other prefixes sharing the database.
            keys = self._client.keys(self.key_prefix + "*")
            if keys:
                status = self._client.delete(*keys)
        else:
            status = self._client.flushdb()
        return status

    def inc(self, key, delta=1):
        return self._client.incr(name=self.key_prefix + key, amount=delta)

    def dec(self, key, delta=1):
        return self._client.decr(name=self.key_prefix + key, amount=delta)
698
699
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some. A threshold value of 0
                      indicates no threshold.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = ".__wz_cache"
    #: keep amount of files in a cache element
    _fs_count_file = "__wz_cache_count"

    def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode

        try:
            os.makedirs(self._path)
        except OSError as ex:
            # An already existing directory is fine; anything else is fatal.
            if ex.errno != errno.EEXIST:
                raise

        # Seed the persisted file counter from what is actually on disk.
        self._update_count(value=len(self._list_dir()))

    @property
    def _file_count(self):
        # The count itself is stored as a regular (management) cache entry.
        return self.get(self._fs_count_file) or 0

    def _update_count(self, delta=None, value=None):
        # If we have no threshold, don't count files
        if self._threshold == 0:
            return

        if delta:
            new_count = self._file_count + delta
        else:
            new_count = value or 0
        self.set(self._fs_count_file, new_count, mgmt_element=True)

    def _normalize_timeout(self, timeout):
        # Convert a relative timeout into an absolute expiry timestamp;
        # 0 stays 0 and means "never expire".
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout != 0:
            timeout = time() + timeout
        return int(timeout)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        # Use os.path.basename so this also works on platforms whose path
        # separator is not "/"; the old ``.split("/")[-1]`` failed to strip
        # the directory on Windows, letting the count file leak into the
        # listing (and thus into pruning and the count itself).
        mgmt_files = [
            os.path.basename(self._get_filename(name))
            for name in (self._fs_count_file,)
        ]
        return [
            os.path.join(self._path, fn)
            for fn in os.listdir(self._path)
            if not fn.endswith(self._fs_transaction_suffix) and fn not in mgmt_files
        ]

    def _prune(self):
        if self._threshold == 0 or not self._file_count > self._threshold:
            return

        entries = self._list_dir()
        now = time()
        for idx, fname in enumerate(entries):
            try:
                remove = False
                with open(fname, "rb") as f:
                    # The first pickled object in each file is the expiry.
                    expires = pickle.load(f)
                # Drop expired entries, plus every third entry so the cache
                # shrinks even when nothing has expired yet.
                remove = (expires != 0 and expires <= now) or idx % 3 == 0

                if remove:
                    os.remove(fname)
            except (IOError, OSError):
                pass
        self._update_count(value=len(self._list_dir()))

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                # Resynchronize the count with whatever is left on disk.
                self._update_count(value=len(self._list_dir()))
                return False
        self._update_count(value=0)
        return True

    def _get_filename(self, key):
        if isinstance(key, text_type):
            key = key.encode("utf-8")  # XXX unicode review
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, "rb") as f:
                # Files store two pickles back to back: expiry, then value.
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return pickle.load(f)
                else:
                    # Expired: remove eagerly so it doesn't linger on disk.
                    os.remove(filename)
                    return None
        except (IOError, OSError, pickle.PickleError):
            return None

    def add(self, key, value, timeout=None):
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            return self.set(key, value, timeout)
        return False

    def set(self, key, value, timeout=None, mgmt_element=False):
        # Management elements have no timeout
        if mgmt_element:
            timeout = 0

        # Don't prune on management element update, to avoid loop
        else:
            self._prune()

        timeout = self._normalize_timeout(timeout)
        filename = self._get_filename(key)
        try:
            # Write to a temp file in the same directory, then rename over
            # the target so readers never observe a partially written file.
            fd, tmp = tempfile.mkstemp(
                suffix=self._fs_transaction_suffix, dir=self._path
            )
            with os.fdopen(fd, "wb") as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=1)
            return True

    def delete(self, key, mgmt_element=False):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=-1)
            return True

    def has(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, "rb") as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return True
                else:
                    # Expired: remove eagerly, mirroring get().
                    os.remove(filename)
                    return False
        except (IOError, OSError, pickle.PickleError):
            return False
873
874
class UWSGICache(BaseCache):
    """Cache backend built on top of uWSGI's caching framework.

    .. note::
        This class cannot be used when running under PyPy, because the uWSGI
        API implementation for PyPy is lacking the needed functionality.

    :param default_timeout: The default timeout in seconds.
    :param cache: The name of the caching instance to connect to, for
        example: mycache@localhost:3031, defaults to an empty string, which
        means uWSGI will cache in the local instance. If the cache is in the
        same instance as the werkzeug app, you only have to provide the name of
        the cache.
    """

    def __init__(self, default_timeout=300, cache=""):
        BaseCache.__init__(self, default_timeout)

        # Refuse early: the uWSGI cache API is unavailable under PyPy.
        if platform.python_implementation() == "PyPy":
            raise RuntimeError(
                "uWSGI caching does not work under PyPy, see "
                "the docs for more details."
            )

        try:
            import uwsgi
        except ImportError:
            raise RuntimeError(
                "uWSGI could not be imported, are you running under uWSGI?"
            )
        # The uwsgi module only exists inside a running uWSGI process.
        self._uwsgi = uwsgi

        self.cache = cache

    def get(self, key):
        raw = self._uwsgi.cache_get(key, self.cache)
        # Values are stored pickled; a miss comes back as None.
        return pickle.loads(raw) if raw is not None else None

    def delete(self, key):
        return self._uwsgi.cache_del(key, self.cache)

    def set(self, key, value, timeout=None):
        payload = pickle.dumps(value)
        expiry = self._normalize_timeout(timeout)
        # cache_update overwrites an existing key.
        return self._uwsgi.cache_update(key, payload, expiry, self.cache)

    def add(self, key, value, timeout=None):
        payload = pickle.dumps(value)
        expiry = self._normalize_timeout(timeout)
        # cache_set fails if the key already exists.
        return self._uwsgi.cache_set(key, payload, expiry, self.cache)

    def clear(self):
        return self._uwsgi.cache_clear(self.cache)

    def has(self, key):
        return self._uwsgi.cache_exists(key, self.cache) is not None
934