1"""functools.py - Tools for working with functions and callable objects 2""" 3# Python module wrapper for _functools C module 4# to allow utilities written in Python to be added 5# to the functools module. 6# Written by Nick Coghlan <ncoghlan at gmail.com>, 7# Raymond Hettinger <python at rcn.com>, 8# and Łukasz Langa <lukasz at langa.pl>. 9# Copyright (C) 2006-2013 Python Software Foundation. 10# See C source code for _functools credits/copyright 11 12__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES', 13 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial', 14 'partialmethod', 'singledispatch', 'singledispatchmethod', 15 "cached_property"] 16 17from abc import get_cache_token 18from collections import namedtuple 19# import types, weakref # Deferred to single_dispatch() 20from reprlib import recursive_repr 21from _thread import RLock 22 23 24################################################################################ 25### update_wrapper() and wraps() decorator 26################################################################################ 27 28# update_wrapper() and wraps() are tools to help write 29# wrapper functions that can handle naive introspection 30 31WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__', 32 '__annotations__') 33WRAPPER_UPDATES = ('__dict__',) 34def update_wrapper(wrapper, 35 wrapped, 36 assigned = WRAPPER_ASSIGNMENTS, 37 updated = WRAPPER_UPDATES): 38 """Update a wrapper function to look like the wrapped function 39 40 wrapper is the function to be updated 41 wrapped is the original function 42 assigned is a tuple naming the attributes assigned directly 43 from the wrapped function to the wrapper function (defaults to 44 functools.WRAPPER_ASSIGNMENTS) 45 updated is a tuple naming the attributes of the wrapper that 46 are updated with the corresponding attribute from the wrapped 47 function (defaults to functools.WRAPPER_UPDATES) 48 """ 49 for attr in assigned: 50 try: 51 value = getattr(wrapped, attr) 52 except AttributeError: 53 pass 54 else: 55 setattr(wrapper, attr, value) 56 for attr in updated: 57 getattr(wrapper, attr).update(getattr(wrapped, attr, {})) 58 # Issue #17482: set __wrapped__ last so we don't inadvertently copy it 59 # from the wrapped function when updating __dict__ 60 wrapper.__wrapped__ = wrapped 61 # Return the wrapper so this can be used as a decorator via partial() 62 return wrapper 63 64def wraps(wrapped, 65 assigned = WRAPPER_ASSIGNMENTS, 66 updated = WRAPPER_UPDATES): 67 """Decorator factory to apply update_wrapper() to a wrapper function 68 69 Returns a decorator that invokes update_wrapper() with the decorated 70 function as the wrapper argument and the arguments to wraps() as the 71 remaining arguments. Default arguments are as for update_wrapper(). 72 This is a convenience function to simplify applying partial() to 73 update_wrapper(). 74 """ 75 return partial(update_wrapper, wrapped=wrapped, 76 assigned=assigned, updated=updated) 77 78 79################################################################################ 80### total_ordering class decorator 81################################################################################ 82 83# The total ordering functions all invoke the root magic method directly 84# rather than using the corresponding operator. This avoids possible 85# infinite recursion that could occur when the operator dispatch logic 86# detects a NotImplemented result and then calls a reflected method. 87 88def _gt_from_lt(self, other, NotImplemented=NotImplemented): 89 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' 90 op_result = self.__lt__(other) 91 if op_result is NotImplemented: 92 return op_result 93 return not op_result and self != other 94 95def _le_from_lt(self, other, NotImplemented=NotImplemented): 96 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' 97 op_result = self.__lt__(other) 98 return op_result or self == other 99 100def _ge_from_lt(self, other, NotImplemented=NotImplemented): 101 'Return a >= b. Computed by @total_ordering from (not a < b).' 102 op_result = self.__lt__(other) 103 if op_result is NotImplemented: 104 return op_result 105 return not op_result 106 107def _ge_from_le(self, other, NotImplemented=NotImplemented): 108 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' 109 op_result = self.__le__(other) 110 if op_result is NotImplemented: 111 return op_result 112 return not op_result or self == other 113 114def _lt_from_le(self, other, NotImplemented=NotImplemented): 115 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' 116 op_result = self.__le__(other) 117 if op_result is NotImplemented: 118 return op_result 119 return op_result and self != other 120 121def _gt_from_le(self, other, NotImplemented=NotImplemented): 122 'Return a > b. Computed by @total_ordering from (not a <= b).' 123 op_result = self.__le__(other) 124 if op_result is NotImplemented: 125 return op_result 126 return not op_result 127 128def _lt_from_gt(self, other, NotImplemented=NotImplemented): 129 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' 130 op_result = self.__gt__(other) 131 if op_result is NotImplemented: 132 return op_result 133 return not op_result and self != other 134 135def _ge_from_gt(self, other, NotImplemented=NotImplemented): 136 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' 137 op_result = self.__gt__(other) 138 return op_result or self == other 139 140def _le_from_gt(self, other, NotImplemented=NotImplemented): 141 'Return a <= b. Computed by @total_ordering from (not a > b).' 142 op_result = self.__gt__(other) 143 if op_result is NotImplemented: 144 return op_result 145 return not op_result 146 147def _le_from_ge(self, other, NotImplemented=NotImplemented): 148 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' 149 op_result = self.__ge__(other) 150 if op_result is NotImplemented: 151 return op_result 152 return not op_result or self == other 153 154def _gt_from_ge(self, other, NotImplemented=NotImplemented): 155 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' 156 op_result = self.__ge__(other) 157 if op_result is NotImplemented: 158 return op_result 159 return op_result and self != other 160 161def _lt_from_ge(self, other, NotImplemented=NotImplemented): 162 'Return a < b. Computed by @total_ordering from (not a >= b).' 163 op_result = self.__ge__(other) 164 if op_result is NotImplemented: 165 return op_result 166 return not op_result 167 168_convert = { 169 '__lt__': [('__gt__', _gt_from_lt), 170 ('__le__', _le_from_lt), 171 ('__ge__', _ge_from_lt)], 172 '__le__': [('__ge__', _ge_from_le), 173 ('__lt__', _lt_from_le), 174 ('__gt__', _gt_from_le)], 175 '__gt__': [('__lt__', _lt_from_gt), 176 ('__ge__', _ge_from_gt), 177 ('__le__', _le_from_gt)], 178 '__ge__': [('__le__', _le_from_ge), 179 ('__gt__', _gt_from_ge), 180 ('__lt__', _lt_from_ge)] 181} 182 183def total_ordering(cls): 184 """Class decorator that fills in missing ordering methods""" 185 # Find user-defined comparisons (not those inherited from object). 186 roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)} 187 if not roots: 188 raise ValueError('must define at least one ordering operation: < > <= >=') 189 root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ 190 for opname, opfunc in _convert[root]: 191 if opname not in roots: 192 opfunc.__name__ = opname 193 setattr(cls, opname, opfunc) 194 return cls 195 196 197################################################################################ 198### cmp_to_key() function converter 199################################################################################ 200 201def cmp_to_key(mycmp): 202 """Convert a cmp= function into a key= function""" 203 class K(object): 204 __slots__ = ['obj'] 205 def __init__(self, obj): 206 self.obj = obj 207 def __lt__(self, other): 208 return mycmp(self.obj, other.obj) < 0 209 def __gt__(self, other): 210 return mycmp(self.obj, other.obj) > 0 211 def __eq__(self, other): 212 return mycmp(self.obj, other.obj) == 0 213 def __le__(self, other): 214 return mycmp(self.obj, other.obj) <= 0 215 def __ge__(self, other): 216 return mycmp(self.obj, other.obj) >= 0 217 __hash__ = None 218 return K 219 220try: 221 from _functools import cmp_to_key 222except ImportError: 223 pass 224 225 226################################################################################ 227### reduce() sequence to a single item 228################################################################################ 229 230_initial_missing = object() 231 232def reduce(function, sequence, initial=_initial_missing): 233 """ 234 reduce(function, sequence[, initial]) -> value 235 236 Apply a function of two arguments cumulatively to the items of a sequence, 237 from left to right, so as to reduce the sequence to a single value. 238 For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates 239 ((((1+2)+3)+4)+5). If initial is present, it is placed before the items 240 of the sequence in the calculation, and serves as a default when the 241 sequence is empty. 242 """ 243 244 it = iter(sequence) 245 246 if initial is _initial_missing: 247 try: 248 value = next(it) 249 except StopIteration: 250 raise TypeError("reduce() of empty sequence with no initial value") from None 251 else: 252 value = initial 253 254 for element in it: 255 value = function(value, element) 256 257 return value 258 259try: 260 from _functools import reduce 261except ImportError: 262 pass 263 264 265################################################################################ 266### partial() argument application 267################################################################################ 268 269# Purely functional, no descriptor behaviour 270class partial: 271 """New function with partial application of the given arguments 272 and keywords. 273 """ 274 275 __slots__ = "func", "args", "keywords", "__dict__", "__weakref__" 276 277 def __new__(cls, func, /, *args, **keywords): 278 if not callable(func): 279 raise TypeError("the first argument must be callable") 280 281 if hasattr(func, "func"): 282 args = func.args + args 283 keywords = {**func.keywords, **keywords} 284 func = func.func 285 286 self = super(partial, cls).__new__(cls) 287 288 self.func = func 289 self.args = args 290 self.keywords = keywords 291 return self 292 293 def __call__(self, /, *args, **keywords): 294 keywords = {**self.keywords, **keywords} 295 return self.func(*self.args, *args, **keywords) 296 297 @recursive_repr() 298 def __repr__(self): 299 qualname = type(self).__qualname__ 300 args = [repr(self.func)] 301 args.extend(repr(x) for x in self.args) 302 args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items()) 303 if type(self).__module__ == "functools": 304 return f"functools.{qualname}({', '.join(args)})" 305 return f"{qualname}({', '.join(args)})" 306 307 def __reduce__(self): 308 return type(self), (self.func,), (self.func, self.args, 309 self.keywords or None, self.__dict__ or None) 310 311 def __setstate__(self, state): 312 if not isinstance(state, tuple): 313 raise TypeError("argument to __setstate__ must be a tuple") 314 if len(state) != 4: 315 raise TypeError(f"expected 4 items in state, got {len(state)}") 316 func, args, kwds, namespace = state 317 if (not callable(func) or not isinstance(args, tuple) or 318 (kwds is not None and not isinstance(kwds, dict)) or 319 (namespace is not None and not isinstance(namespace, dict))): 320 raise TypeError("invalid partial state") 321 322 args = tuple(args) # just in case it's a subclass 323 if kwds is None: 324 kwds = {} 325 elif type(kwds) is not dict: # XXX does it need to be *exactly* dict? 326 kwds = dict(kwds) 327 if namespace is None: 328 namespace = {} 329 330 self.__dict__ = namespace 331 self.func = func 332 self.args = args 333 self.keywords = kwds 334 335try: 336 from _functools import partial 337except ImportError: 338 pass 339 340# Descriptor version 341class partialmethod(object): 342 """Method descriptor with partial application of the given arguments 343 and keywords. 344 345 Supports wrapping existing descriptors and handles non-descriptor 346 callables as instance methods. 347 """ 348 349 def __init__(*args, **keywords): 350 if len(args) >= 2: 351 self, func, *args = args 352 elif not args: 353 raise TypeError("descriptor '__init__' of partialmethod " 354 "needs an argument") 355 elif 'func' in keywords: 356 func = keywords.pop('func') 357 self, *args = args 358 import warnings 359 warnings.warn("Passing 'func' as keyword argument is deprecated", 360 DeprecationWarning, stacklevel=2) 361 else: 362 raise TypeError("type 'partialmethod' takes at least one argument, " 363 "got %d" % (len(args)-1)) 364 args = tuple(args) 365 366 if not callable(func) and not hasattr(func, "__get__"): 367 raise TypeError("{!r} is not callable or a descriptor" 368 .format(func)) 369 370 # func could be a descriptor like classmethod which isn't callable, 371 # so we can't inherit from partial (it verifies func is callable) 372 if isinstance(func, partialmethod): 373 # flattening is mandatory in order to place cls/self before all 374 # other arguments 375 # it's also more efficient since only one function will be called 376 self.func = func.func 377 self.args = func.args + args 378 self.keywords = {**func.keywords, **keywords} 379 else: 380 self.func = func 381 self.args = args 382 self.keywords = keywords 383 __init__.__text_signature__ = '($self, func, /, *args, **keywords)' 384 385 def __repr__(self): 386 args = ", ".join(map(repr, self.args)) 387 keywords = ", ".join("{}={!r}".format(k, v) 388 for k, v in self.keywords.items()) 389 format_string = "{module}.{cls}({func}, {args}, {keywords})" 390 return format_string.format(module=self.__class__.__module__, 391 cls=self.__class__.__qualname__, 392 func=self.func, 393 args=args, 394 keywords=keywords) 395 396 def _make_unbound_method(self): 397 def _method(cls_or_self, /, *args, **keywords): 398 keywords = {**self.keywords, **keywords} 399 return self.func(cls_or_self, *self.args, *args, **keywords) 400 _method.__isabstractmethod__ = self.__isabstractmethod__ 401 _method._partialmethod = self 402 return _method 403 404 def __get__(self, obj, cls=None): 405 get = getattr(self.func, "__get__", None) 406 result = None 407 if get is not None: 408 new_func = get(obj, cls) 409 if new_func is not self.func: 410 # Assume __get__ returning something new indicates the 411 # creation of an appropriate callable 412 result = partial(new_func, *self.args, **self.keywords) 413 try: 414 result.__self__ = new_func.__self__ 415 except AttributeError: 416 pass 417 if result is None: 418 # If the underlying descriptor didn't do anything, treat this 419 # like an instance method 420 result = self._make_unbound_method().__get__(obj, cls) 421 return result 422 423 @property 424 def __isabstractmethod__(self): 425 return getattr(self.func, "__isabstractmethod__", False) 426 427# Helper functions 428 429def _unwrap_partial(func): 430 while isinstance(func, partial): 431 func = func.func 432 return func 433 434################################################################################ 435### LRU Cache function decorator 436################################################################################ 437 438_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) 439 440class _HashedSeq(list): 441 """ This class guarantees that hash() will be called no more than once 442 per element. This is important because the lru_cache() will hash 443 the key multiple times on a cache miss. 444 445 """ 446 447 __slots__ = 'hashvalue' 448 449 def __init__(self, tup, hash=hash): 450 self[:] = tup 451 self.hashvalue = hash(tup) 452 453 def __hash__(self): 454 return self.hashvalue 455 456def _make_key(args, kwds, typed, 457 kwd_mark = (object(),), 458 fasttypes = {int, str}, 459 tuple=tuple, type=type, len=len): 460 """Make a cache key from optionally typed positional and keyword arguments 461 462 The key is constructed in a way that is flat as possible rather than 463 as a nested structure that would take more memory. 464 465 If there is only a single argument and its data type is known to cache 466 its hash value, then that argument is returned without a wrapper. This 467 saves space and improves lookup speed. 468 469 """ 470 # All of code below relies on kwds preserving the order input by the user. 471 # Formerly, we sorted() the kwds before looping. The new way is *much* 472 # faster; however, it means that f(x=1, y=2) will now be treated as a 473 # distinct call from f(y=2, x=1) which will be cached separately. 474 key = args 475 if kwds: 476 key += kwd_mark 477 for item in kwds.items(): 478 key += item 479 if typed: 480 key += tuple(type(v) for v in args) 481 if kwds: 482 key += tuple(type(v) for v in kwds.values()) 483 elif len(key) == 1 and type(key[0]) in fasttypes: 484 return key[0] 485 return _HashedSeq(key) 486 487def lru_cache(maxsize=128, typed=False): 488 """Least-recently-used cache decorator. 489 490 If *maxsize* is set to None, the LRU features are disabled and the cache 491 can grow without bound. 492 493 If *typed* is True, arguments of different types will be cached separately. 494 For example, f(3.0) and f(3) will be treated as distinct calls with 495 distinct results. 496 497 Arguments to the cached function must be hashable. 498 499 View the cache statistics named tuple (hits, misses, maxsize, currsize) 500 with f.cache_info(). Clear the cache and statistics with f.cache_clear(). 501 Access the underlying function with f.__wrapped__. 502 503 See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) 504 505 """ 506 507 # Users should only access the lru_cache through its public API: 508 # cache_info, cache_clear, and f.__wrapped__ 509 # The internals of the lru_cache are encapsulated for thread safety and 510 # to allow the implementation to change (including a possible C version). 511 512 if isinstance(maxsize, int): 513 # Negative maxsize is treated as 0 514 if maxsize < 0: 515 maxsize = 0 516 elif callable(maxsize) and isinstance(typed, bool): 517 # The user_function was passed in directly via the maxsize argument 518 user_function, maxsize = maxsize, 128 519 wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) 520 return update_wrapper(wrapper, user_function) 521 elif maxsize is not None: 522 raise TypeError( 523 'Expected first argument to be an integer, a callable, or None') 524 525 def decorating_function(user_function): 526 wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo) 527 return update_wrapper(wrapper, user_function) 528 529 return decorating_function 530 531def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): 532 # Constants shared by all lru cache instances: 533 sentinel = object() # unique object used to signal cache misses 534 make_key = _make_key # build a key from the function arguments 535 PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields 536 537 cache = {} 538 hits = misses = 0 539 full = False 540 cache_get = cache.get # bound method to lookup a key or return None 541 cache_len = cache.__len__ # get cache size without calling len() 542 lock = RLock() # because linkedlist updates aren't threadsafe 543 root = [] # root of the circular doubly linked list 544 root[:] = [root, root, None, None] # initialize by pointing to self 545 546 if maxsize == 0: 547 548 def wrapper(*args, **kwds): 549 # No caching -- just a statistics update 550 nonlocal misses 551 misses += 1 552 result = user_function(*args, **kwds) 553 return result 554 555 elif maxsize is None: 556 557 def wrapper(*args, **kwds): 558 # Simple caching without ordering or size limit 559 nonlocal hits, misses 560 key = make_key(args, kwds, typed) 561 result = cache_get(key, sentinel) 562 if result is not sentinel: 563 hits += 1 564 return result 565 misses += 1 566 result = user_function(*args, **kwds) 567 cache[key] = result 568 return result 569 570 else: 571 572 def wrapper(*args, **kwds): 573 # Size limited caching that tracks accesses by recency 574 nonlocal root, hits, misses, full 575 key = make_key(args, kwds, typed) 576 with lock: 577 link = cache_get(key) 578 if link is not None: 579 # Move the link to the front of the circular queue 580 link_prev, link_next, _key, result = link 581 link_prev[NEXT] = link_next 582 link_next[PREV] = link_prev 583 last = root[PREV] 584 last[NEXT] = root[PREV] = link 585 link[PREV] = last 586 link[NEXT] = root 587 hits += 1 588 return result 589 misses += 1 590 result = user_function(*args, **kwds) 591 with lock: 592 if key in cache: 593 # Getting here means that this same key was added to the 594 # cache while the lock was released. Since the link 595 # update is already done, we need only return the 596 # computed result and update the count of misses. 597 pass 598 elif full: 599 # Use the old root to store the new key and result. 600 oldroot = root 601 oldroot[KEY] = key 602 oldroot[RESULT] = result 603 # Empty the oldest link and make it the new root. 604 # Keep a reference to the old key and old result to 605 # prevent their ref counts from going to zero during the 606 # update. That will prevent potentially arbitrary object 607 # clean-up code (i.e. __del__) from running while we're 608 # still adjusting the links. 609 root = oldroot[NEXT] 610 oldkey = root[KEY] 611 oldresult = root[RESULT] 612 root[KEY] = root[RESULT] = None 613 # Now update the cache dictionary. 614 del cache[oldkey] 615 # Save the potentially reentrant cache[key] assignment 616 # for last, after the root and links have been put in 617 # a consistent state. 618 cache[key] = oldroot 619 else: 620 # Put result in a new link at the front of the queue. 621 last = root[PREV] 622 link = [last, root, key, result] 623 last[NEXT] = root[PREV] = cache[key] = link 624 # Use the cache_len bound method instead of the len() function 625 # which could potentially be wrapped in an lru_cache itself. 626 full = (cache_len() >= maxsize) 627 return result 628 629 def cache_info(): 630 """Report cache statistics""" 631 with lock: 632 return _CacheInfo(hits, misses, maxsize, cache_len()) 633 634 def cache_clear(): 635 """Clear the cache and cache statistics""" 636 nonlocal hits, misses, full 637 with lock: 638 cache.clear() 639 root[:] = [root, root, None, None] 640 hits = misses = 0 641 full = False 642 643 wrapper.cache_info = cache_info 644 wrapper.cache_clear = cache_clear 645 return wrapper 646 647try: 648 from _functools import _lru_cache_wrapper 649except ImportError: 650 pass 651 652 653################################################################################ 654### singledispatch() - single-dispatch generic function decorator 655################################################################################ 656 657def _c3_merge(sequences): 658 """Merges MROs in *sequences* to a single MRO using the C3 algorithm. 659 660 Adapted from http://www.python.org/download/releases/2.3/mro/. 661 662 """ 663 result = [] 664 while True: 665 sequences = [s for s in sequences if s] # purge empty sequences 666 if not sequences: 667 return result 668 for s1 in sequences: # find merge candidates among seq heads 669 candidate = s1[0] 670 for s2 in sequences: 671 if candidate in s2[1:]: 672 candidate = None 673 break # reject the current head, it appears later 674 else: 675 break 676 if candidate is None: 677 raise RuntimeError("Inconsistent hierarchy") 678 result.append(candidate) 679 # remove the chosen candidate 680 for seq in sequences: 681 if seq[0] == candidate: 682 del seq[0] 683 684def _c3_mro(cls, abcs=None): 685 """Computes the method resolution order using extended C3 linearization. 686 687 If no *abcs* are given, the algorithm works exactly like the built-in C3 688 linearization used for method resolution. 689 690 If given, *abcs* is a list of abstract base classes that should be inserted 691 into the resulting MRO. Unrelated ABCs are ignored and don't end up in the 692 result. The algorithm inserts ABCs where their functionality is introduced, 693 i.e. issubclass(cls, abc) returns True for the class itself but returns 694 False for all its direct base classes. Implicit ABCs for a given class 695 (either registered or inferred from the presence of a special method like 696 __len__) are inserted directly after the last ABC explicitly listed in the 697 MRO of said class. If two implicit ABCs end up next to each other in the 698 resulting MRO, their ordering depends on the order of types in *abcs*. 699 700 """ 701 for i, base in enumerate(reversed(cls.__bases__)): 702 if hasattr(base, '__abstractmethods__'): 703 boundary = len(cls.__bases__) - i 704 break # Bases up to the last explicit ABC are considered first. 705 else: 706 boundary = 0 707 abcs = list(abcs) if abcs else [] 708 explicit_bases = list(cls.__bases__[:boundary]) 709 abstract_bases = [] 710 other_bases = list(cls.__bases__[boundary:]) 711 for base in abcs: 712 if issubclass(cls, base) and not any( 713 issubclass(b, base) for b in cls.__bases__ 714 ): 715 # If *cls* is the class that introduces behaviour described by 716 # an ABC *base*, insert said ABC to its MRO. 717 abstract_bases.append(base) 718 for base in abstract_bases: 719 abcs.remove(base) 720 explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] 721 abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] 722 other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] 723 return _c3_merge( 724 [[cls]] + 725 explicit_c3_mros + abstract_c3_mros + other_c3_mros + 726 [explicit_bases] + [abstract_bases] + [other_bases] 727 ) 728 729def _compose_mro(cls, types): 730 """Calculates the method resolution order for a given class *cls*. 731 732 Includes relevant abstract base classes (with their respective bases) from 733 the *types* iterable. Uses a modified C3 linearization algorithm. 734 735 """ 736 bases = set(cls.__mro__) 737 # Remove entries which are already present in the __mro__ or unrelated. 738 def is_related(typ): 739 return (typ not in bases and hasattr(typ, '__mro__') 740 and issubclass(cls, typ)) 741 types = [n for n in types if is_related(n)] 742 # Remove entries which are strict bases of other entries (they will end up 743 # in the MRO anyway. 744 def is_strict_base(typ): 745 for other in types: 746 if typ != other and typ in other.__mro__: 747 return True 748 return False 749 types = [n for n in types if not is_strict_base(n)] 750 # Subclasses of the ABCs in *types* which are also implemented by 751 # *cls* can be used to stabilize ABC ordering. 752 type_set = set(types) 753 mro = [] 754 for typ in types: 755 found = [] 756 for sub in typ.__subclasses__(): 757 if sub not in bases and issubclass(cls, sub): 758 found.append([s for s in sub.__mro__ if s in type_set]) 759 if not found: 760 mro.append(typ) 761 continue 762 # Favor subclasses with the biggest number of useful bases 763 found.sort(key=len, reverse=True) 764 for sub in found: 765 for subcls in sub: 766 if subcls not in mro: 767 mro.append(subcls) 768 return _c3_mro(cls, abcs=mro) 769 770def _find_impl(cls, registry): 771 """Returns the best matching implementation from *registry* for type *cls*. 772 773 Where there is no registered implementation for a specific type, its method 774 resolution order is used to find a more generic implementation. 775 776 Note: if *registry* does not contain an implementation for the base 777 *object* type, this function may return None. 778 779 """ 780 mro = _compose_mro(cls, registry.keys()) 781 match = None 782 for t in mro: 783 if match is not None: 784 # If *match* is an implicit ABC but there is another unrelated, 785 # equally matching implicit ABC, refuse the temptation to guess. 786 if (t in registry and t not in cls.__mro__ 787 and match not in cls.__mro__ 788 and not issubclass(match, t)): 789 raise RuntimeError("Ambiguous dispatch: {} or {}".format( 790 match, t)) 791 break 792 if t in registry: 793 match = t 794 return registry.get(match) 795 796def singledispatch(func): 797 """Single-dispatch generic function decorator. 798 799 Transforms a function into a generic function, which can have different 800 behaviours depending upon the type of its first argument. The decorated 801 function acts as the default implementation, and additional 802 implementations can be registered using the register() attribute of the 803 generic function. 804 """ 805 # There are many programs that use functools without singledispatch, so we 806 # trade-off making singledispatch marginally slower for the benefit of 807 # making start-up of such applications slightly faster. 808 import types, weakref 809 810 registry = {} 811 dispatch_cache = weakref.WeakKeyDictionary() 812 cache_token = None 813 814 def dispatch(cls): 815 """generic_func.dispatch(cls) -> <function implementation> 816 817 Runs the dispatch algorithm to return the best available implementation 818 for the given *cls* registered on *generic_func*. 819 820 """ 821 nonlocal cache_token 822 if cache_token is not None: 823 current_token = get_cache_token() 824 if cache_token != current_token: 825 dispatch_cache.clear() 826 cache_token = current_token 827 try: 828 impl = dispatch_cache[cls] 829 except KeyError: 830 try: 831 impl = registry[cls] 832 except KeyError: 833 impl = _find_impl(cls, registry) 834 dispatch_cache[cls] = impl 835 return impl 836 837 def register(cls, func=None): 838 """generic_func.register(cls, func) -> func 839 840 Registers a new implementation for the given *cls* on a *generic_func*. 841 842 """ 843 nonlocal cache_token 844 if func is None: 845 if isinstance(cls, type): 846 return lambda f: register(cls, f) 847 ann = getattr(cls, '__annotations__', {}) 848 if not ann: 849 raise TypeError( 850 f"Invalid first argument to `register()`: {cls!r}. " 851 f"Use either `@register(some_class)` or plain `@register` " 852 f"on an annotated function." 853 ) 854 func = cls 855 856 # only import typing if annotation parsing is necessary 857 from typing import get_type_hints 858 argname, cls = next(iter(get_type_hints(func).items())) 859 if not isinstance(cls, type): 860 raise TypeError( 861 f"Invalid annotation for {argname!r}. " 862 f"{cls!r} is not a class." 863 ) 864 registry[cls] = func 865 if cache_token is None and hasattr(cls, '__abstractmethods__'): 866 cache_token = get_cache_token() 867 dispatch_cache.clear() 868 return func 869 870 def wrapper(*args, **kw): 871 if not args: 872 raise TypeError(f'{funcname} requires at least ' 873 '1 positional argument') 874 875 return dispatch(args[0].__class__)(*args, **kw) 876 877 funcname = getattr(func, '__name__', 'singledispatch function') 878 registry[object] = func 879 wrapper.register = register 880 wrapper.dispatch = dispatch 881 wrapper.registry = types.MappingProxyType(registry) 882 wrapper._clear_cache = dispatch_cache.clear 883 update_wrapper(wrapper, func) 884 return wrapper 885 886 887# Descriptor version 888class singledispatchmethod: 889 """Single-dispatch generic method descriptor. 890 891 Supports wrapping existing descriptors and handles non-descriptor 892 callables as instance methods. 893 """ 894 895 def __init__(self, func): 896 if not callable(func) and not hasattr(func, "__get__"): 897 raise TypeError(f"{func!r} is not callable or a descriptor") 898 899 self.dispatcher = singledispatch(func) 900 self.func = func 901 902 def register(self, cls, method=None): 903 """generic_method.register(cls, func) -> func 904 905 Registers a new implementation for the given *cls* on a *generic_method*. 906 """ 907 return self.dispatcher.register(cls, func=method) 908 909 def __get__(self, obj, cls=None): 910 def _method(*args, **kwargs): 911 method = self.dispatcher.dispatch(args[0].__class__) 912 return method.__get__(obj, cls)(*args, **kwargs) 913 914 _method.__isabstractmethod__ = self.__isabstractmethod__ 915 _method.register = self.register 916 update_wrapper(_method, self.func) 917 return _method 918 919 @property 920 def __isabstractmethod__(self): 921 return getattr(self.func, '__isabstractmethod__', False) 922 923 924################################################################################ 925### cached_property() - computed once per instance, cached as attribute 926################################################################################ 927 928_NOT_FOUND = object() 929 930 931class cached_property: 932 def __init__(self, func): 933 self.func = func 934 self.attrname = None 935 self.__doc__ = func.__doc__ 936 self.lock = RLock() 937 938 def __set_name__(self, owner, name): 939 if self.attrname is None: 940 self.attrname = name 941 elif name != self.attrname: 942 raise TypeError( 943 "Cannot assign the same cached_property to two different names " 944 f"({self.attrname!r} and {name!r})." 945 ) 946 947 def __get__(self, instance, owner=None): 948 if instance is None: 949 return self 950 if self.attrname is None: 951 raise TypeError( 952 "Cannot use cached_property instance without calling __set_name__ on it.") 953 try: 954 cache = instance.__dict__ 955 except AttributeError: # not all objects have __dict__ (e.g. class defines slots) 956 msg = ( 957 f"No '__dict__' attribute on {type(instance).__name__!r} " 958 f"instance to cache {self.attrname!r} property." 959 ) 960 raise TypeError(msg) from None 961 val = cache.get(self.attrname, _NOT_FOUND) 962 if val is _NOT_FOUND: 963 with self.lock: 964 # check if another thread filled cache while we awaited lock 965 val = cache.get(self.attrname, _NOT_FOUND) 966 if val is _NOT_FOUND: 967 val = self.func(instance) 968 try: 969 cache[self.attrname] = val 970 except TypeError: 971 msg = ( 972 f"The '__dict__' attribute on {type(instance).__name__!r} instance " 973 f"does not support item assignment for caching {self.attrname!r} property." 974 ) 975 raise TypeError(msg) from None 976 return val 977