1 /* 2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.IOException; 29 import java.io.InvalidObjectException; 30 import java.io.ObjectInputStream; 31 import java.io.Serializable; 32 import java.lang.reflect.ParameterizedType; 33 import java.lang.reflect.Type; 34 import java.util.function.BiConsumer; 35 import java.util.function.BiFunction; 36 import java.util.function.Consumer; 37 import java.util.function.Function; 38 import jdk.internal.misc.SharedSecrets; 39 40 /** 41 * Hash table based implementation of the {@code Map} interface. This 42 * implementation provides all of the optional map operations, and permits 43 * {@code null} values and the {@code null} key. 
(The {@code HashMap} 44 * class is roughly equivalent to {@code Hashtable}, except that it is 45 * unsynchronized and permits nulls.) This class makes no guarantees as to 46 * the order of the map; in particular, it does not guarantee that the order 47 * will remain constant over time. 48 * 49 * <p>This implementation provides constant-time performance for the basic 50 * operations ({@code get} and {@code put}), assuming the hash function 51 * disperses the elements properly among the buckets. Iteration over 52 * collection views requires time proportional to the "capacity" of the 53 * {@code HashMap} instance (the number of buckets) plus its size (the number 54 * of key-value mappings). Thus, it's very important not to set the initial 55 * capacity too high (or the load factor too low) if iteration performance is 56 * important. 57 * 58 * <p>An instance of {@code HashMap} has two parameters that affect its 59 * performance: <i>initial capacity</i> and <i>load factor</i>. The 60 * <i>capacity</i> is the number of buckets in the hash table, and the initial 61 * capacity is simply the capacity at the time the hash table is created. The 62 * <i>load factor</i> is a measure of how full the hash table is allowed to 63 * get before its capacity is automatically increased. When the number of 64 * entries in the hash table exceeds the product of the load factor and the 65 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 66 * structures are rebuilt) so that the hash table has approximately twice the 67 * number of buckets. 68 * 69 * <p>As a general rule, the default load factor (.75) offers a good 70 * tradeoff between time and space costs. Higher values decrease the 71 * space overhead but increase the lookup cost (reflected in most of 72 * the operations of the {@code HashMap} class, including 73 * {@code get} and {@code put}). 
The expected number of entries in 74 * the map and its load factor should be taken into account when 75 * setting its initial capacity, so as to minimize the number of 76 * rehash operations. If the initial capacity is greater than the 77 * maximum number of entries divided by the load factor, no rehash 78 * operations will ever occur. 79 * 80 * <p>If many mappings are to be stored in a {@code HashMap} 81 * instance, creating it with a sufficiently large capacity will allow 82 * the mappings to be stored more efficiently than letting it perform 83 * automatic rehashing as needed to grow the table. Note that using 84 * many keys with the same {@code hashCode()} is a sure way to slow 85 * down performance of any hash table. To ameliorate impact, when keys 86 * are {@link Comparable}, this class may use comparison order among 87 * keys to help break ties. 88 * 89 * <p><strong>Note that this implementation is not synchronized.</strong> 90 * If multiple threads access a hash map concurrently, and at least one of 91 * the threads modifies the map structurally, it <i>must</i> be 92 * synchronized externally. (A structural modification is any operation 93 * that adds or deletes one or more mappings; merely changing the value 94 * associated with a key that an instance already contains is not a 95 * structural modification.) This is typically accomplished by 96 * synchronizing on some object that naturally encapsulates the map. 97 * 98 * If no such object exists, the map should be "wrapped" using the 99 * {@link Collections#synchronizedMap Collections.synchronizedMap} 100 * method. 
This is best done at creation time, to prevent accidental
 * unsynchronized access to the map:<pre>
 *   Map m = Collections.synchronizedMap(new HashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods"
 * are <i>fail-fast</i>: if the map is structurally modified at any time after
 * the iterator is created, in any way except through the iterator's own
 * {@code remove} method, the iterator will throw a
 * {@link ConcurrentModificationException}.  Thus, in the face of concurrent
 * modification, the iterator fails quickly and cleanly, rather than risking
 * arbitrary, non-deterministic behavior at an undetermined time in the
 * future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification.  Fail-fast iterators
 * throw {@code ConcurrentModificationException} on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node).  Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>"
     * type, then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor).  The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable.  Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins.  In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used.  Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node.  However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use and transitions among plain vs tree modes is
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Basic hash bin node, used for most entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;     // cached hash for the key; fixed at construction
        final K key;
        V value;
        Node<K,V> next;     // next node in the same bin, or null

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        // Map.Entry contract: entry hash is key-hash XOR value-hash.
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        // Equal to any Map.Entry whose key and value are both equal.
        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }

    /* ---------------- Static utilities -------------- */

    /**
     * Computes key.hashCode() and spreads (XORs) higher bits of hash
     * to lower.  Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading.
     * Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int hash(Object key) {
        int h;
        // A null key always hashes to 0, i.e. bucket 0.
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                // Accept only a direct "implements Comparable<C>" whose sole
                // type argument is exactly x's own class.
                for (Type t : ts) {
                    if ((t instanceof ParameterizedType) &&
                        ((p = (ParameterizedType) t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /**
     * Returns a power of two size for the given target capacity.
     */
    static final int tableSizeFor(int cap) {
        // Fill every bit below the highest set bit of (cap - 1), then add 1;
        // clamps to 1 on the low end and MAXIMUM_CAPACITY on the high end.
        int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /* ---------------- Fields -------------- */

    /**
     * The table, initialized on first use, and resized as
     * necessary. When allocated, length is always a power of two.
     * (We also tolerate length zero in some operations to allow
     * bootstrapping mechanics that are currently not needed.)
     */
    transient Node<K,V>[] table;

    /**
     * Holds cached entrySet(). Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The number of times this HashMap has been structurally modified.
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * The next size value at which to resize (capacity * load factor).
     *
     * @serial
     */
    // (The javadoc description is true upon serialization.
    // Additionally, if the table array has not been allocated, this
    // field holds the initial array capacity, or zero signifying
    // DEFAULT_INITIAL_CAPACITY.)
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;

    /* ---------------- Public operations -------------- */

    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and load factor.
437 * 438 * @param initialCapacity the initial capacity 439 * @param loadFactor the load factor 440 * @throws IllegalArgumentException if the initial capacity is negative 441 * or the load factor is nonpositive 442 */ HashMap(int initialCapacity, float loadFactor)443 public HashMap(int initialCapacity, float loadFactor) { 444 if (initialCapacity < 0) 445 throw new IllegalArgumentException("Illegal initial capacity: " + 446 initialCapacity); 447 if (initialCapacity > MAXIMUM_CAPACITY) 448 initialCapacity = MAXIMUM_CAPACITY; 449 if (loadFactor <= 0 || Float.isNaN(loadFactor)) 450 throw new IllegalArgumentException("Illegal load factor: " + 451 loadFactor); 452 this.loadFactor = loadFactor; 453 this.threshold = tableSizeFor(initialCapacity); 454 } 455 456 /** 457 * Constructs an empty {@code HashMap} with the specified initial 458 * capacity and the default load factor (0.75). 459 * 460 * @param initialCapacity the initial capacity. 461 * @throws IllegalArgumentException if the initial capacity is negative. 462 */ HashMap(int initialCapacity)463 public HashMap(int initialCapacity) { 464 this(initialCapacity, DEFAULT_LOAD_FACTOR); 465 } 466 467 /** 468 * Constructs an empty {@code HashMap} with the default initial capacity 469 * (16) and the default load factor (0.75). 470 */ HashMap()471 public HashMap() { 472 this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted 473 } 474 475 /** 476 * Constructs a new {@code HashMap} with the same mappings as the 477 * specified {@code Map}. The {@code HashMap} is created with 478 * default load factor (0.75) and an initial capacity sufficient to 479 * hold the mappings in the specified {@code Map}. 480 * 481 * @param m the map whose mappings are to be placed in this map 482 * @throws NullPointerException if the specified map is null 483 */ HashMap(Map<? extends K, ? extends V> m)484 public HashMap(Map<? extends K, ? 
extends V> m) { 485 this.loadFactor = DEFAULT_LOAD_FACTOR; 486 putMapEntries(m, false); 487 } 488 489 /** 490 * Implements Map.putAll and Map constructor. 491 * 492 * @param m the map 493 * @param evict false when initially constructing this map, else 494 * true (relayed to method afterNodeInsertion). 495 */ putMapEntries(Map<? extends K, ? extends V> m, boolean evict)496 final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) { 497 int s = m.size(); 498 if (s > 0) { 499 if (table == null) { // pre-size 500 float ft = ((float)s / loadFactor) + 1.0F; 501 int t = ((ft < (float)MAXIMUM_CAPACITY) ? 502 (int)ft : MAXIMUM_CAPACITY); 503 if (t > threshold) 504 threshold = tableSizeFor(t); 505 } 506 else if (s > threshold) 507 resize(); 508 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) { 509 K key = e.getKey(); 510 V value = e.getValue(); 511 putVal(hash(key), key, value, false, evict); 512 } 513 } 514 } 515 516 /** 517 * Returns the number of key-value mappings in this map. 518 * 519 * @return the number of key-value mappings in this map 520 */ size()521 public int size() { 522 return size; 523 } 524 525 /** 526 * Returns {@code true} if this map contains no key-value mappings. 527 * 528 * @return {@code true} if this map contains no key-value mappings 529 */ isEmpty()530 public boolean isEmpty() { 531 return size == 0; 532 } 533 534 /** 535 * Returns the value to which the specified key is mapped, 536 * or {@code null} if this map contains no mapping for the key. 537 * 538 * <p>More formally, if this map contains a mapping from a key 539 * {@code k} to a value {@code v} such that {@code (key==null ? k==null : 540 * key.equals(k))}, then this method returns {@code v}; otherwise 541 * it returns {@code null}. (There can be at most one such mapping.) 
     *
     * <p>A return value of {@code null} does not <i>necessarily</i>
     * indicate that the map contains no mapping for the key; it's also
     * possible that the map explicitly maps the key to {@code null}.
     * The {@link #containsKey containsKey} operation may be used to
     * distinguish these two cases.
     *
     * @see #put(Object, Object)
     */
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                if (first instanceof TreeNode)
                    // Tree bin: delegate to the tree lookup.
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Plain bin: linear scan of the remainder of the list.
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    /**
     * Returns {@code true} if this map contains a mapping for the
     * specified key.
     *
     * @param key The key whose presence in this map is to be tested
     * @return {@code true} if this map contains a mapping for the specified
     * key.
     */
    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }

    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
     */
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }

    /**
     * Implements Map.put and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to put
     * @param onlyIfAbsent if true, don't change existing value
     * @param evict if false, the table is in creation mode.
     * @return previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        // Lazily allocate the table on first insertion.
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((p = tab[i = (n - 1) & hash]) == null)
            // Empty bin: the new node becomes the bin head.
            tab[i] = newNode(hash, key, value, null);
        else {
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;  // bin head already holds the key
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                // Plain bin: scan the list; append at the tail if absent.
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);  // LinkedHashMap hook
                return oldValue;
            }
        }
        // A new mapping was added: structural modification.
        ++modCount;
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict);  // LinkedHashMap hook
        return null;
    }

    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                // Cannot grow further; disable future resizing.
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            // Threshold not derived by doubling above: recompute from the
            // load factor, saturating at Integer.MAX_VALUE.
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;  // clear old slot reference
                    if (e.next == null)
                        // Singleton bin: move the node directly.
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        // Partition the list by the new high bit: "lo" nodes
                        // stay at index j, "hi" nodes move to j + oldCap.
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }

    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            // Small tables: growing is preferable to treeifying.
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            // Convert each list node to a TreeNode, building a doubly linked
            // list first, then treeify that list in place.
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }

    /**
     * Copies all of the mappings from the specified map to this map.
774 * These mappings will replace any mappings that this map had for 775 * any of the keys currently in the specified map. 776 * 777 * @param m mappings to be stored in this map 778 * @throws NullPointerException if the specified map is null 779 */ putAll(Map<? extends K, ? extends V> m)780 public void putAll(Map<? extends K, ? extends V> m) { 781 putMapEntries(m, true); 782 } 783 784 /** 785 * Removes the mapping for the specified key from this map if present. 786 * 787 * @param key key whose mapping is to be removed from the map 788 * @return the previous value associated with {@code key}, or 789 * {@code null} if there was no mapping for {@code key}. 790 * (A {@code null} return can also indicate that the map 791 * previously associated {@code null} with {@code key}.) 792 */ remove(Object key)793 public V remove(Object key) { 794 Node<K,V> e; 795 return (e = removeNode(hash(key), key, null, false, true)) == null ? 796 null : e.value; 797 } 798 799 /** 800 * Implements Map.remove and related methods. 
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to match if matchValue, else ignored
     * @param matchValue if true only remove if value is equal
     * @param movable if false do not move other nodes while removing
     * @return the node, or null if none
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // Locate the node to remove; during the list scan, p is left
            // pointing at the node's predecessor.
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            // Remove only if found and, when matchValue, the value matches.
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    // Node is the bin head: successor becomes the head.
                    tab[index] = node.next;
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);  // LinkedHashMap hook
                return node;
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     * The map will be empty after this call returns.
     */
    public void clear() {
        Node<K,V>[] tab;
        modCount++;
        if ((tab = table) != null && size > 0) {
            size = 0;
            // Null out every bin; the table array itself is retained.
            for (int i = 0; i < tab.length; ++i)
                tab[i] = null;
        }
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value.
867 * 868 * @param value value whose presence in this map is to be tested 869 * @return {@code true} if this map maps one or more keys to the 870 * specified value 871 */ containsValue(Object value)872 public boolean containsValue(Object value) { 873 Node<K,V>[] tab; V v; 874 if ((tab = table) != null && size > 0) { 875 for (Node<K,V> e : tab) { 876 for (; e != null; e = e.next) { 877 if ((v = e.value) == value || 878 (value != null && value.equals(v))) 879 return true; 880 } 881 } 882 } 883 return false; 884 } 885 886 /** 887 * Returns a {@link Set} view of the keys contained in this map. 888 * The set is backed by the map, so changes to the map are 889 * reflected in the set, and vice-versa. If the map is modified 890 * while an iteration over the set is in progress (except through 891 * the iterator's own {@code remove} operation), the results of 892 * the iteration are undefined. The set supports element removal, 893 * which removes the corresponding mapping from the map, via the 894 * {@code Iterator.remove}, {@code Set.remove}, 895 * {@code removeAll}, {@code retainAll}, and {@code clear} 896 * operations. It does not support the {@code add} or {@code addAll} 897 * operations. 
898 * 899 * @return a set view of the keys contained in this map 900 */ keySet()901 public Set<K> keySet() { 902 Set<K> ks = keySet; 903 if (ks == null) { 904 ks = new KeySet(); 905 keySet = ks; 906 } 907 return ks; 908 } 909 910 final class KeySet extends AbstractSet<K> { size()911 public final int size() { return size; } clear()912 public final void clear() { HashMap.this.clear(); } iterator()913 public final Iterator<K> iterator() { return new KeyIterator(); } contains(Object o)914 public final boolean contains(Object o) { return containsKey(o); } remove(Object key)915 public final boolean remove(Object key) { 916 return removeNode(hash(key), key, null, false, true) != null; 917 } spliterator()918 public final Spliterator<K> spliterator() { 919 return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); 920 } forEach(Consumer<? super K> action)921 public final void forEach(Consumer<? super K> action) { 922 Node<K,V>[] tab; 923 if (action == null) 924 throw new NullPointerException(); 925 if (size > 0 && (tab = table) != null) { 926 int mc = modCount; 927 for (Node<K,V> e : tab) { 928 for (; e != null; e = e.next) 929 action.accept(e.key); 930 } 931 if (modCount != mc) 932 throw new ConcurrentModificationException(); 933 } 934 } 935 } 936 937 /** 938 * Returns a {@link Collection} view of the values contained in this map. 939 * The collection is backed by the map, so changes to the map are 940 * reflected in the collection, and vice-versa. If the map is 941 * modified while an iteration over the collection is in progress 942 * (except through the iterator's own {@code remove} operation), 943 * the results of the iteration are undefined. The collection 944 * supports element removal, which removes the corresponding 945 * mapping from the map, via the {@code Iterator.remove}, 946 * {@code Collection.remove}, {@code removeAll}, 947 * {@code retainAll} and {@code clear} operations. It does not 948 * support the {@code add} or {@code addAll} operations. 
949 * 950 * @return a view of the values contained in this map 951 */ values()952 public Collection<V> values() { 953 Collection<V> vs = values; 954 if (vs == null) { 955 vs = new Values(); 956 values = vs; 957 } 958 return vs; 959 } 960 961 final class Values extends AbstractCollection<V> { size()962 public final int size() { return size; } clear()963 public final void clear() { HashMap.this.clear(); } iterator()964 public final Iterator<V> iterator() { return new ValueIterator(); } contains(Object o)965 public final boolean contains(Object o) { return containsValue(o); } spliterator()966 public final Spliterator<V> spliterator() { 967 return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0); 968 } forEach(Consumer<? super V> action)969 public final void forEach(Consumer<? super V> action) { 970 Node<K,V>[] tab; 971 if (action == null) 972 throw new NullPointerException(); 973 if (size > 0 && (tab = table) != null) { 974 int mc = modCount; 975 for (Node<K,V> e : tab) { 976 for (; e != null; e = e.next) 977 action.accept(e.value); 978 } 979 if (modCount != mc) 980 throw new ConcurrentModificationException(); 981 } 982 } 983 } 984 985 /** 986 * Returns a {@link Set} view of the mappings contained in this map. 987 * The set is backed by the map, so changes to the map are 988 * reflected in the set, and vice-versa. If the map is modified 989 * while an iteration over the set is in progress (except through 990 * the iterator's own {@code remove} operation, or through the 991 * {@code setValue} operation on a map entry returned by the 992 * iterator) the results of the iteration are undefined. The set 993 * supports element removal, which removes the corresponding 994 * mapping from the map, via the {@code Iterator.remove}, 995 * {@code Set.remove}, {@code removeAll}, {@code retainAll} and 996 * {@code clear} operations. It does not support the 997 * {@code add} or {@code addAll} operations. 
998 * 999 * @return a set view of the mappings contained in this map 1000 */ entrySet()1001 public Set<Map.Entry<K,V>> entrySet() { 1002 Set<Map.Entry<K,V>> es; 1003 return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; 1004 } 1005 1006 final class EntrySet extends AbstractSet<Map.Entry<K,V>> { size()1007 public final int size() { return size; } clear()1008 public final void clear() { HashMap.this.clear(); } iterator()1009 public final Iterator<Map.Entry<K,V>> iterator() { 1010 return new EntryIterator(); 1011 } contains(Object o)1012 public final boolean contains(Object o) { 1013 if (!(o instanceof Map.Entry)) 1014 return false; 1015 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1016 Object key = e.getKey(); 1017 Node<K,V> candidate = getNode(hash(key), key); 1018 return candidate != null && candidate.equals(e); 1019 } remove(Object o)1020 public final boolean remove(Object o) { 1021 if (o instanceof Map.Entry) { 1022 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1023 Object key = e.getKey(); 1024 Object value = e.getValue(); 1025 return removeNode(hash(key), key, value, true, true) != null; 1026 } 1027 return false; 1028 } spliterator()1029 public final Spliterator<Map.Entry<K,V>> spliterator() { 1030 return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0); 1031 } forEach(Consumer<? super Map.Entry<K,V>> action)1032 public final void forEach(Consumer<? super Map.Entry<K,V>> action) { 1033 Node<K,V>[] tab; 1034 if (action == null) 1035 throw new NullPointerException(); 1036 if (size > 0 && (tab = table) != null) { 1037 int mc = modCount; 1038 for (Node<K,V> e : tab) { 1039 for (; e != null; e = e.next) 1040 action.accept(e); 1041 } 1042 if (modCount != mc) 1043 throw new ConcurrentModificationException(); 1044 } 1045 } 1046 } 1047 1048 // Overrides of JDK8 Map extension methods 1049 1050 @Override getOrDefault(Object key, V defaultValue)1051 public V getOrDefault(Object key, V defaultValue) { 1052 Node<K,V> e; 1053 return (e = getNode(hash(key), key)) == null ? 
defaultValue : e.value; 1054 } 1055 1056 @Override putIfAbsent(K key, V value)1057 public V putIfAbsent(K key, V value) { 1058 return putVal(hash(key), key, value, true, true); 1059 } 1060 1061 @Override remove(Object key, Object value)1062 public boolean remove(Object key, Object value) { 1063 return removeNode(hash(key), key, value, true, true) != null; 1064 } 1065 1066 @Override replace(K key, V oldValue, V newValue)1067 public boolean replace(K key, V oldValue, V newValue) { 1068 Node<K,V> e; V v; 1069 if ((e = getNode(hash(key), key)) != null && 1070 ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { 1071 e.value = newValue; 1072 afterNodeAccess(e); 1073 return true; 1074 } 1075 return false; 1076 } 1077 1078 @Override replace(K key, V value)1079 public V replace(K key, V value) { 1080 Node<K,V> e; 1081 if ((e = getNode(hash(key), key)) != null) { 1082 V oldValue = e.value; 1083 e.value = value; 1084 afterNodeAccess(e); 1085 return oldValue; 1086 } 1087 return null; 1088 } 1089 1090 /** 1091 * {@inheritDoc} 1092 * 1093 * <p>This method will, on a best-effort basis, throw a 1094 * {@link ConcurrentModificationException} if it is detected that the 1095 * mapping function modifies this map during computation. 1096 * 1097 * @throws ConcurrentModificationException if it is detected that the 1098 * mapping function modified this map 1099 */ 1100 @Override computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1101 public V computeIfAbsent(K key, 1102 Function<? super K, ? 
extends V> mappingFunction) { 1103 if (mappingFunction == null) 1104 throw new NullPointerException(); 1105 int hash = hash(key); 1106 Node<K,V>[] tab; Node<K,V> first; int n, i; 1107 int binCount = 0; 1108 TreeNode<K,V> t = null; 1109 Node<K,V> old = null; 1110 if (size > threshold || (tab = table) == null || 1111 (n = tab.length) == 0) 1112 n = (tab = resize()).length; 1113 if ((first = tab[i = (n - 1) & hash]) != null) { 1114 if (first instanceof TreeNode) 1115 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1116 else { 1117 Node<K,V> e = first; K k; 1118 do { 1119 if (e.hash == hash && 1120 ((k = e.key) == key || (key != null && key.equals(k)))) { 1121 old = e; 1122 break; 1123 } 1124 ++binCount; 1125 } while ((e = e.next) != null); 1126 } 1127 V oldValue; 1128 if (old != null && (oldValue = old.value) != null) { 1129 afterNodeAccess(old); 1130 return oldValue; 1131 } 1132 } 1133 int mc = modCount; 1134 V v = mappingFunction.apply(key); 1135 if (mc != modCount) { throw new ConcurrentModificationException(); } 1136 if (v == null) { 1137 return null; 1138 } else if (old != null) { 1139 old.value = v; 1140 afterNodeAccess(old); 1141 return v; 1142 } 1143 else if (t != null) 1144 t.putTreeVal(this, tab, hash, key, v); 1145 else { 1146 tab[i] = newNode(hash, key, v, first); 1147 if (binCount >= TREEIFY_THRESHOLD - 1) 1148 treeifyBin(tab, hash); 1149 } 1150 modCount = mc + 1; 1151 ++size; 1152 afterNodeInsertion(true); 1153 return v; 1154 } 1155 1156 /** 1157 * {@inheritDoc} 1158 * 1159 * <p>This method will, on a best-effort basis, throw a 1160 * {@link ConcurrentModificationException} if it is detected that the 1161 * remapping function modifies this map during computation. 1162 * 1163 * @throws ConcurrentModificationException if it is detected that the 1164 * remapping function modified this map 1165 */ 1166 @Override computeIfPresent(K key, BiFunction<? super K, ? super V, ? 
extends V> remappingFunction)1167 public V computeIfPresent(K key, 1168 BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1169 if (remappingFunction == null) 1170 throw new NullPointerException(); 1171 Node<K,V> e; V oldValue; 1172 int hash = hash(key); 1173 if ((e = getNode(hash, key)) != null && 1174 (oldValue = e.value) != null) { 1175 int mc = modCount; 1176 V v = remappingFunction.apply(key, oldValue); 1177 if (mc != modCount) { throw new ConcurrentModificationException(); } 1178 if (v != null) { 1179 e.value = v; 1180 afterNodeAccess(e); 1181 return v; 1182 } 1183 else 1184 removeNode(hash, key, null, false, true); 1185 } 1186 return null; 1187 } 1188 1189 /** 1190 * {@inheritDoc} 1191 * 1192 * <p>This method will, on a best-effort basis, throw a 1193 * {@link ConcurrentModificationException} if it is detected that the 1194 * remapping function modifies this map during computation. 1195 * 1196 * @throws ConcurrentModificationException if it is detected that the 1197 * remapping function modified this map 1198 */ 1199 @Override compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1200 public V compute(K key, 1201 BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { 1202 if (remappingFunction == null) 1203 throw new NullPointerException(); 1204 int hash = hash(key); 1205 Node<K,V>[] tab; Node<K,V> first; int n, i; 1206 int binCount = 0; 1207 TreeNode<K,V> t = null; 1208 Node<K,V> old = null; 1209 if (size > threshold || (tab = table) == null || 1210 (n = tab.length) == 0) 1211 n = (tab = resize()).length; 1212 if ((first = tab[i = (n - 1) & hash]) != null) { 1213 if (first instanceof TreeNode) 1214 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1215 else { 1216 Node<K,V> e = first; K k; 1217 do { 1218 if (e.hash == hash && 1219 ((k = e.key) == key || (key != null && key.equals(k)))) { 1220 old = e; 1221 break; 1222 } 1223 ++binCount; 1224 } while ((e = e.next) != null); 1225 } 1226 } 1227 V oldValue = (old == null) ? null : old.value; 1228 int mc = modCount; 1229 V v = remappingFunction.apply(key, oldValue); 1230 if (mc != modCount) { throw new ConcurrentModificationException(); } 1231 if (old != null) { 1232 if (v != null) { 1233 old.value = v; 1234 afterNodeAccess(old); 1235 } 1236 else 1237 removeNode(hash, key, null, false, true); 1238 } 1239 else if (v != null) { 1240 if (t != null) 1241 t.putTreeVal(this, tab, hash, key, v); 1242 else { 1243 tab[i] = newNode(hash, key, v, first); 1244 if (binCount >= TREEIFY_THRESHOLD - 1) 1245 treeifyBin(tab, hash); 1246 } 1247 modCount = mc + 1; 1248 ++size; 1249 afterNodeInsertion(true); 1250 } 1251 return v; 1252 } 1253 1254 /** 1255 * {@inheritDoc} 1256 * 1257 * <p>This method will, on a best-effort basis, throw a 1258 * {@link ConcurrentModificationException} if it is detected that the 1259 * remapping function modifies this map during computation. 1260 * 1261 * @throws ConcurrentModificationException if it is detected that the 1262 * remapping function modified this map 1263 */ 1264 @Override merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)1265 public V merge(K key, V value, 1266 BiFunction<? 
super V, ? super V, ? extends V> remappingFunction) { 1267 if (value == null) 1268 throw new NullPointerException(); 1269 if (remappingFunction == null) 1270 throw new NullPointerException(); 1271 int hash = hash(key); 1272 Node<K,V>[] tab; Node<K,V> first; int n, i; 1273 int binCount = 0; 1274 TreeNode<K,V> t = null; 1275 Node<K,V> old = null; 1276 if (size > threshold || (tab = table) == null || 1277 (n = tab.length) == 0) 1278 n = (tab = resize()).length; 1279 if ((first = tab[i = (n - 1) & hash]) != null) { 1280 if (first instanceof TreeNode) 1281 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1282 else { 1283 Node<K,V> e = first; K k; 1284 do { 1285 if (e.hash == hash && 1286 ((k = e.key) == key || (key != null && key.equals(k)))) { 1287 old = e; 1288 break; 1289 } 1290 ++binCount; 1291 } while ((e = e.next) != null); 1292 } 1293 } 1294 if (old != null) { 1295 V v; 1296 if (old.value != null) { 1297 int mc = modCount; 1298 v = remappingFunction.apply(old.value, value); 1299 if (mc != modCount) { 1300 throw new ConcurrentModificationException(); 1301 } 1302 } else { 1303 v = value; 1304 } 1305 if (v != null) { 1306 old.value = v; 1307 afterNodeAccess(old); 1308 } 1309 else 1310 removeNode(hash, key, null, false, true); 1311 return v; 1312 } 1313 if (value != null) { 1314 if (t != null) 1315 t.putTreeVal(this, tab, hash, key, value); 1316 else { 1317 tab[i] = newNode(hash, key, value, first); 1318 if (binCount >= TREEIFY_THRESHOLD - 1) 1319 treeifyBin(tab, hash); 1320 } 1321 ++modCount; 1322 ++size; 1323 afterNodeInsertion(true); 1324 } 1325 return value; 1326 } 1327 1328 @Override forEach(BiConsumer<? super K, ? super V> action)1329 public void forEach(BiConsumer<? super K, ? 
super V> action) { 1330 Node<K,V>[] tab; 1331 if (action == null) 1332 throw new NullPointerException(); 1333 if (size > 0 && (tab = table) != null) { 1334 int mc = modCount; 1335 for (Node<K,V> e : tab) { 1336 for (; e != null; e = e.next) 1337 action.accept(e.key, e.value); 1338 } 1339 if (modCount != mc) 1340 throw new ConcurrentModificationException(); 1341 } 1342 } 1343 1344 @Override replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1345 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1346 Node<K,V>[] tab; 1347 if (function == null) 1348 throw new NullPointerException(); 1349 if (size > 0 && (tab = table) != null) { 1350 int mc = modCount; 1351 for (Node<K,V> e : tab) { 1352 for (; e != null; e = e.next) { 1353 e.value = function.apply(e.key, e.value); 1354 } 1355 } 1356 if (modCount != mc) 1357 throw new ConcurrentModificationException(); 1358 } 1359 } 1360 1361 /* ------------------------------------------------------------ */ 1362 // Cloning and serialization 1363 1364 /** 1365 * Returns a shallow copy of this {@code HashMap} instance: the keys and 1366 * values themselves are not cloned. 1367 * 1368 * @return a shallow copy of this map 1369 */ 1370 @SuppressWarnings("unchecked") 1371 @Override clone()1372 public Object clone() { 1373 HashMap<K,V> result; 1374 try { 1375 result = (HashMap<K,V>)super.clone(); 1376 } catch (CloneNotSupportedException e) { 1377 // this shouldn't happen, since we are Cloneable 1378 throw new InternalError(e); 1379 } 1380 result.reinitialize(); 1381 result.putMapEntries(this, false); 1382 return result; 1383 } 1384 1385 // These methods are also used when serializing HashSets loadFactor()1386 final float loadFactor() { return loadFactor; } capacity()1387 final int capacity() { 1388 return (table != null) ? table.length : 1389 (threshold > 0) ? threshold : 1390 DEFAULT_INITIAL_CAPACITY; 1391 } 1392 1393 /** 1394 * Saves this map to a stream (that is, serializes it). 
1395 * 1396 * @param s the stream 1397 * @throws IOException if an I/O error occurs 1398 * @serialData The <i>capacity</i> of the HashMap (the length of the 1399 * bucket array) is emitted (int), followed by the 1400 * <i>size</i> (an int, the number of key-value 1401 * mappings), followed by the key (Object) and value (Object) 1402 * for each key-value mapping. The key-value mappings are 1403 * emitted in no particular order. 1404 */ writeObject(java.io.ObjectOutputStream s)1405 private void writeObject(java.io.ObjectOutputStream s) 1406 throws IOException { 1407 int buckets = capacity(); 1408 // Write out the threshold, loadfactor, and any hidden stuff 1409 s.defaultWriteObject(); 1410 s.writeInt(buckets); 1411 s.writeInt(size); 1412 internalWriteEntries(s); 1413 } 1414 1415 /** 1416 * Reconstitutes this map from a stream (that is, deserializes it). 1417 * @param s the stream 1418 * @throws ClassNotFoundException if the class of a serialized object 1419 * could not be found 1420 * @throws IOException if an I/O error occurs 1421 */ readObject(ObjectInputStream s)1422 private void readObject(ObjectInputStream s) 1423 throws IOException, ClassNotFoundException { 1424 1425 ObjectInputStream.GetField fields = s.readFields(); 1426 1427 // Read loadFactor (ignore threshold) 1428 float lf = fields.get("loadFactor", 0.75f); 1429 if (lf <= 0 || Float.isNaN(lf)) 1430 throw new InvalidObjectException("Illegal load factor: " + lf); 1431 1432 lf = Math.min(Math.max(0.25f, lf), 4.0f); 1433 HashMap.UnsafeHolder.putLoadFactor(this, lf); 1434 1435 reinitialize(); 1436 1437 s.readInt(); // Read and ignore number of buckets 1438 int mappings = s.readInt(); // Read number of mappings (size) 1439 if (mappings < 0) { 1440 throw new InvalidObjectException("Illegal mappings count: " + mappings); 1441 } else if (mappings == 0) { 1442 // use defaults 1443 } else if (mappings > 0) { 1444 float fc = (float)mappings / lf + 1.0f; 1445 int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ? 
1446 DEFAULT_INITIAL_CAPACITY : 1447 (fc >= MAXIMUM_CAPACITY) ? 1448 MAXIMUM_CAPACITY : 1449 tableSizeFor((int)fc)); 1450 float ft = (float)cap * lf; 1451 threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? 1452 (int)ft : Integer.MAX_VALUE); 1453 1454 // Check Map.Entry[].class since it's the nearest public type to 1455 // what we're actually creating. 1456 SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap); 1457 @SuppressWarnings({"rawtypes","unchecked"}) 1458 Node<K,V>[] tab = (Node<K,V>[])new Node[cap]; 1459 table = tab; 1460 1461 // Read the keys and values, and put the mappings in the HashMap 1462 for (int i = 0; i < mappings; i++) { 1463 @SuppressWarnings("unchecked") 1464 K key = (K) s.readObject(); 1465 @SuppressWarnings("unchecked") 1466 V value = (V) s.readObject(); 1467 putVal(hash(key), key, value, false, false); 1468 } 1469 } 1470 } 1471 1472 // Support for resetting final field during deserializing 1473 private static final class UnsafeHolder { UnsafeHolder()1474 private UnsafeHolder() { throw new InternalError(); } 1475 private static final jdk.internal.misc.Unsafe unsafe 1476 = jdk.internal.misc.Unsafe.getUnsafe(); 1477 private static final long LF_OFFSET 1478 = unsafe.objectFieldOffset(HashMap.class, "loadFactor"); putLoadFactor(HashMap<?, ?> map, float lf)1479 static void putLoadFactor(HashMap<?, ?> map, float lf) { 1480 unsafe.putFloat(map, LF_OFFSET, lf); 1481 } 1482 } 1483 1484 /* ------------------------------------------------------------ */ 1485 // iterators 1486 1487 abstract class HashIterator { 1488 Node<K,V> next; // next entry to return 1489 Node<K,V> current; // current entry 1490 int expectedModCount; // for fast-fail 1491 int index; // current slot 1492 HashIterator()1493 HashIterator() { 1494 expectedModCount = modCount; 1495 Node<K,V>[] t = table; 1496 current = next = null; 1497 index = 0; 1498 if (t != null && size > 0) { // advance to first entry 1499 do {} while (index < t.length 
&& (next = t[index++]) == null); 1500 } 1501 } 1502 hasNext()1503 public final boolean hasNext() { 1504 return next != null; 1505 } 1506 nextNode()1507 final Node<K,V> nextNode() { 1508 Node<K,V>[] t; 1509 Node<K,V> e = next; 1510 if (modCount != expectedModCount) 1511 throw new ConcurrentModificationException(); 1512 if (e == null) 1513 throw new NoSuchElementException(); 1514 if ((next = (current = e).next) == null && (t = table) != null) { 1515 do {} while (index < t.length && (next = t[index++]) == null); 1516 } 1517 return e; 1518 } 1519 remove()1520 public final void remove() { 1521 Node<K,V> p = current; 1522 if (p == null) 1523 throw new IllegalStateException(); 1524 if (modCount != expectedModCount) 1525 throw new ConcurrentModificationException(); 1526 current = null; 1527 removeNode(p.hash, p.key, null, false, false); 1528 expectedModCount = modCount; 1529 } 1530 } 1531 1532 final class KeyIterator extends HashIterator 1533 implements Iterator<K> { next()1534 public final K next() { return nextNode().key; } 1535 } 1536 1537 final class ValueIterator extends HashIterator 1538 implements Iterator<V> { next()1539 public final V next() { return nextNode().value; } 1540 } 1541 1542 final class EntryIterator extends HashIterator 1543 implements Iterator<Map.Entry<K,V>> { next()1544 public final Map.Entry<K,V> next() { return nextNode(); } 1545 } 1546 1547 /* ------------------------------------------------------------ */ 1548 // spliterators 1549 1550 static class HashMapSpliterator<K,V> { 1551 final HashMap<K,V> map; 1552 Node<K,V> current; // current node 1553 int index; // current index, modified on advance/split 1554 int fence; // one past last index 1555 int est; // size estimate 1556 int expectedModCount; // for comodification checks 1557 HashMapSpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount)1558 HashMapSpliterator(HashMap<K,V> m, int origin, 1559 int fence, int est, 1560 int expectedModCount) { 1561 this.map = m; 1562 
this.index = origin; 1563 this.fence = fence; 1564 this.est = est; 1565 this.expectedModCount = expectedModCount; 1566 } 1567 getFence()1568 final int getFence() { // initialize fence and size on first use 1569 int hi; 1570 if ((hi = fence) < 0) { 1571 HashMap<K,V> m = map; 1572 est = m.size; 1573 expectedModCount = m.modCount; 1574 Node<K,V>[] tab = m.table; 1575 hi = fence = (tab == null) ? 0 : tab.length; 1576 } 1577 return hi; 1578 } 1579 estimateSize()1580 public final long estimateSize() { 1581 getFence(); // force init 1582 return (long) est; 1583 } 1584 } 1585 1586 static final class KeySpliterator<K,V> 1587 extends HashMapSpliterator<K,V> 1588 implements Spliterator<K> { KeySpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount)1589 KeySpliterator(HashMap<K,V> m, int origin, int fence, int est, 1590 int expectedModCount) { 1591 super(m, origin, fence, est, expectedModCount); 1592 } 1593 trySplit()1594 public KeySpliterator<K,V> trySplit() { 1595 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1596 return (lo >= mid || current != null) ? null : 1597 new KeySpliterator<>(map, lo, index = mid, est >>>= 1, 1598 expectedModCount); 1599 } 1600 forEachRemaining(Consumer<? super K> action)1601 public void forEachRemaining(Consumer<? super K> action) { 1602 int i, hi, mc; 1603 if (action == null) 1604 throw new NullPointerException(); 1605 HashMap<K,V> m = map; 1606 Node<K,V>[] tab = m.table; 1607 if ((hi = fence) < 0) { 1608 mc = expectedModCount = m.modCount; 1609 hi = fence = (tab == null) ? 
0 : tab.length; 1610 } 1611 else 1612 mc = expectedModCount; 1613 if (tab != null && tab.length >= hi && 1614 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1615 Node<K,V> p = current; 1616 current = null; 1617 do { 1618 if (p == null) 1619 p = tab[i++]; 1620 else { 1621 action.accept(p.key); 1622 p = p.next; 1623 } 1624 } while (p != null || i < hi); 1625 if (m.modCount != mc) 1626 throw new ConcurrentModificationException(); 1627 } 1628 } 1629 tryAdvance(Consumer<? super K> action)1630 public boolean tryAdvance(Consumer<? super K> action) { 1631 int hi; 1632 if (action == null) 1633 throw new NullPointerException(); 1634 Node<K,V>[] tab = map.table; 1635 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1636 while (current != null || index < hi) { 1637 if (current == null) 1638 current = tab[index++]; 1639 else { 1640 K k = current.key; 1641 current = current.next; 1642 action.accept(k); 1643 if (map.modCount != expectedModCount) 1644 throw new ConcurrentModificationException(); 1645 return true; 1646 } 1647 } 1648 } 1649 return false; 1650 } 1651 characteristics()1652 public int characteristics() { 1653 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | 1654 Spliterator.DISTINCT; 1655 } 1656 } 1657 1658 static final class ValueSpliterator<K,V> 1659 extends HashMapSpliterator<K,V> 1660 implements Spliterator<V> { ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount)1661 ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est, 1662 int expectedModCount) { 1663 super(m, origin, fence, est, expectedModCount); 1664 } 1665 trySplit()1666 public ValueSpliterator<K,V> trySplit() { 1667 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1668 return (lo >= mid || current != null) ? null : 1669 new ValueSpliterator<>(map, lo, index = mid, est >>>= 1, 1670 expectedModCount); 1671 } 1672 forEachRemaining(Consumer<? super V> action)1673 public void forEachRemaining(Consumer<? 
super V> action) { 1674 int i, hi, mc; 1675 if (action == null) 1676 throw new NullPointerException(); 1677 HashMap<K,V> m = map; 1678 Node<K,V>[] tab = m.table; 1679 if ((hi = fence) < 0) { 1680 mc = expectedModCount = m.modCount; 1681 hi = fence = (tab == null) ? 0 : tab.length; 1682 } 1683 else 1684 mc = expectedModCount; 1685 if (tab != null && tab.length >= hi && 1686 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1687 Node<K,V> p = current; 1688 current = null; 1689 do { 1690 if (p == null) 1691 p = tab[i++]; 1692 else { 1693 action.accept(p.value); 1694 p = p.next; 1695 } 1696 } while (p != null || i < hi); 1697 if (m.modCount != mc) 1698 throw new ConcurrentModificationException(); 1699 } 1700 } 1701 tryAdvance(Consumer<? super V> action)1702 public boolean tryAdvance(Consumer<? super V> action) { 1703 int hi; 1704 if (action == null) 1705 throw new NullPointerException(); 1706 Node<K,V>[] tab = map.table; 1707 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1708 while (current != null || index < hi) { 1709 if (current == null) 1710 current = tab[index++]; 1711 else { 1712 V v = current.value; 1713 current = current.next; 1714 action.accept(v); 1715 if (map.modCount != expectedModCount) 1716 throw new ConcurrentModificationException(); 1717 return true; 1718 } 1719 } 1720 } 1721 return false; 1722 } 1723 characteristics()1724 public int characteristics() { 1725 return (fence < 0 || est == map.size ? 
Spliterator.SIZED : 0); 1726 } 1727 } 1728 1729 static final class EntrySpliterator<K,V> 1730 extends HashMapSpliterator<K,V> 1731 implements Spliterator<Map.Entry<K,V>> { EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est, int expectedModCount)1732 EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est, 1733 int expectedModCount) { 1734 super(m, origin, fence, est, expectedModCount); 1735 } 1736 trySplit()1737 public EntrySpliterator<K,V> trySplit() { 1738 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1739 return (lo >= mid || current != null) ? null : 1740 new EntrySpliterator<>(map, lo, index = mid, est >>>= 1, 1741 expectedModCount); 1742 } 1743 forEachRemaining(Consumer<? super Map.Entry<K,V>> action)1744 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) { 1745 int i, hi, mc; 1746 if (action == null) 1747 throw new NullPointerException(); 1748 HashMap<K,V> m = map; 1749 Node<K,V>[] tab = m.table; 1750 if ((hi = fence) < 0) { 1751 mc = expectedModCount = m.modCount; 1752 hi = fence = (tab == null) ? 0 : tab.length; 1753 } 1754 else 1755 mc = expectedModCount; 1756 if (tab != null && tab.length >= hi && 1757 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1758 Node<K,V> p = current; 1759 current = null; 1760 do { 1761 if (p == null) 1762 p = tab[i++]; 1763 else { 1764 action.accept(p); 1765 p = p.next; 1766 } 1767 } while (p != null || i < hi); 1768 if (m.modCount != mc) 1769 throw new ConcurrentModificationException(); 1770 } 1771 } 1772 tryAdvance(Consumer<? super Map.Entry<K,V>> action)1773 public boolean tryAdvance(Consumer<? 
super Map.Entry<K,V>> action) { 1774 int hi; 1775 if (action == null) 1776 throw new NullPointerException(); 1777 Node<K,V>[] tab = map.table; 1778 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1779 while (current != null || index < hi) { 1780 if (current == null) 1781 current = tab[index++]; 1782 else { 1783 Node<K,V> e = current; 1784 current = current.next; 1785 action.accept(e); 1786 if (map.modCount != expectedModCount) 1787 throw new ConcurrentModificationException(); 1788 return true; 1789 } 1790 } 1791 } 1792 return false; 1793 } 1794 characteristics()1795 public int characteristics() { 1796 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | 1797 Spliterator.DISTINCT; 1798 } 1799 } 1800 1801 /* ------------------------------------------------------------ */ 1802 // LinkedHashMap support 1803 1804 1805 /* 1806 * The following package-protected methods are designed to be 1807 * overridden by LinkedHashMap, but not by any other subclass. 1808 * Nearly all other internal methods are also package-protected 1809 * but are declared final, so can be used by LinkedHashMap, view 1810 * classes, and HashSet. 
1811 */ 1812 1813 // Create a regular (non-tree) node newNode(int hash, K key, V value, Node<K,V> next)1814 Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) { 1815 return new Node<>(hash, key, value, next); 1816 } 1817 1818 // For conversion from TreeNodes to plain nodes replacementNode(Node<K,V> p, Node<K,V> next)1819 Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) { 1820 return new Node<>(p.hash, p.key, p.value, next); 1821 } 1822 1823 // Create a tree bin node newTreeNode(int hash, K key, V value, Node<K,V> next)1824 TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) { 1825 return new TreeNode<>(hash, key, value, next); 1826 } 1827 1828 // For treeifyBin replacementTreeNode(Node<K,V> p, Node<K,V> next)1829 TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) { 1830 return new TreeNode<>(p.hash, p.key, p.value, next); 1831 } 1832 1833 /** 1834 * Reset to initial default state. Called by clone and readObject. 1835 */ reinitialize()1836 void reinitialize() { 1837 table = null; 1838 entrySet = null; 1839 keySet = null; 1840 values = null; 1841 modCount = 0; 1842 threshold = 0; 1843 size = 0; 1844 } 1845 1846 // Callbacks to allow LinkedHashMap post-actions afterNodeAccess(Node<K,V> p)1847 void afterNodeAccess(Node<K,V> p) { } afterNodeInsertion(boolean evict)1848 void afterNodeInsertion(boolean evict) { } afterNodeRemoval(Node<K,V> p)1849 void afterNodeRemoval(Node<K,V> p) { } 1850 1851 // Called only from writeObject, to ensure compatible ordering. 
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            // Write entries bucket by bucket, following each bucket's
            // collision chain, emitting key then value for each node.
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        }
    }

    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;           // red-black color flag
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            // Follow parent links until a node with no parent is reached.
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
*/
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    // Splice root out of its position in the bin's
                    // doubly-linked list and reinsert it at the head.
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;                     // hash value orders the tree first
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;                   // exact match
                else if (pl == null)
                    p = pr;                     // only one subtree to search
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;    // comparable keys pick a side
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;                   // unordered tie: recurse right,
                else
                    p = pl;                     // then continue down the left
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable.
We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                // Same class name (or a null operand): fall back to identity
                // hash codes; ties resolve to -1 so the result is never 0.
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            // Insert each node of the bin's linked list into a red-black tree.
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    // First node becomes the (black) root.
                    x.parent = null;
                    x.red = false;
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            // Equal hashes and not mutually comparable:
                            // use the identity-based tie-breaker.
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
*/
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;   // head and tail of new list
            for (Node<K,V> q = this; q != null; q = q.next) {
                // Replace each TreeNode with a plain Node, preserving the
                // bin's "next" order.
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;   // full-subtree scan done at most once
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;   // existing mapping found; caller updates value
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Equal hashes, not comparable: scan both subtrees once
                    // for an existing equal key before inserting.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Insert the new node as a leaf, link it into the bin's
                    // list after xp, rebalance, and move root to the front.
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;   // null signals that a new node was created
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            // First unlink this node from the bin's doubly-linked list.
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            if (root == null
                || (movable
                    && (root.right == null
                        || (rl = root.left) == null
                        || rl.left == null))) {
                tab[index] = first.untreeify(map);  // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                // Two children: find the in-order successor s, then swap the
                // tree linkages (not the contents) of p and s.
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                // After the swap, p has at most one child (sr).
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;   // leaf: p stands in for itself
            if (replacement != p) {
                // Splice replacement into p's place in the tree.
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    (root = replacement).red = false;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            // No rebalancing is needed when the removed node was red.
            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) {  // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;   // node counts for the lo and hi lists
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        /**
         * Rotates left around p; returns the (possibly updated) tree root.
         */
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
p.parent = r;
            }
            return root;
        }

        /**
         * Rotates right around p; returns the (possibly updated) tree root.
         */
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        /**
         * Rebalances after insertion of x; returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;   // inserted nodes are colored red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    // x is the root: roots are black.
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;   // black parent: nothing to fix
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {   // mirror image of the branch above
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        /**
         * Rebalances after deletion, starting at replacement node x;
         * returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;   // x is the new root: roots are black
                    return x;
                }
                else if (x.red) {
                    x.red = false;   // a red node absorbs the extra black
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling: rotate so the sibling becomes black.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Sibling has no red child: recolor, move up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;   // done
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;   // prev/next list links must be mutual
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;   // node must be a child of its parent
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;   // left child hash must not exceed parent's
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;   // right child hash must not be below parent's
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;   // a red node must not have two red children
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }

}