1 /* 2 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.IOException; 29 import java.io.InvalidObjectException; 30 import java.io.ObjectInputStream; 31 import java.io.Serializable; 32 import java.lang.reflect.ParameterizedType; 33 import java.lang.reflect.Type; 34 import java.util.function.BiConsumer; 35 import java.util.function.BiFunction; 36 import java.util.function.Consumer; 37 import java.util.function.Function; 38 import jdk.internal.access.SharedSecrets; 39 40 /** 41 * Hash table based implementation of the {@code Map} interface. This 42 * implementation provides all of the optional map operations, and permits 43 * {@code null} values and the {@code null} key. 
(The {@code HashMap} 44 * class is roughly equivalent to {@code Hashtable}, except that it is 45 * unsynchronized and permits nulls.) This class makes no guarantees as to 46 * the order of the map; in particular, it does not guarantee that the order 47 * will remain constant over time. 48 * 49 * <p>This implementation provides constant-time performance for the basic 50 * operations ({@code get} and {@code put}), assuming the hash function 51 * disperses the elements properly among the buckets. Iteration over 52 * collection views requires time proportional to the "capacity" of the 53 * {@code HashMap} instance (the number of buckets) plus its size (the number 54 * of key-value mappings). Thus, it's very important not to set the initial 55 * capacity too high (or the load factor too low) if iteration performance is 56 * important. 57 * 58 * <p>An instance of {@code HashMap} has two parameters that affect its 59 * performance: <i>initial capacity</i> and <i>load factor</i>. The 60 * <i>capacity</i> is the number of buckets in the hash table, and the initial 61 * capacity is simply the capacity at the time the hash table is created. The 62 * <i>load factor</i> is a measure of how full the hash table is allowed to 63 * get before its capacity is automatically increased. When the number of 64 * entries in the hash table exceeds the product of the load factor and the 65 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 66 * structures are rebuilt) so that the hash table has approximately twice the 67 * number of buckets. 68 * 69 * <p>As a general rule, the default load factor (.75) offers a good 70 * tradeoff between time and space costs. Higher values decrease the 71 * space overhead but increase the lookup cost (reflected in most of 72 * the operations of the {@code HashMap} class, including 73 * {@code get} and {@code put}). 
The expected number of entries in 74 * the map and its load factor should be taken into account when 75 * setting its initial capacity, so as to minimize the number of 76 * rehash operations. If the initial capacity is greater than the 77 * maximum number of entries divided by the load factor, no rehash 78 * operations will ever occur. 79 * 80 * <p>If many mappings are to be stored in a {@code HashMap} 81 * instance, creating it with a sufficiently large capacity will allow 82 * the mappings to be stored more efficiently than letting it perform 83 * automatic rehashing as needed to grow the table. Note that using 84 * many keys with the same {@code hashCode()} is a sure way to slow 85 * down performance of any hash table. To ameliorate impact, when keys 86 * are {@link Comparable}, this class may use comparison order among 87 * keys to help break ties. 88 * 89 * <p><strong>Note that this implementation is not synchronized.</strong> 90 * If multiple threads access a hash map concurrently, and at least one of 91 * the threads modifies the map structurally, it <i>must</i> be 92 * synchronized externally. (A structural modification is any operation 93 * that adds or deletes one or more mappings; merely changing the value 94 * associated with a key that an instance already contains is not a 95 * structural modification.) This is typically accomplished by 96 * synchronizing on some object that naturally encapsulates the map. 97 * 98 * If no such object exists, the map should be "wrapped" using the 99 * {@link Collections#synchronizedMap Collections.synchronizedMap} 100 * method. 
This is best done at creation time, to prevent accidental
 * unsynchronized access to the map:<pre>
 *   Map m = Collections.synchronizedMap(new HashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods"
 * are <i>fail-fast</i>: if the map is structurally modified at any time after
 * the iterator is created, in any way except through the iterator's own
 * {@code remove} method, the iterator will throw a
 * {@link ConcurrentModificationException}.  Thus, in the face of concurrent
 * modification, the iterator fails quickly and cleanly, rather than risking
 * arbitrary, non-deterministic behavior at an undetermined time in the
 * future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification.  Fail-fast iterators
 * throw {@code ConcurrentModificationException} on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    // Fixed serialization version for cross-release compatibility.
    private static final long serialVersionUID = 362498820763181265L;

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node).  Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>",
     * type then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor).  The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable, Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins.  In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used.  Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node.  However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use and transitions among plain vs tree modes is
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Basic hash bin node, used for most entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;       // cached spread hash of key (see HashMap.hash)
        final K key;
        V value;
        Node<K,V> next;       // next node in the same bin (singly linked)

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        // Per Map.Entry contract: XOR of key and value hash codes.
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }

    /* ---------------- Static utilities -------------- */

    /**
     * Computes key.hashCode() and spreads (XORs) higher bits of hash
     * to lower.  Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading.
Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int hash(Object key) {
        int h;
        // Fold the top 16 bits of the hash into the bottom 16; null key maps to bucket 0.
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (Type t : ts) {
                    if ((t instanceof ParameterizedType) &&
                        ((p = (ParameterizedType) t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /**
     * Returns a power of two size for the given target capacity.
     */
    static final int tableSizeFor(int cap) {
        // numberOfLeadingZeros(cap - 1) builds an all-ones mask just below
        // the next power of two; n < 0 occurs when cap <= 0.
        int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /* ---------------- Fields -------------- */

    /**
     * The table, initialized on first use, and resized as
     * necessary. When allocated, length is always a power of two.
     * (We also tolerate length zero in some operations to allow
     * bootstrapping mechanics that are currently not needed.)
     */
    transient Node<K,V>[] table;

    /**
     * Holds cached entrySet(). Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The number of times this HashMap has been structurally modified.
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * The next size value at which to resize (capacity * load factor).
     *
     * @serial
     */
    // (The javadoc description is true upon serialization.
    // Additionally, if the table array has not been allocated, this
    // field holds the initial array capacity, or zero signifying
    // DEFAULT_INITIAL_CAPACITY.)
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;

    /* ---------------- Public operations -------------- */

    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and load factor.
*
     * @param  initialCapacity the initial capacity
     * @param  loadFactor      the load factor
     * @throws IllegalArgumentException if the initial capacity is negative
     *         or the load factor is nonpositive
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;  // silently clamp, no exception
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // Table is allocated lazily; until then, threshold caches the
        // requested capacity rounded up to a power of two.
        this.threshold = tableSizeFor(initialCapacity);
    }

    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param  initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs an empty {@code HashMap} with the default initial capacity
     * (16) and the default load factor (0.75).
     */
    public HashMap() {
        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
    }

    /**
     * Constructs a new {@code HashMap} with the same mappings as the
     * specified {@code Map}.  The {@code HashMap} is created with
     * default load factor (0.75) and an initial capacity sufficient to
     * hold the mappings in the specified {@code Map}.
     *
     * @param   m the map whose mappings are to be placed in this map
     * @throws  NullPointerException if the specified map is null
     */
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        putMapEntries(m, false);
    }

    /**
     * Implements Map.putAll and Map constructor.
     *
     * @param m the map
     * @param evict false when initially constructing this map, else
     * true (relayed to method afterNodeInsertion).
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                // Capacity needed so that s entries fit without exceeding
                // the load factor; +1.0F compensates for the float division.
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            } else {
                // Because of linked-list bucket constraints, we cannot
                // expand all at once, but can reduce total resize
                // effort by repeated doubling now vs later
                while (s > threshold && table.length < MAXIMUM_CAPACITY)
                    resize();
            }

            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }

    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        return size;
    }

    /**
     * Returns {@code true} if this map contains no key-value mappings.
     *
     * @return {@code true} if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return size == 0;
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
     * key.equals(k))}, then this method returns {@code v}; otherwise
     * it returns {@code null}.
(There can be at most one such mapping.)
     *
     * <p>A return value of {@code null} does not <i>necessarily</i>
     * indicate that the map contains no mapping for the key; it's also
     * possible that the map explicitly maps the key to {@code null}.
     * The {@link #containsKey containsKey} operation may be used to
     * distinguish these two cases.
     *
     * @see #put(Object, Object)
     */
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                // Tree bin: delegate to the red-black tree lookup.
                if (first instanceof TreeNode)
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Plain bin: linear scan of the linked list.
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    /**
     * Returns {@code true} if this map contains a mapping for the
     * specified key.
     *
     * @param   key   The key whose presence in this map is to be tested
     * @return {@code true} if this map contains a mapping for the specified
     * key.
     */
    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }

    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
*
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
     */
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }

    /**
     * Implements Map.put and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to put
     * @param onlyIfAbsent if true, don't change existing value
     * @param evict if false, the table is in creation mode.
     * @return previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        // Lazily allocate the table on first insertion.
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((p = tab[i = (n - 1) & hash]) == null)
            tab[i] = newNode(hash, key, value, null);  // empty bin: new head
        else {
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);  // LinkedHashMap hook
                return oldValue;
            }
        }
        ++modCount;
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict);  // LinkedHashMap hook
        return null;
    }

    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                // Cannot grow further; disable future resizing attempts.
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;  // aid GC while rehashing
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        // Split the chain into "lo" (stays at index j) and
                        // "hi" (moves to j + oldCap) based on the one hash
                        // bit that the doubled mask newly exposes.
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }

    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            // First convert the singly linked chain into a doubly linked
            // list of TreeNodes, then build the red-black tree over it.
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }

    /**
     * Copies all of the mappings from the specified map to this map.
779 * These mappings will replace any mappings that this map had for 780 * any of the keys currently in the specified map. 781 * 782 * @param m mappings to be stored in this map 783 * @throws NullPointerException if the specified map is null 784 */ putAll(Map<? extends K, ? extends V> m)785 public void putAll(Map<? extends K, ? extends V> m) { 786 putMapEntries(m, true); 787 } 788 789 /** 790 * Removes the mapping for the specified key from this map if present. 791 * 792 * @param key key whose mapping is to be removed from the map 793 * @return the previous value associated with {@code key}, or 794 * {@code null} if there was no mapping for {@code key}. 795 * (A {@code null} return can also indicate that the map 796 * previously associated {@code null} with {@code key}.) 797 */ remove(Object key)798 public V remove(Object key) { 799 Node<K,V> e; 800 return (e = removeNode(hash(key), key, null, false, true)) == null ? 801 null : e.value; 802 } 803 804 /** 805 * Implements Map.remove and related methods. 
*
     * @param hash hash for key
     * @param key the key
     * @param value the value to match if matchValue, else ignored
     * @param matchValue if true only remove if value is equal
     * @param movable if false do not move other nodes while removing
     * @return the node, or null if none
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            // Phase 1: locate the node; p tracks its predecessor in the bin.
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            // Phase 2: unlink it (optionally only when the value matches).
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;  // node was the bin head
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);  // LinkedHashMap hook
                return node;
            }
        }
        return null;
    }

    /**
     * Removes all of the mappings from this map.
     * The map will be empty after this call returns.
     */
    public void clear() {
        Node<K,V>[] tab;
        // modCount is bumped unconditionally, even when already empty.
        modCount++;
        if ((tab = table) != null && size > 0) {
            size = 0;
            for (int i = 0; i < tab.length; ++i)
                tab[i] = null;
        }
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value.
872 * 873 * @param value value whose presence in this map is to be tested 874 * @return {@code true} if this map maps one or more keys to the 875 * specified value 876 */ containsValue(Object value)877 public boolean containsValue(Object value) { 878 Node<K,V>[] tab; V v; 879 if ((tab = table) != null && size > 0) { 880 for (Node<K,V> e : tab) { 881 for (; e != null; e = e.next) { 882 if ((v = e.value) == value || 883 (value != null && value.equals(v))) 884 return true; 885 } 886 } 887 } 888 return false; 889 } 890 891 /** 892 * Returns a {@link Set} view of the keys contained in this map. 893 * The set is backed by the map, so changes to the map are 894 * reflected in the set, and vice-versa. If the map is modified 895 * while an iteration over the set is in progress (except through 896 * the iterator's own {@code remove} operation), the results of 897 * the iteration are undefined. The set supports element removal, 898 * which removes the corresponding mapping from the map, via the 899 * {@code Iterator.remove}, {@code Set.remove}, 900 * {@code removeAll}, {@code retainAll}, and {@code clear} 901 * operations. It does not support the {@code add} or {@code addAll} 902 * operations. 
903 * 904 * @return a set view of the keys contained in this map 905 */ keySet()906 public Set<K> keySet() { 907 Set<K> ks = keySet; 908 if (ks == null) { 909 ks = new KeySet(); 910 keySet = ks; 911 } 912 return ks; 913 } 914 915 final class KeySet extends AbstractSet<K> { size()916 public final int size() { return size; } clear()917 public final void clear() { HashMap.this.clear(); } iterator()918 public final Iterator<K> iterator() { return new KeyIterator(); } contains(Object o)919 public final boolean contains(Object o) { return containsKey(o); } remove(Object key)920 public final boolean remove(Object key) { 921 return removeNode(hash(key), key, null, false, true) != null; 922 } spliterator()923 public final Spliterator<K> spliterator() { 924 return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); 925 } forEach(Consumer<? super K> action)926 public final void forEach(Consumer<? super K> action) { 927 Node<K,V>[] tab; 928 if (action == null) 929 throw new NullPointerException(); 930 if (size > 0 && (tab = table) != null) { 931 int mc = modCount; 932 for (Node<K,V> e : tab) { 933 for (; e != null; e = e.next) 934 action.accept(e.key); 935 } 936 if (modCount != mc) 937 throw new ConcurrentModificationException(); 938 } 939 } 940 } 941 942 /** 943 * Returns a {@link Collection} view of the values contained in this map. 944 * The collection is backed by the map, so changes to the map are 945 * reflected in the collection, and vice-versa. If the map is 946 * modified while an iteration over the collection is in progress 947 * (except through the iterator's own {@code remove} operation), 948 * the results of the iteration are undefined. The collection 949 * supports element removal, which removes the corresponding 950 * mapping from the map, via the {@code Iterator.remove}, 951 * {@code Collection.remove}, {@code removeAll}, 952 * {@code retainAll} and {@code clear} operations. It does not 953 * support the {@code add} or {@code addAll} operations. 
954 * 955 * @return a view of the values contained in this map 956 */ values()957 public Collection<V> values() { 958 Collection<V> vs = values; 959 if (vs == null) { 960 vs = new Values(); 961 values = vs; 962 } 963 return vs; 964 } 965 966 final class Values extends AbstractCollection<V> { size()967 public final int size() { return size; } clear()968 public final void clear() { HashMap.this.clear(); } iterator()969 public final Iterator<V> iterator() { return new ValueIterator(); } contains(Object o)970 public final boolean contains(Object o) { return containsValue(o); } spliterator()971 public final Spliterator<V> spliterator() { 972 return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0); 973 } forEach(Consumer<? super V> action)974 public final void forEach(Consumer<? super V> action) { 975 Node<K,V>[] tab; 976 if (action == null) 977 throw new NullPointerException(); 978 if (size > 0 && (tab = table) != null) { 979 int mc = modCount; 980 for (Node<K,V> e : tab) { 981 for (; e != null; e = e.next) 982 action.accept(e.value); 983 } 984 if (modCount != mc) 985 throw new ConcurrentModificationException(); 986 } 987 } 988 } 989 990 /** 991 * Returns a {@link Set} view of the mappings contained in this map. 992 * The set is backed by the map, so changes to the map are 993 * reflected in the set, and vice-versa. If the map is modified 994 * while an iteration over the set is in progress (except through 995 * the iterator's own {@code remove} operation, or through the 996 * {@code setValue} operation on a map entry returned by the 997 * iterator) the results of the iteration are undefined. The set 998 * supports element removal, which removes the corresponding 999 * mapping from the map, via the {@code Iterator.remove}, 1000 * {@code Set.remove}, {@code removeAll}, {@code retainAll} and 1001 * {@code clear} operations. It does not support the 1002 * {@code add} or {@code addAll} operations. 
1003 * 1004 * @return a set view of the mappings contained in this map 1005 */ entrySet()1006 public Set<Map.Entry<K,V>> entrySet() { 1007 Set<Map.Entry<K,V>> es; 1008 return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; 1009 } 1010 1011 final class EntrySet extends AbstractSet<Map.Entry<K,V>> { size()1012 public final int size() { return size; } clear()1013 public final void clear() { HashMap.this.clear(); } iterator()1014 public final Iterator<Map.Entry<K,V>> iterator() { 1015 return new EntryIterator(); 1016 } contains(Object o)1017 public final boolean contains(Object o) { 1018 if (!(o instanceof Map.Entry)) 1019 return false; 1020 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1021 Object key = e.getKey(); 1022 Node<K,V> candidate = getNode(hash(key), key); 1023 return candidate != null && candidate.equals(e); 1024 } remove(Object o)1025 public final boolean remove(Object o) { 1026 if (o instanceof Map.Entry) { 1027 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1028 Object key = e.getKey(); 1029 Object value = e.getValue(); 1030 return removeNode(hash(key), key, value, true, true) != null; 1031 } 1032 return false; 1033 } spliterator()1034 public final Spliterator<Map.Entry<K,V>> spliterator() { 1035 return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0); 1036 } forEach(Consumer<? super Map.Entry<K,V>> action)1037 public final void forEach(Consumer<? super Map.Entry<K,V>> action) { 1038 Node<K,V>[] tab; 1039 if (action == null) 1040 throw new NullPointerException(); 1041 if (size > 0 && (tab = table) != null) { 1042 int mc = modCount; 1043 for (Node<K,V> e : tab) { 1044 for (; e != null; e = e.next) 1045 action.accept(e); 1046 } 1047 if (modCount != mc) 1048 throw new ConcurrentModificationException(); 1049 } 1050 } 1051 } 1052 1053 // Overrides of JDK8 Map extension methods 1054 1055 @Override getOrDefault(Object key, V defaultValue)1056 public V getOrDefault(Object key, V defaultValue) { 1057 Node<K,V> e; 1058 return (e = getNode(hash(key), key)) == null ? 
defaultValue : e.value; 1059 } 1060 1061 @Override putIfAbsent(K key, V value)1062 public V putIfAbsent(K key, V value) { 1063 return putVal(hash(key), key, value, true, true); 1064 } 1065 1066 @Override remove(Object key, Object value)1067 public boolean remove(Object key, Object value) { 1068 return removeNode(hash(key), key, value, true, true) != null; 1069 } 1070 1071 @Override replace(K key, V oldValue, V newValue)1072 public boolean replace(K key, V oldValue, V newValue) { 1073 Node<K,V> e; V v; 1074 if ((e = getNode(hash(key), key)) != null && 1075 ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { 1076 e.value = newValue; 1077 afterNodeAccess(e); 1078 return true; 1079 } 1080 return false; 1081 } 1082 1083 @Override replace(K key, V value)1084 public V replace(K key, V value) { 1085 Node<K,V> e; 1086 if ((e = getNode(hash(key), key)) != null) { 1087 V oldValue = e.value; 1088 e.value = value; 1089 afterNodeAccess(e); 1090 return oldValue; 1091 } 1092 return null; 1093 } 1094 1095 /** 1096 * {@inheritDoc} 1097 * 1098 * <p>This method will, on a best-effort basis, throw a 1099 * {@link ConcurrentModificationException} if it is detected that the 1100 * mapping function modifies this map during computation. 1101 * 1102 * @throws ConcurrentModificationException if it is detected that the 1103 * mapping function modified this map 1104 */ 1105 @Override computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1106 public V computeIfAbsent(K key, 1107 Function<? super K, ? 
                         extends V> mappingFunction) {
    if (mappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;
    TreeNode<K,V> t = null;
    Node<K,V> old = null;
    // Resize up front so a later insertion cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        // Locate any existing node for the key (tree or list bin).
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
        V oldValue;
        // An existing non-null value short-circuits: function not invoked.
        if (old != null && (oldValue = old.value) != null) {
            afterNodeAccess(old);
            return oldValue;
        }
    }
    int mc = modCount;
    V v = mappingFunction.apply(key);
    // Detect the mapping function structurally modifying this map.
    if (mc != modCount) { throw new ConcurrentModificationException(); }
    if (v == null) {
        return null;
    } else if (old != null) {
        // Key was present but mapped to null: just replace the value.
        old.value = v;
        afterNodeAccess(old);
        return v;
    }
    else if (t != null)
        t.putTreeVal(this, tab, hash, key, v);
    else {
        tab[i] = newNode(hash, key, v, first);
        if (binCount >= TREEIFY_THRESHOLD - 1)
            treeifyBin(tab, hash);
    }
    modCount = mc + 1;
    ++size;
    afterNodeInsertion(true);
    return v;
}

/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V computeIfPresent(K key,
                          BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null)
        throw new NullPointerException();
    Node<K,V> e; V oldValue;
    int hash = hash(key);
    // Only act when the key maps to a non-null value.
    if ((e = getNode(hash, key)) != null &&
        (oldValue = e.value) != null) {
        int mc = modCount;
        V v = remappingFunction.apply(key, oldValue);
        // Detect the remapping function structurally modifying this map.
        if (mc != modCount) { throw new ConcurrentModificationException(); }
        if (v != null) {
            e.value = v;
            afterNodeAccess(e);
            return v;
        }
        else
            // A null result removes the mapping, per the Map contract.
            removeNode(hash, key, null, false, true);
    }
    return null;
}

/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V compute(K key,
                 BiFunction<? super K, ? super V, ?
                 extends V> remappingFunction) {
    if (remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;
    TreeNode<K,V> t = null;
    Node<K,V> old = null;
    // Resize up front so a later insertion cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        // Locate any existing node for the key (tree or list bin).
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    V oldValue = (old == null) ? null : old.value;
    int mc = modCount;
    V v = remappingFunction.apply(key, oldValue);
    // Detect the remapping function structurally modifying this map.
    if (mc != modCount) { throw new ConcurrentModificationException(); }
    if (old != null) {
        if (v != null) {
            old.value = v;
            afterNodeAccess(old);
        }
        else
            // A null result removes the existing mapping.
            removeNode(hash, key, null, false, true);
    }
    else if (v != null) {
        // No prior mapping: insert the computed value.
        if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        modCount = mc + 1;
        ++size;
        afterNodeInsertion(true);
    }
    return v;
}

/**
 * {@inheritDoc}
 *
 * <p>This method will, on a best-effort basis, throw a
 * {@link ConcurrentModificationException} if it is detected that the
 * remapping function modifies this map during computation.
 *
 * @throws ConcurrentModificationException if it is detected that the
 * remapping function modified this map
 */
@Override
public V merge(K key, V value,
               BiFunction<?
               super V, ? super V, ? extends V> remappingFunction) {
    if (value == null || remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;
    TreeNode<K,V> t = null;
    Node<K,V> old = null;
    // Resize up front so a later insertion cannot invalidate tab/i.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
        // Locate any existing node for the key (tree or list bin).
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    if (old != null) {
        V v;
        if (old.value != null) {
            int mc = modCount;
            v = remappingFunction.apply(old.value, value);
            // Detect the remapping function structurally modifying this map.
            if (mc != modCount) {
                throw new ConcurrentModificationException();
            }
        } else {
            // Existing null value: use the given value, function not invoked.
            v = value;
        }
        if (v != null) {
            old.value = v;
            afterNodeAccess(old);
        }
        else
            // A null merge result removes the mapping.
            removeNode(hash, key, null, false, true);
        return v;
    } else {
        // No prior mapping: plain insertion of the given value.
        if (t != null)
            t.putTreeVal(this, tab, hash, key, value);
        else {
            tab[i] = newNode(hash, key, value, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);
        return value;
    }
}

@Override
public void forEach(BiConsumer<? super K, ?
super V> action) { 1332 Node<K,V>[] tab; 1333 if (action == null) 1334 throw new NullPointerException(); 1335 if (size > 0 && (tab = table) != null) { 1336 int mc = modCount; 1337 for (Node<K,V> e : tab) { 1338 for (; e != null; e = e.next) 1339 action.accept(e.key, e.value); 1340 } 1341 if (modCount != mc) 1342 throw new ConcurrentModificationException(); 1343 } 1344 } 1345 1346 @Override replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1347 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1348 Node<K,V>[] tab; 1349 if (function == null) 1350 throw new NullPointerException(); 1351 if (size > 0 && (tab = table) != null) { 1352 int mc = modCount; 1353 for (Node<K,V> e : tab) { 1354 for (; e != null; e = e.next) { 1355 e.value = function.apply(e.key, e.value); 1356 } 1357 } 1358 if (modCount != mc) 1359 throw new ConcurrentModificationException(); 1360 } 1361 } 1362 1363 /* ------------------------------------------------------------ */ 1364 // Cloning and serialization 1365 1366 /** 1367 * Returns a shallow copy of this {@code HashMap} instance: the keys and 1368 * values themselves are not cloned. 1369 * 1370 * @return a shallow copy of this map 1371 */ 1372 @SuppressWarnings("unchecked") 1373 @Override clone()1374 public Object clone() { 1375 HashMap<K,V> result; 1376 try { 1377 result = (HashMap<K,V>)super.clone(); 1378 } catch (CloneNotSupportedException e) { 1379 // this shouldn't happen, since we are Cloneable 1380 throw new InternalError(e); 1381 } 1382 result.reinitialize(); 1383 result.putMapEntries(this, false); 1384 return result; 1385 } 1386 1387 // These methods are also used when serializing HashSets loadFactor()1388 final float loadFactor() { return loadFactor; } capacity()1389 final int capacity() { 1390 return (table != null) ? table.length : 1391 (threshold > 0) ? threshold : 1392 DEFAULT_INITIAL_CAPACITY; 1393 } 1394 1395 /** 1396 * Saves this map to a stream (that is, serializes it). 
 *
 * @param s the stream
 * @throws IOException if an I/O error occurs
 * @serialData The <i>capacity</i> of the HashMap (the length of the
 *             bucket array) is emitted (int), followed by the
 *             <i>size</i> (an int, the number of key-value
 *             mappings), followed by the key (Object) and value (Object)
 *             for each key-value mapping.  The key-value mappings are
 *             emitted in no particular order.
 */
private void writeObject(java.io.ObjectOutputStream s)
    throws IOException {
    int buckets = capacity();
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();
    s.writeInt(buckets);
    s.writeInt(size);
    internalWriteEntries(s);
}

/**
 * Reconstitutes this map from a stream (that is, deserializes it).
 * @param s the stream
 * @throws ClassNotFoundException if the class of a serialized object
 *         could not be found
 * @throws IOException if an I/O error occurs
 */
private void readObject(ObjectInputStream s)
    throws IOException, ClassNotFoundException {

    ObjectInputStream.GetField fields = s.readFields();

    // Read loadFactor (ignore threshold)
    float lf = fields.get("loadFactor", 0.75f);
    if (lf <= 0 || Float.isNaN(lf))
        throw new InvalidObjectException("Illegal load factor: " + lf);

    // Clamp to a sane range, then write the final field via Unsafe.
    lf = Math.min(Math.max(0.25f, lf), 4.0f);
    HashMap.UnsafeHolder.putLoadFactor(this, lf);

    reinitialize();

    s.readInt();                // Read and ignore number of buckets
    int mappings = s.readInt(); // Read number of mappings (size)
    if (mappings < 0) {
        throw new InvalidObjectException("Illegal mappings count: " + mappings);
    } else if (mappings == 0) {
        // use defaults
    } else if (mappings > 0) {
        // Size the table so the read mappings fit without resizing.
        float fc = (float)mappings / lf + 1.0f;
        int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                   DEFAULT_INITIAL_CAPACITY :
                   (fc >= MAXIMUM_CAPACITY) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor((int)fc));
        float ft = (float)cap * lf;
        threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                     (int)ft : Integer.MAX_VALUE);

        // Check Map.Entry[].class since it's the nearest public type to
        // what we're actually creating.
        SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap);
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
        table = tab;

        // Read the keys and values, and put the mappings in the HashMap
        for (int i = 0; i < mappings; i++) {
            @SuppressWarnings("unchecked")
            K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V value = (V) s.readObject();
            // evict=false: deserialization counts as construction.
            putVal(hash(key), key, value, false, false);
        }
    }
}

// Support for resetting final field during deserializing
private static final class UnsafeHolder {
    private UnsafeHolder() { throw new InternalError(); }
    private static final jdk.internal.misc.Unsafe unsafe
        = jdk.internal.misc.Unsafe.getUnsafe();
    private static final long LF_OFFSET
        = unsafe.objectFieldOffset(HashMap.class, "loadFactor");
    static void putLoadFactor(HashMap<?, ?> map, float lf) {
        unsafe.putFloat(map, LF_OFFSET, lf);
    }
}

/* ------------------------------------------------------------ */
// iterators

abstract class HashIterator {
    Node<K,V> next;       // next entry to return
    Node<K,V> current;    // current entry
    int expectedModCount; // for fast-fail
    int index;            // current slot

    HashIterator() {
        expectedModCount = modCount;
        Node<K,V>[] t = table;
        current = next = null;
        index = 0;
        if (t != null && size > 0) { // advance to first entry
            do {} while (index < t.length && (next = t[index++]) == null);
        }
    }

    public final boolean hasNext() {
        return next != null;
    }

    final Node<K,V> nextNode() {
        Node<K,V>[] t;
        Node<K,V> e = next;
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        if (e == null)
            throw new NoSuchElementException();
        // Advance within the bin, or skip ahead to the next non-empty bin.
        if ((next = (current = e).next) == null && (t = table) != null) {
            do {} while (index < t.length && (next = t[index++]) == null);
        }
        return e;
    }

    public final void remove() {
        Node<K,V> p = current;
        if (p == null)
            throw new IllegalStateException();
        if (modCount != expectedModCount)
            throw new ConcurrentModificationException();
        current = null;
        // movable=false keeps other nodes in place so iteration stays valid.
        removeNode(p.hash, p.key, null, false, false);
        expectedModCount = modCount;
    }
}

final class KeyIterator extends HashIterator
    implements Iterator<K> {
    public final K next() { return nextNode().key; }
}

final class ValueIterator extends HashIterator
    implements Iterator<V> {
    public final V next() { return nextNode().value; }
}

final class EntryIterator extends HashIterator
    implements Iterator<Map.Entry<K,V>> {
    public final Map.Entry<K,V> next() { return nextNode(); }
}

/* ------------------------------------------------------------ */
// spliterators

static class HashMapSpliterator<K,V> {
    final HashMap<K,V> map;
    Node<K,V> current;    // current node
    int index;            // current index, modified on advance/split
    int fence;            // one past last index
    int est;              // size estimate
    int expectedModCount; // for comodification checks

    HashMapSpliterator(HashMap<K,V> m, int origin,
                       int fence, int est,
                       int expectedModCount) {
        this.map = m;
        this.index = origin;
        this.fence = fence;
        this.est = est;
        this.expectedModCount = expectedModCount;
    }

    final int getFence() { // initialize fence and size on first use
        int hi;
        if ((hi = fence) < 0) {
            // Late-binding: snapshot size/modCount/table on first traversal.
            HashMap<K,V> m = map;
            est = m.size;
            expectedModCount = m.modCount;
            Node<K,V>[] tab = m.table;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        return hi;
    }

    public final long estimateSize() {
        getFence(); // force init
        return (long) est;
    }
}

static final class KeySpliterator<K,V>
    extends HashMapSpliterator<K,V>
    implements Spliterator<K> {
    KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                   int expectedModCount) {
        super(m, origin, fence, est, expectedModCount);
    }

    public KeySpliterator<K,V> trySplit() {
        // Split the remaining bin range in half; refuse mid-bin splits.
        int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
        return (lo >= mid || current != null) ? null :
            new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                 expectedModCount);
    }

    public void forEachRemaining(Consumer<? super K> action) {
        int i, hi, mc;
        if (action == null)
            throw new NullPointerException();
        HashMap<K,V> m = map;
        Node<K,V>[] tab = m.table;
        if ((hi = fence) < 0) {
            // First use: bind fence and modCount now.
            mc = expectedModCount = m.modCount;
            hi = fence = (tab == null) ?
                0 : tab.length;
        }
        else
            mc = expectedModCount;
        if (tab != null && tab.length >= hi &&
            (i = index) >= 0 && (i < (index = hi) || current != null)) {
            Node<K,V> p = current;
            current = null;
            // Drain the remaining range, bin by bin.
            do {
                if (p == null)
                    p = tab[i++];
                else {
                    action.accept(p.key);
                    p = p.next;
                }
            } while (p != null || i < hi);
            if (m.modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public boolean tryAdvance(Consumer<? super K> action) {
        int hi;
        if (action == null)
            throw new NullPointerException();
        Node<K,V>[] tab = map.table;
        if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
            while (current != null || index < hi) {
                if (current == null)
                    current = tab[index++];
                else {
                    K k = current.key;
                    current = current.next;
                    action.accept(k);
                    // Check after the action so its own mutations are caught.
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
        }
        return false;
    }

    public int characteristics() {
        // SIZED only before binding or while the estimate is still exact.
        return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
            Spliterator.DISTINCT;
    }
}

static final class ValueSpliterator<K,V>
    extends HashMapSpliterator<K,V>
    implements Spliterator<V> {
    ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                     int expectedModCount) {
        super(m, origin, fence, est, expectedModCount);
    }

    public ValueSpliterator<K,V> trySplit() {
        // Split the remaining bin range in half; refuse mid-bin splits.
        int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
        return (lo >= mid || current != null) ? null :
            new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                   expectedModCount);
    }

    public void forEachRemaining(Consumer<?
                                 super V> action) {
        int i, hi, mc;
        if (action == null)
            throw new NullPointerException();
        HashMap<K,V> m = map;
        Node<K,V>[] tab = m.table;
        if ((hi = fence) < 0) {
            // First use: bind fence and modCount now.
            mc = expectedModCount = m.modCount;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        else
            mc = expectedModCount;
        if (tab != null && tab.length >= hi &&
            (i = index) >= 0 && (i < (index = hi) || current != null)) {
            Node<K,V> p = current;
            current = null;
            // Drain the remaining range, bin by bin.
            do {
                if (p == null)
                    p = tab[i++];
                else {
                    action.accept(p.value);
                    p = p.next;
                }
            } while (p != null || i < hi);
            if (m.modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public boolean tryAdvance(Consumer<? super V> action) {
        int hi;
        if (action == null)
            throw new NullPointerException();
        Node<K,V>[] tab = map.table;
        if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
            while (current != null || index < hi) {
                if (current == null)
                    current = tab[index++];
                else {
                    V v = current.value;
                    current = current.next;
                    action.accept(v);
                    // Check after the action so its own mutations are caught.
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
        }
        return false;
    }

    public int characteristics() {
        // SIZED only before binding or while the estimate is still exact.
        return (fence < 0 || est == map.size ?
                Spliterator.SIZED : 0);
    }
}

static final class EntrySpliterator<K,V>
    extends HashMapSpliterator<K,V>
    implements Spliterator<Map.Entry<K,V>> {
    EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                     int expectedModCount) {
        super(m, origin, fence, est, expectedModCount);
    }

    public EntrySpliterator<K,V> trySplit() {
        // Split the remaining bin range in half; refuse mid-bin splits.
        int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
        return (lo >= mid || current != null) ? null :
            new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                   expectedModCount);
    }

    public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
        int i, hi, mc;
        if (action == null)
            throw new NullPointerException();
        HashMap<K,V> m = map;
        Node<K,V>[] tab = m.table;
        if ((hi = fence) < 0) {
            // First use: bind fence and modCount now.
            mc = expectedModCount = m.modCount;
            hi = fence = (tab == null) ? 0 : tab.length;
        }
        else
            mc = expectedModCount;
        if (tab != null && tab.length >= hi &&
            (i = index) >= 0 && (i < (index = hi) || current != null)) {
            Node<K,V> p = current;
            current = null;
            // Drain the remaining range, bin by bin.
            do {
                if (p == null)
                    p = tab[i++];
                else {
                    action.accept(p);
                    p = p.next;
                }
            } while (p != null || i < hi);
            if (m.modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public boolean tryAdvance(Consumer<?
super Map.Entry<K,V>> action) { 1776 int hi; 1777 if (action == null) 1778 throw new NullPointerException(); 1779 Node<K,V>[] tab = map.table; 1780 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1781 while (current != null || index < hi) { 1782 if (current == null) 1783 current = tab[index++]; 1784 else { 1785 Node<K,V> e = current; 1786 current = current.next; 1787 action.accept(e); 1788 if (map.modCount != expectedModCount) 1789 throw new ConcurrentModificationException(); 1790 return true; 1791 } 1792 } 1793 } 1794 return false; 1795 } 1796 characteristics()1797 public int characteristics() { 1798 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | 1799 Spliterator.DISTINCT; 1800 } 1801 } 1802 1803 /* ------------------------------------------------------------ */ 1804 // LinkedHashMap support 1805 1806 1807 /* 1808 * The following package-protected methods are designed to be 1809 * overridden by LinkedHashMap, but not by any other subclass. 1810 * Nearly all other internal methods are also package-protected 1811 * but are declared final, so can be used by LinkedHashMap, view 1812 * classes, and HashSet. 
1813 */ 1814 1815 // Create a regular (non-tree) node newNode(int hash, K key, V value, Node<K,V> next)1816 Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) { 1817 return new Node<>(hash, key, value, next); 1818 } 1819 1820 // For conversion from TreeNodes to plain nodes replacementNode(Node<K,V> p, Node<K,V> next)1821 Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) { 1822 return new Node<>(p.hash, p.key, p.value, next); 1823 } 1824 1825 // Create a tree bin node newTreeNode(int hash, K key, V value, Node<K,V> next)1826 TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) { 1827 return new TreeNode<>(hash, key, value, next); 1828 } 1829 1830 // For treeifyBin replacementTreeNode(Node<K,V> p, Node<K,V> next)1831 TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) { 1832 return new TreeNode<>(p.hash, p.key, p.value, next); 1833 } 1834 1835 /** 1836 * Reset to initial default state. Called by clone and readObject. 1837 */ reinitialize()1838 void reinitialize() { 1839 table = null; 1840 entrySet = null; 1841 keySet = null; 1842 values = null; 1843 modCount = 0; 1844 threshold = 0; 1845 size = 0; 1846 } 1847 1848 // Callbacks to allow LinkedHashMap post-actions afterNodeAccess(Node<K,V> p)1849 void afterNodeAccess(Node<K,V> p) { } afterNodeInsertion(boolean evict)1850 void afterNodeInsertion(boolean evict) { } afterNodeRemoval(Node<K,V> p)1851 void afterNodeRemoval(Node<K,V> p) { } 1852 1853 // Called only from writeObject, to ensure compatible ordering. 
internalWriteEntries(java.io.ObjectOutputStream s)1854 void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { 1855 Node<K,V>[] tab; 1856 if (size > 0 && (tab = table) != null) { 1857 for (Node<K,V> e : tab) { 1858 for (; e != null; e = e.next) { 1859 s.writeObject(e.key); 1860 s.writeObject(e.value); 1861 } 1862 } 1863 } 1864 } 1865 1866 /* ------------------------------------------------------------ */ 1867 // Tree bins 1868 1869 /** 1870 * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn 1871 * extends Node) so can be used as extension of either regular or 1872 * linked node. 1873 */ 1874 static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> { 1875 TreeNode<K,V> parent; // red-black tree links 1876 TreeNode<K,V> left; 1877 TreeNode<K,V> right; 1878 TreeNode<K,V> prev; // needed to unlink next upon deletion 1879 boolean red; TreeNode(int hash, K key, V val, Node<K,V> next)1880 TreeNode(int hash, K key, V val, Node<K,V> next) { 1881 super(hash, key, val, next); 1882 } 1883 1884 /** 1885 * Returns root of tree containing this node. 1886 */ root()1887 final TreeNode<K,V> root() { 1888 for (TreeNode<K,V> r = this, p;;) { 1889 if ((p = r.parent) == null) 1890 return r; 1891 r = p; 1892 } 1893 } 1894 1895 /** 1896 * Ensures that the given root is the first node of its bin. 
1897 */ moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root)1898 static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) { 1899 int n; 1900 if (root != null && tab != null && (n = tab.length) > 0) { 1901 int index = (n - 1) & root.hash; 1902 TreeNode<K,V> first = (TreeNode<K,V>)tab[index]; 1903 if (root != first) { 1904 Node<K,V> rn; 1905 tab[index] = root; 1906 TreeNode<K,V> rp = root.prev; 1907 if ((rn = root.next) != null) 1908 ((TreeNode<K,V>)rn).prev = rp; 1909 if (rp != null) 1910 rp.next = rn; 1911 if (first != null) 1912 first.prev = root; 1913 root.next = first; 1914 root.prev = null; 1915 } 1916 assert checkInvariants(root); 1917 } 1918 } 1919 1920 /** 1921 * Finds the node starting at root p with the given hash and key. 1922 * The kc argument caches comparableClassFor(key) upon first use 1923 * comparing keys. 1924 */ find(int h, Object k, Class<?> kc)1925 final TreeNode<K,V> find(int h, Object k, Class<?> kc) { 1926 TreeNode<K,V> p = this; 1927 do { 1928 int ph, dir; K pk; 1929 TreeNode<K,V> pl = p.left, pr = p.right, q; 1930 if ((ph = p.hash) > h) 1931 p = pl; 1932 else if (ph < h) 1933 p = pr; 1934 else if ((pk = p.key) == k || (k != null && k.equals(pk))) 1935 return p; 1936 else if (pl == null) 1937 p = pr; 1938 else if (pr == null) 1939 p = pl; 1940 else if ((kc != null || 1941 (kc = comparableClassFor(k)) != null) && 1942 (dir = compareComparables(kc, k, pk)) != 0) 1943 p = (dir < 0) ? pl : pr; 1944 else if ((q = pr.find(h, k, kc)) != null) 1945 return q; 1946 else 1947 p = pl; 1948 } while (p != null); 1949 return null; 1950 } 1951 1952 /** 1953 * Calls find for root node. 1954 */ getTreeNode(int h, Object k)1955 final TreeNode<K,V> getTreeNode(int h, Object k) { 1956 return ((parent != null) ? root() : this).find(h, k, null); 1957 } 1958 1959 /** 1960 * Tie-breaking utility for ordering insertions when equal 1961 * hashCodes and non-comparable. 
We don't require a total 1962 * order, just a consistent insertion rule to maintain 1963 * equivalence across rebalancings. Tie-breaking further than 1964 * necessary simplifies testing a bit. 1965 */ tieBreakOrder(Object a, Object b)1966 static int tieBreakOrder(Object a, Object b) { 1967 int d; 1968 if (a == null || b == null || 1969 (d = a.getClass().getName(). 1970 compareTo(b.getClass().getName())) == 0) 1971 d = (System.identityHashCode(a) <= System.identityHashCode(b) ? 1972 -1 : 1); 1973 return d; 1974 } 1975 1976 /** 1977 * Forms tree of the nodes linked from this node. 1978 */ treeify(Node<K,V>[] tab)1979 final void treeify(Node<K,V>[] tab) { 1980 TreeNode<K,V> root = null; 1981 for (TreeNode<K,V> x = this, next; x != null; x = next) { 1982 next = (TreeNode<K,V>)x.next; 1983 x.left = x.right = null; 1984 if (root == null) { 1985 x.parent = null; 1986 x.red = false; 1987 root = x; 1988 } 1989 else { 1990 K k = x.key; 1991 int h = x.hash; 1992 Class<?> kc = null; 1993 for (TreeNode<K,V> p = root;;) { 1994 int dir, ph; 1995 K pk = p.key; 1996 if ((ph = p.hash) > h) 1997 dir = -1; 1998 else if (ph < h) 1999 dir = 1; 2000 else if ((kc == null && 2001 (kc = comparableClassFor(k)) == null) || 2002 (dir = compareComparables(kc, k, pk)) == 0) 2003 dir = tieBreakOrder(k, pk); 2004 2005 TreeNode<K,V> xp = p; 2006 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2007 x.parent = xp; 2008 if (dir <= 0) 2009 xp.left = x; 2010 else 2011 xp.right = x; 2012 root = balanceInsertion(root, x); 2013 break; 2014 } 2015 } 2016 } 2017 } 2018 moveRootToFront(tab, root); 2019 } 2020 2021 /** 2022 * Returns a list of non-TreeNodes replacing those linked from 2023 * this node. 
2024 */ untreeify(HashMap<K,V> map)2025 final Node<K,V> untreeify(HashMap<K,V> map) { 2026 Node<K,V> hd = null, tl = null; 2027 for (Node<K,V> q = this; q != null; q = q.next) { 2028 Node<K,V> p = map.replacementNode(q, null); 2029 if (tl == null) 2030 hd = p; 2031 else 2032 tl.next = p; 2033 tl = p; 2034 } 2035 return hd; 2036 } 2037 2038 /** 2039 * Tree version of putVal. 2040 */ putTreeVal(HashMap<K,V> map, Node<K,V>[] tab, int h, K k, V v)2041 final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab, 2042 int h, K k, V v) { 2043 Class<?> kc = null; 2044 boolean searched = false; 2045 TreeNode<K,V> root = (parent != null) ? root() : this; 2046 for (TreeNode<K,V> p = root;;) { 2047 int dir, ph; K pk; 2048 if ((ph = p.hash) > h) 2049 dir = -1; 2050 else if (ph < h) 2051 dir = 1; 2052 else if ((pk = p.key) == k || (k != null && k.equals(pk))) 2053 return p; 2054 else if ((kc == null && 2055 (kc = comparableClassFor(k)) == null) || 2056 (dir = compareComparables(kc, k, pk)) == 0) { 2057 if (!searched) { 2058 TreeNode<K,V> q, ch; 2059 searched = true; 2060 if (((ch = p.left) != null && 2061 (q = ch.find(h, k, kc)) != null) || 2062 ((ch = p.right) != null && 2063 (q = ch.find(h, k, kc)) != null)) 2064 return q; 2065 } 2066 dir = tieBreakOrder(k, pk); 2067 } 2068 2069 TreeNode<K,V> xp = p; 2070 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2071 Node<K,V> xpn = xp.next; 2072 TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn); 2073 if (dir <= 0) 2074 xp.left = x; 2075 else 2076 xp.right = x; 2077 xp.next = x; 2078 x.parent = x.prev = xp; 2079 if (xpn != null) 2080 ((TreeNode<K,V>)xpn).prev = x; 2081 moveRootToFront(tab, balanceInsertion(root, x)); 2082 return null; 2083 } 2084 } 2085 } 2086 2087 /** 2088 * Removes the given node, that must be present before this call. 
2089 * This is messier than typical red-black deletion code because we 2090 * cannot swap the contents of an interior node with a leaf 2091 * successor that is pinned by "next" pointers that are accessible 2092 * independently during traversal. So instead we swap the tree 2093 * linkages. If the current tree appears to have too few nodes, 2094 * the bin is converted back to a plain bin. (The test triggers 2095 * somewhere between 2 and 6 nodes, depending on tree structure). 2096 */ removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, boolean movable)2097 final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, 2098 boolean movable) { 2099 int n; 2100 if (tab == null || (n = tab.length) == 0) 2101 return; 2102 int index = (n - 1) & hash; 2103 TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; 2104 TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; 2105 if (pred == null) 2106 tab[index] = first = succ; 2107 else 2108 pred.next = succ; 2109 if (succ != null) 2110 succ.prev = pred; 2111 if (first == null) 2112 return; 2113 if (root.parent != null) 2114 root = root.root(); 2115 if (root == null 2116 || (movable 2117 && (root.right == null 2118 || (rl = root.left) == null 2119 || rl.left == null))) { 2120 tab[index] = first.untreeify(map); // too small 2121 return; 2122 } 2123 TreeNode<K,V> p = this, pl = left, pr = right, replacement; 2124 if (pl != null && pr != null) { 2125 TreeNode<K,V> s = pr, sl; 2126 while ((sl = s.left) != null) // find successor 2127 s = sl; 2128 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 2129 TreeNode<K,V> sr = s.right; 2130 TreeNode<K,V> pp = p.parent; 2131 if (s == pr) { // p was s's direct parent 2132 p.parent = s; 2133 s.right = p; 2134 } 2135 else { 2136 TreeNode<K,V> sp = s.parent; 2137 if ((p.parent = sp) != null) { 2138 if (s == sp.left) 2139 sp.left = p; 2140 else 2141 sp.right = p; 2142 } 2143 if ((s.right = pr) != null) 2144 pr.parent = s; 2145 } 2146 p.left = null; 2147 if ((p.right = sr) != 
null) 2148 sr.parent = p; 2149 if ((s.left = pl) != null) 2150 pl.parent = s; 2151 if ((s.parent = pp) == null) 2152 root = s; 2153 else if (p == pp.left) 2154 pp.left = s; 2155 else 2156 pp.right = s; 2157 if (sr != null) 2158 replacement = sr; 2159 else 2160 replacement = p; 2161 } 2162 else if (pl != null) 2163 replacement = pl; 2164 else if (pr != null) 2165 replacement = pr; 2166 else 2167 replacement = p; 2168 if (replacement != p) { 2169 TreeNode<K,V> pp = replacement.parent = p.parent; 2170 if (pp == null) 2171 (root = replacement).red = false; 2172 else if (p == pp.left) 2173 pp.left = replacement; 2174 else 2175 pp.right = replacement; 2176 p.left = p.right = p.parent = null; 2177 } 2178 2179 TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement); 2180 2181 if (replacement == p) { // detach 2182 TreeNode<K,V> pp = p.parent; 2183 p.parent = null; 2184 if (pp != null) { 2185 if (p == pp.left) 2186 pp.left = null; 2187 else if (p == pp.right) 2188 pp.right = null; 2189 } 2190 } 2191 if (movable) 2192 moveRootToFront(tab, r); 2193 } 2194 2195 /** 2196 * Splits nodes in a tree bin into lower and upper tree bins, 2197 * or untreeifies if now too small. Called only from resize; 2198 * see above discussion about split bits and indices. 
2199 * 2200 * @param map the map 2201 * @param tab the table for recording bin heads 2202 * @param index the index of the table being split 2203 * @param bit the bit of hash to split on 2204 */ split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit)2205 final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) { 2206 TreeNode<K,V> b = this; 2207 // Relink into lo and hi lists, preserving order 2208 TreeNode<K,V> loHead = null, loTail = null; 2209 TreeNode<K,V> hiHead = null, hiTail = null; 2210 int lc = 0, hc = 0; 2211 for (TreeNode<K,V> e = b, next; e != null; e = next) { 2212 next = (TreeNode<K,V>)e.next; 2213 e.next = null; 2214 if ((e.hash & bit) == 0) { 2215 if ((e.prev = loTail) == null) 2216 loHead = e; 2217 else 2218 loTail.next = e; 2219 loTail = e; 2220 ++lc; 2221 } 2222 else { 2223 if ((e.prev = hiTail) == null) 2224 hiHead = e; 2225 else 2226 hiTail.next = e; 2227 hiTail = e; 2228 ++hc; 2229 } 2230 } 2231 2232 if (loHead != null) { 2233 if (lc <= UNTREEIFY_THRESHOLD) 2234 tab[index] = loHead.untreeify(map); 2235 else { 2236 tab[index] = loHead; 2237 if (hiHead != null) // (else is already treeified) 2238 loHead.treeify(tab); 2239 } 2240 } 2241 if (hiHead != null) { 2242 if (hc <= UNTREEIFY_THRESHOLD) 2243 tab[index + bit] = hiHead.untreeify(map); 2244 else { 2245 tab[index + bit] = hiHead; 2246 if (loHead != null) 2247 hiHead.treeify(tab); 2248 } 2249 } 2250 } 2251 2252 /* ------------------------------------------------------------ */ 2253 // Red-black tree methods, all adapted from CLR 2254 rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p)2255 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 2256 TreeNode<K,V> p) { 2257 TreeNode<K,V> r, pp, rl; 2258 if (p != null && (r = p.right) != null) { 2259 if ((rl = p.right = r.left) != null) 2260 rl.parent = p; 2261 if ((pp = r.parent = p.parent) == null) 2262 (root = r).red = false; 2263 else if (pp.left == p) 2264 pp.left = r; 2265 else 2266 pp.right = r; 2267 r.left = p; 2268 
p.parent = r; 2269 } 2270 return root; 2271 } 2272 rotateRight(TreeNode<K,V> root, TreeNode<K,V> p)2273 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 2274 TreeNode<K,V> p) { 2275 TreeNode<K,V> l, pp, lr; 2276 if (p != null && (l = p.left) != null) { 2277 if ((lr = p.left = l.right) != null) 2278 lr.parent = p; 2279 if ((pp = l.parent = p.parent) == null) 2280 (root = l).red = false; 2281 else if (pp.right == p) 2282 pp.right = l; 2283 else 2284 pp.left = l; 2285 l.right = p; 2286 p.parent = l; 2287 } 2288 return root; 2289 } 2290 balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x)2291 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 2292 TreeNode<K,V> x) { 2293 x.red = true; 2294 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 2295 if ((xp = x.parent) == null) { 2296 x.red = false; 2297 return x; 2298 } 2299 else if (!xp.red || (xpp = xp.parent) == null) 2300 return root; 2301 if (xp == (xppl = xpp.left)) { 2302 if ((xppr = xpp.right) != null && xppr.red) { 2303 xppr.red = false; 2304 xp.red = false; 2305 xpp.red = true; 2306 x = xpp; 2307 } 2308 else { 2309 if (x == xp.right) { 2310 root = rotateLeft(root, x = xp); 2311 xpp = (xp = x.parent) == null ? null : xp.parent; 2312 } 2313 if (xp != null) { 2314 xp.red = false; 2315 if (xpp != null) { 2316 xpp.red = true; 2317 root = rotateRight(root, xpp); 2318 } 2319 } 2320 } 2321 } 2322 else { 2323 if (xppl != null && xppl.red) { 2324 xppl.red = false; 2325 xp.red = false; 2326 xpp.red = true; 2327 x = xpp; 2328 } 2329 else { 2330 if (x == xp.left) { 2331 root = rotateRight(root, x = xp); 2332 xpp = (xp = x.parent) == null ? 
null : xp.parent; 2333 } 2334 if (xp != null) { 2335 xp.red = false; 2336 if (xpp != null) { 2337 xpp.red = true; 2338 root = rotateLeft(root, xpp); 2339 } 2340 } 2341 } 2342 } 2343 } 2344 } 2345 balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x)2346 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 2347 TreeNode<K,V> x) { 2348 for (TreeNode<K,V> xp, xpl, xpr;;) { 2349 if (x == null || x == root) 2350 return root; 2351 else if ((xp = x.parent) == null) { 2352 x.red = false; 2353 return x; 2354 } 2355 else if (x.red) { 2356 x.red = false; 2357 return root; 2358 } 2359 else if ((xpl = xp.left) == x) { 2360 if ((xpr = xp.right) != null && xpr.red) { 2361 xpr.red = false; 2362 xp.red = true; 2363 root = rotateLeft(root, xp); 2364 xpr = (xp = x.parent) == null ? null : xp.right; 2365 } 2366 if (xpr == null) 2367 x = xp; 2368 else { 2369 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 2370 if ((sr == null || !sr.red) && 2371 (sl == null || !sl.red)) { 2372 xpr.red = true; 2373 x = xp; 2374 } 2375 else { 2376 if (sr == null || !sr.red) { 2377 if (sl != null) 2378 sl.red = false; 2379 xpr.red = true; 2380 root = rotateRight(root, xpr); 2381 xpr = (xp = x.parent) == null ? 2382 null : xp.right; 2383 } 2384 if (xpr != null) { 2385 xpr.red = (xp == null) ? false : xp.red; 2386 if ((sr = xpr.right) != null) 2387 sr.red = false; 2388 } 2389 if (xp != null) { 2390 xp.red = false; 2391 root = rotateLeft(root, xp); 2392 } 2393 x = root; 2394 } 2395 } 2396 } 2397 else { // symmetric 2398 if (xpl != null && xpl.red) { 2399 xpl.red = false; 2400 xp.red = true; 2401 root = rotateRight(root, xp); 2402 xpl = (xp = x.parent) == null ? 
null : xp.left; 2403 } 2404 if (xpl == null) 2405 x = xp; 2406 else { 2407 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 2408 if ((sl == null || !sl.red) && 2409 (sr == null || !sr.red)) { 2410 xpl.red = true; 2411 x = xp; 2412 } 2413 else { 2414 if (sl == null || !sl.red) { 2415 if (sr != null) 2416 sr.red = false; 2417 xpl.red = true; 2418 root = rotateLeft(root, xpl); 2419 xpl = (xp = x.parent) == null ? 2420 null : xp.left; 2421 } 2422 if (xpl != null) { 2423 xpl.red = (xp == null) ? false : xp.red; 2424 if ((sl = xpl.left) != null) 2425 sl.red = false; 2426 } 2427 if (xp != null) { 2428 xp.red = false; 2429 root = rotateRight(root, xp); 2430 } 2431 x = root; 2432 } 2433 } 2434 } 2435 } 2436 } 2437 2438 /** 2439 * Recursive invariant check 2440 */ checkInvariants(TreeNode<K,V> t)2441 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 2442 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 2443 tb = t.prev, tn = (TreeNode<K,V>)t.next; 2444 if (tb != null && tb.next != t) 2445 return false; 2446 if (tn != null && tn.prev != t) 2447 return false; 2448 if (tp != null && t != tp.left && t != tp.right) 2449 return false; 2450 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 2451 return false; 2452 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 2453 return false; 2454 if (t.red && tl != null && tl.red && tr != null && tr.red) 2455 return false; 2456 if (tl != null && !checkInvariants(tl)) 2457 return false; 2458 if (tr != null && !checkInvariants(tr)) 2459 return false; 2460 return true; 2461 } 2462 } 2463 2464 } 2465