GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/java.base/share/classes/java/util/HashMap.java
/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package java.util;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import jdk.internal.access.SharedSecrets;

/**
 * Hash table based implementation of the {@code Map} interface. This
 * implementation provides all of the optional map operations, and permits
 * {@code null} values and the {@code null} key. (The {@code HashMap}
 * class is roughly equivalent to {@code Hashtable}, except that it is
 * unsynchronized and permits nulls.) This class makes no guarantees as to
 * the order of the map; in particular, it does not guarantee that the order
 * will remain constant over time.
 *
 * <p>This implementation provides constant-time performance for the basic
 * operations ({@code get} and {@code put}), assuming the hash function
 * disperses the elements properly among the buckets. Iteration over
 * collection views requires time proportional to the "capacity" of the
 * {@code HashMap} instance (the number of buckets) plus its size (the number
 * of key-value mappings). Thus, it's very important not to set the initial
 * capacity too high (or the load factor too low) if iteration performance is
 * important.
 *
 * <p>An instance of {@code HashMap} has two parameters that affect its
 * performance: <i>initial capacity</i> and <i>load factor</i>. The
 * <i>capacity</i> is the number of buckets in the hash table, and the initial
 * capacity is simply the capacity at the time the hash table is created. The
 * <i>load factor</i> is a measure of how full the hash table is allowed to
 * get before its capacity is automatically increased. When the number of
 * entries in the hash table exceeds the product of the load factor and the
 * current capacity, the hash table is <i>rehashed</i> (that is, internal data
 * structures are rebuilt) so that the hash table has approximately twice the
 * number of buckets.
 *
 * <p>As a general rule, the default load factor (.75) offers a good
 * tradeoff between time and space costs. Higher values decrease the
 * space overhead but increase the lookup cost (reflected in most of
 * the operations of the {@code HashMap} class, including
 * {@code get} and {@code put}). The expected number of entries in
 * the map and its load factor should be taken into account when
 * setting its initial capacity, so as to minimize the number of
 * rehash operations. If the initial capacity is greater than the
 * maximum number of entries divided by the load factor, no rehash
 * operations will ever occur.
 *
 * <p>If many mappings are to be stored in a {@code HashMap}
 * instance, creating it with a sufficiently large capacity will allow
 * the mappings to be stored more efficiently than letting it perform
 * automatic rehashing as needed to grow the table. Note that using
 * many keys with the same {@code hashCode()} is a sure way to slow
 * down performance of any hash table. To ameliorate impact, when keys
 * are {@link Comparable}, this class may use comparison order among
 * keys to help break ties.
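 *
 * <p>For illustration only (this example is not part of the original
 * Javadoc): with the default load factor, a map expected to hold about
 * {@code n} entries can be created with an initial capacity of at least
 * {@code n / 0.75} so that it never needs to rehash while being filled:
 * <pre> {@code
 * int expectedEntries = 10_000;
 * Map<String, Integer> m = new HashMap<>((int) Math.ceil(expectedEntries / 0.75));
 * }</pre>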
 *
 * <p><strong>Note that this implementation is not synchronized.</strong>
 * If multiple threads access a hash map concurrently, and at least one of
 * the threads modifies the map structurally, it <i>must</i> be
 * synchronized externally. (A structural modification is any operation
 * that adds or deletes one or more mappings; merely changing the value
 * associated with a key that an instance already contains is not a
 * structural modification.) This is typically accomplished by
 * synchronizing on some object that naturally encapsulates the map.
 *
 * If no such object exists, the map should be "wrapped" using the
 * {@link Collections#synchronizedMap Collections.synchronizedMap}
 * method. This is best done at creation time, to prevent accidental
 * unsynchronized access to the map:<pre>
 *   Map m = Collections.synchronizedMap(new HashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods"
 * are <i>fail-fast</i>: if the map is structurally modified at any time after
 * the iterator is created, in any way except through the iterator's own
 * {@code remove} method, the iterator will throw a
 * {@link ConcurrentModificationException}. Thus, in the face of concurrent
 * modification, the iterator fails quickly and cleanly, rather than risking
 * arbitrary, non-deterministic behavior at an undetermined time in the
 * future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification. Fail-fast iterators
 * throw {@code ConcurrentModificationException} on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
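 *
 * <p>As an illustrative sketch (not part of the original Javadoc), removal
 * during iteration should go through the iterator itself; removing through
 * the map while iterating is a structural modification and will typically
 * trigger the fail-fast behavior described above:
 * <pre> {@code
 * Map<String, Integer> scores = new HashMap<>();
 * scores.put("a", 1);
 * scores.put("b", 2);
 * Iterator<Map.Entry<String, Integer>> it = scores.entrySet().iterator();
 * while (it.hasNext()) {
 *     if (it.next().getValue() < 2)
 *         it.remove();   // safe; scores.remove(...) here would instead cause
 *                        // the next it.next() to throw ConcurrentModificationException
 * }
 * }</pre>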
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author Doug Lea
 * @author Josh Bloch
 * @author Arthur van Hoff
 * @author Neal Gafter
 * @see Object#hashCode()
 * @see Collection
 * @see Map
 * @see TreeMap
 * @see Hashtable
 * @since 1.2
 */
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    @java.io.Serial
    private static final long serialVersionUID = 362498820763181265L;

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node). Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>"
     * type, then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor). The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable. Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins. In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used. Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node. However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use and transitions among plain vs tree modes are
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */
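
    /*
     * Illustrative sketch (added for exposition; not part of the original
     * notes). A key type whose hashCode collides deliberately still gives
     * O(log n) lookups once its bin is treeified, because the keys are
     * Comparable and compareTo orders the tree bin:
     *
     *   final class CollidingKey implements Comparable<CollidingKey> {
     *       final int id;
     *       CollidingKey(int id) { this.id = id; }
     *       public int hashCode() { return 42; }     // every key collides
     *       public boolean equals(Object o) {
     *           return o instanceof CollidingKey k && k.id == id;
     *       }
     *       public int compareTo(CollidingKey o) { return Integer.compare(id, o.id); }
     *   }
     */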
233
234
/**
235
* The default initial capacity - MUST be a power of two.
236
*/
237
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16
238
239
/**
240
* The maximum capacity, used if a higher value is implicitly specified
241
* by either of the constructors with arguments.
242
* MUST be a power of two <= 1<<30.
243
*/
244
static final int MAXIMUM_CAPACITY = 1 << 30;
245
246
/**
247
* The load factor used when none specified in constructor.
248
*/
249
static final float DEFAULT_LOAD_FACTOR = 0.75f;
250
251
/**
252
* The bin count threshold for using a tree rather than list for a
253
* bin. Bins are converted to trees when adding an element to a
254
* bin with at least this many nodes. The value must be greater
255
* than 2 and should be at least 8 to mesh with assumptions in
256
* tree removal about conversion back to plain bins upon
257
* shrinkage.
258
*/
259
static final int TREEIFY_THRESHOLD = 8;
260
261
/**
262
* The bin count threshold for untreeifying a (split) bin during a
263
* resize operation. Should be less than TREEIFY_THRESHOLD, and at
264
* most 6 to mesh with shrinkage detection under removal.
265
*/
266
static final int UNTREEIFY_THRESHOLD = 6;
267
268
/**
269
* The smallest table capacity for which bins may be treeified.
270
* (Otherwise the table is resized if too many nodes in a bin.)
271
* Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
272
* between resizing and treeification thresholds.
273
*/
274
static final int MIN_TREEIFY_CAPACITY = 64;
275
276
/**
277
* Basic hash bin node, used for most entries. (See below for
278
* TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
279
*/
280
static class Node<K,V> implements Map.Entry<K,V> {
281
final int hash;
282
final K key;
283
V value;
284
Node<K,V> next;
285
286
Node(int hash, K key, V value, Node<K,V> next) {
287
this.hash = hash;
288
this.key = key;
289
this.value = value;
290
this.next = next;
291
}
292
293
public final K getKey() { return key; }
294
public final V getValue() { return value; }
295
public final String toString() { return key + "=" + value; }
296
297
public final int hashCode() {
298
return Objects.hashCode(key) ^ Objects.hashCode(value);
299
}
300
301
public final V setValue(V newValue) {
302
V oldValue = value;
303
value = newValue;
304
return oldValue;
305
}
306
307
public final boolean equals(Object o) {
308
if (o == this)
309
return true;
310
311
return o instanceof Map.Entry<?, ?> e
312
&& Objects.equals(key, e.getKey())
313
&& Objects.equals(value, e.getValue());
314
}
315
}
316
317
/* ---------------- Static utilities -------------- */
318
319
/**
320
* Computes key.hashCode() and spreads (XORs) higher bits of hash
321
* to lower. Because the table uses power-of-two masking, sets of
322
* hashes that vary only in bits above the current mask will
323
* always collide. (Among known examples are sets of Float keys
324
* holding consecutive whole numbers in small tables.) So we
325
* apply a transform that spreads the impact of higher bits
326
* downward. There is a tradeoff between speed, utility, and
327
* quality of bit-spreading. Because many common sets of hashes
328
* are already reasonably distributed (so don't benefit from
329
* spreading), and because we use trees to handle large sets of
330
* collisions in bins, we just XOR some shifted bits in the
331
* cheapest possible way to reduce systematic lossage, as well as
332
* to incorporate impact of the highest bits that would otherwise
333
* never be used in index calculations because of table bounds.
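     * For illustration (this sketch is not part of the original comment),
     * the spread value produced here feeds the power-of-two masking used by
     * getNode and putVal to select a bucket:
     * <pre> {@code
     * int h = key.hashCode();
     * int spread = h ^ (h >>> 16);            // what this method computes
     * int bucket = (table.length - 1) & spread;
     * }</pre>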
334
*/
335
static final int hash(Object key) {
336
int h;
337
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
338
}
339
340
/**
341
* Returns x's Class if it is of the form "class C implements
342
* Comparable<C>", else null.
343
*/
344
static Class<?> comparableClassFor(Object x) {
345
if (x instanceof Comparable) {
346
Class<?> c; Type[] ts, as; ParameterizedType p;
347
if ((c = x.getClass()) == String.class) // bypass checks
348
return c;
349
if ((ts = c.getGenericInterfaces()) != null) {
350
for (Type t : ts) {
351
if ((t instanceof ParameterizedType) &&
352
((p = (ParameterizedType) t).getRawType() ==
353
Comparable.class) &&
354
(as = p.getActualTypeArguments()) != null &&
355
as.length == 1 && as[0] == c) // type arg is c
356
return c;
357
}
358
}
359
}
360
return null;
361
}
362
363
/**
364
* Returns k.compareTo(x) if x matches kc (k's screened comparable
365
* class), else 0.
366
*/
367
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
368
static int compareComparables(Class<?> kc, Object k, Object x) {
369
return (x == null || x.getClass() != kc ? 0 :
370
((Comparable)k).compareTo(x));
371
}
372
373
/**
374
* Returns a power of two size for the given target capacity.
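     * For illustration (these examples are not part of the original
     * comment), a few sample results of this rounding:
     * <pre> {@code
     * tableSizeFor(1)   == 1
     * tableSizeFor(16)  == 16
     * tableSizeFor(17)  == 32
     * tableSizeFor(100) == 128
     * }</pre>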
375
*/
376
static final int tableSizeFor(int cap) {
377
int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
378
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
379
}
380
381
/* ---------------- Fields -------------- */
382
383
/**
384
* The table, initialized on first use, and resized as
385
* necessary. When allocated, length is always a power of two.
386
* (We also tolerate length zero in some operations to allow
387
* bootstrapping mechanics that are currently not needed.)
388
*/
389
transient Node<K,V>[] table;
390
391
/**
392
* Holds cached entrySet(). Note that AbstractMap fields are used
393
* for keySet() and values().
394
*/
395
transient Set<Map.Entry<K,V>> entrySet;
396
397
/**
398
* The number of key-value mappings contained in this map.
399
*/
400
transient int size;
401
402
/**
403
* The number of times this HashMap has been structurally modified.

404
* Structural modifications are those that change the number of mappings in
405
* the HashMap or otherwise modify its internal structure (e.g.,
406
* rehash). This field is used to make iterators on Collection-views of
407
* the HashMap fail-fast. (See ConcurrentModificationException).
408
*/
409
transient int modCount;
410
411
/**
412
* The next size value at which to resize (capacity * load factor).
413
*
414
* @serial
415
*/
416
// (The javadoc description is true upon serialization.
417
// Additionally, if the table array has not been allocated, this
418
// field holds the initial array capacity, or zero signifying
419
// DEFAULT_INITIAL_CAPACITY.)
420
int threshold;
421
422
/**
423
* The load factor for the hash table.
424
*
425
* @serial
426
*/
427
final float loadFactor;
428
429
/* ---------------- Public operations -------------- */
430
431
/**
432
* Constructs an empty {@code HashMap} with the specified initial
433
* capacity and load factor.
434
*
435
* @param initialCapacity the initial capacity
436
* @param loadFactor the load factor
437
* @throws IllegalArgumentException if the initial capacity is negative
438
* or the load factor is nonpositive
439
*/
440
public HashMap(int initialCapacity, float loadFactor) {
441
if (initialCapacity < 0)
442
throw new IllegalArgumentException("Illegal initial capacity: " +
443
initialCapacity);
444
if (initialCapacity > MAXIMUM_CAPACITY)
445
initialCapacity = MAXIMUM_CAPACITY;
446
if (loadFactor <= 0 || Float.isNaN(loadFactor))
447
throw new IllegalArgumentException("Illegal load factor: " +
448
loadFactor);
449
this.loadFactor = loadFactor;
450
this.threshold = tableSizeFor(initialCapacity);
451
}
452
453
/**
454
* Constructs an empty {@code HashMap} with the specified initial
455
* capacity and the default load factor (0.75).
456
*
457
* @param initialCapacity the initial capacity.
458
* @throws IllegalArgumentException if the initial capacity is negative.
459
*/
460
public HashMap(int initialCapacity) {
461
this(initialCapacity, DEFAULT_LOAD_FACTOR);
462
}
463
464
/**
465
* Constructs an empty {@code HashMap} with the default initial capacity
466
* (16) and the default load factor (0.75).
467
*/
468
public HashMap() {
469
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
470
}
471
472
/**
473
* Constructs a new {@code HashMap} with the same mappings as the
474
* specified {@code Map}. The {@code HashMap} is created with
475
* default load factor (0.75) and an initial capacity sufficient to
476
* hold the mappings in the specified {@code Map}.
477
*
478
* @param m the map whose mappings are to be placed in this map
479
* @throws NullPointerException if the specified map is null
480
*/
481
public HashMap(Map<? extends K, ? extends V> m) {
482
this.loadFactor = DEFAULT_LOAD_FACTOR;
483
putMapEntries(m, false);
484
}
485
486
/**
487
* Implements Map.putAll and Map constructor.
488
*
489
* @param m the map
490
* @param evict false when initially constructing this map, else
491
* true (relayed to method afterNodeInsertion).
492
*/
493
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
494
int s = m.size();
495
if (s > 0) {
496
if (table == null) { // pre-size
497
float ft = ((float)s / loadFactor) + 1.0F;
498
int t = ((ft < (float)MAXIMUM_CAPACITY) ?
499
(int)ft : MAXIMUM_CAPACITY);
500
if (t > threshold)
501
threshold = tableSizeFor(t);
502
} else {
503
// Because of linked-list bucket constraints, we cannot
504
// expand all at once, but can reduce total resize
505
// effort by repeated doubling now vs later
506
while (s > threshold && table.length < MAXIMUM_CAPACITY)
507
resize();
508
}
509
510
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
511
K key = e.getKey();
512
V value = e.getValue();
513
putVal(hash(key), key, value, false, evict);
514
}
515
}
516
}
517
518
/**
519
* Returns the number of key-value mappings in this map.
520
*
521
* @return the number of key-value mappings in this map
522
*/
523
public int size() {
524
return size;
525
}
526
527
/**
528
* Returns {@code true} if this map contains no key-value mappings.
529
*
530
* @return {@code true} if this map contains no key-value mappings
531
*/
532
public boolean isEmpty() {
533
return size == 0;
534
}
535
536
/**
537
* Returns the value to which the specified key is mapped,
538
* or {@code null} if this map contains no mapping for the key.
539
*
540
* <p>More formally, if this map contains a mapping from a key
541
* {@code k} to a value {@code v} such that {@code (key==null ? k==null :
542
* key.equals(k))}, then this method returns {@code v}; otherwise
543
* it returns {@code null}. (There can be at most one such mapping.)
544
*
545
* <p>A return value of {@code null} does not <i>necessarily</i>
546
* indicate that the map contains no mapping for the key; it's also
547
* possible that the map explicitly maps the key to {@code null}.
548
* The {@link #containsKey containsKey} operation may be used to
549
* distinguish these two cases.
550
*
551
* @see #put(Object, Object)
552
*/
553
public V get(Object key) {
554
Node<K,V> e;
555
return (e = getNode(key)) == null ? null : e.value;
556
}
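
    /*
     * Illustrative sketch (not part of the original source): a null return
     * from get() is ambiguous on its own, since HashMap permits null values;
     * containsKey() distinguishes "absent" from "mapped to null":
     *
     *   Map<String, String> m = new HashMap<>();
     *   m.put("k", null);
     *   m.get("k");                // null
     *   m.get("missing");          // also null
     *   m.containsKey("k");        // true
     *   m.containsKey("missing");  // false
     */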
557
558
/**
559
* Implements Map.get and related methods.
560
*
561
* @param key the key
562
* @return the node, or null if none
563
*/
564
final Node<K,V> getNode(Object key) {
565
Node<K,V>[] tab; Node<K,V> first, e; int n, hash; K k;
566
if ((tab = table) != null && (n = tab.length) > 0 &&
567
(first = tab[(n - 1) & (hash = hash(key))]) != null) {
568
if (first.hash == hash && // always check first node
569
((k = first.key) == key || (key != null && key.equals(k))))
570
return first;
571
if ((e = first.next) != null) {
572
if (first instanceof TreeNode)
573
return ((TreeNode<K,V>)first).getTreeNode(hash, key);
574
do {
575
if (e.hash == hash &&
576
((k = e.key) == key || (key != null && key.equals(k))))
577
return e;
578
} while ((e = e.next) != null);
579
}
580
}
581
return null;
582
}
583
584
/**
585
* Returns {@code true} if this map contains a mapping for the
586
* specified key.
587
*
588
* @param key The key whose presence in this map is to be tested
589
* @return {@code true} if this map contains a mapping for the specified
590
* key.
591
*/
592
public boolean containsKey(Object key) {
593
return getNode(key) != null;
594
}
595
596
/**
597
* Associates the specified value with the specified key in this map.
598
* If the map previously contained a mapping for the key, the old
599
* value is replaced.
600
*
601
* @param key key with which the specified value is to be associated
602
* @param value value to be associated with the specified key
603
* @return the previous value associated with {@code key}, or
604
* {@code null} if there was no mapping for {@code key}.
605
* (A {@code null} return can also indicate that the map
606
* previously associated {@code null} with {@code key}.)
607
*/
608
public V put(K key, V value) {
609
return putVal(hash(key), key, value, false, true);
610
}
611
612
/**
613
* Implements Map.put and related methods.
614
*
615
* @param hash hash for key
616
* @param key the key
617
* @param value the value to put
618
* @param onlyIfAbsent if true, don't change existing value
619
* @param evict if false, the table is in creation mode.
620
* @return previous value, or null if none
621
*/
622
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
623
boolean evict) {
624
Node<K,V>[] tab; Node<K,V> p; int n, i;
625
if ((tab = table) == null || (n = tab.length) == 0)
626
n = (tab = resize()).length;
627
if ((p = tab[i = (n - 1) & hash]) == null)
628
tab[i] = newNode(hash, key, value, null);
629
else {
630
Node<K,V> e; K k;
631
if (p.hash == hash &&
632
((k = p.key) == key || (key != null && key.equals(k))))
633
e = p;
634
else if (p instanceof TreeNode)
635
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
636
else {
637
for (int binCount = 0; ; ++binCount) {
638
if ((e = p.next) == null) {
639
p.next = newNode(hash, key, value, null);
640
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
641
treeifyBin(tab, hash);
642
break;
643
}
644
if (e.hash == hash &&
645
((k = e.key) == key || (key != null && key.equals(k))))
646
break;
647
p = e;
648
}
649
}
650
if (e != null) { // existing mapping for key
651
V oldValue = e.value;
652
if (!onlyIfAbsent || oldValue == null)
653
e.value = value;
654
afterNodeAccess(e);
655
return oldValue;
656
}
657
}
658
++modCount;
659
if (++size > threshold)
660
resize();
661
afterNodeInsertion(evict);
662
return null;
663
}
664
665
/**
666
* Initializes or doubles table size. If null, allocates in
667
* accord with initial capacity target held in field threshold.
668
* Otherwise, because we are using power-of-two expansion, the
669
* elements from each bin must either stay at same index, or move
670
* with a power of two offset in the new table.
671
*
672
* @return the table
673
*/
674
final Node<K,V>[] resize() {
675
Node<K,V>[] oldTab = table;
676
int oldCap = (oldTab == null) ? 0 : oldTab.length;
677
int oldThr = threshold;
678
int newCap, newThr = 0;
679
if (oldCap > 0) {
680
if (oldCap >= MAXIMUM_CAPACITY) {
681
threshold = Integer.MAX_VALUE;
682
return oldTab;
683
}
684
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
685
oldCap >= DEFAULT_INITIAL_CAPACITY)
686
newThr = oldThr << 1; // double threshold
687
}
688
else if (oldThr > 0) // initial capacity was placed in threshold
689
newCap = oldThr;
690
else { // zero initial threshold signifies using defaults
691
newCap = DEFAULT_INITIAL_CAPACITY;
692
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
693
}
694
if (newThr == 0) {
695
float ft = (float)newCap * loadFactor;
696
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
697
(int)ft : Integer.MAX_VALUE);
698
}
699
threshold = newThr;
700
@SuppressWarnings({"rawtypes","unchecked"})
701
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
702
table = newTab;
703
if (oldTab != null) {
704
for (int j = 0; j < oldCap; ++j) {
705
Node<K,V> e;
706
if ((e = oldTab[j]) != null) {
707
oldTab[j] = null;
708
if (e.next == null)
709
newTab[e.hash & (newCap - 1)] = e;
710
else if (e instanceof TreeNode)
711
((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
712
else { // preserve order
713
Node<K,V> loHead = null, loTail = null;
714
Node<K,V> hiHead = null, hiTail = null;
715
Node<K,V> next;
716
do {
717
next = e.next;
718
if ((e.hash & oldCap) == 0) {
719
if (loTail == null)
720
loHead = e;
721
else
722
loTail.next = e;
723
loTail = e;
724
}
725
else {
726
if (hiTail == null)
727
hiHead = e;
728
else
729
hiTail.next = e;
730
hiTail = e;
731
}
732
} while ((e = next) != null);
733
if (loTail != null) {
734
loTail.next = null;
735
newTab[j] = loHead;
736
}
737
if (hiTail != null) {
738
hiTail.next = null;
739
newTab[j + oldCap] = hiHead;
740
}
741
}
742
}
743
}
744
}
745
return newTab;
746
}
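
    /*
     * Illustrative sketch (not part of the original source) of the
     * power-of-two split performed above: when the table doubles from
     * oldCap to 2 * oldCap, a node in bucket j either stays at j or moves
     * to j + oldCap, depending on a single bit of its hash:
     *
     *   int oldCap = 16;
     *   int hash = 42;                         // 0b101010
     *   int oldIndex = hash & (oldCap - 1);    // 10
     *   int newIndex = ((hash & oldCap) == 0)  // bit 4 is clear,
     *           ? oldIndex                     // so 42 stays at index 10
     *           : oldIndex + oldCap;           // (58 would move to 26)
     */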
747
748
/**
749
* Replaces all linked nodes in bin at index for given hash unless
750
* table is too small, in which case resizes instead.
751
*/
752
final void treeifyBin(Node<K,V>[] tab, int hash) {
753
int n, index; Node<K,V> e;
754
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
755
resize();
756
else if ((e = tab[index = (n - 1) & hash]) != null) {
757
TreeNode<K,V> hd = null, tl = null;
758
do {
759
TreeNode<K,V> p = replacementTreeNode(e, null);
760
if (tl == null)
761
hd = p;
762
else {
763
p.prev = tl;
764
tl.next = p;
765
}
766
tl = p;
767
} while ((e = e.next) != null);
768
if ((tab[index] = hd) != null)
769
hd.treeify(tab);
770
}
771
}
772
773
/**
774
* Copies all of the mappings from the specified map to this map.
775
* These mappings will replace any mappings that this map had for
776
* any of the keys currently in the specified map.
777
*
778
* @param m mappings to be stored in this map
779
* @throws NullPointerException if the specified map is null
780
*/
781
public void putAll(Map<? extends K, ? extends V> m) {
782
putMapEntries(m, true);
783
}
784
785
/**
786
* Removes the mapping for the specified key from this map if present.
787
*
788
* @param key key whose mapping is to be removed from the map
789
* @return the previous value associated with {@code key}, or
790
* {@code null} if there was no mapping for {@code key}.
791
* (A {@code null} return can also indicate that the map
792
* previously associated {@code null} with {@code key}.)
793
*/
794
public V remove(Object key) {
795
Node<K,V> e;
796
return (e = removeNode(hash(key), key, null, false, true)) == null ?
797
null : e.value;
798
}
799
800
/**
801
* Implements Map.remove and related methods.
802
*
803
* @param hash hash for key
804
* @param key the key
805
* @param value the value to match if matchValue, else ignored
806
* @param matchValue if true only remove if value is equal
807
* @param movable if false do not move other nodes while removing
808
* @return the node, or null if none
809
*/
810
final Node<K,V> removeNode(int hash, Object key, Object value,
811
boolean matchValue, boolean movable) {
812
Node<K,V>[] tab; Node<K,V> p; int n, index;
813
if ((tab = table) != null && (n = tab.length) > 0 &&
814
(p = tab[index = (n - 1) & hash]) != null) {
815
Node<K,V> node = null, e; K k; V v;
816
if (p.hash == hash &&
817
((k = p.key) == key || (key != null && key.equals(k))))
818
node = p;
819
else if ((e = p.next) != null) {
820
if (p instanceof TreeNode)
821
node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
822
else {
823
do {
824
if (e.hash == hash &&
825
((k = e.key) == key ||
826
(key != null && key.equals(k)))) {
827
node = e;
828
break;
829
}
830
p = e;
831
} while ((e = e.next) != null);
832
}
833
}
834
if (node != null && (!matchValue || (v = node.value) == value ||
835
(value != null && value.equals(v)))) {
836
if (node instanceof TreeNode)
837
((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
838
else if (node == p)
839
tab[index] = node.next;
840
else
841
p.next = node.next;
842
++modCount;
843
--size;
844
afterNodeRemoval(node);
845
return node;
846
}
847
}
848
return null;
849
}
850
851
/**
852
* Removes all of the mappings from this map.
853
* The map will be empty after this call returns.
854
*/
855
public void clear() {
856
Node<K,V>[] tab;
857
modCount++;
858
if ((tab = table) != null && size > 0) {
859
size = 0;
860
for (int i = 0; i < tab.length; ++i)
861
tab[i] = null;
862
}
863
}
864
865
/**
866
* Returns {@code true} if this map maps one or more keys to the
867
* specified value.
868
*
869
* @param value value whose presence in this map is to be tested
870
* @return {@code true} if this map maps one or more keys to the
871
* specified value
872
*/
873
public boolean containsValue(Object value) {
874
Node<K,V>[] tab; V v;
875
if ((tab = table) != null && size > 0) {
876
for (Node<K,V> e : tab) {
877
for (; e != null; e = e.next) {
878
if ((v = e.value) == value ||
879
(value != null && value.equals(v)))
880
return true;
881
}
882
}
883
}
884
return false;
885
}
886
887
/**
888
* Returns a {@link Set} view of the keys contained in this map.
889
* The set is backed by the map, so changes to the map are
890
* reflected in the set, and vice-versa. If the map is modified
891
* while an iteration over the set is in progress (except through
892
* the iterator's own {@code remove} operation), the results of
893
* the iteration are undefined. The set supports element removal,
894
* which removes the corresponding mapping from the map, via the
895
* {@code Iterator.remove}, {@code Set.remove},
896
* {@code removeAll}, {@code retainAll}, and {@code clear}
897
* operations. It does not support the {@code add} or {@code addAll}
898
* operations.
899
*
900
* @return a set view of the keys contained in this map
901
*/
902
public Set<K> keySet() {
903
Set<K> ks = keySet;
904
if (ks == null) {
905
ks = new KeySet();
906
keySet = ks;
907
}
908
return ks;
909
}
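
    /*
     * Illustrative sketch (not part of the original source): the key-set
     * view writes through to the map for removal, but does not support
     * adding keys:
     *
     *   Map<String, Integer> m = new HashMap<>(Map.of("a", 1, "b", 2));
     *   m.keySet().remove("a");   // also removes the mapping a=1
     *   m.keySet().add("c");      // throws UnsupportedOperationException
     */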
910
911
/**
912
* Prepares the array for {@link Collection#toArray(Object[])} implementation.
913
* If supplied array is smaller than this map size, a new array is allocated.
914
* If supplied array is bigger than this map size, a null is written at size index.
915
*
916
* @param a an original array passed to {@code toArray()} method
917
* @param <T> type of array elements
918
* @return an array ready to be filled and returned from {@code toArray()} method.
919
*/
920
@SuppressWarnings("unchecked")
921
final <T> T[] prepareArray(T[] a) {
922
int size = this.size;
923
if (a.length < size) {
924
return (T[]) java.lang.reflect.Array
925
.newInstance(a.getClass().getComponentType(), size);
926
}
927
if (a.length > size) {
928
a[size] = null;
929
}
930
return a;
931
}
932
933
/**
934
* Fills an array with this map keys and returns it. This method assumes
935
* that input array is big enough to fit all the keys. Use
936
* {@link #prepareArray(Object[])} to ensure this.
937
*
938
* @param a an array to fill
939
* @param <T> type of array elements
940
* @return supplied array
941
*/
942
<T> T[] keysToArray(T[] a) {
943
Object[] r = a;
944
Node<K,V>[] tab;
945
int idx = 0;
946
if (size > 0 && (tab = table) != null) {
947
for (Node<K,V> e : tab) {
948
for (; e != null; e = e.next) {
949
r[idx++] = e.key;
950
}
951
}
952
}
953
return a;
954
}
955
956
/**
957
* Fills an array with this map values and returns it. This method assumes
958
* that input array is big enough to fit all the values. Use
959
* {@link #prepareArray(Object[])} to ensure this.
960
*
961
* @param a an array to fill
962
* @param <T> type of array elements
963
* @return supplied array
964
*/
965
<T> T[] valuesToArray(T[] a) {
966
Object[] r = a;
967
Node<K,V>[] tab;
968
int idx = 0;
969
if (size > 0 && (tab = table) != null) {
970
for (Node<K,V> e : tab) {
971
for (; e != null; e = e.next) {
972
r[idx++] = e.value;
973
}
974
}
975
}
976
return a;
977
}
978
979
final class KeySet extends AbstractSet<K> {
980
public final int size() { return size; }
981
public final void clear() { HashMap.this.clear(); }
982
public final Iterator<K> iterator() { return new KeyIterator(); }
983
public final boolean contains(Object o) { return containsKey(o); }
984
public final boolean remove(Object key) {
985
return removeNode(hash(key), key, null, false, true) != null;
986
}
987
public final Spliterator<K> spliterator() {
988
return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
989
}
990
991
public Object[] toArray() {
992
return keysToArray(new Object[size]);
993
}
994
995
public <T> T[] toArray(T[] a) {
996
return keysToArray(prepareArray(a));
997
}
998
999
public final void forEach(Consumer<? super K> action) {
1000
Node<K,V>[] tab;
1001
if (action == null)
1002
throw new NullPointerException();
1003
if (size > 0 && (tab = table) != null) {
1004
int mc = modCount;
1005
for (Node<K,V> e : tab) {
1006
for (; e != null; e = e.next)
1007
action.accept(e.key);
1008
}
1009
if (modCount != mc)
1010
throw new ConcurrentModificationException();
1011
}
1012
}
1013
}
1014
1015
/**
1016
* Returns a {@link Collection} view of the values contained in this map.
1017
* The collection is backed by the map, so changes to the map are
1018
* reflected in the collection, and vice-versa. If the map is
1019
* modified while an iteration over the collection is in progress
1020
* (except through the iterator's own {@code remove} operation),
1021
* the results of the iteration are undefined. The collection
1022
* supports element removal, which removes the corresponding
1023
* mapping from the map, via the {@code Iterator.remove},
1024
* {@code Collection.remove}, {@code removeAll},
1025
* {@code retainAll} and {@code clear} operations. It does not
1026
* support the {@code add} or {@code addAll} operations.
1027
*
1028
* @return a view of the values contained in this map
1029
*/
1030
public Collection<V> values() {
1031
Collection<V> vs = values;
1032
if (vs == null) {
1033
vs = new Values();
1034
values = vs;
1035
}
1036
return vs;
1037
}
1038
1039
final class Values extends AbstractCollection<V> {
1040
public final int size() { return size; }
1041
public final void clear() { HashMap.this.clear(); }
1042
public final Iterator<V> iterator() { return new ValueIterator(); }
1043
public final boolean contains(Object o) { return containsValue(o); }
1044
public final Spliterator<V> spliterator() {
1045
return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
1046
}
1047
1048
public Object[] toArray() {
1049
return valuesToArray(new Object[size]);
1050
}
1051
1052
public <T> T[] toArray(T[] a) {
1053
return valuesToArray(prepareArray(a));
1054
}
1055
1056
public final void forEach(Consumer<? super V> action) {
1057
Node<K,V>[] tab;
1058
if (action == null)
1059
throw new NullPointerException();
1060
if (size > 0 && (tab = table) != null) {
1061
int mc = modCount;
1062
for (Node<K,V> e : tab) {
1063
for (; e != null; e = e.next)
1064
action.accept(e.value);
1065
}
1066
if (modCount != mc)
1067
throw new ConcurrentModificationException();
1068
}
1069
}
1070
}
1071
1072
/**
1073
* Returns a {@link Set} view of the mappings contained in this map.
1074
* The set is backed by the map, so changes to the map are
1075
* reflected in the set, and vice-versa. If the map is modified
1076
* while an iteration over the set is in progress (except through
1077
* the iterator's own {@code remove} operation, or through the
1078
* {@code setValue} operation on a map entry returned by the
1079
* iterator) the results of the iteration are undefined. The set
1080
* supports element removal, which removes the corresponding
1081
* mapping from the map, via the {@code Iterator.remove},
1082
* {@code Set.remove}, {@code removeAll}, {@code retainAll} and
1083
* {@code clear} operations. It does not support the
1084
* {@code add} or {@code addAll} operations.
1085
*
1086
* @return a set view of the mappings contained in this map
1087
*/
1088
public Set<Map.Entry<K,V>> entrySet() {
1089
Set<Map.Entry<K,V>> es;
1090
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
1091
}
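
    /*
     * Illustrative sketch (not part of the original source): entries
     * obtained from this view write through to the map, so setValue() is a
     * non-structural way to update values while iterating:
     *
     *   Map<String, Integer> m = new HashMap<>(Map.of("a", 1, "b", 2));
     *   for (Map.Entry<String, Integer> e : m.entrySet())
     *       e.setValue(e.getValue() * 10);   // m is now {a=10, b=20}
     */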
1092
1093
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1094
public final int size() { return size; }
1095
public final void clear() { HashMap.this.clear(); }
1096
public final Iterator<Map.Entry<K,V>> iterator() {
1097
return new EntryIterator();
1098
}
1099
public final boolean contains(Object o) {
1100
if (!(o instanceof Map.Entry<?, ?> e))
1101
return false;
1102
Object key = e.getKey();
1103
Node<K,V> candidate = getNode(key);
1104
return candidate != null && candidate.equals(e);
1105
}
1106
public final boolean remove(Object o) {
1107
if (o instanceof Map.Entry<?, ?> e) {
1108
Object key = e.getKey();
1109
Object value = e.getValue();
1110
return removeNode(hash(key), key, value, true, true) != null;
1111
}
1112
return false;
1113
}
1114
public final Spliterator<Map.Entry<K,V>> spliterator() {
1115
return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
1116
}
1117
public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
1118
Node<K,V>[] tab;
1119
if (action == null)
1120
throw new NullPointerException();
1121
if (size > 0 && (tab = table) != null) {
1122
int mc = modCount;
1123
for (Node<K,V> e : tab) {
1124
for (; e != null; e = e.next)
1125
action.accept(e);
1126
}
1127
if (modCount != mc)
1128
throw new ConcurrentModificationException();
1129
}
1130
}
1131
}
1132
1133
// Overrides of JDK8 Map extension methods
1134
1135
@Override
1136
public V getOrDefault(Object key, V defaultValue) {
1137
Node<K,V> e;
1138
return (e = getNode(key)) == null ? defaultValue : e.value;
1139
}
1140
1141
@Override
1142
public V putIfAbsent(K key, V value) {
1143
return putVal(hash(key), key, value, true, true);
1144
}
1145
1146
@Override
1147
public boolean remove(Object key, Object value) {
1148
return removeNode(hash(key), key, value, true, true) != null;
1149
}
1150
1151
@Override
1152
public boolean replace(K key, V oldValue, V newValue) {
1153
Node<K,V> e; V v;
1154
if ((e = getNode(key)) != null &&
1155
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
1156
e.value = newValue;
1157
afterNodeAccess(e);
1158
return true;
1159
}
1160
return false;
1161
}
1162
1163
@Override
1164
public V replace(K key, V value) {
1165
Node<K,V> e;
1166
if ((e = getNode(key)) != null) {
1167
V oldValue = e.value;
1168
e.value = value;
1169
afterNodeAccess(e);
1170
return oldValue;
1171
}
1172
return null;
1173
}
1174
1175
/**
1176
* {@inheritDoc}
1177
*
1178
* <p>This method will, on a best-effort basis, throw a
1179
* {@link ConcurrentModificationException} if it is detected that the
1180
* mapping function modifies this map during computation.
1181
*
1182
* @throws ConcurrentModificationException if it is detected that the
1183
* mapping function modified this map
1184
*/
1185
@Override
1186
public V computeIfAbsent(K key,
1187
Function<? super K, ? extends V> mappingFunction) {
1188
if (mappingFunction == null)
1189
throw new NullPointerException();
1190
int hash = hash(key);
1191
Node<K,V>[] tab; Node<K,V> first; int n, i;
1192
int binCount = 0;
1193
TreeNode<K,V> t = null;
1194
Node<K,V> old = null;
1195
if (size > threshold || (tab = table) == null ||
1196
(n = tab.length) == 0)
1197
n = (tab = resize()).length;
1198
if ((first = tab[i = (n - 1) & hash]) != null) {
1199
if (first instanceof TreeNode)
1200
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1201
else {
1202
Node<K,V> e = first; K k;
1203
do {
1204
if (e.hash == hash &&
1205
((k = e.key) == key || (key != null && key.equals(k)))) {
1206
old = e;
1207
break;
1208
}
1209
++binCount;
1210
} while ((e = e.next) != null);
1211
}
1212
V oldValue;
1213
if (old != null && (oldValue = old.value) != null) {
1214
afterNodeAccess(old);
1215
return oldValue;
1216
}
1217
}
1218
int mc = modCount;
1219
V v = mappingFunction.apply(key);
1220
if (mc != modCount) { throw new ConcurrentModificationException(); }
1221
if (v == null) {
1222
return null;
1223
} else if (old != null) {
1224
old.value = v;
1225
afterNodeAccess(old);
1226
return v;
1227
}
1228
else if (t != null)
1229
t.putTreeVal(this, tab, hash, key, v);
1230
else {
1231
tab[i] = newNode(hash, key, v, first);
1232
if (binCount >= TREEIFY_THRESHOLD - 1)
1233
treeifyBin(tab, hash);
1234
}
1235
modCount = mc + 1;
1236
++size;
1237
afterNodeInsertion(true);
1238
return v;
1239
}
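
    /*
     * Illustrative sketch (not part of the original source): a typical use
     * of computeIfAbsent is lazy initialization of a per-key value. The
     * mapping function must not modify this map itself; doing so is
     * detected on a best-effort basis and raises
     * ConcurrentModificationException, as documented above.
     *
     *   Map<String, List<String>> index = new HashMap<>();
     *   index.computeIfAbsent("jdk", k -> new ArrayList<>()).add("HashMap");
     */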
1240
1241
/**
1242
* {@inheritDoc}
1243
*
1244
* <p>This method will, on a best-effort basis, throw a
1245
* {@link ConcurrentModificationException} if it is detected that the
1246
* remapping function modifies this map during computation.
1247
*
1248
* @throws ConcurrentModificationException if it is detected that the
1249
* remapping function modified this map
1250
*/
1251
@Override
1252
public V computeIfPresent(K key,
1253
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1254
if (remappingFunction == null)
1255
throw new NullPointerException();
1256
Node<K,V> e; V oldValue;
1257
if ((e = getNode(key)) != null &&
1258
(oldValue = e.value) != null) {
1259
int mc = modCount;
1260
V v = remappingFunction.apply(key, oldValue);
1261
if (mc != modCount) { throw new ConcurrentModificationException(); }
1262
if (v != null) {
1263
e.value = v;
1264
afterNodeAccess(e);
1265
return v;
1266
}
1267
else {
1268
int hash = hash(key);
1269
removeNode(hash, key, null, false, true);
1270
}
1271
}
1272
return null;
1273
}
1274
1275
/**
1276
* {@inheritDoc}
1277
*
1278
* <p>This method will, on a best-effort basis, throw a
1279
* {@link ConcurrentModificationException} if it is detected that the
1280
* remapping function modifies this map during computation.
1281
*
1282
* @throws ConcurrentModificationException if it is detected that the
1283
* remapping function modified this map
1284
*/
1285
@Override
1286
public V compute(K key,
1287
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1288
if (remappingFunction == null)
1289
throw new NullPointerException();
1290
int hash = hash(key);
1291
Node<K,V>[] tab; Node<K,V> first; int n, i;
1292
int binCount = 0;
1293
TreeNode<K,V> t = null;
1294
Node<K,V> old = null;
1295
if (size > threshold || (tab = table) == null ||
1296
(n = tab.length) == 0)
1297
n = (tab = resize()).length;
1298
if ((first = tab[i = (n - 1) & hash]) != null) {
1299
if (first instanceof TreeNode)
1300
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1301
else {
1302
Node<K,V> e = first; K k;
1303
do {
1304
if (e.hash == hash &&
1305
((k = e.key) == key || (key != null && key.equals(k)))) {
1306
old = e;
1307
break;
1308
}
1309
++binCount;
1310
} while ((e = e.next) != null);
1311
}
1312
}
1313
V oldValue = (old == null) ? null : old.value;
1314
int mc = modCount;
1315
V v = remappingFunction.apply(key, oldValue);
1316
if (mc != modCount) { throw new ConcurrentModificationException(); }
1317
if (old != null) {
1318
if (v != null) {
1319
old.value = v;
1320
afterNodeAccess(old);
1321
}
1322
else
1323
removeNode(hash, key, null, false, true);
1324
}
1325
else if (v != null) {
1326
if (t != null)
1327
t.putTreeVal(this, tab, hash, key, v);
1328
else {
1329
tab[i] = newNode(hash, key, v, first);
1330
if (binCount >= TREEIFY_THRESHOLD - 1)
1331
treeifyBin(tab, hash);
1332
}
1333
modCount = mc + 1;
1334
++size;
1335
afterNodeInsertion(true);
1336
}
1337
return v;
1338
}
1339
1340
/**
1341
* {@inheritDoc}
1342
*
1343
* <p>This method will, on a best-effort basis, throw a
1344
* {@link ConcurrentModificationException} if it is detected that the
1345
* remapping function modifies this map during computation.
1346
*
1347
* @throws ConcurrentModificationException if it is detected that the
1348
* remapping function modified this map
1349
*/
1350
@Override
1351
public V merge(K key, V value,
1352
BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1353
if (value == null || remappingFunction == null)
1354
throw new NullPointerException();
1355
int hash = hash(key);
1356
Node<K,V>[] tab; Node<K,V> first; int n, i;
1357
int binCount = 0;
1358
TreeNode<K,V> t = null;
1359
Node<K,V> old = null;
1360
if (size > threshold || (tab = table) == null ||
1361
(n = tab.length) == 0)
1362
n = (tab = resize()).length;
1363
if ((first = tab[i = (n - 1) & hash]) != null) {
1364
if (first instanceof TreeNode)
1365
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1366
else {
1367
Node<K,V> e = first; K k;
1368
do {
1369
if (e.hash == hash &&
1370
((k = e.key) == key || (key != null && key.equals(k)))) {
1371
old = e;
1372
break;
1373
}
1374
++binCount;
1375
} while ((e = e.next) != null);
1376
}
1377
}
1378
if (old != null) {
1379
V v;
1380
if (old.value != null) {
1381
int mc = modCount;
1382
v = remappingFunction.apply(old.value, value);
1383
if (mc != modCount) {
1384
throw new ConcurrentModificationException();
1385
}
1386
} else {
1387
v = value;
1388
}
1389
if (v != null) {
1390
old.value = v;
1391
afterNodeAccess(old);
1392
}
1393
else
1394
removeNode(hash, key, null, false, true);
1395
return v;
1396
} else {
1397
if (t != null)
1398
t.putTreeVal(this, tab, hash, key, value);
1399
else {
1400
tab[i] = newNode(hash, key, value, first);
1401
if (binCount >= TREEIFY_THRESHOLD - 1)
1402
treeifyBin(tab, hash);
1403
}
1404
++modCount;
1405
++size;
1406
afterNodeInsertion(true);
1407
return value;
1408
}
1409
}
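
    /*
     * Illustrative sketch (not part of the original source): merge() is the
     * idiomatic way to accumulate into a map, for example counting
     * occurrences. The first merge for a key stores the given value; later
     * merges combine the old value with it using the remapping function:
     *
     *   Map<String, Integer> counts = new HashMap<>();
     *   for (String word : new String[] {"a", "b", "a"})
     *       counts.merge(word, 1, Integer::sum);   // {a=2, b=1}
     */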
1410
1411
@Override
1412
public void forEach(BiConsumer<? super K, ? super V> action) {
1413
Node<K,V>[] tab;
1414
if (action == null)
1415
throw new NullPointerException();
1416
if (size > 0 && (tab = table) != null) {
1417
int mc = modCount;
1418
for (Node<K,V> e : tab) {
1419
for (; e != null; e = e.next)
1420
action.accept(e.key, e.value);
1421
}
1422
if (modCount != mc)
1423
throw new ConcurrentModificationException();
1424
}
1425
}
1426
1427
@Override
1428
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1429
Node<K,V>[] tab;
1430
if (function == null)
1431
throw new NullPointerException();
1432
if (size > 0 && (tab = table) != null) {
1433
int mc = modCount;
1434
for (Node<K,V> e : tab) {
1435
for (; e != null; e = e.next) {
1436
e.value = function.apply(e.key, e.value);
1437
}
1438
}
1439
if (modCount != mc)
1440
throw new ConcurrentModificationException();
1441
}
1442
}
1443
1444
/* ------------------------------------------------------------ */
1445
// Cloning and serialization
1446
1447
/**
1448
* Returns a shallow copy of this {@code HashMap} instance: the keys and
1449
* values themselves are not cloned.
1450
*
1451
* @return a shallow copy of this map
1452
*/
1453
@SuppressWarnings("unchecked")
1454
@Override
1455
public Object clone() {
1456
HashMap<K,V> result;
1457
try {
1458
result = (HashMap<K,V>)super.clone();
1459
} catch (CloneNotSupportedException e) {
1460
// this shouldn't happen, since we are Cloneable
1461
throw new InternalError(e);
1462
}
1463
result.reinitialize();
1464
result.putMapEntries(this, false);
1465
return result;
1466
}
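
    /*
     * Illustrative sketch (not part of the original source): clone() copies
     * the table and mappings but not the keys and values themselves, so the
     * copy shares value object references with the original:
     *
     *   HashMap<String, List<String>> m = new HashMap<>();
     *   m.put("k", new ArrayList<>(List.of("x")));
     *   HashMap<String, List<String>> copy = (HashMap<String, List<String>>) m.clone();
     *   copy.get("k").add("y");      // visible through m.get("k") as well
     *   copy.put("k2", List.of());   // structural change to the copy only
     */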
1467
1468
// These methods are also used when serializing HashSets
1469
final float loadFactor() { return loadFactor; }
1470
final int capacity() {
1471
return (table != null) ? table.length :
1472
(threshold > 0) ? threshold :
1473
DEFAULT_INITIAL_CAPACITY;
1474
}
1475
1476
/**
1477
* Saves this map to a stream (that is, serializes it).
1478
*
1479
* @param s the stream
1480
* @throws IOException if an I/O error occurs
1481
* @serialData The <i>capacity</i> of the HashMap (the length of the
1482
* bucket array) is emitted (int), followed by the
1483
* <i>size</i> (an int, the number of key-value
1484
* mappings), followed by the key (Object) and value (Object)
1485
* for each key-value mapping. The key-value mappings are
1486
* emitted in no particular order.
1487
*/
1488
@java.io.Serial
1489
private void writeObject(java.io.ObjectOutputStream s)
1490
throws IOException {
1491
int buckets = capacity();
1492
// Write out the threshold, loadfactor, and any hidden stuff
1493
s.defaultWriteObject();
1494
s.writeInt(buckets);
1495
s.writeInt(size);
1496
internalWriteEntries(s);
1497
}
1498
1499
/**
1500
* Reconstitutes this map from a stream (that is, deserializes it).
1501
* @param s the stream
1502
* @throws ClassNotFoundException if the class of a serialized object
1503
* could not be found
1504
* @throws IOException if an I/O error occurs
1505
*/
1506
@java.io.Serial
1507
private void readObject(java.io.ObjectInputStream s)
1508
throws IOException, ClassNotFoundException {
1509
// Read in the threshold (ignored), loadfactor, and any hidden stuff
1510
s.defaultReadObject();
1511
reinitialize();
1512
if (loadFactor <= 0 || Float.isNaN(loadFactor))
1513
throw new InvalidObjectException("Illegal load factor: " +
1514
loadFactor);
1515
s.readInt(); // Read and ignore number of buckets
1516
int mappings = s.readInt(); // Read number of mappings (size)
1517
if (mappings < 0)
1518
throw new InvalidObjectException("Illegal mappings count: " +
1519
mappings);
1520
else if (mappings > 0) { // (if zero, use defaults)
1521
// Size the table using given load factor only if within
1522
// range of 0.25...4.0
1523
float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
1524
float fc = (float)mappings / lf + 1.0f;
1525
int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
1526
DEFAULT_INITIAL_CAPACITY :
1527
(fc >= MAXIMUM_CAPACITY) ?
1528
MAXIMUM_CAPACITY :
1529
tableSizeFor((int)fc));
1530
float ft = (float)cap * lf;
1531
threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
1532
(int)ft : Integer.MAX_VALUE);
1533
1534
// Check Map.Entry[].class since it's the nearest public type to
1535
// what we're actually creating.
1536
SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap);
1537
@SuppressWarnings({"rawtypes","unchecked"})
1538
Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
1539
table = tab;
1540
1541
// Read the keys and values, and put the mappings in the HashMap
1542
for (int i = 0; i < mappings; i++) {
1543
@SuppressWarnings("unchecked")
1544
K key = (K) s.readObject();
1545
@SuppressWarnings("unchecked")
1546
V value = (V) s.readObject();
1547
putVal(hash(key), key, value, false, false);
1548
}
1549
}
1550
}
1551
1552
/* ------------------------------------------------------------ */
1553
// iterators
1554
1555
abstract class HashIterator {
1556
Node<K,V> next; // next entry to return
1557
Node<K,V> current; // current entry
1558
int expectedModCount; // for fast-fail
1559
int index; // current slot
1560
1561
HashIterator() {
1562
expectedModCount = modCount;
1563
Node<K,V>[] t = table;
1564
current = next = null;
1565
index = 0;
1566
if (t != null && size > 0) { // advance to first entry
1567
do {} while (index < t.length && (next = t[index++]) == null);
1568
}
1569
}
1570
1571
public final boolean hasNext() {
1572
return next != null;
1573
}
1574
1575
final Node<K,V> nextNode() {
1576
Node<K,V>[] t;
1577
Node<K,V> e = next;
1578
if (modCount != expectedModCount)
1579
throw new ConcurrentModificationException();
1580
if (e == null)
1581
throw new NoSuchElementException();
1582
if ((next = (current = e).next) == null && (t = table) != null) {
1583
do {} while (index < t.length && (next = t[index++]) == null);
1584
}
1585
return e;
1586
}
1587
1588
public final void remove() {
1589
Node<K,V> p = current;
1590
if (p == null)
1591
throw new IllegalStateException();
1592
if (modCount != expectedModCount)
1593
throw new ConcurrentModificationException();
1594
current = null;
1595
removeNode(p.hash, p.key, null, false, false);
1596
expectedModCount = modCount;
1597
}
1598
}
1599
1600
final class KeyIterator extends HashIterator
1601
implements Iterator<K> {
1602
public final K next() { return nextNode().key; }
1603
}
1604
1605
final class ValueIterator extends HashIterator
1606
implements Iterator<V> {
1607
public final V next() { return nextNode().value; }
1608
}
1609
1610
final class EntryIterator extends HashIterator
1611
implements Iterator<Map.Entry<K,V>> {
1612
public final Map.Entry<K,V> next() { return nextNode(); }
1613
}
1614
1615
/* ------------------------------------------------------------ */
1616
// spliterators
1617
1618
static class HashMapSpliterator<K,V> {
1619
final HashMap<K,V> map;
1620
Node<K,V> current; // current node
1621
int index; // current index, modified on advance/split
1622
int fence; // one past last index
1623
int est; // size estimate
1624
int expectedModCount; // for comodification checks
1625
1626
HashMapSpliterator(HashMap<K,V> m, int origin,
1627
int fence, int est,
1628
int expectedModCount) {
1629
this.map = m;
1630
this.index = origin;
1631
this.fence = fence;
1632
this.est = est;
1633
this.expectedModCount = expectedModCount;
1634
}
1635
1636
final int getFence() { // initialize fence and size on first use
1637
int hi;
1638
if ((hi = fence) < 0) {
1639
HashMap<K,V> m = map;
1640
est = m.size;
1641
expectedModCount = m.modCount;
1642
Node<K,V>[] tab = m.table;
1643
hi = fence = (tab == null) ? 0 : tab.length;
1644
}
1645
return hi;
1646
}
1647
1648
public final long estimateSize() {
1649
getFence(); // force init
1650
return (long) est;
1651
}
1652
}

    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                     expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
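
    /*
     * trySplit() above halves the remaining bucket range [index, fence) and
     * halves the size estimate; this is the mechanism that lets parallel
     * streams over the keys divide work, e.g. (illustrative only, "map" and
     * "process" are hypothetical and not defined in this file):
     *
     *   map.keySet().parallelStream().forEach(k -> process(k));
     *
     * A split is refused (null is returned) once the range is down to a
     * single bucket, or once traversal has already descended into a bucket
     * (current != null).
     */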

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
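
    /*
     * On characteristics(): SIZED is reported only while the estimate still
     * matches the map (in particular, before any trySplit has halved est),
     * and DISTINCT is reported for key and entry spliterators but not for
     * the value spliterator, since a map may contain duplicate values.
     */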

    /* ------------------------------------------------------------ */
    // LinkedHashMap support


    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */

    // Create a regular (non-tree) node
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }

    // For conversion from TreeNodes to plain nodes
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }

    // Create a tree bin node
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }

    // For treeifyBin
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }

    /**
     * Reset to initial default state.  Called by clone and readObject.
     */
    void reinitialize() {
        table = null;
        entrySet = null;
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }

    // Callbacks to allow LinkedHashMap post-actions
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }

    // Called only from writeObject, to ensure compatible ordering.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            for (Node<K,V> e : tab) {
                for (; e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        }
    }
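
    /*
     * Serialization note: writeObject delegates to internalWriteEntries,
     * which streams the key/value pairs in bucket order, while readObject
     * (earlier in this file) rebuilds the table by calling putVal for each
     * pair, so bucket placement is recomputed on the deserializing side
     * rather than taken from the serialized form.
     */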

    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }
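
        /*
         * Why the root should be first in its bin: bin traversal always
         * starts at the node stored in tab[index], and treeified bins still
         * keep their next/prev list for iteration and untreeify. Putting the
         * root there lets tree lookups start directly at the root instead of
         * first climbing parent links (see root() above); the slot is found
         * with the same power-of-two mask, (n - 1) & hash, used for plain
         * bins.
         */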

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }
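
        /*
         * Key ordering used by the tree code, in decreasing precedence:
         * (1) the int hash value, (2) compareTo when both keys share a
         * Comparable class (compareComparables), and (3) tieBreakOrder,
         * which compares class names and finally falls back to
         * System.identityHashCode. For example, two distinct keys of the
         * same non-Comparable class with colliding hashes end up ordered
         * only by identity hash code; the result is arbitrary but stable,
         * which is all the tree needs to stay searchable and balanced.
         */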

        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false;
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;
                }
            }
        }
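
        /*
         * Contract mirrored from putVal: putTreeVal returns the existing
         * node when an equal key is already present (the caller decides
         * whether to overwrite its value) and returns null after linking a
         * brand-new TreeNode. The one-shot "searched" scan covers keys that
         * collide on hash but are not mutually Comparable: before falling
         * back to tieBreakOrder for placement, both subtrees are searched
         * once with equals() so an equal key already in the tree is found
         * rather than duplicated.
         */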

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            if (root == null
                || (movable
                    && (root.right == null
                        || (rl = root.left) == null
                        || rl.left == null))) {
                tab[index] = first.untreeify(map);  // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    (root = replacement).red = false;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) {  // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }
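
        /*
         * Two details above worth noting: the "too small" test inspects only
         * the shape near the root (root.right, root.left, root.left.left)
         * instead of counting nodes, which is why untreeification can
         * trigger anywhere between 2 and 6 remaining nodes; and movable is
         * false in the iterator removal path (HashIterator.remove above), so
         * the bin's next/prev order is not reshuffled while an iterator is
         * walking it.
         */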

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }
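
        /*
         * Worked example of the split: when the table grows from 16 to 32
         * buckets, resize passes bit == 16 (the old capacity). Nodes whose
         * hash has that bit clear ((hash & 16) == 0) stay in bucket "index";
         * nodes with the bit set move to bucket "index + 16". Each resulting
         * list is re-treeified only if it still holds more than
         * UNTREEIFY_THRESHOLD nodes; otherwise it reverts to a plain linked
         * bin via untreeify.
         */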

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }

}