Constructor
/**
* Creates a new, empty map with an initial table size based on
* the given number of elements ({@code initialCapacity}), table
* density ({@code loadFactor}), and number of concurrently
* updating threads ({@code concurrencyLevel}).
*
* @param initialCapacity the initial capacity. The implementation
* performs internal sizing to accommodate this many elements,
* given the specified load factor.
* @param loadFactor the load factor (table density) for
* establishing the initial table size
* @param concurrencyLevel the estimated number of concurrently
* updating threads. The implementation may use this value as
* a sizing hint.
* @throws IllegalArgumentException if the initial capacity is
* negative or the load factor or concurrencyLevel are
* nonpositive
*/
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}
The initialCapacity passed in here is treated as the number of elements the map should hold before resizing (it feeds the resize threshold); the actual table capacity is computed from it and the load factor. This is the opposite of HashMap, where the argument is the table capacity and the usable size is derived from it.
Apart from that, the constructor is much like HashMap's. The interesting field is sizeCtl, whose meaning depends on the map's state (a small sizing sketch follows the list):
- Not yet initialized: sizeCtl = 0 means no initial capacity was specified; sizeCtl > 0 holds the initial table size.
- Initializing: sizeCtl = -1, a marker telling other threads that initialization is in progress.
- Normal state: sizeCtl = 0.75n, the resize threshold.
- Resizing: sizeCtl < 0 means other threads are resizing; sizeCtl = (resizeStamp(n) << RESIZE_STAMP_SHIFT) + 2 means exactly one thread is resizing.
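To make the contrast with HashMap concrete, here is a minimal standalone sketch of the constructor's sizing math; nextPowerOfTwo is a stand-in for the JDK's private tableSizeFor, and the class name is made up for illustration.
public class CtorSizingDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Same idea as the JDK's private tableSizeFor: round up to the next power of two
    static int nextPowerOfTwo(int c) {
        int n = c - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    // Mirrors the three-argument constructor: capacity is derived from the element count
    static int initialSizeCtl(int initialCapacity, float loadFactor, int concurrencyLevel) {
        if (initialCapacity < concurrencyLevel)
            initialCapacity = concurrencyLevel;
        long size = (long) (1.0 + (long) initialCapacity / loadFactor);
        return (size >= (long) MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : nextPowerOfTwo((int) size);
    }

    public static void main(String[] args) {
        // 16 expected elements at load factor 0.75 -> size 22 -> a 32-bucket table,
        // whereas new HashMap<>(16) would allocate a 16-bucket table with threshold 12.
        System.out.println(initialSizeCtl(16, 0.75f, 1));   // 32
        System.out.println(initialSizeCtl(32, 0.75f, 1));   // 64
    }
}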
putVal
/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
if (key == null || value == null) throw new NullPointerException();
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
if (tab == null || (n = tab.length) == 0)
tab = initTable();
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
}
addCount(1L, binCount);
return null;
}
On the first put the table has not been created yet, so initTable runs:
/**
* Initializes table, using the size recorded in sizeCtl.
*/
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
break;
}
}
return tab;
}
The while loop spins, using sizeCtl to decide which thread initializes the array.
If sizeCtl < 0, another thread is already initializing, so this thread does not need to; it calls Thread.yield() to give up the CPU for a while and, once rescheduled, re-checks whether initialization has finished.
If it is not negative (a positive sizeCtl holds the initial capacity), the thread CASes it to -1 to tell other threads that it is doing the initialization.
Once initialization is done, sizeCtl is set back to a positive value: three quarters of the table size (n - (n >>> 2)).
So after initialization sizeCtl stores the resize threshold.
The threshold is hard-coded at 0.75 times the bucket array length (the table capacity); once the element count reaches it, a resize is triggered.
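A trivial standalone check of the shift arithmetic (not JDK code, just the expression on its own):
public class ThresholdDemo {
    public static void main(String[] args) {
        int n = 16;                          // DEFAULT_CAPACITY
        System.out.println(n - (n >>> 2));   // 12, i.e. 0.75 * 16
    }
}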
tabAt(tab, i = (n - 1) & hash)
static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
}
getObjectVolatile
- public native Object getObjectVolatile(Object o, long offset);
此方法和上面的getObject功能类似,不过附加了’volatile’加载语义,也就是强制从主存中获取属性值。类似的方法有getIntVolatile、getDoubleVolatile等等。这个方法要求被使用的属性被volatile修饰,否则功能和getObject方法相同。
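The same element-level volatile access can be illustrated with the public VarHandle API from Java 9+; this is only an analogy for the Unsafe calls used in JDK 8, not the actual implementation, and the class name is made up.
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

public class VolatileArrayReadDemo {
    // A volatile array field does not give volatile semantics to its elements;
    // element-level volatile access needs Unsafe.getObjectVolatile (JDK 8) or a VarHandle (JDK 9+).
    static final VarHandle AA = MethodHandles.arrayElementVarHandle(Object[].class);

    public static void main(String[] args) {
        Object[] table = new Object[16];
        AA.setVolatile(table, 3, "node");                              // analogous to setTabAt
        Object v = AA.getVolatile(table, 3);                           // analogous to tabAt
        boolean swapped = AA.compareAndSet(table, 4, null, "first");   // analogous to casTabAt
        System.out.println(v + " " + swapped);                         // node true
    }
}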
If the slot in the array is still empty, the new node is CASed straight into it:
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break;
If the slot is not empty, the node has to be inserted into the bin's linked list or red-black tree:
V oldVal = null;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
- Lock the head node of the bin
- Walk the linked list: if a node with an equal key is found, replace its value (unless onlyIfAbsent); otherwise append a new node at the tail
- If the list length reaches 8 (TREEIFY_THRESHOLD), call treeifyBin (a usage example of put/putIfAbsent follows below)
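A quick usage example of the two entry points built on putVal; put replaces an existing value, putIfAbsent does not, and both return the previous value.
import java.util.concurrent.ConcurrentHashMap;

public class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        System.out.println(map.put("a", 1));          // null: no previous mapping
        System.out.println(map.put("a", 2));          // 1: old value returned, value replaced
        System.out.println(map.putIfAbsent("a", 3));  // 2: onlyIfAbsent, existing value kept
        System.out.println(map.get("a"));             // 2
        // map.put(null, 1);  // would throw NullPointerException: null keys/values are rejected
    }
}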
treeifyBin
/**
* Replaces all linked nodes in bin at given index unless table is
* too small, in which case resizes instead.
*/
private final void treeifyBin(Node<K,V>[] tab, int index) {
Node<K,V> b; int n, sc;
if (tab != null) {
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
synchronized (b) {
if (tabAt(tab, index) == b) {
TreeNode<K,V> hd = null, tl = null;
for (Node<K,V> e = b; e != null; e = e.next) {
TreeNode<K,V> p =
new TreeNode<K,V>(e.hash, e.key, e.val,
null, null);
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
setTabAt(tab, index, new TreeBin<K,V>(hd));
}
}
}
}
}
If the table length is still below 64 (MIN_TREEIFY_CAPACITY), the map tries to resize instead of treeifying; otherwise the bin is converted to a red-black tree (a small collision sketch follows the list):
- Traverse the linked list and rebuild it as a doubly linked list of TreeNode
- new TreeBin<K,V>(hd) then turns that list into a red-black tree
- The TreeBin constructor first inserts the nodes as into a plain binary search tree and then rebalances with balanceInsertion
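For a feel of when this path is taken, here is a small sketch with a hypothetical key type whose hashCode always collides, so every entry lands in one bin; once the bin grows beyond 8 nodes and the table is already at 64+ buckets, it is treeified.
import java.util.concurrent.ConcurrentHashMap;

public class TreeifyDemo {
    // Hypothetical key type: constant hashCode forces all entries into one bin.
    static final class BadKey implements Comparable<BadKey> {
        final int id;
        BadKey(int id) { this.id = id; }
        @Override public int hashCode() { return 42; }
        @Override public boolean equals(Object o) { return o instanceof BadKey && ((BadKey) o).id == id; }
        @Override public int compareTo(BadKey o) { return Integer.compare(id, o.id); } // lets the tree order keys
    }

    public static void main(String[] args) {
        // Large initial capacity so the table is already >= MIN_TREEIFY_CAPACITY (64)
        ConcurrentHashMap<BadKey, Integer> map = new ConcurrentHashMap<>(128);
        for (int i = 0; i < 20; i++)
            map.put(new BadKey(i), i);      // after the bin exceeds 8 nodes it becomes a TreeBin
        System.out.println(map.size());     // 20; lookups in the colliding bin are now O(log n)
    }
}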
Every time an element is added the element count is incremented by 1, and the map checks whether the resize threshold has been reached; if so, it resizes or helps with an ongoing resize.
addCount
private final void addCount(long x, int check) {
CounterCell[] as; long b, s;
// The count is spread across per-thread segments (the segmented-lock idea again),
// plus a baseCount that is tried first; only if that CAS fails does the thread
// fall back to its own CounterCell segment, which keeps contention low.
// So: try adding the delta to baseCount, and on failure add it to a CounterCell.
if ((as = counterCells) != null ||
!U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a; long v; int m;
boolean uncontended = true;
// If the cells array is null,
// or its length is 0,
// or the cell for the current thread is null,
// or CASing the delta onto the current thread's cell fails...
if (as == null || (m = as.length - 1) < 0 ||
(a = as[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended =
U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
// ...fall back to fullAddCount, which is guaranteed to record the delta
// (it is not a simple spin). If the per-thread cells keep failing, contention
// has occurred, so counterCells is grown there to reduce the chance of
// several threads hashing to the same cell.
fullAddCount(x, uncontended);
return;
}
if (check <= 1)
return;
// Compute the element count: baseCount plus the values added on all the cells
s = sumCount();
}
if (check >= 0) {
Node<K,V>[] tab, nt; int n, sc;
// If the element count has reached the resize threshold, resize.
// Note: in the normal state sizeCtl stores the threshold, i.e. 0.75 of the capacity.
while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
(n = tab.length) < MAXIMUM_CAPACITY) {
// rs is a stamp identifying this resize (it depends only on n)
int rs = resizeStamp(n);
if (sc < 0) {
// sc < 0 means a resize is already in progress
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
// The resize has finished (or can no longer be joined), so exit the loop.
// In practice nextTable == null is the condition usually hit; it is not obvious when the others fire.
break;
// The resize is not finished, so the current thread joins the migration
// and the resizer thread count is incremented by 1.
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
// This branch is taken by the thread that initiates the resize.
// The high 16 bits of sizeCtl hold the resize stamp rs,
// and the low 16 bits hold the resizer count plus 1, i.e. (1 + nThreads),
// so the Javadoc's statement that sizeCtl is -(1 + nThreads) during a resize is not literally accurate.
// Start migrating elements.
transfer(tab, null);
// Recompute the element count
s = sumCount();
}
}
}
(1) The element count is stored much like in LongAdder: spread over segments to reduce contention when several threads update the size at once;
(2) The total count is obtained by summing baseCount and all the segment values (a simplified counting sketch follows below);
(3) In the normal state sizeCtl stores the resize threshold, which is 0.75 of the capacity;
(4) During a resize the high bits of sizeCtl store the resize stamp (resizeStamp) and the low bits store the number of resizing threads plus 1, i.e. (1 + nThreads);
(5) A thread that adds an element and finds a resize in progress joins in and helps migrate elements.
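The counting scheme is essentially LongAdder's. Below is a much simplified sketch of the same idea under obvious simplifications: a fixed number of cells, a random probe instead of the per-thread probe, and no cell resizing or fullAddCount.
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;

public class StripedCounterSketch {
    private final AtomicLong base = new AtomicLong();        // like baseCount
    private final AtomicLong[] cells = new AtomicLong[8];    // like counterCells (fixed size here)

    StripedCounterSketch() {
        for (int i = 0; i < cells.length; i++) cells[i] = new AtomicLong();
    }

    void add(long x) {
        // Try the base first; only on contention fall back to a cell.
        long b = base.get();
        if (!base.compareAndSet(b, b + x)) {
            int idx = ThreadLocalRandom.current().nextInt(cells.length); // stand-in for the thread probe
            cells[idx].addAndGet(x);
        }
    }

    long sum() {                                             // like sumCount(): base + all cells
        long s = base.get();
        for (AtomicLong c : cells) s += c.get();
        return s;
    }

    public static void main(String[] args) throws InterruptedException {
        StripedCounterSketch counter = new StripedCounterSketch();
        Runnable task = () -> { for (int i = 0; i < 100_000; i++) counter.add(1); };
        Thread t1 = new Thread(task), t2 = new Thread(task);
        t1.start(); t2.start(); t1.join(); t2.join();
        System.out.println(counter.sum());                   // 200000, no updates lost
    }
}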
transfer (resize)
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
int n = tab.length, stride;
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
// If nextTab is null the migration has not started yet,
// so allocate the new bucket array.
try {
// The new bucket array is twice the size of the old one
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n;
}
// Size of the new bucket array
int nextn = nextTab.length;
// Create a ForwardingNode that carries a reference to the new bucket array
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<K,V> f; int fh;
// This while loop just works out the next bucket index i for this thread to migrate.
// i moves downward from n - 1 (n is the old array length), claiming stride buckets at a time;
// e.g. with n = 16 the buckets are processed from index 15 down to 0.
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
// The current thread has no more buckets to migrate
// (its claimed range is exhausted, or the whole table has been processed).
int sc;
if (finishing) {
// The whole migration has finished: install the new array as table
// and set the next resize threshold to 0.75 of the new capacity.
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
// This thread has finished its share of the resize; decrement the resizer count by 1
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
// Not equal: other threads are still migrating, so just return; equal means this was the last resizer
return;
// Set finishing (and advance) to true;
// only with finishing == true is the commit branch above taken.
finishing = advance = true;
// Reset i to n so the old array is scanned once more
// to verify that every bucket has really been migrated;
// on that second pass each bucket should hit the (fh = f.hash) == MOVED branch below.
i = n; // recheck before commit
}
}
else if ((f = tabAt(tab, i)) == null)
// The bucket is empty: just CAS in the ForwardingNode to mark it as migrated
advance = casTabAt(tab, i, null, fwd);
else if ((fh = f.hash) == MOVED)
// The first element's hash is MOVED,
// so it is a ForwardingNode,
// meaning this bucket has already been migrated.
advance = true; // already processed
else {
// Lock the bucket and migrate its elements
synchronized (f) {
// Re-check that the first element of the bucket has not changed,
// i.e. that another thread has not migrated it first.
if (tabAt(tab, i) == f) {
// Split the list into two lists.
// Each element's hash is ANDed with the old capacity n:
// nodes where (hash & n) == 0 go to the low list, the others to the high list.
// The low list stays at the same index in the new array,
// while the high list moves to the old index plus n.
// This is exactly why a resize always doubles the capacity.
Node<K,V> ln, hn;
if (fh >= 0) {
// A non-negative hash on the first element means the bucket holds a plain linked list.
// The migration is basically the same as HashMap's,
// except for the extra lastRun step:
// lastRun marks the tail sub-list whose nodes all end up in the same new bucket,
// so that sub-list can be reused as-is instead of being copied node by node.
// For example, if (hash & n) over the list yields 0 0 4 4 0 0 0,
// the last three elements certainly stay in the same bucket,
// and lastRun points to the third node from the end.
// The point of this is simply to reduce the number of nodes that have to be copied.
int runBit = fh & n;
Node<K,V> lastRun = f;
for (Node<K,V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
// Decide whether the lastRun sub-list belongs to the low or the high list
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
// Walk the list up to lastRun: nodes with hash & n == 0 are put on the low list,
// the others on the high list (these nodes are copied, head-inserted).
for (Node<K,V> p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node<K,V>(ph, pk, pv, ln);
else
hn = new Node<K,V>(ph, pk, pv, hn);
}
// The low list keeps the original index
setTabAt(nextTab, i, ln);
// The high list goes to the original index plus n
setTabAt(nextTab, i + n, hn);
// Mark the old bucket as migrated
setTabAt(tab, i, fwd);
// advance = true sends the loop back up to --i to claim the next bucket
advance = true;
}
else if (f instanceof TreeBin) {
// The first element is a TreeBin: split it into two trees the same way,
// nodes with (hash & n) == 0 go to the low tree,
// the others to the high tree.
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> lo = null, loTail = null;
TreeNode<K,V> hi = null, hiTail = null;
int lc = 0, hc = 0;
// Traverse the whole tree (via the next pointers) and split it into two according to hash & n
for (Node<K,V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K,V> p = new TreeNode<K,V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
// If a split part holds no more than 6 (UNTREEIFY_THRESHOLD) elements, degrade it back to a linked list
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K,V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K,V>(hi) : t;
// The low tree keeps the original index
setTabAt(nextTab, i, ln);
// The high tree goes to the original index plus n
setTabAt(nextTab, i + n, hn);
// Mark the old bucket as migrated
setTabAt(tab, i, fwd);
// advance = true sends the loop back up to --i to claim the next bucket
advance = true;
}
}
}
}
}
}
(1) The new bucket array is twice the size of the old one;
(2) Migration starts from the buckets at the end of the array;
(3) A fully migrated bucket is marked by placing a ForwardingNode in the old array;
(4) During migration the elements of a bucket are split into two lists (or trees) according to whether hash & n is 0 (see the index check below);
(5) The low list (tree) stays at its original index;
(6) The high list (tree) moves to the original index plus n;
(7) The bucket being migrated is locked, which is again the segmented-lock idea.
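The index rule behind points (4)-(6) can be verified directly: a node at index i = hash & (n - 1) ends up at either i or i + n in the doubled table, depending only on the single bit hash & n. A small standalone check:
public class SplitIndexDemo {
    public static void main(String[] args) {
        int n = 16;                                  // old capacity
        int[] hashes = {5, 21, 37, 53};              // all map to bucket 5 when n = 16
        for (int h : hashes) {
            int oldIndex = h & (n - 1);
            int newIndex = h & (2 * n - 1);          // index in the doubled table
            // (h & n) == 0 -> stays at oldIndex; otherwise moves to oldIndex + n
            System.out.printf("hash=%d old=%d new=%d (h & n = %d)%n", h, oldIndex, newIndex, h & n);
        }
    }
}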
tryPresize
Going back to treeifyBin: when the table length is below 64, the map resizes via tryPresize instead of converting the bin to a red-black tree.
private final void tryPresize(int size) {
// If size is already at least half of MAXIMUM_CAPACITY, jump straight to MAXIMUM_CAPACITY;
// otherwise round size * 1.5 + 1 up to the next power of two.
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
tableSizeFor(size + (size >>> 1) + 1);
int sc;
// Only proceed while sizeCtl is zero or positive
while ((sc = sizeCtl) >= 0) {
Node<K,V>[] tab = table; int n;
// If the table has not been initialized yet
if (tab == null || (n = tab.length) == 0) {
n = (sc > c) ? sc : c;
// CAS sizeCtl to -1 to signal that the table is being initialized
if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
// Make sure no other thread has replaced table in the meantime
if (table == tab) {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
//0.75*n
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
}
}
// If the requested capacity does not exceed the current threshold, or the table is already at the maximum capacity, there is nothing to do
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
/** Generate the resize stamp for the table size; each n has a distinct stamp:
 * static final int resizeStamp(int n) {
 *     return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
 * }
 * Integer.numberOfLeadingZeros(n) returns the number of zero bits before the highest (leftmost)
 * one bit in the two's-complement binary representation of n.
 * For n = 16 (0001 0000) it is 27; since n is a power of two, different n give different results.
 * ORing with (1 << (RESIZE_STAMP_BITS - 1)) sets bit 15, i.e. 2^15 | (leading zeros of n)
 * (so after shifting the stamp left by 16 the sign bit is 1 and the result is certainly negative).
 */
int rs = resizeStamp(n);
if (sc < 0) {
Node<K,V>[] nt;
/** 1. The first check: sc >>> RESIZE_STAMP_SHIFT compares the stamp in the high RESIZE_STAMP_BITS bits with rs;
 *    equal means the resize in progress is for the same n, i.e. the same capacity.
 * 2. The second and third checks test whether the number of helping threads has reached MAX_RESIZERS.
 * 3. The fourth and fifth checks make sure transfer() has finished its initialization.
 */
if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
transferIndex <= 0)
break;
if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
transfer(tab, nt);
}
/** If no thread is resizing yet, CAS sizeCtl to start one: rs shifted left by RESIZE_STAMP_SHIFT, plus 2.
 * As noted above, after the left shift the value is negative, and it represents a single thread doing the resize.
 * From now on the high RESIZE_STAMP_BITS bits of sizeCtl hold the stamp and the low bits hold the resizer count (+1).
 */
else if (U.compareAndSwapInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
}
}
}
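A quick check of the stamp arithmetic described in the comments above; resizeStamp is private in the JDK, so its expression is written out here in a throwaway class.
public class ResizeStampDemo {
    static final int RESIZE_STAMP_BITS = 16;
    static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

    // Same expression as the JDK's private resizeStamp
    static int resizeStamp(int n) {
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }

    public static void main(String[] args) {
        int n = 16;
        int rs = resizeStamp(n);
        System.out.println(Integer.numberOfLeadingZeros(n));    // 27
        System.out.println(Integer.toBinaryString(rs));          // 1000000000011011 (bit 15 set)
        int sc = (rs << RESIZE_STAMP_SHIFT) + 2;                 // negative: stamp in the high 16 bits,
        System.out.println(sc);                                   // resizer count + 1 in the low 16 bits
        System.out.println((sc >>> RESIZE_STAMP_SHIFT) == rs);   // true: the stamp check used in addCount/tryPresize
    }
}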
get
/**
* Returns the value to which the specified key is mapped,
* or {@code null} if this map contains no mapping for the key.
*
* <p>More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code key.equals(k)},
* then this method returns {@code v}; otherwise it returns
* {@code null}. (There can be at most one such mapping.)
*
* @throws NullPointerException if the specified key is null
*/
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
else if (eh < 0)
return (p = e.find(h, key)) != null ? p.val : null;
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
Now look at the find methods used by get.
If the bin is a TreeBin, the lookup follows the red-black tree:
final Node<K,V> find(int h, Object k) {
if (k != null) {
for (Node<K,V> e = first; e != null; ) {
int s; K ek;
if (((s = lockState) & (WAITER|WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
}
else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
TreeNode<K,V> r, p;
try {
p = ((r = root) == null ? null :
r.findTreeNode(h, k, null));
} finally {
Thread w;
if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
(READER|WAITER) && (w = waiter) != null)
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
If it is a ForwardingNode:
Node<K,V> find(int h, Object k) {
// loop to avoid arbitrarily deep recursion on forwarding nodes
outer: for (Node<K,V>[] tab = nextTable;;) {
Node<K,V> e; int n;
if (k == null || tab == null || (n = tab.length) == 0 ||
(e = tabAt(tab, (n - 1) & h)) == null)
return null;
for (;;) {
int eh; K ek;
if ((eh = e.hash) == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
if (eh < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
continue outer;
}
else
return e.find(h, k);
}
if ((e = e.next) == null)
return null;
}
}
}
}
A ForwardingNode stores nextTable, so the lookup continues in nextTable. nextTable is only non-null while a resize is in progress; once the resize finishes, nextTable replaces table and the nextTable field is reset to null.
remove
/**
* Implementation for the four public remove/replace methods:
* Replaces node value with v, conditional upon match of cv if
* non-null. If resulting value is null, delete.
*/
final V replaceNode(Object key, V value, Object cv) {
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
boolean validated = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
if (value != null)
e.val = value;
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
}
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
The overall logic is much like HashMap's: delete from the linked list or from the red-black tree. If the bucket is being migrated (MOVED), the thread calls helpTransfer to assist the resize and retries the removal once the bucket has moved.
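All four public remove/replace methods funnel into replaceNode; a quick usage example showing how the conditional value cv and the addCount(-1L, -1) bookkeeping behave:
import java.util.concurrent.ConcurrentHashMap;

public class RemoveReplaceDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        System.out.println(map.remove("a", 2));      // false: conditional remove, value (cv) does not match
        System.out.println(map.replace("a", 1, 9));  // true: value matched, replaced in place
        System.out.println(map.remove("a"));         // 9: unconditional remove returns the old value
        System.out.println(map.size());              // 0: addCount(-1L, -1) kept the counter in sync
    }
}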