I. Collections.synchronizedMap
1. Construction
public static <K,V> Map<K,V> synchronizedMap(Map<K,V> m) {
return new SynchronizedMap<>(m);
}
Collections.synchronizedMap returns a Map, so constructing one looks like this:
Map<String, String> map = Collections.synchronizedMap(new HashMap<String, String>());
2. Why it is thread-safe
SynchronizedMap internally holds a plain Map plus a mutex lock object. When the factory method above is used, only a map is passed in, so the mutex field is assigned mutex = this, i.e. the SynchronizedMap object itself.
After the map is wrapped, every operation on it acquires the mutex first:
public int size() {
synchronized (mutex) {return m.size();}
}
public boolean isEmpty() {
synchronized (mutex) {return m.isEmpty();}
}
public boolean containsKey(Object key) {
synchronized (mutex) {return m.containsKey(key);}
}
public boolean containsValue(Object value) {
synchronized (mutex) {return m.containsValue(value);}
}
public V get(Object key) {
synchronized (mutex) {return m.get(key);}
}
public V put(K key, V value) {
synchronized (mutex) {return m.put(key, value);}
}
public V remove(Object key) {
synchronized (mutex) {return m.remove(key);}
}
public void putAll(Map<? extends K, ? extends V> map) {
synchronized (mutex) {m.putAll(map);}
}
public void clear() {
synchronized (mutex) {m.clear();}
}
So Collections.synchronizedMap is thread-safe, but because every method locks the entire map, the lock granularity is very coarse and throughput suffers.
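One caveat that follows from the single mutex: compound operations such as iteration are still not atomic, and the synchronizedMap javadoc requires the caller to hold the map's monitor manually for the whole traversal. A minimal sketch:
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class SynchronizedMapIteration {
    public static void main(String[] args) {
        Map<String, String> map = Collections.synchronizedMap(new HashMap<>());
        map.put("k1", "v1");
        map.put("k2", "v2");
        // iteration is a compound operation: lock the map for its entire duration
        synchronized (map) {
            for (Map.Entry<String, String> e : map.entrySet()) {
                System.out.println(e.getKey() + "=" + e.getValue());
            }
        }
    }
}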
II. Hashtable
1. Key fields
// array of buckets, each a linked list of Entry nodes
private transient Entry<?,?>[] table;
// actual number of elements
private transient int count;
/**
* The table is rehashed when its size exceeds this threshold. (The
* value of this field is (int)(capacity * loadFactor).)
*
* @serial
*/
// resize threshold, equal to (int)(capacity * loadFactor)
private int threshold;
// load factor, 0.75 by default
private float loadFactor;
2. Constructors, no-arg and parameterized
// no-arg constructor: default initial capacity 11, load factor 0.75f
public Hashtable() {
this(11, 0.75f);
}
// one-arg constructor: array capacity initialCapacity, load factor 0.75f
public Hashtable(int initialCapacity) {
this(initialCapacity, 0.75f);
}
public Hashtable(int initialCapacity, float loadFactor) {
if (initialCapacity < 0)
throw new IllegalArgumentException("Illegal Capacity: "+
initialCapacity);
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new IllegalArgumentException("Illegal Load: "+loadFactor);
if (initialCapacity==0)
initialCapacity = 1;
this.loadFactor = loadFactor;
table = new Entry<?,?>[initialCapacity];
// private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// normally the resize threshold equals initialCapacity * loadFactor
threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
}
// same as HashMap: size() returns the number of entries actually stored in the Entry<?,?>[] table
public synchronized int size() {
return count;
}
3. put method (with resizing)
// synchronized, guaranteeing thread safety
public synchronized V put(K key, V value) {
// Make sure the value is not null
if (value == null) {
throw new NullPointerException();
}
// Makes sure the key is not already in the hashtable.
Entry<?,?> tab[] = table;
// the key's hash; Hashtable does not allow null keys, whereas HashMap does
int hash = key.hashCode();
// compute the index: AND the hash with Integer.MAX_VALUE (0x7FFFFFFF) to clear the sign bit, then take the remainder modulo tab.length
int index = (hash & 0x7FFFFFFF) % tab.length;
@SuppressWarnings("unchecked")
Entry<K,V> entry = (Entry<K,V>)tab[index];
for(; entry != null ; entry = entry.next) {
if ((entry.hash == hash) && entry.key.equals(key)) {
V old = entry.value;
entry.value = value;
return old;
}
}
addEntry(hash, key, value, index);
return null;
}
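A side note on the index computation above: hashCode() may be negative, and Java's % operator then returns a negative remainder, so the sign bit is cleared with & 0x7FFFFFFF first. A quick sketch:
public class HashtableIndexDemo {
    public static void main(String[] args) {
        int length = 11;   // Hashtable's default capacity
        int hash = -7;     // hashCode() can be any int, including negative values
        System.out.println(hash % length);                // -7, unusable as an index
        System.out.println((hash & 0x7FFFFFFF) % length); // 6, a valid bucket index
    }
}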
private void addEntry(int hash, K key, V value, int index) {
Entry<?,?> tab[] = table;
// when the element count reaches the resize threshold,
if (count >= threshold) {
// Rehash the table if the threshold is exceeded
// resize and rehash
rehash();
tab = table;
hash = key.hashCode();
index = (hash & 0x7FFFFFFF) % tab.length;
}
// Creates the new entry.
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>) tab[index];
tab[index] = new Entry<>(hash, key, value, e);
count++;
modCount++;
}
// resize method
@SuppressWarnings("unchecked")
protected void rehash() {
int oldCapacity = table.length;
Entry<?,?>[] oldMap = table;
// overflow-conscious code
// new capacity = old capacity * 2 + 1
int newCapacity = (oldCapacity << 1) + 1;
if (newCapacity - MAX_ARRAY_SIZE > 0) {
if (oldCapacity == MAX_ARRAY_SIZE)
// Keep running with MAX_ARRAY_SIZE buckets
return;
newCapacity = MAX_ARRAY_SIZE;
}
Entry<?,?>[] newMap = new Entry<?,?>[newCapacity];
modCount++;
threshold = (int)Math.min(newCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
table = newMap;
for (int i = oldCapacity ; i-- > 0 ;) {
for (Entry<K,V> old = (Entry<K,V>)oldMap[i] ; old != null ; ) {
Entry<K,V> e = old;
old = old.next;
int index = (e.hash & 0x7FFFFFFF) % newCapacity;
e.next = (Entry<K,V>)newMap[index];
newMap[index] = e;
}
}
}
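To make the growth rule concrete, here is a small sketch that simply replays the (oldCapacity << 1) + 1 formula from the default capacity of 11, along with the resulting thresholds:
public class HashtableGrowthDemo {
    public static void main(String[] args) {
        int capacity = 11;          // default initial capacity
        float loadFactor = 0.75f;   // default load factor
        for (int i = 0; i < 4; i++) {
            System.out.println("capacity=" + capacity
                    + ", threshold=" + (int) (capacity * loadFactor));
            capacity = (capacity << 1) + 1;   // the same formula rehash() uses
        }
        // prints capacities 11, 23, 47, 95 with thresholds 8, 17, 35, 71
    }
}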
4. remove method
// synchronized
public synchronized V remove(Object key) {
Entry<?,?> tab[] = table;
int hash = key.hashCode();
// compute the element's index
int index = (hash & 0x7FFFFFFF) % tab.length;
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>)tab[index];
// walk the bucket's linked list
for(Entry<K,V> prev = null ; e != null ; prev = e, e = e.next) {
if ((e.hash == hash) && e.key.equals(key)) {
if (prev != null) {
prev.next = e.next;
} else {
tab[index] = e.next;
}
modCount++;
count--;
V oldValue = e.value;
e.value = null;
return oldValue;
}
}
return null;
}
5. replace method
// synchronized
public synchronized V replace(K key, V value) {
Objects.requireNonNull(value);
Entry<?,?> tab[] = table;
int hash = key.hashCode();
int index = (hash & 0x7FFFFFFF) % tab.length;
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>)tab[index];
for (; e != null; e = e.next) {
if ((e.hash == hash) && e.key.equals(key)) {
V oldValue = e.value;
e.value = value;
return oldValue;
}
}
return null;
}
6. get method
// synchronized
public synchronized V get(Object key) {
Entry<?,?> tab[] = table;
int hash = key.hashCode();
int index = (hash & 0x7FFFFFFF) % tab.length;
for (Entry<?,?> e = tab[index] ; e != null ; e = e.next) {
if ((e.hash == hash) && e.key.equals(key)) {
return (V)e.value;
}
}
return null;
}
7. Differences between Hashtable and HashMap
This summary is based on https://aobing.blog.csdn.net/article/details/103589011
7.1 Whether null keys or values are allowed
Hashtable allows neither null keys nor null values; HashMap allows both.
Hashtable uses the fail-safe mechanism, which means the data you read is not necessarily the latest.
If null values were allowed, a reader could not tell whether a key is absent or mapped to null, because under concurrency you cannot reliably follow up with a containsKey(key) call to check; the same reasoning applies to ConcurrentHashMap.
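A two-line demonstration of the difference (the last line is unreachable because the put before it already throws; it is shown only to name both failure modes):
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

public class NullKeyValueDemo {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put(null, "v");    // ok: HashMap allows one null key
        hashMap.put("k", null);    // ok: HashMap allows null values

        Map<String, String> hashtable = new Hashtable<>();
        hashtable.put("k", null);  // throws NullPointerException (null value)
        hashtable.put(null, "v");  // would also throw: key.hashCode() NPEs
    }
}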
7.2 Different implementation
Hashtable extends the Dictionary class, while HashMap extends AbstractMap.
7.3 Different initial capacity
HashMap's initial capacity is 16 and Hashtable's is 11; both default to a load factor of 0.75f.
7.4 Different resize rules
When the current size exceeds total capacity * load factor, HashMap doubles its capacity, while Hashtable doubles it and adds 1.
7.5 Different iterators
HashMap's Iterator is fail-fast, while Hashtable's Enumerator is not. So when another thread changes the structure of a HashMap, e.g. by adding or removing elements, a ConcurrentModificationException is thrown; Hashtable does not throw it.
7.6 Fail-fast vs. fail-safe
Excerpted from the runoob tutorial --> https://www.runoob.com/w3cnote/java-fail-ast-fail-safe.html
Fail-fast
While iterating over a collection with an iterator, if the collection's contents are modified during the traversal (elements added, removed, or changed), a ConcurrentModificationException is thrown.
Principle: the iterator accesses the collection's contents directly and relies on a modCount variable during traversal. Whenever the collection changes while being traversed, modCount changes too. Before each hasNext()/next() step the iterator checks whether modCount still equals expectedModCount; if it does, traversal continues, otherwise the exception is thrown and traversal aborts.
Note: the exception is triggered by the condition modCount != expectedModCount. If the collection changes but modCount happens to end up equal to expectedModCount again, the exception is not thrown. You therefore must not build concurrency logic on whether this exception appears; it is only advisable for detecting concurrent-modification bugs.
Scenario: the collection classes under java.util are all fail-fast and cannot tolerate concurrent modification (being modified while iterated) across threads.
Fail-safe
Containers that use the fail-safe mechanism do not traverse the collection's contents directly; they first copy the original collection and iterate over the copy.
Principle: since iteration runs over a copy of the original collection, modifications made to the original during the traversal are invisible to the iterator, so no ConcurrentModificationException is triggered.
Drawback: copying avoids ConcurrentModificationException, but by the same token the iterator cannot see later modifications: it traverses the snapshot taken at the moment iteration began and knows nothing about changes made to the original collection afterwards.
Scenario: the containers under java.util.concurrent are fail-safe and can be used and modified concurrently by multiple threads.
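A minimal sketch contrasting the two behaviors. Note that ConcurrentHashMap achieves the fail-safe effect with weakly consistent iterators rather than a full copy of the collection:
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FailFastVsFailSafeDemo {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("a", "1");
        hashMap.put("b", "2");
        try {
            for (String k : hashMap.keySet()) {
                hashMap.remove("b");   // structural change bumps modCount mid-iteration
            }
        } catch (ConcurrentModificationException e) {
            System.out.println("fail-fast: " + e);
        }

        Map<String, String> chm = new ConcurrentHashMap<>();
        chm.put("a", "1");
        chm.put("b", "2");
        for (String k : chm.keySet()) {
            chm.remove("b");           // no exception: weakly consistent iterator
        }
        System.out.println("fail-safe style: " + chm);
    }
}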
III. ConcurrentHashMap (JDK1.8)
In JDK1.8, ConcurrentHashMap's underlying data structure is the same as HashMap's: array + linked list + red-black tree.
1. Key fields
// maximum capacity of the node array: 2^30 = 1073741824
private static final int MAXIMUM_CAPACITY = 1 << 30;
// default initial capacity, must be a power of two
private static final int DEFAULT_CAPACITY = 16;
// largest possible array size, tied to the toArray()-related methods
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// concurrency level, legacy, kept for compatibility with earlier versions
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
// load factor
private static final float LOAD_FACTOR = 0.75f;
// treeify threshold: a bucket list longer than 8 is converted to a red-black tree
static final int TREEIFY_THRESHOLD = 8;
// untreeify threshold: <= 6 (during transfer, the lc/hc counters, starting at 0, count the TreeNodes going to the low/high bins; if a count is <= UNTREEIFY_THRESHOLD, untreeify(lo) is called)
static final int UNTREEIFY_THRESHOLD = 6;
static final int MIN_TREEIFY_CAPACITY = 64;
private static final int MIN_TRANSFER_STRIDE = 16;
private static int RESIZE_STAMP_BITS = 16;
// 2^16 - 1, the maximum number of threads that can help with resizing
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
// 32 - 16 = 16, the bit offset at which the size stamp is recorded in sizeCtl
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
// hash value of forwarding nodes
static final int MOVED = -1;
// hash value of tree root (TreeBin) nodes
static final int TREEBIN = -2;
// hash value of ReservationNode
static final int RESERVED = -3;
// number of available processors
static final int NCPU = Runtime.getRuntime().availableProcessors();
// the array that stores the nodes
transient volatile Node<K,V>[] table;
/* Control flag used to coordinate initialization and resizing of the table; different values mean different things:
* negative: -1 means the table is being initialized; -N means N - 1 threads are currently resizing
* 0 (the default): the table has not been initialized yet
* positive: the initial capacity, or the size at which the next resize will occur */
private transient volatile int sizeCtl;
2. Constructors
public ConcurrentHashMap() {
}
// LOAD_FACTOR is the load factor, 0.75f
public ConcurrentHashMap(int initialCapacity) {
this(initialCapacity, LOAD_FACTOR, 1);
}
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
// bump the capacity up to the concurrency level if it is smaller
if (initialCapacity < concurrencyLevel) // Use at least as many bins
initialCapacity = concurrencyLevel; // as estimated threads
// size = 1.0 + initialCapacity / loadFactor
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
// sizeCtl = cap
this.sizeCtl = cap;
}
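A worked trace of this sizing logic, assuming the one-arg constructor (which, in the version quoted above, delegates with loadFactor = 0.75f and concurrencyLevel = 1):
public class ChmSizingTrace {
    public static void main(String[] args) {
        // new ConcurrentHashMap<>(16) delegates to this(16, 0.75f, 1):
        long size = (long) (1.0 + (long) 16 / 0.75f);
        System.out.println(size);  // 22; tableSizeFor(22) == 32, so sizeCtl = 32, not 16
    }
}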
3. put
ConcurrentHashMap's put is fairly involved; roughly it breaks down into the following steps:
- Compute the hash for the key (which determines the index). Code: hash = spread(key.hashCode());
- Check whether the table needs to be initialized. Code: if (tab == null || (n = tab.length) == 0)
- Locate the Node for the key; if the slot is empty, try to write with CAS, spinning until success. Code: casTabAt(tab, i, null, new Node<K,V>(hash, key, value))
- If the head node's hash == MOVED == -1, a resize is in progress; help transfer. Code: ((fh = f.hash) == MOVED)
- Otherwise, write the data under a synchronized lock on the head node. Code: synchronized (f)
- When the bin's length reaches TREEIFY_THRESHOLD, convert it to a red-black tree. Code: if (binCount >= TREEIFY_THRESHOLD) treeifyBin(tab, i);
public V put(K key, V value) {
return putVal(key, value, false);
}
final V putVal(K key, V value, boolean onlyIfAbsent) {
// neither key nor value may be null
if (key == null || value == null) throw new NullPointerException();
// compute the spread hash
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh; K fk; V fv;
// the table is null or empty
if (tab == null || (n = tab.length) == 0)
// initialize it (first insertion)
tab = initTable();
// the target slot is empty
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
// insert directly; CAS guarantees thread safety
if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
break; // no lock when adding to empty bin
}
// the head is a ForwardingNode (hash == MOVED): a resize is in progress, help transfer
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else if (onlyIfAbsent // check first node without acquiring lock
&& fh == hash
&& ((fk = f.key) == key || (fk != null && key.equals(fk)))
&& (fv = f.val) != null)
return fv;
else {
V oldVal = null;
// lock the bin: for a linked list, the lock granularity is the head node
synchronized (f) {
if (tabAt(tab, i) == f) {
// a normal Node's hash (the spread key hash) is >= 0,
// while a ForwardingNode's is -1 and a TreeBin's is -2
if (fh >= 0) {
binCount = 1;
// walk the linked list; new nodes are appended at the tail
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key, value);
break;
}
}
}
// tree bin: insert following the tree's rules
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
else if (f instanceof ReservationNode)
throw new IllegalStateException("Recursive update");
}
}
if (binCount != 0) {
// if the list length reaches 8 (TREEIFY_THRESHOLD), convert the list to a tree
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
}
addCount(1L, binCount);
return null;
}
initTable: initialization
The key field is sizeCtl. If sizeCtl is less than 0, another thread is already initializing, so the current thread calls Thread.yield(), dropping from running back to runnable. The thread that wins the initialization right CASes sizeCtl to -1; after initialization, sizeCtl holds 0.75 * n.
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
// another thread holds the initialization right; yield and retry
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
// CAS sizeCtl to -1, meaning we grabbed the initialization lock
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
// DEFAULT_CAPACITY, the default initial capacity, is 16
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
// allocate the array, length 16 or the length requested at construction
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
// publish the array via table, which is volatile
table = tab = nt;
// if n is 16, then sc = 12 here
// which is exactly 0.75 * n
sc = n - (n >>> 2);
}
} finally {
// store sc back into sizeCtl
sizeCtl = sc;
}
break;
}
}
return tab;
}
treeifyBin: when a bucket's list exceeds 8 elements, convert the list to a red-black tree.
private final void treeifyBin(Node<K,V>[] tab, int index) {
Node<K,V> b; int n;
if (tab != null) {
// if the table length is below 64 (MIN_TREEIFY_CAPACITY), prefer resizing over treeifying
if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
tryPresize(n << 1);
else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
// lock the list's head node
synchronized (b) {
if (tabAt(tab, index) == b) {
TreeNode<K,V> hd = null, tl = null;
for (Node<K,V> e = b; e != null; e = e.next) {
TreeNode<K,V> p =
new TreeNode<K,V>(e.hash, e.key, e.val,
null, null);
if ((p.prev = tl) == null)
hd = p;
else
tl.next = p;
tl = p;
}
// install a TreeBin as the bucket's head node
setTabAt(tab, index, new TreeBin<K,V>(hd));
}
}
}
}
}
private final void tryPresize(int size) {
int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
tableSizeFor(size + (size >>> 1) + 1);
int sc;
while ((sc = sizeCtl) >= 0) {
Node<K,V>[] tab = table; int n;
if (tab == null || (n = tab.length) == 0) {
n = (sc > c) ? sc : c;
if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
try {
if (table == tab) {
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
}
}
else if (c <= sc || n >= MAXIMUM_CAPACITY)
break;
else if (tab == table) {
int rs = resizeStamp(n);
if (U.compareAndSetInt(this, SIZECTL, sc,
(rs << RESIZE_STAMP_SHIFT) + 2))
transfer(tab, null);
}
}
}
private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
int n = tab.length, stride;
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
try {
@SuppressWarnings("unchecked")
// the new array is twice the old length: n << 1
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n;
}
int nextn = nextTab.length;
ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
boolean advance = true;
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node<K,V> f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSetInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
if (finishing) {
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1);
return;
}
if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
// if the bin is empty, CAS in the fwd node to mark it as processed
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
// if the head node's hash is MOVED (-1), it is a ForwardingNode: this bin was already processed
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
else {
// lock the bin's head node (the list's head or the tree's root)
synchronized (f) {
if (tabAt(tab, i) == f) {
Node<K,V> ln, hn;
if (fh >= 0) {
int runBit = fh & n;
Node<K,V> lastRun = f;
for (Node<K,V> p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
for (Node<K,V> p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node<K,V>(ph, pk, pv, ln);
else
hn = new Node<K,V>(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
else if (f instanceof TreeBin) {
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> lo = null, loTail = null;
TreeNode<K,V> hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node<K,V> e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode<K,V> p = new TreeNode<K,V>
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
// if a resulting tree would have <= 6 (UNTREEIFY_THRESHOLD) nodes, convert it back to a linked list
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin<K,V>(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin<K,V>(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
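A hedged illustration of the low/high split performed above: when the table doubles from n to 2n, a node in bin i either stays at bin i (when (hash & n) == 0) or moves to bin i + n, because the new index is (2n - 1) & hash:
public class TransferSplitDemo {
    public static void main(String[] args) {
        int n = 16;
        int hash = 53;                      // 0b110101, an example spread hash
        int oldIndex = (n - 1) & hash;      // 5
        int newIndex = (2 * n - 1) & hash;  // 21 == oldIndex + n, since (hash & n) != 0
        System.out.println(oldIndex + " -> " + newIndex);
    }
}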
4. remove
public V remove(Object key) {
return replaceNode(key, null, null);
}
final V replaceNode(Object key, V value, Object cv) {
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
boolean validated = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
if (value != null)
e.val = value;
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
}
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
else if (f instanceof ReservationNode)
throw new IllegalStateException("Recursive update");
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
5. replace
public V replace(K key, V value) {
if (key == null || value == null)
throw new NullPointerException();
return replaceNode(key, value, null);
}
6. get
- Compute the spread hash.
- If the bin holds a red-black tree (or another special node), fetch the value the tree way via find().
- Otherwise traverse the linked list for the value.
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
// compute the spread hash
int h = spread(key.hashCode());
// locate the bucket for the hash; proceed only if it is non-empty
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
// the head node's hash matches: compare keys and return directly
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
// eh < 0 means a TreeBin or ForwardingNode: delegate to its find()
else if (eh < 0)
return (p = e.find(h, key)) != null ? p.val : null;
// otherwise walk the bucket's linked list
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
// the find method (Node's default implementation)
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
IV. ConcurrentHashMap (JDK1.7)
1. Underlying data structure
In JDK1.7, ConcurrentHashMap consists of a Segment array, where each Segment holds an array of HashEntry buckets.
2. Segment (a ReentrantLock)
static final class Segment<K,V> extends ReentrantLock implements Serializable {
private static final long serialVersionUID = 2249069246763182397L;
// the HashEntry array that stores the data
transient volatile HashEntry<K,V>[] table;
transient int count;
// modCount: tracks structural modifications, used for fail-fast
transient int modCount;
transient int threshold;
// load factor
final float loadFactor;
}
As the code shows, Segment extends ReentrantLock (a reentrant lock). Unlike Hashtable, which synchronizes every put and get, ConcurrentHashMap in theory supports concurrencyLevel (the number of Segments) concurrent writers.
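For illustration, the third constructor argument is what fixed the Segment count in JDK1.7 (rounded up to a power of two); on JDK1.8+ the same constructor still exists but only treats the argument as a sizing hint, since Segments are gone:
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrencyLevelDemo {
    public static void main(String[] args) {
        // on JDK1.7 this creates 16 Segments, i.e. up to 16 concurrent writers
        Map<String, String> map = new ConcurrentHashMap<>(64, 0.75f, 16);
        map.put("k", "v");
        System.out.println(map);
    }
}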
3. HashEntry
HashEntry's value and its next pointer are marked volatile, ensuring that when one thread modifies them, the result is visible to other threads.
static final class HashEntry<K,V> {
final int hash;
final K key;
volatile V value;
volatile HashEntry<K,V> next;
HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
}
/**
* Sets next field with volatile write semantics. (See above
* about use of putOrderedObject.)
*/
final void setNext(HashEntry<K,V> n) {
UNSAFE.putOrderedObject(this, nextOffset, n);
}
// Unsafe mechanics
static final sun.misc.Unsafe UNSAFE; // can be thought of as a raw memory accessor
static final long nextOffset; // the field offset, loosely an in-object memory address
static {
try {
UNSAFE = sun.misc.Unsafe.getUnsafe(); // obtain the Unsafe instance
Class k = HashEntry.class;
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
// the offset of the next field relative to the start of a HashEntry object
// Unsafe can obtain the base memory address behind an object reference,
// and base address + field offset lets Unsafe read that field's value directly
} catch (Exception e) {
throw new Error(e);
}
}
}
4. put (thread-safe)
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
// first try to lock the segment: if tryLock() succeeds immediately, node = null;
// if it fails, scanAndLockForPut spins until the lock is acquired
HashEntry<K,V> node = tryLock() ? null :
scanAndLockForPut(key, hash, value);
V oldValue;
try {
HashEntry<K,V>[] tab = table;
int index = (tab.length - 1) & hash;
HashEntry<K,V> first = entryAt(tab, index); // head node of the bucket this <k,v> maps to within the segment
for (HashEntry<K,V> e = first;;) { // traverse the list headed by first
if (e != null) { // <1>
// e is non-null, so the slot has a hash collision: walk the list, and if a node with an equal key is found,
// let onlyIfAbsent decide whether to overwrite the existing value
K k;
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
// the key of the <k,v> being put already exists: decide whether to update, then break out of the loop
oldValue = e.value;
if (!onlyIfAbsent) {
e.value = value;
++modCount;
}
break;
}
e = e.next; // this node's key does not match; move on to the next one
}
else { // <2>
// e is null here, which can happen in two cases:
// 1) the loop in <1> reached the tail of the list without finding a matching node
// 2) e = first was null from the start (i.e. the bucket was empty)
if (node != null) // the lock may have been acquired by spinning inside scanAndLockForPut, in which case the matching node was already located or created there, so node is non-null
node.setNext(first);
else // or tryLock() succeeded on the first attempt, so no node was pre-allocated: create a new one for the <k,v> being inserted
node = new HashEntry<K,V>(hash, key, value, first);
int c = count + 1; // total count + 1; the lock is held here, so this is thread-safe
if (c > threshold && tab.length < MAXIMUM_CAPACITY) // check whether a resize is needed
// resizing allocates a brand-new HashEntry array at twice the old capacity,
// then reinserts every HashEntry from the old table one by one, so it is expensive and best avoided
// after the resize finishes, the new node is inserted into the new array
rehash(node);
else
// no resize needed: insert node directly at the computed index
setEntryAt(tab, index, node);
++modCount;
count = c;
oldValue = null;
break;
}
}
} finally {
unlock();
}
return oldValue;
}