Problems with HashMap in Concurrent Scenarios
- During a resize, concurrent reads can fail to find entries that are actually in the map
- Concurrent writes can be silently lost
The source code shown in this article is from JDK 1.8.0_401.
Resizing causes concurrent reads to fail
HashMap's resize code is shown below. Resizing happens in three steps:
- Allocate a new, larger array.
- Point the table field at the new array immediately.
- Copy the entries from the old array into the new one, bucket by bucket (this can take a while for a large map).
In the window between the end of step 2 and the end of step 3, the published table is still mostly empty, so a concurrent get can return null for a key that is actually in the map (a minimal sketch after the code isolates this window).
final Node<K,V>[] resize() {
    // 1. allocate the new, larger array
    Node<K,V>[] oldTab = table;
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    int oldThr = threshold;
    int newCap, newThr = 0;
    if (oldCap > 0) {
        if (oldCap >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                 oldCap >= DEFAULT_INITIAL_CAPACITY)
            newThr = oldThr << 1; // double threshold
    }
    else if (oldThr > 0) // initial capacity was placed in threshold
        newCap = oldThr;
    else { // zero initial threshold signifies using defaults
        newCap = DEFAULT_INITIAL_CAPACITY;
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    if (newThr == 0) {
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                  (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr;
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    // 2. point table at the new array immediately; it is still empty here
    table = newTab;
    // 3. copy the entries from the old array into the new one
    if (oldTab != null) {
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) {
                oldTab[j] = null;
                if (e.next == null)
                    newTab[e.hash & (newCap - 1)] = e;
                else if (e instanceof TreeNode)
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                else { // preserve order
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;
                        if ((e.hash & oldCap) == 0) {
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);
                    if (loTail != null) {
                        loTail.next = null;
                        newTab[j] = loHead;
                    }
                    if (hiTail != null) {
                        hiTail.next = null;
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
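To isolate the read-failure window, here is a minimal sketch of the same publish-then-copy ordering. It is my own simplified model with made-up names (ResizeWindowSketch, published), not HashMap's actual code: the new array is published while still empty, and a reader that arrives before the copy finishes sees null for a value that is logically in the map.

import java.util.concurrent.CountDownLatch;

public class ResizeWindowSketch {
    // stands in for HashMap.table; volatile so the reader sees the swap immediately
    static volatile String[] table = {"v0", "v1"};

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch published = new CountDownLatch(1);
        Thread resizer = new Thread(() -> {
            String[] newTab = new String[4];   // step 1: allocate a larger array
            table = newTab;                    // step 2: publish it while still empty
            published.countDown();
            try {
                Thread.sleep(100);             // step 3: copy, deliberately slowed down
            } catch (InterruptedException ignored) {
            }
            newTab[0] = "v0";
            newTab[1] = "v1";
        });
        resizer.start();
        published.await();
        // the read lands between step 2 and step 3: the slot is still null
        System.out.println("read during resize: " + table[0]); // prints null
        resizer.join();
    }
}

A real resize copies far faster than this artificial sleep, but under sustained load the window is hit often enough, as the verification section below shows.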
Concurrent writes can be lost
Look at the put source code: there are two places where, if several threads execute them at the same time, only one thread's value survives. Both are check-then-act sequences: a thread first observes an empty slot, then writes a new node into it; two threads can both pass the check, and the later write silently overwrites the earlier node (a minimal sketch follows the code).
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
               boolean evict) {
    Node<K,V>[] tab; Node<K,V> p; int n, i;
    if ((tab = table) == null || (n = tab.length) == 0)
        n = (tab = resize()).length;
    if ((p = tab[i = (n - 1) & hash]) == null)
        // if several threads reach this line at once, only one thread's
        // new node survives: the later write overwrites the earlier one
        tab[i] = newNode(hash, key, value, null);
    else {
        Node<K,V> e; K k;
        if (p.hash == hash &&
            ((k = p.key) == key || (key != null && key.equals(k))))
            e = p;
        else if (p instanceof TreeNode)
            e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
        else {
            for (int binCount = 0; ; ++binCount) {
                if ((e = p.next) == null) {
                    // same race as above: several threads can all see a
                    // null tail, and only one appended node survives
                    p.next = newNode(hash, key, value, null);
                    if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                        treeifyBin(tab, hash);
                    break;
                }
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    break;
                p = e;
            }
        }
        if (e != null) { // existing mapping for key
            V oldValue = e.value;
            if (!onlyIfAbsent || oldValue == null)
                e.value = value;
            afterNodeAccess(e);
            return oldValue;
        }
    }
    ++modCount;
    if (++size > threshold)
        resize();
    afterNodeInsertion(evict);
    return null;
}
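The lost update above is a classic check-then-act race. The following sketch is a simplified model, not HashMap itself (LostUpdateSketch and the one-slot bucket array are made-up stand-ins for tab[i]); a barrier forces both threads past the null check before either writes, so one node is deterministically discarded:

import java.util.concurrent.CyclicBarrier;

public class LostUpdateSketch {
    // one bucket slot, standing in for tab[i] in putVal
    static final String[] bucket = new String[1];

    public static void main(String[] args) throws Exception {
        CyclicBarrier barrier = new CyclicBarrier(2);
        Runnable put = () -> {
            try {
                if (bucket[0] == null) {  // check: the slot looks empty...
                    barrier.await();      // ...both threads get this far...
                    bucket[0] = Thread.currentThread().getName(); // ...act: later write wins
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        };
        Thread a = new Thread(put, "A");
        Thread b = new Thread(put, "B");
        a.start();
        b.start();
        a.join();
        b.join();
        // both threads believe their put succeeded, but only one value survives
        System.out.println("surviving value: " + bucket[0]);
    }
}

In the real putVal there is no barrier, so the overlap is rare rather than guaranteed, which is exactly why the bug only shows up under sustained concurrent load.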
Verification
Here is a program to verify the hypothesis: 10 million concurrent reads and writes against a HashMap.
import java.util.HashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class HashMapTest {
    // AtomicInteger rather than a volatile int: "++" on a volatile field is not
    // atomic, so the failure counter itself would otherwise lose updates
    public static final AtomicInteger missingCount = new AtomicInteger();

    public static void main(String[] args) {
        HashMap<Integer, Integer> map = new HashMap<>();
        int coreSize = Runtime.getRuntime().availableProcessors();
        ThreadPoolExecutor es = new ThreadPoolExecutor(coreSize, coreSize, 1, TimeUnit.MINUTES,
                new LinkedBlockingQueue<Runnable>(100),
                new ThreadPoolExecutor.CallerRunsPolicy());
        int max = 10000000;
        int testMissingKey = 10001;
        // written once up front; any later get() returning null is a failed read
        map.put(testMissingKey, 0);
        for (int i = 1; i <= max; i++) {
            int finalI = i;
            es.submit(() -> {
                System.out.println(finalI);
                map.put(finalI, finalI);
                Integer value = map.get(testMissingKey);
                if (value == null) {
                    missingCount.incrementAndGet();
                }
            });
        }
        try {
            es.shutdown();
            es.awaitTermination(1, TimeUnit.MINUTES);
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.out.println((max - map.size()) + " values went missing");
        System.out.println(missingCount.get() + " concurrent reads failed");
    }
}
Program output (only the tail is shown). Out of 10 million reads and writes, 340 reads failed and 901 values were lost:
9999917
9999916
9999915
9999912
9999909
901 values went missing
340 concurrent reads failed
HashMap offers no safety guarantees under concurrent access; in concurrent scenarios, use ConcurrentHashMap instead (a sketch of the swap follows).
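As a cross-check, the same experiment can be rerun with a ConcurrentHashMap. The sketch below is my condensed rewrite of the harness (a parallel stream replaces the thread pool for brevity); the expected outcome of zero for both counters follows from ConcurrentHashMap's guarantees rather than from a run recorded here:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class ConcurrentHashMapTest {
    public static void main(String[] args) {
        ConcurrentMap<Integer, Integer> map = new ConcurrentHashMap<>();
        AtomicInteger missingCount = new AtomicInteger();
        int max = 10000000;
        int testMissingKey = 10001;
        map.put(testMissingKey, 0);
        // parallel puts and gets; ConcurrentHashMap resizes cooperatively with
        // readers, so the pre-inserted key stays visible throughout
        IntStream.rangeClosed(1, max).parallel().forEach(i -> {
            map.put(i, i);
            if (map.get(testMissingKey) == null) {
                missingCount.incrementAndGet();
            }
        });
        System.out.println((max - map.size()) + " values went missing");      // expected: 0
        System.out.println(missingCount.get() + " concurrent reads failed");  // expected: 0
    }
}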