The Design Beauty of ConcurrentHashMap

ConcurrentHashMap is the concurrency and thread-safety class that Java engineers touch most often. This post analyzes the beauty of its design in detail across the jdk1.6, jdk1.7, and jdk1.8 implementations.

Structural overview

First, a quick overall look at how ConcurrentHashMap is structured.

JDK 1.6 and JDK 1.7

The structure of ConcurrentHashMap in these versions: each Segment holds a HashEntry[] table, and every element of that table is essentially a one-way queue of HashEntry nodes (implemented as a singly linked list).
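Schematically, the nesting looks like this (a sketch, not the full source):

// ConcurrentHashMap (JDK 1.6/1.7)
//   └─ Segment<K,V>[] segments         // each Segment extends ReentrantLock: one lock per segment
//        └─ HashEntry<K,V>[] table     // the hash buckets owned by that segment
//             └─ HashEntry → HashEntry → ...   // each bucket is a singly linked chain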

JDK 1.8

This version abandons the Segment concept and implements the map directly as a Node array plus linked lists plus red-black trees, using synchronized and CAS for concurrency control; the whole thing looks like an optimized, thread-safe HashMap. The elements stored in the Node array include TreeBin, ForwardingNode, and Node. With Segments gone, the concurrency level is effectively the number of hash buckets, i.e., the capacity: it changes as the table grows and is no longer a fixed value.
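Schematically (again a sketch, not the full source):

// ConcurrentHashMap (JDK 1.8)
//   └─ Node<K,V>[] table               // CAS into empty bins; synchronized on the head node otherwise
//        ├─ Node → Node → ...          // a short chain stays a linked list
//        ├─ TreeBin                    // fronts a red-black tree of TreeNodes once a bin grows long
//        └─ ForwardingNode             // marks a bin already migrated during a resize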

Constructors

Terminology

initialCapacity: the initial capacity, meaning the total number of hash buckets across all Segments; default 16.

concurrencyLevel: the maximum concurrency level, which is also the maximum length of the segments array; it never changes after initialization — what actually grows is each segment's bucket array; default 16.

loadFactor: each Segment's load factor; a segment resizes once its element count exceeds threshold = bucketCount * loadFactor.
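To make the three parameters concrete, here is a minimal sketch (plain arithmetic mirroring the 1.6/1.7 constructor shown later; the class name and main method are scaffolding for illustration) of how initialCapacity is split across segments:

public class SegmentSizingDemo {
    public static void main(String[] args) {
        int initialCapacity = 33, concurrencyLevel = 16;
        float loadFactor = 0.75f;
        int ssize = 1;                          // segment count: power of two >= concurrencyLevel
        while (ssize < concurrencyLevel) ssize <<= 1;
        int c = initialCapacity / ssize;        // buckets needed per segment
        if (c * ssize < initialCapacity) ++c;
        int cap = 1;                            // per-segment buckets, rounded up to a power of two
        while (cap < c) cap <<= 1;
        System.out.println("segments=" + ssize + ", bucketsPerSegment=" + cap
                + ", thresholdPerSegment=" + (int) (cap * loadFactor));
        // prints: segments=16, bucketsPerSegment=4, thresholdPerSegment=3
    }
}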

Unsafe: the mechanism jdk1.7 and jdk1.8 lean on throughout; see the article linked in the references.

<------------------------------ JDK 1.7 -------------------------------------->
private static final sun.misc.Unsafe UNSAFE;
// offset of the first element of the Segment array from the start of the array object
private static final long SBASE;
// shift for the size of a Segment array element: to write element i,
// use unsafe.putOrderedObject(segments, (i << SSHIFT) + SBASE, element);
private static final int SSHIFT;
// offset of the first element of the HashEntry array from the start of the array object
private static final long TBASE;
// shift for the size of a HashEntry array element
private static final int TSHIFT;
// offsets of the ConcurrentHashMap fields hashSeed, segmentShift, segmentMask, segments
private static final long HASHSEED_OFFSET;
private static final long SEGSHIFT_OFFSET;
private static final long SEGMASK_OFFSET;
private static final long SEGMENTS_OFFSET;
static {
int ss, ts;
try {
UNSAFE = sun.misc.Unsafe.getUnsafe();
Class tc = HashEntry[].class;
Class sc = Segment[].class;
//get the offset of the first element in the array
TBASE = UNSAFE.arrayBaseOffset(tc);
SBASE = UNSAFE.arrayBaseOffset(sc);
//get the size (scale) of an element in the array
ts = UNSAFE.arrayIndexScale(tc);
ss = UNSAFE.arrayIndexScale(sc);
//hashSeed = randomHashSeed(this): a random seed used when computing key hashes
HASHSEED_OFFSET = UNSAFE.objectFieldOffset(ConcurrentHashMap.class.getDeclaredField("hashSeed"));
SEGSHIFT_OFFSET = UNSAFE.objectFieldOffset(ConcurrentHashMap.class.getDeclaredField("segmentShift"));
SEGMASK_OFFSET = UNSAFE.objectFieldOffset(ConcurrentHashMap.class.getDeclaredField("segmentMask"));
SEGMENTS_OFFSET = UNSAFE.objectFieldOffset(ConcurrentHashMap.class.getDeclaredField("segments"));
} catch (Exception e) {
throw new Error(e);
}
if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
throw new Error("data type scale not a power of two");
SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
}
<--------------------------------- JDK 1.8 --------------------------------->
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long SIZECTL;
private static final long TRANSFERINDEX;
private static final long BASECOUNT;
private static final long CELLSBUSY;
private static final long CELLVALUE;
private static final long ABASE;
private static final int ASHIFT;
static {
try {
U = sun.misc.Unsafe.getUnsafe();
Class<?> k = ConcurrentHashMap.class;
SIZECTL = U.objectFieldOffset
(k.getDeclaredField("sizeCtl"));
TRANSFERINDEX = U.objectFieldOffset
(k.getDeclaredField("transferIndex"));
BASECOUNT = U.objectFieldOffset
(k.getDeclaredField("baseCount"));
CELLSBUSY = U.objectFieldOffset
(k.getDeclaredField("cellsBusy"));
Class<?> ck = CounterCell.class;
CELLVALUE = U.objectFieldOffset
(ck.getDeclaredField("value"));
Class<?> ak = Node[].class;
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}

sizeCtl: specific to jdk1.8, it controls table initialization and resizing. sizeCtl = -1 means the table is being initialized; sizeCtl = 0 means the no-arg constructor was used and no initial capacity was given; sizeCtl > 0 holds either the initial capacity chosen by a sizing constructor (before the table exists) or, once the table is created, the next resize threshold. While a resize is in progress sizeCtl is a large negative number: its high RESIZE_STAMP_BITS bits hold a stamp derived from the current capacity, and its low bits hold one plus the number of threads currently helping the resize.
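For reference, the pieces of the 1.8 source that encode this, where RESIZE_STAMP_BITS = 16 and RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS:

// the stamp identifying a resize that starts from capacity n
static final int resizeStamp(int n) {
    return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
}
// the thread that starts a transfer CASes sizeCtl to (resizeStamp(n) << RESIZE_STAMP_SHIFT) + 2;
// each helper CASes it to sc + 1 and back down when it finishes, so while a resize is running
// the low 16 bits of sizeCtl always hold one plus the number of active resizing threads.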

JDK 1.6

public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
//default loadFactor = DEFAULT_LOAD_FACTOR = 0.75f
if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
//default concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL = 16;
//MAX_SEGMENTS = 1<<16 is the maximum length of the segments array, i.e. the maximum concurrency level;
if (concurrencyLevel > MAX_SEGMENTS)
concurrencyLevel = MAX_SEGMENTS;
//round ssize up to a power of two (2^n); default ssize = 16
int sshift = 0;
int ssize = 1;
while (ssize < concurrencyLevel) {
++sshift;
ssize <<= 1;
}
//shift applied to the hash when locating the Segment index; by default sshift = 4, so segmentShift = 32 - 4 = 28
segmentShift = 32 - sshift;
//mask used in the & computation of the Segment index; 0x0f (decimal 15) with the default constructor
segmentMask = ssize - 1;
this.segments = Segment.newArray(ssize);
//default initialCapacity = DEFAULT_INITIAL_CAPACITY = 16
//MAXIMUM_CAPACITY = 1<<30
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
//determine how many hash buckets each segment starts with, rounded up to a power of two (default cap = 1); once the segments are constructed this cap value is never used again
int c = initialCapacity / ssize;
if (c * ssize < initialCapacity)
++c;
int cap = 1;
while (cap < c)
cap <<= 1;
//with cap determined, initialize every segment's table
for (int i = 0; i < this.segments.length; ++i)
this.segments[i] = new Segment<K,V>(cap, loadFactor);
}
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap() {
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, DEFAULT_INITIAL_CAPACITY),
DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
putAll(m);
}

JDK 1.7

public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (concurrencyLevel > MAX_SEGMENTS)
concurrencyLevel = MAX_SEGMENTS;
// Find power-of-two sizes best matching arguments
int sshift = 0;
int ssize = 1;
while (ssize < concurrencyLevel) {
++sshift;
ssize <<= 1;
}
this.segmentShift = 32 - sshift;
this.segmentMask = ssize - 1;
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
int c = initialCapacity / ssize;
if (c * ssize < initialCapacity)
++c;
//cap = MIN_SEGMENT_TABLE_CAPACITY = 2: each segment's table starts with at least 2 buckets,
//whereas jdk1.6 defaulted to cap = 1
int cap = MIN_SEGMENT_TABLE_CAPACITY;
while (cap < c)
cap <<= 1;
//unlike jdk1.6, only the segments array and segments[0] are constructed here — lazy loading
Segment<K,V> s0 = new Segment<K,V>(loadFactor, (int)(cap * loadFactor), (HashEntry<K,V>[])new HashEntry[cap]);
Segment<K,V>[] ss = (Segment<K,V>[])new Segment[ssize];
//the first element of ss is written via UNSAFE; the write is ordered but not immediately visible to other threads
UNSAFE.putOrderedObject(ss, SBASE, s0);
this.segments = ss;
}
// the remaining constructors are the same as in 1.6

JDK 1.8

//this constructor does nothing; sizeCtl has not been assigned yet
public ConcurrentHashMap() {
}
//compute cap as a power of two — note it is derived from initialCapacity + initialCapacity/2 + 1, not from initialCapacity alone
public ConcurrentHashMap(int initialCapacity) {
if (initialCapacity < 0)
throw new IllegalArgumentException();
int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
MAXIMUM_CAPACITY :
tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
//assign sizeCtl
this.sizeCtl = cap;
}
private static final int tableSizeFor(int c) {
int n = c - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
//default sizeCtl = 16
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
putAll(m);
}
//kept for backward compatibility
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, 1);
}
//loadFactor and concurrencyLevel lose their original meaning and survive only for compatibility with older versions; construction is lazy, and initTable() runs later inside put. For example, with initialCapacity = 16 and loadFactor = 0.75, size = 1 + 16/0.75 ≈ 22.33 (truncated to 22), so sizeCtl = tableSizeFor(22) = 2^5 = 32
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (initialCapacity < concurrencyLevel)
initialCapacity = concurrencyLevel;
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
int cap = (size >= (long)MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY : tableSizeFor((int)size);
this.sizeCtl = cap;
}

Basic classes

HashEntry and Segment

JDK 1.6

static final class HashEntry<K,V> {
final K key;
final int hash;
volatile V value;
final HashEntry<K,V> next;
HashEntry(K key, int hash, HashEntry<K,V> next, V value) {
this.key = key;
this.hash = hash;
this.next = next;
this.value = value;
}
@SuppressWarnings("unchecked")
static final <K,V> HashEntry<K,V>[] newArray(int i) {
return new HashEntry[i];
}
}
static final class Segment<K,V> extends ReentrantLock implements Serializable {
private static final long serialVersionUID = 2249069246763182397L;
//total number of HashEntry nodes in this segment's table; volatile, so concurrent reads can avoid pointless work
transient volatile int count;
//reflects whether the segment has been structurally modified
transient int modCount;
// resize threshold for the table
transient int threshold;
// the bucket array, analogous to HashMap's table
transient volatile HashEntry<K,V>[] table;
// load factor used to derive the threshold; identical for every segment
final float loadFactor;
Segment(int initialCapacity, float lf) {
loadFactor = lf;
setTable(HashEntry.<K,V>newArray(initialCapacity));
}
@SuppressWarnings("unchecked")
static final <K,V> Segment<K,V>[] newArray(int i) {
return new Segment[i];
}
// may only be called while holding this segment's lock, or from the constructor
void setTable(HashEntry<K,V>[] newTable) {
//with defaults newTable.length = 1 here, so threshold = 0
threshold = (int)(newTable.length * loadFactor);
table = newTable;
}
// same algorithm as indexFor
HashEntry<K,V> getFirst(int hash) {
HashEntry<K,V>[] tab = table;
return tab[hash & (tab.length - 1)];
}
// locked read of a value, called when a plain read returns null. The source comment explains: a null value can only be seen if some reordering of the HashEntry initialization code lets the volatile field's initialization escape the constructor — legal under the old memory model, though it is unclear whether it ever happens.
// discussed on stackoverflow: http://stackoverflow.com/questions/5002428/concurrenthashmap-reorder-instruction/5144515#5144515
// under the new memory model value == null cannot occur; jdk1.7 removed this workaround
V readValueUnderLock(HashEntry<K,V> e) {
lock();
try {
return e.value;
} finally {
unlock();
}
}
// the methods below are the usual operations: get, put, remove, contains, and so on
// get reads the value without locking; only when it reads value == null does it lock and read again
V get(Object key, int hash) {
if (count != 0) { // read-volatile
HashEntry<K,V> e = getFirst(hash);
while (e != null) {
if (e.hash == hash && key.equals(e.key)) {
V v = e.value;
if (v != null)
return v;
return readValueUnderLock(e); // recheck
}
e = e.next;
}
}
return null;
}
boolean containsKey(Object key, int hash) {
if (count != 0) { // read-volatile
HashEntry<K,V> e = getFirst(hash);
while (e != null) {
if (e.hash == hash && key.equals(e.key))
return true;
e = e.next;
}
}
return false;
}
//similar to get
boolean containsValue(Object value) {
if (count != 0) { // read-volatile
HashEntry<K,V>[] tab = table;
int len = tab.length;
for (int i = 0 ; i < len; i++) {
for (HashEntry<K,V> e = tab[i]; e != null; e = e.next) {
V v = e.value;
if (v == null) // recheck
v = readValueUnderLock(e);
if (value.equals(v))
return true;
}
}
}
return false;
}
//write operation: replace
boolean replace(K key, int hash, V oldValue, V newValue) {
lock();
try {
HashEntry<K,V> e = getFirst(hash);
while (e != null && (e.hash != hash || !key.equals(e.key)))
e = e.next;
boolean replaced = false;
if (e != null && oldValue.equals(e.value)) {
// replace does not bump modCount, which weakens containsValue's accuracy; jdk1.7 fixed this
replaced = true;
e.value = newValue;
}
return replaced;
} finally {
unlock();
}
}
//write operation: replace
V replace(K key, int hash, V newValue) {
lock();
try {
HashEntry<K,V> e = getFirst(hash);
while (e != null && (e.hash != hash || !key.equals(e.key)))
e = e.next;
V oldValue = null;
if (e != null) {
// replace does not bump modCount, which weakens containsValue's accuracy; jdk1.7 fixed this
oldValue = e.value;
e.value = newValue;
}
return oldValue;
} finally {
unlock();
}
}
// write operation: put
V put(K key, int hash, V value, boolean onlyIfAbsent) {
lock();
try {
int c = count;
//resize before adding the node — the 1.6 HashMap adds first and resizes after. Because the test is c++ > threshold,
//with threshold = 12 the 13th put on this Segment evaluates 12++ > 12 (false) and does not resize; only the 14th (13++ > 12) does.
//It does guarantee that, by default, the second put resizes: every Segment starts with table.length = 1 and threshold = 0,
//so the first put sees 0++ > 0 (false) and the second sees 1++ > 0 (true).
if (c++ > threshold) // ensure capacity
rehash();
HashEntry<K,V>[] tab = table;
int index = hash & (tab.length - 1); // same index computation as the 1.6 HashMap
HashEntry<K,V> first = tab[index];
HashEntry<K,V> e = first;
while (e != null && (e.hash != hash || !key.equals(e.key)))
e = e.next;
V oldValue;
if (e != null) {
oldValue = e.value;
if (!onlyIfAbsent)
// putting an existing key (effectively a replace) does not bump modCount, weakening containsValue's accuracy; jdk1.7 fixed this
e.value = value;
}
else {
oldValue = null;
++modCount;
// new nodes also go at the head of the HashEntry chain; as noted, HashEntry.next is final and cannot change once constructed
tab[index] = new HashEntry<K,V>(key, hash, first, value);
count = c;
}
return oldValue;
} finally {
unlock();
}
}
// to leave in-flight readers undisturbed, the safest resize would clone and re-add every node
// instead, the code exploits how nodes migrate on resize to reuse as many of them as possible — the same idea as the high/low-bit split in the 1.8 HashMap, and it requires the hash field to be final
void rehash() {
HashEntry<K,V>[] oldTable = table;
int oldCapacity = oldTable.length;
if (oldCapacity >= MAXIMUM_CAPACITY)
return;
// nodes sharing a bucket before the resize have only two possible destinations afterwards, computed here as e.hash & sizeMask;
// using that destination, find the longest trailing run of the chain whose destinations all agree — that run can be reused without copying;
// HashEntry.next is final, so resize/rehash must allocate new nodes; the trick is to reuse the tail of each chain as far as possible and minimize re-allocation;
// the author notes that statistically, under default settings, only about 1/6 of the nodes need to be cloned, so this usually saves real time;
HashEntry<K,V>[] newTable = HashEntry.newArray(oldCapacity<<1);
threshold = (int)(newTable.length * loadFactor); // shares a small issue with the 1.6 HashMap: this can prematurely reach Integer.MAX_VALUE, after which no further resize ever happens
int sizeMask = newTable.length - 1;
for (int i = 0; i < oldCapacity ; i++) {
// We need to guarantee that any existing reads of old Map can proceed. So we cannot yet null out each bin. To keep concurrent readers working, the old table is never manually nulled; the table reference is modified exactly once, at the very end
HashEntry<K,V> e = oldTable[i];
if (e != null) {
HashEntry<K,V> next = e.next;
//
int idx = e.hash & sizeMask;
// Single node on list
if (next == null)
newTable[idx] = e;
else {
// Reuse trailing consecutive sequence at same slot
HashEntry<K,V> lastRun = e;
int lastIdx = idx;
// this loop finds the longest reusable tail of the HashEntry chain
// as the 1.8 HashMap shows, when hashes are final the nodes of one chain split into at most two destinations on a resize
// within the reusable tail every node shares the same destination, so none of them needs to be copied
for (HashEntry<K,V> last = next;
last != null;
last = last.next) {
int k = last.hash & sizeMask;
if (k != lastIdx) {
lastIdx = k;
lastRun = last;
}
}
newTable[lastIdx] = lastRun; // place the reusable tail into its new bucket in one step
// clone the non-reusable part and head-insert each node into its rehashed chain
for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
int k = p.hash & sizeMask;
HashEntry<K,V> n = newTable[k];
newTable[k] = new HashEntry<K,V>(p.key, p.hash, n, p.value);
}
// note that the reused tail keeps its relative order after the rehash and still sits at the end of its chain
// the non-reusable nodes are re-added one by one at the head, so nodes that land on the same chain come out in reversed relative order
// hence ConcurrentHashMap's iteration order can change — it never guaranteed a stable order anyway
}
}
}
table = newTable;
}
// because 1.6's HashEntry.next is final, remove is more involved than a plain list removal: only the nodes after the removed one can be reused — everything before it must be cloned and re-inserted
V remove(Object key, int hash, Object value) {
lock();
try {
int c = count - 1;
HashEntry<K,V>[] tab = table;
int index = hash & (tab.length - 1);
HashEntry<K,V> first = tab[index];
HashEntry<K,V> e = first;
//locate the node e to delete
while (e != null && (e.hash != hash || !key.equals(e.key)))
e = e.next;
V oldValue = null;
if (e != null) {
V v = e.value;
if (value == null || value.equals(v)) {
oldValue = v;
// All entries following removed node can stay in list, but all preceding ones need to be cloned. Because next is final, deletion cannot simply splice the list:
// the nodes before the removed one are cloned and re-inserted while those after it are reused; afterwards the reused part keeps its order at the tail and the cloned part sits reversed at the front
++modCount;
//e.next becomes newFirst; each node before e is then cloned and head-inserted to rebuild the chain
HashEntry<K,V> newFirst = e.next;
for (HashEntry<K,V> p = first; p != e; p = p.next)
newFirst = new HashEntry<K,V>(p.key, p.hash, newFirst, p.value);
tab[index] = newFirst;
count = c; // write-volatile
}
}
return oldValue;
} finally {
unlock();
}
}
//remove all elements
void clear() {
if (count != 0) {
lock();
try {
HashEntry<K,V>[] tab = table;
for (int i = 0; i < tab.length ; i++)
tab[i] = null;
++modCount;
count = 0; // write-volatile
} finally {
unlock();
}
}
}
}

Appendix:

The jdk1.6 resize diagram. Suppose a segment's table has size 2 and the new table has size 4, and table[0] holds a chain of 7 HashEntry nodes. After e.hash & sizeMask, nodes 10-14-18 land in newtable[3], 6 also lands in newtable[3], and 2-8-4 land in newtable[0]. The resize proceeds like this: rehash first finds the reusable part — 10-14-18 in the figure — and moves it wholesale to newtable[3] via newTable[lastIdx] = lastRun, with the nodes' relative order unchanged. The non-reusable part can only be traversed and cloned, head-inserted into the new table: 6 is head-inserted into newtable[3], so that chain reads 6-10-14-18, while 2-8-4 are head-inserted into newtable[0] and come out reversed. Only after all of this does table = newTable: to keep concurrent readers working, the old table is never manually nulled — the table reference is modified exactly once at the end. The old table is then reclaimed by the GC.
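The walkthrough leans on the "only two destinations" property. A minimal sketch (hash values picked arbitrarily for illustration, not taken from the figure) that demonstrates it:

public class SplitDemo {
    public static void main(String[] args) {
        int oldCap = 2, newCap = 4;                      // a doubling, as in the walkthrough
        for (int h : new int[]{5, 8, 11, 14, 6, 3}) {
            int oldIdx = h & (oldCap - 1);
            int newIdx = h & (newCap - 1);
            // newIdx is always either oldIdx or oldIdx + oldCap: a chain splits into at most two chains
            System.out.println("hash " + h + ": bucket " + oldIdx + " -> bucket " + newIdx);
        }
    }
}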

The jdk1.6 delete diagram. Suppose we delete the node 98 from segment[0].table[2].

JDK 1.7

static final class HashEntry<K,V> {
final int hash; // hash is final; in the 1.7 HashMap it is not — final hashes are friendlier to resizing
final K key;
volatile V value;
volatile HashEntry<K,V> next; // in jdk1.7 next is no longer final but volatile, updated through setNext (built on an Unsafe primitive)
HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
}
// putOrderedObject only takes effect on volatile fields: it guarantees ordering among writes but not immediate visibility to other threads — a lazySet, cheaper than a volatile write but with only "half" of volatile's effect;
// an ordinary volatile write guarantees its result is immediately visible to other threads, whether they go on to read or to write;
// putOrderedObject guarantees another thread performing a write will see the change, but a thread that only reads may not see it right away;
final void setNext(HashEntry<K,V> n) {
UNSAFE.putOrderedObject(this, nextOffset, n);
}
// static setup of the Unsafe machinery
static final sun.misc.Unsafe UNSAFE;
static final long nextOffset;
static {
try {
UNSAFE = sun.misc.Unsafe.getUnsafe();
Class k = HashEntry.class;
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
} catch (Exception e) {
throw new Error(e);
}
}
}
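The same ordered-write idea is available through the public API; a minimal sketch using AtomicReference.lazySet (a stand-in chosen here to illustrate the semantics, not what the JDK source uses):

import java.util.concurrent.atomic.AtomicReference;

public class LazySetDemo {
    public static void main(String[] args) {
        AtomicReference<String> ref = new AtomicReference<>("old");
        ref.set("visible-now");       // full volatile write: immediately visible to every reader
        ref.lazySet("visible-soon");  // ordered store, like UNSAFE.putOrderedObject: never reordered
                                      // with this thread's earlier writes, but a pure reader may not
                                      // observe it immediately
        System.out.println(ref.get());
    }
}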
static final class Segment<K,V> extends ReentrantLock implements Serializable {
private static final long serialVersionUID = 2249069246763182397L;
static final int MAX_SCAN_RETRIES = Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; // retry budget
transient volatile HashEntry<K,V>[] table;
transient int count;
transient int modCount;
transient int threshold;
final float loadFactor;
Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
this.loadFactor = lf;
this.threshold = threshold;
this.table = tab;
}
// jdk1.6's newArray and setTable were trivial and were simply dropped;
// jdk1.6's get, containsKey and containsValue are all similar read operations, now built on Unsafe's low-level primitives;
// jdk1.6's getFirst was also trivial, but Unsafe strengthens it here:
// it now performs a volatile read of the array element itself, where 1.6 only made the read of the array reference volatile;
// jdk1.6's readValueUnderLock is gone entirely in 1.7;
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
HashEntry<K,V> node = tryLock() ? null : scanAndLockForPut(key, hash, value); // see the comment on scanAndLockForPut
V oldValue;
try {
HashEntry<K,V>[] tab = table;
int index = (tab.length - 1) & hash;
HashEntry<K,V> first = entryAt(tab, index);
for (HashEntry<K,V> e = first;;) {
if (e != null) {
K k;
if ((k = e.key) == key || (e.hash == hash && key.equals(k))) {
oldValue = e.value;
if (!onlyIfAbsent) {
e.value = value;
++modCount; // in 1.7 putting an existing key (effectively a replace) bumps modCount — 1.6 did not — which better protects containsValue's accuracy
}
break;
}
e = e.next;
}
else {
if (node != null)
node.setNext(first); // try to add at the head of the chain
else
node = new HashEntry<K,V>(hash, key, value, first);
int c = count + 1; // increment first
if (c > threshold && tab.length < MAXIMUM_CAPACITY) // the maximum-capacity case is handled here inside put
rehash(node);
else
setEntryAt(tab, index, node); // no resize needed: the new node simply becomes the head
++modCount;
count = c;
oldValue = null;
break;
}
}
} finally {
unlock();
}
return oldValue;
}
// 1.7's rehash takes a parameter — the node about to be put — so the new rehash carries part of put's work
// the node-migration idea is otherwise the same as in 1.6
@SuppressWarnings("unchecked")
private void rehash(HashEntry<K,V> node) {
HashEntry<K,V>[] oldTable = table;
int oldCapacity = oldTable.length;
int newCapacity = oldCapacity << 1;
threshold = (int)(newCapacity * loadFactor);
HashEntry<K,V>[] newTable = (HashEntry<K,V>[]) new HashEntry[newCapacity];
int sizeMask = newCapacity - 1;
for (int i = 0; i < oldCapacity ; i++) {
HashEntry<K,V> e = oldTable[i];
if (e != null) {
HashEntry<K,V> next = e.next;
int idx = e.hash & sizeMask;
if (next == null) // Single node on list: the trivial case
newTable[idx] = e;
else { // Reuse consecutive sequence at same slot: reuse the longest trailing run of nodes that all land in the same new bucket, and mark where it starts
HashEntry<K,V> lastRun = e;
int lastIdx = idx;
for (HashEntry<K,V> last = next;
last != null;
last = last.next) {
int k = last.hash & sizeMask;
if (k != lastIdx) {
lastIdx = k;
lastRun = last;
}
}
newTable[lastIdx] = lastRun;
// Clone remaining nodes: copy the non-reusable nodes before the mark and re-add them to their buckets in the new array
for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
V v = p.value;
int h = p.hash;
int k = h & sizeMask;
HashEntry<K,V> n = newTable[k];
newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
}
}
}
}
int nodeIndex = node.hash & sizeMask; // add the new node: the embedded put step — push the new node onto the head of its chain
node.setNext(newTable[nodeIndex]);
newTable[nodeIndex] = node;
table = newTable;
}
// written for put: does preparatory work while trying to acquire the lock
// when the lock is unavailable it attempts the preparation a bounded number of times — "preparation" means traversing the chain, pre-creating the node to insert, and watching whether the chain changes
// with luck the node to put already exists once the lock is acquired, so put has less to do
// the preparation loop keeps retrying the lock; past the retry budget it simply blocks in lock()
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
HashEntry<K,V> first = entryForHash(this, hash);
HashEntry<K,V> e = first;
HashEntry<K,V> node = null;
int retries = -1; // negative while locating node
while (!tryLock()) {
HashEntry<K,V> f; // to recheck first below
if (retries < 0) {
if (e == null) { // reached the end of the chain without finding an "equal" node
if (node == null) // speculatively create node: pre-create the node to be inserted
node = new HashEntry<K,V>(hash, key, value, null);
retries = 0; // nothing matched; stop traversing, keep trying the lock, and watch for chain changes between failures
}
else if (key.equals(e.key)) // found a match; stop traversing, keep trying the lock, and watch for chain changes between failures
retries = 0;
else // keep walking the chain
e = e.next;
}
else if (++retries > MAX_SCAN_RETRIES) { // past the preparation retry budget: give up and block in lock()
lock();
break;
}
else if ((retries & 1) == 0 && (f = entryForHash(this, hash)) != first) { // every other retry, check whether a new head node was added
e = first = f; // re-traverse if entry changed: the chain changed, start over
retries = -1; // reset the retry counter
}
}
return node;
}
// essentially scanAndLockForPut, only simpler: just traverse and watch for changes, with no node to pre-create
private void scanAndLock(Object key, int hash) {
HashEntry<K,V> first = entryForHash(this, hash);
HashEntry<K,V> e = first;
int retries = -1;
while (!tryLock()) {
HashEntry<K,V> f;
if (retries < 0) {
if (e == null || key.equals(e.key))
retries = 0;
else
e = e.next;
}
else if (++retries > MAX_SCAN_RETRIES) {
lock();
break;
}
else if ((retries & 1) == 0 && (f = entryForHash(this, hash)) != first) {
e = first = f;
retries = -1;
}
}
}
// because 1.7's HashEntry.next is volatile and mutable, remove became much simpler: a plain linked-list deletion.
final V remove(Object key, int hash, Object value) {
if (!tryLock())
scanAndLock(key, hash);
V oldValue = null;
try {
HashEntry<K,V>[] tab = table;
int index = (tab.length - 1) & hash;
HashEntry<K,V> e = entryAt(tab, index);
HashEntry<K,V> pred = null;
while (e != null) {
K k;
HashEntry<K,V> next = e.next;
if ((k = e.key) == key || (e.hash == hash && key.equals(k))) {
V v = e.value;
if (value == null || value == v || value.equals(v)) {
if (pred == null)
setEntryAt(tab, index, next); // the removed node is the first in the chain
else
pred.setNext(next); // direct list surgery — as noted, 1.7's HashEntry.next is volatile and mutable, no longer final as in 1.6!
++modCount;
--count;
oldValue = v;
}
break;
}
pred = e;
e = next;
}
} finally {
unlock();
}
return oldValue;
}
// two changes in 1.7 relative to 1.6:
// 1. the extra scanAndLock step; 2. modCount is now bumped
final boolean replace(K key, int hash, V oldValue, V newValue) {
if (!tryLock())
scanAndLock(key, hash);
boolean replaced = false;
try {
HashEntry<K,V> e;
for (e = entryForHash(this, hash); e != null; e = e.next) {
K k;
if ((k = e.key) == key || (e.hash == hash && key.equals(k))) {
if (oldValue.equals(e.value)) {
e.value = newValue;
++modCount; // 1.7's replace bumps modCount — 1.6 did not — which better protects containsValue's accuracy
replaced = true;
}
break;
}
}
} finally {
unlock();
}
return replaced;
}
// essentially the same as replace(K key, int hash, V oldValue, V newValue)
final V replace(K key, int hash, V value) {
if (!tryLock())
scanAndLock(key, hash);
V oldValue = null;
try {
HashEntry<K,V> e;
for (e = entryForHash(this, hash); e != null; e = e.next) {
K k;
if ((k = e.key) == key || (e.hash == hash && key.equals(k))) {
oldValue = e.value;
e.value = value;
++modCount; // 1.7's replace bumps modCount — 1.6 did not — which better protects containsValue's accuracy
break;
}
}
} finally {
unlock();
}
return oldValue;
}
// essentially unchanged
final void clear() {
lock();
try {
HashEntry<K,V>[] tab = table;
for (int i = 0; i < tab.length ; i++)
setEntryAt(tab, i, null);
++modCount;
count = 0;
} finally {
unlock();
}
}
}

JDK 1.8

//analogous to jdk1.8's HashMap.Node and jdk1.7's ConcurrentHashMap.HashEntry
static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
volatile V val;
volatile Node<K,V> next;//same as in jdk1.7
Node(int hash, K key, V val, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.val = val;
this.next = next;
}
public final K getKey() { return key; }
public final V getValue() { return val; }
public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
public final String toString(){ return key + "=" + val; }
//values cannot be set directly through the entry
public final V setValue(V value) {
throw new UnsupportedOperationException();
}
public final boolean equals(Object o) {
Object k, v, u; Map.Entry<?,?> e;
return ((o instanceof Map.Entry) &&
(k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
(v = e.getValue()) != null &&
(k == key || k.equals(key)) &&
(v == (u = val) || v.equals(u)));
}
/**
* Virtualized support for map.get(); overridden in subclasses.
*/
Node<K,V> find(int h, Object k) {
Node<K,V> e = this;
if (k != null) {
do {
K ek;
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
} while ((e = e.next) != null);
}
return null;
}
}
//the node type a bin uses once it has been treeified; operations on it are proxied through TreeBin
static final class TreeNode<K,V> extends Node<K,V> {
TreeNode<K,V> parent; // red-black tree links
TreeNode<K,V> left;
TreeNode<K,V> right;
TreeNode<K,V> prev; // needed to unlink next upon deletion
boolean red;
TreeNode(int hash, K key, V val, Node<K,V> next,
TreeNode<K,V> parent) {
super(hash, key, val, next);
this.parent = parent;
}
Node<K,V> find(int h, Object k) {
return findTreeNode(h, k, null);
}
/**
* Returns the TreeNode (or null if not found) for the given key
* starting at given root.
*/
final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
if (k != null) {
TreeNode<K,V> p = this;
do {
int ph, dir; K pk; TreeNode<K,V> q;
TreeNode<K,V> pl = p.left, pr = p.right;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
else if ((q = pr.findTreeNode(h, k, kc)) != null)
return q;
else
p = pl;
} while (p != null);
}
return null;
}
}
//a special node that proxies operations on TreeNodes, holding the root of the red-black tree that stores the actual data
static final class TreeBin<K,V> extends Node<K,V> {
TreeNode<K,V> root;//root of the red-black tree
volatile TreeNode<K,V> first;
volatile Thread waiter; //the most recent thread to set the WAITER flag
volatile int lockState;
// values for lockState
//the tree's read and write lock states interact as follows: while a thread holds the read lock, a writer may block —
//lookups are fast, so the wait stays short; while a thread holds the write lock, readers do not descend the tree
//but fall back to a plain linked-list scan, so reads and writes can still proceed concurrently — see Node<K,V> find(int h, Object k);
//001
static final int WRITER = 1; // set while holding write lock
//010
static final int WAITER = 2; // set when waiting for write lock
//100
static final int READER = 4; // increment value for setting read lock
//ordering tie-breaker
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null ||
(d = a.getClass().getName().
compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
-1 : 1);
return d;
}
//constructor: builds a red-black tree from the node list headed by b
TreeBin(TreeNode<K,V> b) {
super(TREEBIN, null, null, null);
this.first = b;
TreeNode<K,V> r = null;
for (TreeNode<K,V> x = b, next; x != null; x = next) {
next = (TreeNode<K,V>)x.next;
x.left = x.right = null;
if (r == null) {
x.parent = null;
x.red = false;
r = x;
}
else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
for (TreeNode<K,V> p = r;;) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk);
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
r = balanceInsertion(r, x);
break;
}
}
}
}
this.root = r;
assert checkInvariants(root);
}
//take the write lock on the root
private final void lockRoot() {
//WRITER = 1; when the write lock cannot be taken outright, fall through to contendedLock
if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
contendedLock(); // offload to separate method
}
//release the write lock
private final void unlockRoot() {
lockState = 0;
}
//called while contending for the root's write lock; this method does not return until the write lock is held.
private final void contendedLock() {
boolean waiting = false;
for (int s;;) {
//~WAITER is the bitwise complement of WAITER; this if is true when no thread holds the read lock (no thread can hold the write lock here)
if (((s = lockState) & ~WAITER) == 0) {
if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
if (waiting)
waiter = null;
return;
}
}
// true when some thread holds the read lock (none can hold the write lock) and the current thread has not yet set the WAITER flag
else if ((s & WAITER) == 0) {
//try to claim the WAITER flag
if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
waiting = true;
waiter = Thread.currentThread();
}
}
else if (waiting)
//block this thread (LockSupport.park)
LockSupport.park(this);
}
}
//lookup
final Node<K,V> find(int h, Object k) {
if (k != null) {
for (Node<K,V> e = first; e != null; ) {
int s; K ek;
//when lockState has WRITER = 1 set (a thread holds the write lock) or WAITER = 2 set (a thread is waiting for it), fall back to a linked-list search; when only readers hold the lock, CAS-compete to read through the tree, adding READER to lockState
if (((s = lockState) & (WAITER|WRITER)) != 0) {
if (e.hash == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
e = e.next;
}
else if (U.compareAndSwapInt(this, LOCKSTATE, s,
s + READER)) {
TreeNode<K,V> r, p;
try {
p = ((r = root) == null ? null :
r.findTreeNode(h, k, null));
} finally {
Thread w;
// U.getAndAddInt(this, LOCKSTATE, -READER) returns the OLD value of lockState after applying the update,
// not the new one — effectively a comparison followed by the subtraction
// the comparison asks whether lockState held exactly one reader plus one waiter: if so, this thread was the last reader while a writer sat parked behind the read lock, so the writer must be unparked and allowed to try for the write lock.
if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
(READER|WAITER) && (w = waiter) != null)
LockSupport.unpark(w);
}
return p;
}
}
}
return null;
}
//add an element when the bin is already a red-black tree
final TreeNode<K,V> putTreeVal(int h, K k, V v) {
Class<?> kc = null;
boolean searched = false;
for (TreeNode<K,V> p = root;;) {
int dir, ph; K pk;
if (p == null) {
first = root = new TreeNode<K,V>(h, k, v, null, null);
break;
}
else if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
TreeNode<K,V> q, ch;
searched = true;
if (((ch = p.left) != null &&
(q = ch.findTreeNode(h, k, kc)) != null) ||
((ch = p.right) != null &&
(q = ch.findTreeNode(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
TreeNode<K,V> x, f = first;
first = x = new TreeNode<K,V>(h, k, v, f, xp);
if (f != null)
f.prev = x;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
if (!xp.red)
x.red = true;
else {
lockRoot();
try {
root = balanceInsertion(root, x);
} finally {
unlockRoot();
}
}
break;
}
}
assert checkInvariants(root);
return null;
}
//remove an element
final boolean removeTreeNode(TreeNode<K,V> p) {
TreeNode<K,V> next = (TreeNode<K,V>)p.next;
TreeNode<K,V> pred = p.prev; // unlink traversal pointers
TreeNode<K,V> r, rl;
if (pred == null)
first = next;
else
pred.next = next;
if (next != null)
next.prev = pred;
if (first == null) {
root = null;
return true;
}
if ((r = root) == null || r.right == null || // too small
(rl = r.left) == null || rl.left == null)
return true;
//the tree is still large enough, so delete from the red-black structure as well; this requires the write lock
lockRoot();
try {
TreeNode<K,V> replacement;
TreeNode<K,V> pl = p.left;
TreeNode<K,V> pr = p.right;
if (pl != null && pr != null) {
TreeNode<K,V> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red; s.red = p.red; p.red = c; // swap colors
TreeNode<K,V> sr = s.right;
TreeNode<K,V> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
}
else {
TreeNode<K,V> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
if ((s.right = pr) != null)
pr.parent = s;
}
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
if ((s.left = pl) != null)
pl.parent = s;
if ((s.parent = pp) == null)
r = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
}
else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
TreeNode<K,V> pp = replacement.parent = p.parent;
if (pp == null)
r = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
root = (p.red) ? r : balanceDeletion(r, replacement);
if (p == replacement) { // detach pointers
TreeNode<K,V> pp;
if ((pp = p.parent) != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
p.parent = null;
}
}
} finally {
unlockRoot();
}
assert checkInvariants(root);
return false;
}
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> r, pp, rl;
if (p != null && (r = p.right) != null) {
if ((rl = p.right = r.left) != null)
rl.parent = p;
if ((pp = r.parent = p.parent) == null)
(root = r).red = false;
else if (pp.left == p)
pp.left = r;
else
pp.right = r;
r.left = p;
p.parent = r;
}
return root;
}
static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> l, pp, lr;
if (p != null && (l = p.left) != null) {
if ((lr = p.left = l.right) != null)
lr.parent = p;
if ((pp = l.parent = p.parent) == null)
(root = l).red = false;
else if (pp.right == p)
pp.right = l;
else
pp.left = l;
l.right = p;
p.parent = l;
}
return root;
}
static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
TreeNode<K,V> x) {
x.red = true;
for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
}
}
}
else {
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
}
}
}
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
TreeNode<K,V> x) {
for (TreeNode<K,V> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (x.red) {
x.red = false;
return root;
}
else if ((xpl = xp.left) == x) {
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
if (xpr == null)
x = xp;
else {
TreeNode<K,V> sl = xpr.left, sr = xpr.right;
if ((sr == null || !sr.red) &&
(sl == null || !sl.red)) {
xpr.red = true;
x = xp;
}
else {
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ?
null : xp.right;
}
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
x = root;
}
}
}
else { // symmetric
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
if (xpl == null)
x = xp;
else {
TreeNode<K,V> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) &&
(sr == null || !sr.red)) {
xpl.red = true;
x = xp;
}
else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ?
null : xp.left;
}
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
}
}
}
}
/**
* Recursive invariant check
*/
static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
tb = t.prev, tn = (TreeNode<K,V>)t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
private static final sun.misc.Unsafe U;
private static final long LOCKSTATE;
static {
try {
U = sun.misc.Unsafe.getUnsafe();
Class<?> k = TreeBin.class;
LOCKSTATE = U.objectFieldOffset
(k.getDeclaredField("lockState"));
} catch (Exception e) {
throw new Error(e);
}
}
}
//a forwarding node: a transient node placed in an old-table bin once every node in that bin has been migrated to the new array. Reads and iterations that hit a ForwardingNode are forwarded to the new table; writes that hit one try to help with the resize.
static final class ForwardingNode<K,V> extends Node<K,V> {
final Node<K,V>[] nextTable;
ForwardingNode(Node<K,V>[] tab) {
super(MOVED, null, null, null);
this.nextTable = tab;
}
Node<K,V> find(int h, Object k) {
// loop to avoid arbitrarily deep recursion on forwarding nodes
outer: for (Node<K,V>[] tab = nextTable;;) {
Node<K,V> e; int n;
if (k == null || tab == null || (n = tab.length) == 0 ||
(e = tabAt(tab, (n - 1) & h)) == null)
return null;
for (;;) {
int eh; K ek;
if ((eh = e.hash) == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
if (eh < 0) {
if (e instanceof ForwardingNode) {
tab = ((ForwardingNode<K,V>)e).nextTable;
continue outer;
}
else
return e.find(h, k);
}
if ((e = e.next) == null)
return null;
}
}
}
}
//a placeholder node used only by the functional APIs computeIfAbsent and compute
static final class ReservationNode<K,V> extends Node<K,V> {
ReservationNode() {
super(RESERVED, null, null, null);
}
Node<K,V> find(int h, Object k) {
return null;
}
}
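For reference, the special hash values these node types carry in the 1.8 source; get and put dispatch on them, and spread() keeps ordinary hashes non-negative so they can never collide with these:

static final int MOVED     = -1; // hash of a ForwardingNode
static final int TREEBIN   = -2; // hash of a TreeBin (head of a treeified bin)
static final int RESERVED  = -3; // hash of a ReservationNode
static final int HASH_BITS = 0x7fffffff; // usable bits of an ordinary node's hash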

Common operations

Read methods; the outer layer is only a thin wrapper — the work still happens inside each segment.

JDK 1.6

public V get(Object key) {
int hash = hash(key.hashCode());
return segmentFor(hash).get(key, hash);
}
public boolean containsKey(Object key) {
int hash = hash(key.hashCode());
return segmentFor(hash).containsKey(key, hash);
}
private static int hash(int h) {
// Spread bits to regularize both segment and index locations,
// using variant of single-word Wang/Jenkins hash.
h += (h << 15) ^ 0xffffcd7d;
h ^= (h >>> 10);
h += (h << 3);
h ^= (h >>> 6);
h += (h << 2) + (h << 14);
return h ^ (h >>> 16);
}
//shift applied to the hash when locating the Segment index; by default sshift = 4, so segmentShift = 32 - 4 = 28;
//segmentShift = 32 - sshift;
//segmentMask = ssize - 1 = 15
final Segment<K,V> segmentFor(int hash) {
return segments[(hash >>> segmentShift) & segmentMask];
}
// not a Map-interface method; kept for Hashtable compatibility — equivalent to containsValue
public boolean contains(Object value) {
return containsValue( value);
}
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
final Segment<K,V>[] segments = this.segments;
int[] mc = new int[segments.length];
// first make RETRIES_BEFORE_LOCK = 2 passes without locking
for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
int sum = 0;
int mcsum = 0;
for (int i = 0; i < segments.length; ++i) {
int c = segments[i].count; // c itself is never used; the volatile read of count likely just ensures a fresh view of modCount
mcsum += mc[i] = segments[i].modCount; // i.e. mc[i] = segments[i].modCount; mcsum += mc[i] — snapshot each modCount
if (segments[i].containsValue(value)) // return immediately on a hit
return true;
}
boolean cleanSweep = true;
// mcsum is the sum of the modCounts; if it is 0 we may take it that no put had ever completed when the traversal began, i.e. the map held no HashEntry at that moment, and (with high probability) still holds none
if (mcsum != 0) {
for (int i = 0; i < segments.length; ++i) {
int c = segments[i].count;
if (mc[i] != segments[i].modCount) { // modCount changed: another thread modified the Segment's structure, so give up this pass. The replace caveat mentioned earlier applies here
cleanSweep = false;
break;
}
}
}
if (cleanSweep)
return false;
}
// if both unlocked passes see modCount change, lock every Segment at once for the strongest traversal consistency
// with everything locked before traversing, no thread can modify any Segment's structure, so the current thread gets an exact answer
for (int i = 0; i < segments.length; ++i)
segments[i].lock();
boolean found = false;
try {
for (int i = 0; i < segments.length; ++i) {
if (segments[i].containsValue(value)) {
found = true;
break;
}
}
} finally {
for (int i = 0; i < segments.length; ++i)
segments[i].unlock();
}
return found;
}
//a single pass without locking
public boolean isEmpty() {
final Segment<K,V>[] segments = this.segments;
int[] mc = new int[segments.length];
int mcsum = 0;
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0)
return false;
else
mcsum += mc[i] = segments[i].modCount;
}
if (mcsum != 0) {
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0 || mc[i] != segments[i].modCount)
return false;
}
}
return true;
}
public int size() {
final Segment<K,V>[] segments = this.segments;
long sum = 0;
long check = 0;
int[] mc = new int[segments.length];
// two passes without locking; if they disagree, or a modCount was seen to change, lock everything and count once more
for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) {
check = 0;
sum = 0;
int mcsum = 0;
for (int i = 0; i < segments.length; ++i) {
sum += segments[i].count;
mcsum += mc[i] = segments[i].modCount;
}
if (mcsum != 0) {
for (int i = 0; i < segments.length; ++i) {
check += segments[i].count;
if (mc[i] != segments[i].modCount) {
check = -1; // force retry
break;
}
}
}
if (check == sum)
break;
}
if (check != sum) {
sum = 0;
for (int i = 0; i < segments.length; ++i)
segments[i].lock();
for (int i = 0; i < segments.length; ++i)
sum += segments[i].count;
for (int i = 0; i < segments.length; ++i)
segments[i].unlock();
}
if (sum > Integer.MAX_VALUE) // int overflow handling — the returned value can therefore be wrong.
// compatibility keeps this unfixable here; the only remedy is a new method, and 1.8's ConcurrentHashMap indeed added one that returns a long (mappingCount).
return Integer.MAX_VALUE;
else
return (int)sum;
}

Write methods

public V put(K key, V value) {
if (value == null)
throw new NullPointerException();
int hash = hash(key.hashCode()); // a null key throws an NPE here
return segmentFor(hash).put(key, hash, value, false);
}
// the following share put's basic shape: each delegates to the corresponding Segment method
public V putIfAbsent(K key, V value);
public V remove(Object key);
public boolean remove(Object key, Object value);
public boolean replace(K key, V oldValue, V newValue);
public V replace(K key, V value);
// putAll and clear just loop, with no global lock, so other writes can complete while they run — transactionally weak by design, in exchange for efficiency
public void putAll(Map<? extends K, ? extends V> m) {
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
put(e.getKey(), e.getValue());
}
public void clear() {
for (int i = 0; i < segments.length; ++i)
segments[i].clear();
}

JDK 1.7

Read methods

// get's overall shape is the same as in 1.6
// 1.7 uses Unsafe.getObjectVolatile, which provides a volatile read of a plain array element and so strengthens get
// get is simple enough that all the work is inlined here; Segment.get is gone, saving a method call without hurting the abstraction layering
public V get(Object key) {
Segment<K,V> s; // manually integrate access methods to reduce overhead
HashEntry<K,V>[] tab;
int h = hash(key);
long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&(tab = s.table) != null) {
for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile(tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
e != null; e = e.next) {
K k;
if ((k = e.key) == key || (e.hash == h && key.equals(k)))
return e.value;
}
}
return null;
}
private int hash(Object k) {
int h = hashSeed;
if ((0 != h) && (k instanceof String)) {
return sun.misc.Hashing.stringHash32((String) k);
}
h ^= k.hashCode();
// Spread bits to regularize both segment and index locations,
// using variant of single-word Wang/Jenkins hash.
h += (h << 15) ^ 0xffffcd7d;
h ^= (h >>> 10);
h += (h << 3);
h ^= (h >>> 6);
h += (h << 2) + (h << 14);
return h ^ (h >>> 16);
}
public boolean contains(Object value) {
return containsValue( value);
}
public boolean containsValue(Object value) {
// Same idea as size()
if (value == null)
throw new NullPointerException();
final Segment<K,V>[] segments = this.segments;
boolean found = false;
long last = 0;
int retries = -1;
try {
outer: for (;;) {
if (retries++ == RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
ensureSegment(j).lock(); // force creation
}
long hashSum = 0L;
int sum = 0;
for (int j = 0; j < segments.length; ++j) {
HashEntry<K,V>[] tab;
//segmentAt is explained below
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null && (tab = seg.table) != null) {
for (int i = 0 ; i < tab.length; i++) {
HashEntry<K,V> e;
for (e = entryAt(tab, i); e != null; e = e.next) {
V v = e.value;
if (v != null && value.equals(v)) {
found = true;
break outer; // even a contains hit runs through this unified scan once more here; 1.6 returned directly on a first-pass hit, with no second pass
}
}
}
sum += seg.modCount;
}
}
if (retries > 0 && sum == last)
break;
last = sum;
}
} finally {
if (retries > RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
segmentAt(segments, j).unlock();
}
}
return found;
}
// volatile read of segments[j] by index, via Unsafe
// segments is final and the constructor initializes it before the ConcurrentHashMap instance can be referenced; null only appears during deserialization
static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
long u = (j << SSHIFT) + SBASE; // 计算实际的字节偏移量
return ss == null ? null : (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
}
// locate the head node of hash bucket i within a Segment by index, using Unsafe's volatile array-element read, also handling a null Segment
static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
return (tab == null) ? null : (HashEntry<K,V>) UNSAFE.getObjectVolatile(tab, ((long)i << TSHIFT) + TBASE);
}
// isEmpty follows the 1.6 idea, but exploits modCount's monotonic growth to take a shortcut: it compares sum(modCount) before and after instead of comparing entry by entry
public boolean isEmpty() {
long sum = 0L;
final Segment<K,V>[] segments = this.segments;
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
if (seg.count != 0)
return false;
sum += seg.modCount;
}
}
if (sum != 0L) { // recheck unless no modifications
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
if (seg.count != 0)
return false;
sum -= seg.modCount; // where 1.6 compared each modCount individually, 1.7 compares one total — safe because modCount grows monotonically, unlike count, which could suffer an ABA problem
}
}
if (sum != 0L)
return false;
}
return true;
}
// size follows the 1.6 idea and takes the same modCount-monotonicity shortcut
public int size() {
final Segment<K,V>[] segments = this.segments;
int size;
boolean overflow; // true if size overflows 32 bits
long sum; // sum of modCounts
long last = 0L; // previous sum
int retries = -1; // first iteration isn't retry
try {
for (;;) {
if (retries++ == RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
ensureSegment(j).lock(); // force creation
}
sum = 0L;
size = 0;
overflow = false;
for (int j = 0; j < segments.length; ++j) {
Segment<K,V> seg = segmentAt(segments, j);
if (seg != null) {
sum += seg.modCount;
int c = seg.count;
if (c < 0 || (size += c) < 0)
overflow = true;
}
}
if (sum == last)
break;
last = sum;
}
} finally {
if (retries > RETRIES_BEFORE_LOCK) {
for (int j = 0; j < segments.length; ++j)
segmentAt(segments, j).unlock();
}
}
return overflow ? Integer.MAX_VALUE : size;
}

Write methods

// from 1.7 on most collection classes initialize lazily; put handles that lazy initialization here — otherwise the shape matches 1.6, delegating to the Segment method of the same name
public V put(K key, V value) {
Segment<K,V> s;
if (value == null)
throw new NullPointerException();
int hash = hash(key);
int j = (hash >>> segmentShift) & segmentMask;
if ((s = (Segment<K,V>)UNSAFE.getObject(segments, (j << SSHIFT) + SBASE)) == null) // nonvolatile; recheck in ensureSegment: a plain (non-volatile) read, re-checked inside ensureSegment
s = ensureSegment(j); // handles lazy Segment initialization
return s.put(key, hash, value, false);
}
// ensure the Segment exists
// with lazy initialization only segments[0] is built in the constructor; the others are created on demand by this method
// it uses CAS rather than locking, while still guaranteeing each Segment is initialized exactly once
private Segment<K,V> ensureSegment(int k) {
final Segment<K,V>[] ss = this.segments;
long u = (k << SSHIFT) + SBASE; // raw offset: the actual byte offset
Segment<K,V> seg;
if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
Segment<K,V> proto = ss[0]; // use segment 0 as prototype
int cap = proto.table.length;
float lf = proto.loadFactor;
int threshold = (int)(cap * lf);
HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) { // recheck whether it has been initialized in the meantime
Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) // CAS guarantees a single initialization
break;
}
}
}
return seg;
}
// same as put
public V putIfAbsent(K key, V value);
// as in 1.6: a loop of individual puts with no global lock, so other threads can still complete writes while this method runs
public void putAll(Map<? extends K, ? extends V> m);
public void clear();
// additionally handles a null Segment; otherwise essentially 1.6, delegating to the Segment method of the same name
public V remove(Object key) {
int hash = hash(key);
Segment<K,V> s = segmentForHash(hash);
return s == null ? null : s.remove(key, hash, null); // with 1.7's lazy initialization the Segment may be null
}
// same as remove
public boolean remove(Object key, Object value);
// handles the null-Segment case like remove; otherwise the idea matches 1.6, delegating to the Segment method of the same name
public boolean replace(K key, V oldValue, V newValue);
public V replace(K key, V value);

JDK 1.8

Read methods

public int size() {
long n = sumCount();
return ((n < 0L) ? 0 :
(n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
(int)n);
}
final long sumCount() {
CounterCell[] as = counterCells; CounterCell a;
long sum = baseCount;
if (as != null) {
for (int i = 0; i < as.length; ++i) {
if ((a = as[i]) != null)
sum += a.value;
}
}
return sum;
}
//
public boolean isEmpty() {
return sumCount() <= 0L;
}
public boolean containsKey(Object key) {
return get(key) != null;
}
//look up an element
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) {
if ((eh = e.hash) == h) { // special-case the first node
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
else if (eh < 0) //by convention, a negative hash marks a special node
// a ForwardingNode forwards find to the nextTable and retries there;
// a TreeBin decides from its read/write lock state whether to search as a tree or as a list;
// a ReservationNode exists only to give synchronized something to lock — its bucket holds no data, so nothing can be found and it returns null
return (p = e.find(h, key)) != null ? p.val : null;
while ((e = e.next) != null) { // an ordinary node: search along the chain
if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
// the hash spreader, essentially the 1.8 HashMap's; & HASH_BITS forces the hash non-negative, since negative hashes are reserved for special purposes
static final int spread(int h) {
return (h ^ (h >>> 16)) & HASH_BITS;
}
//
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
Node<K,V>[] t;
if ((t = table) != null) {
Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
for (Node<K,V> p; (p = it.advance()) != null; ) {
V v;
if ((v = p.val) == value || (v != null && value.equals(v)))
return true;
}
}
return false;
}
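sumCount() above adds baseCount to every CounterCell slot: the same striping idea that java.util.concurrent.atomic.LongAdder exposes publicly. A minimal sketch of the pattern using LongAdder as a stand-in:

import java.util.concurrent.atomic.LongAdder;

public class StripedCountDemo {
    public static void main(String[] args) throws InterruptedException {
        LongAdder count = new LongAdder();      // plays the role of baseCount + CounterCell[]
        Runnable writer = () -> {
            for (int i = 0; i < 100_000; i++)
                count.increment();              // contended updates spread across internal cells
        };
        Thread t1 = new Thread(writer), t2 = new Thread(writer);
        t1.start(); t2.start();
        t1.join(); t2.join();
        // like sumCount(): adds the base and all cells; it is not an atomic snapshot
        System.out.println(count.sum());        // 200000 once both writers have joined
    }
}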

Write methods

public V put(K key, V value) {
return putVal(key, value, false);
}
final V putVal(K key, V value, boolean onlyIfAbsent) {
if (key == null || value == null) throw new NullPointerException();
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
if (tab == null || (n = tab.length) == 0)
//handle lazy table initialization
tab = initTable();
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
else if ((fh = f.hash) == MOVED) //a forwarding node means a resize is in progress: go help with it
tab = helpTransfer(tab, f);
else {
V oldVal = null;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
}
addCount(1L, binCount);
return null;
}
private final Node<K,V>[] initTable() {
Node<K,V>[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0)
//another thread is initializing the table: give up the CPU
Thread.yield(); // lost initialization race; just spin
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
break;
}
}
return tab;
}
public void putAll(Map<? extends K, ? extends V> m) {
tryPresize(m.size());
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
putVal(e.getKey(), e.getValue(), false);
}
public V remove(Object key) {
return replaceNode(key, null, null);
}
final V replaceNode(Object key, V value, Object cv) {
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
boolean validated = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
if (value != null)
e.val = value;
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
}
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
public void clear() {
long delta = 0L; // negative number of deletions
int i = 0;
Node<K,V>[] tab = table;
while (tab != null && i < tab.length) {
int fh;
Node<K,V> f = tabAt(tab, i);
if (f == null)
++i;
else if ((fh = f.hash) == MOVED) {
tab = helpTransfer(tab, f);
i = 0; // restart
}
else {
synchronized (f) {
if (tabAt(tab, i) == f) {
Node<K,V> p = (fh >= 0 ? f :
(f instanceof TreeBin) ?
((TreeBin<K,V>)f).first : null);
while (p != null) {
--delta;
p = p.next;
}
setTabAt(tab, i++, null);
}
}
}
}
if (delta != 0L)
addCount(delta, -1);
}
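From the caller's side none of this machinery shows; a minimal usage sketch exercising the paths above:

import java.util.concurrent.ConcurrentHashMap;

public class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>(); // lazy: no table yet, sizeCtl = 0
        map.put("a", 1);          // first put runs initTable(), then CASes into an empty bin
        map.putIfAbsent("a", 2);  // putVal with onlyIfAbsent = true: the value stays 1
        map.remove("a", 1);       // replaceNode with cv = 1: removes only if still mapped to 1
        System.out.println(map);  // {}
    }
}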

References:

https://blog.csdn.net/fenglibing/article/details/17119959

https://blog.csdn.net/u011392897/article/details/60466665

https://blog.csdn.net/u011392897/article/details/60469263

https://blog.csdn.net/u011392897/article/details/60479937

https://juejin.im/post/5a815743f265da4e7e10b8e8

https://blog.csdn.net/hitxueliang/article/details/24734861
