2. ConcurrentHashMap的结构是什么样子的? 3. 底层的实现(以put和get为例)。在多线程中,针对已有的HashMap和HashTable来看,HashMap是线程不安全的,而HashTable是线程安全的。我们不使用HashTable,主要是因为HashTable将所有的方法都加上了synchronized锁,以此来保证在多线程中操作HashTable是线程安全的。但也正因为所有的方法都加上了synchronized,使用HashTable相对来说会比较慢,比较浪费资源。因此ConcurrentHashMap出现了,解决了这个问题。
提醒:在HashTable中,key-value都不能为null;而在HashMap中,key-value值都可以为null。
1.我们可以看到声明的变量
// Core constants and fields of ConcurrentHashMap (JDK 7 segmented design).
static final int DEFAULT_INITIAL_CAPACITY = 16; // default total table size: 16
static final float DEFAULT_LOAD_FACTOR = 0.75f; // default load factor: 0.75
static final int DEFAULT_CONCURRENCY_LEVEL = 16; // default concurrency level (expected number of concurrently updating threads)
static final int MAXIMUM_CAPACITY = 1 << 30; // maximum number of buckets
static final int MIN_SEGMENT_TABLE_CAPACITY = 2; // minimum HashEntry table size per segment
static final int MAX_SEGMENTS = 1 << 16; // maximum number of segments
static final int RETRIES_BEFORE_LOCK = 2; // unsynchronized retries before falling back to locking
final int segmentMask; // for locating a segment: segments.length - 1, immutable
final int segmentShift; // for locating a segment: 32 (hash bit width) minus log2(segments.length), immutable
final Segment[] segments; // the backing structure of ConcurrentHashMap is an array of Segments
2. 然后看他的HashEntry,它是一个静态内部类(跟HashMap的Entry差不多)
// Per-bucket linked-list node, analogous to HashMap.Entry.
// Note: value and next are volatile so readers can traverse without locking.
static final class HashEntry{
    final int hash;           // hash of the key
    final K key;              // stored key
    volatile V value;         // stored value
    volatile HashEntry next;  // next node in this bucket's list
    // Constructor: initializes all four fields.
    HashEntry(int hash, K key, V value, HashEntry next) {
        this.hash = hash;
        this.key = key;
        this.value = value;
        this.next = next;
    }
    ....
}
3.我们看他的初始化
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
// Validate the constructor arguments.
if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
// Cap the concurrency level at the maximum number of segments.
if (concurrencyLevel > MAX_SEGMENTS)
concurrencyLevel = MAX_SEGMENTS;
// Find power-of-two sizes best matching arguments
int sshift = 0;// equals lg(ssize)
int ssize = 1;// number of segments (segments array length), rounded up to a power of two
while (ssize < concurrencyLevel) {
++sshift;
ssize <<= 1;
}
this.segmentShift = 32 - sshift;// shift applied to the hash to locate a segment (segment array index)
this.segmentMask = ssize - 1;// mask applied (after the shift) to compute the segment index
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
int c = initialCapacity / ssize;// HashEntry slots needed per segment (rounded up to a power of two below)
if (c * ssize < initialCapacity)
++c;
int cap = MIN_SEGMENT_TABLE_CAPACITY;
while (cap < c)// grow cap to the smallest power of two >= c, never below the minimum table capacity
cap <<= 1;
// Create segments[0] eagerly; it serves as the sizing prototype for lazily created segments.
Segment s0 =
new Segment(loadFactor, (int)(cap * loadFactor),
(HashEntry[])new HashEntry[cap]);
Segment[] ss = (Segment[])new Segment[ssize];
UNSAFE.putOrderedObject(ss, Sbase, s0); // publish s0 into slot 0
this.segments = ss;
}
3.1Put方法的实现
public V put(K key, V value) {
Segment s;
// Null values are not permitted.
if (value == null)
throw new NullPointerException();
// Spread the key's hash.
int hash = hash(key);
// Use the high bits of the hash (shift + mask) to pick the segment index.
int j = (hash >>> segmentShift) & segmentMask;
// Plain (non-volatile) read of the segment slot; if it appears null,
// ensureSegment performs the volatile recheck and creates it if needed.
if ((s = (Segment)UNSAFE.getObject // nonvolatile; recheck
(segments, (j << SSHIFT) + Sbase)) == null) // in ensureSegment
s = ensureSegment(j);
return s.put(key, hash, value, false);
}
//在这个方法中多次进行判断主要是为了在多线程当中减少错误的产生(个人理解) private SegmentensureSegment(int k) { final Segment [] ss = this.segments; long u = (k << SSHIFT) + Sbase; // raw offset Segment seg; //计算出对应的偏移量是否存在值 if ((seg = (Segment )UNSAFE.getObjectVolatile(ss, u)) == null) { //初始化数组大小 Segment proto = ss[0]; // use segment 0 as prototype int cap = proto.table.length; float lf = proto.loadFactor; int threshold = (int)(cap * lf); HashEntry [] tab = (HashEntry [])new HashEntry[cap]; //再次判断是否为空 if ((seg = (Segment )UNSAFE.getObjectVolatile(ss, u)) == null) { // recheck Segment s = new Segment (lf, threshold, tab); while ((seg = (Segment )UNSAFE.getObjectVolatile(ss, u)) == null) { //这里主要是为了在多线程中并发产生时,通过CAS来进行判断,相同才进行值的更替 if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) break; } } } return seg; }
计算出对应的Segment下标后,就可以调用Segment内部的put方法了
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
// Try to acquire the segment lock; on failure, scanAndLockForPut spins
// (possibly pre-creating the node) until the lock is held.
HashEntry node = tryLock() ? null :
scanAndLockForPut(key, hash, value);
V oldValue;
try {
// From here the insert resembles HashMap's.
HashEntry[] tab = table;
int index = (tab.length - 1) & hash;
HashEntry first = entryAt(tab, index);
for (HashEntry e = first;;) {
// Walk the bucket looking for an existing mapping for this key.
if (e != null) {
K k;
if ((k = e.key) == key ||
(e.hash == hash && key.equals(k))) {
oldValue = e.value;
if (!onlyIfAbsent) {
e.value = value;
++modCount;
}
break;
}
e = e.next;
}
else {
// No existing mapping: insert at the head of the bucket.
if (node != null)
node.setNext(first);
else
node = new HashEntry(hash, key, value, first);
int c = count + 1;
// Resize if the new count exceeds the threshold and we can still grow.
if (c > threshold && tab.length < MAXIMUM_CAPACITY)
rehash(node);
else
// Volatile write of the new head into slot index.
setEntryAt(tab, index, node);
++modCount;
count = c;
oldValue = null;
break;
}
}
} finally {
unlock();
}
return oldValue;
}
我们看尝试获取锁的方法
private HashEntryscanAndLockForPut(K key, int hash, V value) { HashEntry first = entryForHash(this, hash); HashEntry e = first; HashEntry node = null; int retries = -1; // negative while locating node while (!tryLock()) { HashEntry f; // to recheck first below if (retries < 0) { if (e == null) { if (node == null) // speculatively create node node = new HashEntry (hash, key, value, null); retries = 0; } else if (key.equals(e.key)) retries = 0; else e = e.next; } //如果尝试的次数大于最大尝试次数,就直接加上锁,并返回 else if (++retries > MAX_SCAN_RETRIES) { lock(); break; } else if ((retries & 1) == 0 && (f = entryForHash(this, hash)) != first) { e = first = f; // re-traverse if entry changed retries = -1; } } return node; }
我们看他的扩容方法
private void rehash(HashEntry3.2get方法node) { //获取老的HashEntity的大小 HashEntry [] oldTable = table; int oldCapacity = oldTable.length; //将数组大小进行二倍扩容 int newCapacity = oldCapacity << 1; threshold = (int)(newCapacity * loadFactor); //创建一个新的数组大小 HashEntry [] newTable = (HashEntry []) new HashEntry[newCapacity]; //主要是为了计算出偏移量 int sizeMask = newCapacity - 1; //循环遍历老数组上的所有值 for (int i = 0; i < oldCapacity ; i++) { HashEntry e = oldTable[i]; if (e != null) { HashEntry next = e.next; int idx = e.hash & sizeMask; //如果当前值没有下一个数据,直接进行转移 if (next == null) // Single node on list newTable[idx] = e; else { // Reuse consecutive sequence at same slot //第一个for循环主要是为了记录在链表中最后一串相同的数组下标,直接进行移植到新的数组,可以加快扩容的速度 HashEntry lastRun = e; int lastIdx = idx; for (HashEntry last = next; last != null; last = last.next) { int k = last.hash & sizeMask; if (k != lastIdx) { lastIdx = k; lastRun = last; } } newTable[lastIdx] = lastRun; // 类似于HashMap的计算出下标进行转移 for (HashEntry p = e; p != lastRun; p = p.next) { V v = p.value; int h = p.hash; int k = h & sizeMask; HashEntry n = newTable[k]; newTable[k] = new HashEntry (h, p.key, v, n); } } } } //新的值加入进来,直接转移到新的扩容数组中,主要是因为在concurrentHashMap中,如果新加入的值正好大于等于原来数组的阈值,就先把原来数组中的值进行转移,转移过后再把新的值加入到新的数组中,也算是提高效率。 int nodeIndex = node.hash & sizeMask; // add the new node node.setNext(newTable[nodeIndex]); newTable[nodeIndex] = node; table = newTable; }
3.2 get方法的实现
get方法相对来说就比较简单,它实现的原理是先计算出Segment的下标,再计算出HashEntry的偏移量,从而获取到对应的Value值。这里不使用加锁的方式,主要是因为用volatile变量协调了读写线程间的内存可见性。
public V get(Object key) {
Segment s; // manually integrate access methods to reduce overhead
HashEntry[] tab;
// Spread the key's hash.
int h = hash(key);
// Compute the raw memory offset of the segment slot from the high hash bits.
long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + Sbase;
// Volatile read of the segment and its table; both may be null if the
// segment has never been populated.
if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null &&
(tab = s.table) != null) {
// Volatile read of the bucket head, then traverse the list.
for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile
(tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + Tbase);
e != null; e = e.next) {
K k;
// Return the value when the key matches (by reference or equals).
if ((k = e.key) == key || (e.hash == h && key.equals(k)))
return e.value;
}
}
return null;
}



