// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

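// mapaccess1_fast32 is the lookup path for maps with uint32 keys, used by the
// one-result form of a map index expression. It returns a pointer to the value
// for key, or a pointer to the shared zero value if the key is not present;
// it never returns nil.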
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

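// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether the key
// was present, backing the two-result form v, ok := m[k].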
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

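// mapaccess1_fast64 is the uint64-key counterpart of mapaccess1_fast32.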
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

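// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the key
// was present.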
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

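// mapaccess1_faststr is the lookup path for maps with string keys. For a
// single-bucket map it avoids hashing: short keys are compared directly, while
// long keys are filtered by length and by their first and last 4 bytes, falling
// back to a full comparison (or to hashing when more than one cell remains a
// candidate).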
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

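// mapaccess2_faststr is like mapaccess1_faststr but also reports whether the
// key was present.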
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

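// mapassign_fast32 is the insert/update path for maps with uint32 keys. It
// returns a pointer to the value slot for key, allocating a new cell (and
// growing the table when the load factor or overflow-bucket count calls for
// it) if the key is not already present; the caller stores the value through
// the returned pointer.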
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

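// mapassign_fast32ptr is the variant of mapassign_fast32 used when the 4-byte
// key is a pointer type; keys are compared as unsafe.Pointer values rather
// than as uint32.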
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

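// mapassign_fast64 is the uint64-key counterpart of mapassign_fast32.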
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

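// mapassign_fast64ptr is the variant of mapassign_fast64 used when the 8-byte
// key is a pointer type; keys are compared as unsafe.Pointer values rather
// than as uint64.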
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

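// mapassign_faststr is the insert/update path for maps with string keys. It
// returns a pointer to the value slot for ky; when inserting a new key it
// copies the string header (pointer and length) directly into the bucket.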
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

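// mapdelete_fast32 is the delete path for maps with uint32 keys. It clears the
// matching key and value slots, marks the cell empty, and decrements the
// count; deleting from a nil or empty map is a no-op.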
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

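// mapdelete_fast64 is the uint64-key counterpart of mapdelete_fast32.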
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

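// mapdelete_faststr is the delete path for maps with string keys; candidate
// cells are matched by tophash and key length before the full key comparison.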
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}