// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

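// mapaccess1_fast32 is the fast-path lookup for maps with 32-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value if the key is not present. It never returns nil.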
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

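// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether
// the key was present in the map.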
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

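// mapaccess1_fast64 is the fast-path lookup for maps with 64-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value if the key is not present.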
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

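// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether
// the key was present in the map.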
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

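// mapaccess1_faststr is the fast-path lookup for maps with string keys.
// For a single-bucket map it avoids hashing entirely: it compares lengths
// (plus the first and last four bytes for long keys) before falling back
// to a full memequal or, when two candidates remain, to the hashed lookup
// at dohash.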
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

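// mapaccess2_faststr is like mapaccess1_faststr but also reports whether
// the key was present in the map.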
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

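// mapassign_fast32 is the fast-path insert/update for maps with 32-bit
// keys. It returns a pointer to the value slot for key, growing the map
// first if the load factor or the overflow-bucket count demands it.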
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

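// mapassign_fast32ptr is the variant of mapassign_fast32 used when the
// 32-bit-wide key is a pointer; the key is stored through an
// unsafe.Pointer rather than as raw uint32 bits.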
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

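// mapassign_fast64 is the fast-path insert/update for maps with 64-bit
// keys. It returns a pointer to the value slot for key, growing the map
// first if the load factor or the overflow-bucket count demands it.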
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

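// mapassign_fast64ptr is the variant of mapassign_fast64 used when the
// 64-bit-wide key is a pointer; the key is stored through an
// unsafe.Pointer rather than as raw uint64 bits.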
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

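// mapassign_faststr is the fast-path insert/update for maps with string
// keys. It returns a pointer to the value slot for s, growing the map
// first if the load factor or the overflow-bucket count demands it.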
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

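// mapdelete_fast32 removes key from the map, clearing the key and value
// slots only when they contain pointers the garbage collector cares about.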
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

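// mapdelete_fast64 removes key from the map, clearing the key and value
// slots only when they contain pointers the garbage collector cares about.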
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

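// mapdelete_faststr removes ky from the map. The key's string pointer is
// always cleared; the value slot is cleared only if it contains pointers.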
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

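// evacuate_fast32 moves the contents of old bucket oldbucket into the
// grown table, splitting entries between the x (low) and y (high)
// destinations when the table is doubling in size.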
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

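// evacuate_fast64 is the 64-bit-key analogue of evacuate_fast32.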
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

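// evacuate_faststr is the string-key analogue of evacuate_fast32; keys
// are copied as string headers (pointer and length).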
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}