// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/elem pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big elems; the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this value.
	maxKeySize  = 128
	maxElemSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows us to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
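
// As an illustrative sketch (this type does not exist in the source; the compiler
// generates the concrete layout for each map type), the bucket for a map[string]int
// looks roughly like:
//
//	type stringIntBucket struct {
//		tophash  [bucketCnt]uint8
//		keys     [bucketCnt]string
//		elems    [bucketCnt]int
//		overflow *bmap
//	}
//
// dataOffset below is the offset of keys in this layout, and the keys() and
// overflow() helpers compute the keys and overflow fields from a *bmap using it.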

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}
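
// For example, with B == 5 the table has bucketShift(5) == 32 buckets and
// bucketMask(5) == 31, so a bucket index is computed as hash & 31, i.e. from
// the low 5 bits of the hash.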

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
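
// For example, on a 64-bit system a hash whose top byte is 0x03 would collide
// with the reserved marker values, so tophash returns 0x03 + minTopHash = 0x08.
// This keeps every stored tophash of a live entry at or above minTopHash,
// leaving 0..4 free for the empty/evacuated markers defined above.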

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
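
// Continuing the h.B == 18 example: each new overflow bucket bumps noverflow
// with probability 1/8, so noverflow reaches the 1<<15 threshold used by
// tooManyOverflowBuckets after roughly 8 * (1<<15) = 1<<18 overflow buckets,
// i.e. about one overflow bucket per regular bucket.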

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.ptrdata == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil, a new backing array will be allocated;
// otherwise dirtyalloc will be cleared and reused as the backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.bucket.size * nbuckets
		if t.bucket.ptrdata != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if asanenabled && h != nil {
		asanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if asanenabled && h != nil {
		asanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and elem. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if asanenabled {
		asanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.key.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate() {
				typedmemmove(t.key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/elem at insert position
	if t.indirectkey() {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectelem() {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectelem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if asanenabled && h != nil {
		asanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.key.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			if t.indirectelem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.ptrdata == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))
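
	// For example, with h.B == 3 the low 3 bits of r pick one of the 8 buckets
	// to start at, and the next 3 bits pick which of the 8 cells within each
	// bucket is visited first. This is what randomizes map iteration order.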

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start iterating
			// in the middle of a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey() || t.key.equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs).  We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey() || t.key.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectelem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	h.flags ^= hashWriting

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = fastrand()

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets were newly allocated.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
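
// For example, with B == 5 there are 32 buckets and the threshold is
// loadFactorNum*(bucketShift(5)/loadFactorDen) = 13*(32/2) = 208 entries,
// i.e. an average load of 6.5 per bucket. Inserting a 209th entry makes
// overLoadFactor report true in mapassign, which triggers hashGrow.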

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
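
// For example, with B == 10 the threshold is 1<<10 == 1024: once roughly as
// many overflow buckets exist as the 1024 regular buckets, mapassign starts a
// same-size grow, which repacks the entries into a fresh bucket array and
// sheds the now-sparse overflow buckets.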

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
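
// For example, during a doubling grow from B == 5 to B == 6, noldbuckets() is 32
// and oldbucketmask() is 31, so new bucket 37 maps back to old bucket 37&31 == 5;
// old bucket 5 in turn evacuates into new buckets 5 (x) and 5+32 == 37 (y).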

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy key
				}
				if t.indirectelem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflect's math.
	if t.key.equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
		throw("elem size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("elem align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, elem)
}

//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte