// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/elem pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
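
// As a concrete illustration of the load factor above (not part of the
// original measurements): with loadFactor 6.5 and B == 5 the table has
// 1<<5 = 32 buckets, so growth is triggered once a map would exceed
// 13*(32/2) = 208 entries, which is the integer form of 6.5*32 used by
// overLoadFactor below:
//
//	needsGrow := count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)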

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname makemap
//go:linkname makemap64
//go:linkname makemap_small
//go:linkname mapaccess1
//go:linkname mapaccess2
//go:linkname mapaccess1_fat
//go:linkname mapaccess2_fat
//go:linkname mapassign
//go:linkname mapdelete
//go:linkname mapclear
//go:linkname mapiterinit
//go:linkname mapiternext

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big elems - the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this size.
	maxKeySize  = 128
	maxElemSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows us to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
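
// As an illustrative sketch (an assumed layout, not a type the runtime
// declares): for a map[int64]int8 on a 64-bit system, the compiler-generated
// bucket type corresponding to bmap looks roughly like
//
//	type bucketInt64Int8 struct {
//		tophash  [8]uint8
//		keys     [8]int64
//		elems    [8]int8
//		overflow *bucketInt64Int8
//	}
//
// Grouping all keys before all elems avoids the 7 bytes of padding per pair
// that an alternating key/elem layout would need.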

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (sys.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
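
// Worked example (illustrative only): on a 64-bit system a hash of
// 0x123456789abcdef0 has top byte 0x12, which is >= minTopHash, so tophash
// returns 0x12. A hash whose top byte is 0x03 would collide with the
// reserved evacuation marks, so minTopHash is added and tophash returns
// 0x08. Bucket selection, by contrast, uses the low-order bits:
// hash & bucketMask(h.B).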

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
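
// Continuing the B == 18 example above (an illustrative sketch): there are
// 1<<18 buckets, but noverflow can only count to about 1<<16. Incrementing
// with probability 1/8 means noverflow reaches roughly 1<<15 once about
// 1<<18 overflow buckets exist, which is the "as many overflow buckets as
// buckets" threshold that tooManyOverflowBuckets checks (it caps B at 15 to
// match this scaled-down count).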

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.ptrdata == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.bucket.size * nbuckets
		if t.bucket.ptrdata != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
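
// Worked example (illustrative; exact numbers depend on the malloc size
// class table): for b == 5 the base is 1<<5 = 32 buckets plus 1<<(5-4) = 2
// estimated overflow buckets, so nbuckets starts at 34. roundupsize then
// rounds 34*t.bucket.size up to a size class, and any slack is converted
// back into extra whole buckets. The buckets beyond base are handed out
// later by newoverflow via h.extra.nextOverflow.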

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
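
// For context (a hedged sketch of the compiler contract, not code in this
// file): a plain lookup such as
//
//	v := m[k]
//
// is compiled into a call to mapaccess1 (or a type-specialized fast
// variant) whose result pointer is dereferenced into v, while
//
//	v, ok := m[k]
//
// is compiled into mapaccess2 below, whose extra boolean result feeds ok.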

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and elem. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.key.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate() {
				typedmemmove(t.key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/elem at insert position
	if t.indirectkey() {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectelem() {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectelem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
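
// For context (a hedged sketch, not the compiler's exact output): an
// assignment such as
//
//	m[k] = v
//
// is compiled into roughly
//
//	p := mapassign(maptype, m, &k)
//	*(*V)(p) = v
//
// i.e. mapassign only reserves the slot and stores the key; the caller
// stores the element through the returned pointer.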

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.key.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			if t.indirectelem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
// Gccgo-specific: *it need not be zeroed by the compiler,
// and it's cheaper to zero it here.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	it.key = nil
	it.elem = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow = nil
	it.oldoverflow = nil
	it.startBucket = 0
	it.offset = 0
	it.wrapped = false
	it.B = 0
	it.i = 0
	it.bucket = 0
	it.checkBucket = 0

	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.ptrdata == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
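
// For context (a hedged sketch of how range uses these functions): the
// compiler turns
//
//	for k, v := range m { ... }
//
// into roughly "var it hiter; mapiterinit(t, m, &it); for ; it.key != nil;
// mapiternext(&it) { ... }", reading each key/elem through it.key and
// it.elem. The random startBucket and offset chosen above are why Go map
// iteration order is deliberately unspecified and varies from run to run.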

func mapiternext(it *hiter) {
	// Check preemption, since unlike gc we don't check on every call.
	if getg().preempt {
		checkPreempt()
	}

	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start iterating
			// in the middle of a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey() || t.key.equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs).  We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey() || t.key.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectelem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	h.flags ^= hashWriting

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
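
// Worked example (illustrative only): for B == 5 (32 buckets),
// overLoadFactor reports true once count exceeds 13*(32/2) = 208, and
// tooManyOverflowBuckets reports true once noverflow reaches 1<<5 = 32.
// For B == 18, noverflow is only an estimate (see incrnoverflow) and the
// threshold is capped at 1<<15 = 32768, consistent with the scaled-down
// counting done there.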

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
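
// Worked example (illustrative only): while growing from B == 5 to B == 6,
// noldbuckets() is 32 and oldbucketmask() is 31 (0x1f), so new buckets 7 and
// 39 both map back to old bucket 7; evacuate splits old bucket 7 between
// them using bit 5 (newbit == 32) of each key's hash.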

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy key
				}
				if t.indirectelem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflect's math.
	if t.key.equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
		throw("elem size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("elem align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflectlite_maplen internal..z2freflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte