// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are described
// by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
// to be visited during GC. The bits in each byte are consumed starting with
// the low bit: 1<<0, 1<<1, and so on.
//
// Heap bitmap
//
// The allocated heap comes from a subset of the memory in the range [start, used),
// where start == mheap_.arena_start and used == mheap_.arena_used.
// The heap bitmap comprises 2 bits for each pointer-sized word in that range,
// stored in bytes indexed backward in memory from start.
// That is, the byte at address start-1 holds the 2-bit entries for the four words
// start through start+3*ptrSize, the byte at start-2 holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
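//
// For example, the 2-bit entry for the word at address start+5*ptrSize lives
// in the byte at address start-2 (the second bitmap byte, counting backward),
// at entry position 5%4 == 1 within that byte.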
//
// In each 2-bit entry, the lower bit holds the same information as in the 1-bit
// bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
// The meaning of the high bit depends on the position of the word being described
// in its allocated object. In all words *except* the second word, the
// high bit indicates that the object is still being described. In
// these words, if a bit pair with a high bit 0 is encountered, the
// low bit can also be assumed to be 0, and the object description is
// over. This 00 is called the ``dead'' encoding: it signals that the
// rest of the words in the object are uninteresting to the garbage
// collector.
//
// In the second word, the high bit is the GC ``checkmarked'' bit (see below).
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
// bits.
// This form allows a copy from the 1-bit to the 4-bit form to keep the
// pointer bits contiguous, instead of having to space them out.
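//
// For example, a heap bitmap byte of 0xF5 (binary 11110101) describes four
// words that all have their scan bits set (high nibble 1111), of which
// words 0 and 2 are pointers (low nibble 0101).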
//
// The code makes use of the fact that the zero value for a heap bitmap
// has no live pointer bit set and is (depending on position) not used,
// not checkmarked, and the dead encoding.
// These properties must be preserved when modifying the encoding.
//
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap, by
// checking either the noscan bit in the span or the object's type
// information.
//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
// a live object due to mutations without write barriers or bugs in the
// collector implementation. As a sanity check, the GC has a 'checkmark'
// mode that retraverses the object graph with the world stopped, to make
// sure that everything that should be marked is marked.
// In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
// for the second word of the object holds the checkmark bit.
// When not in checkmark mode, this bit is set to 1.
//
// The smallest possible allocation is 8 bytes. On a 32-bit machine, that
// means every allocated object has two words, so there is room for the
// checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
// just one word, so the second bit pair is not available for encoding the
// checkmark. However, because non-pointer allocations are combined
// into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
// must be a pointer, so the type bit in the first word is not actually needed.
// It is still used in general, except in checkmark the type bit is repurposed
// as the checkmark bit and then reinitialized (to 1) as the type bit when
// finished.
//

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift   = 1                     // shift offset between successive bitPointer or bitScan entries
	heapBitmapScale = sys.PtrSize * (8 / 2) // number of data bytes described by one heap bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)
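
// In concrete terms, bitScanAll == 0xF0 and bitPointerAll == 0x0F: the high
// nibble of a bitmap byte holds the four scan bits and the low nibble holds
// the four pointer bits, matching the split described in the comment at the
// top of this file.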

// addb returns the byte pointer p+n.
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
// subtractb is typically used when traversing the pointer tables referred to by hbits
// which are arranged in reverse order.
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
// subtract1 is typically used when traversing the pointer tables referred to by hbits
// which are arranged in reverse order.
//go:nowritebarrier
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// mapBits maps any additional bitmap memory needed for the new arena memory.
//
// Don't call this directly. Call mheap.setArenaUsed.
//
//go:nowritebarrier
func (h *mheap) mapBits(arena_used uintptr) {
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.
	const bitmapChunk = 8192

	n := (arena_used - mheap_.arena_start) / heapBitmapScale
	n = round(n, bitmapChunk)
	n = round(n, physPageSize)
	if h.bitmap_mapped >= n {
		return
	}

	sysMap(unsafe.Pointer(h.bitmap-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
	h.bitmap_mapped = n
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}
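
// For example, the mark bit for object index 10 in a span is bit 10%8 == 2
// of bitmap byte 10/8 == 1, so bytep points at that byte and mask is 1<<2.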

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64-bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
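
// For example, if the eight allocBits bytes starting at whichByte are all
// zero (nothing allocated there), allocCache becomes all 1s and sys.Ctz64
// finds bit 0 immediately; if they are all 0xFF (fully allocated),
// allocCache becomes 0 and sys.Ctz64 returns 64, which makes nextFreeIndex
// advance to the next group and refill.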

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// If the refilled cache is also empty (bitIndex == 64),
		// loop around to grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}
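
// For example, if s.freeindex is 3 and the low bits of s.allocCache are
// binary ...1000, sys.Ctz64 reports 3, so the next free object has index
// 3+3 == 6; the cache is then shifted right by 4 and s.freeindex becomes 7.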

// isFree returns whether the index'th object in s is unallocated.
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

func (s *mspan) objIndex(p uintptr) uintptr {
	byteOffset := p - s.base()
	if byteOffset == 0 {
		return 0
	}
	if s.baseMask != 0 {
		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
		return byteOffset >> s.divShift
	}
	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
}
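
// For example, in a span of 32-byte objects (a power-of-two size class,
// so s.baseMask != 0), objIndex is simply byteOffset >> 5; spans with
// non-power-of-two element sizes take the magic-multiply division path.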

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}
// setMarked sets the marked bit in the markbits, atomically. Some compilers
// are not able to inline the atomic.Or8 function, so if it appears as a hot
// spot, consider inlining it manually.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	if base < mheap_.arena_start || base >= mheap_.arena_used {
		throw("markBitsForSpan: base out of range")
	}
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must have already checked that addr is in the range [mheap_.arena_start, mheap_.arena_used).
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) heapBits {
	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
	off := (addr - mheap_.arena_start) / sys.PtrSize
	return heapBits{(*uint8)(unsafe.Pointer(mheap_.bitmap - off/4 - 1)), uint32(off & 3)}
}
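
// For example (on a 64-bit system, so sys.PtrSize == 8): for
// addr == arena_start+40, off is 5, so the entry lives in the byte at
// bitmap-2 with shift 1; its pointer bit is bit 1 of that byte and its
// scan bit is bit 5.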

// heapBitsForSpan returns the heapBits for the span base address base.
func heapBitsForSpan(base uintptr) (hbits heapBits) {
	if base < mheap_.arena_start || base >= mheap_.arena_used {
		print("runtime: base ", hex(base), " not in range [", hex(mheap_.arena_start), ",", hex(mheap_.arena_used), ")\n")
		throw("heapBitsForSpan: base out of range")
	}
	return heapBitsForAddr(base)
}
// heapBitsForObject returns the base address for the heap object
// containing the address p, the heapBits for base,
// the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0;
// otherwise it returns the base of the object.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits, s *mspan, objIndex uintptr) {
	arenaStart := mheap_.arena_start
	if p < arenaStart || p >= mheap_.arena_used {
		return
	}
	off := p - arenaStart
	idx := off >> _PageShift
	// p points into the heap, but possibly to the middle of an object.
	// Consult the span table to find the block beginning.
	s = mheap_.spans[idx]
	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
		if s == nil || s.state == _MSpanManual {
			// If s is nil, the virtual address has never been part of the heap.
			// This pointer may be to some mmap'd region, so we allow it.
			// Pointers into stacks are also ok, the runtime manages these explicitly.
			return
		}

		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			// Typically this indicates an incorrect use
			// of unsafe or cgo to store a bad pointer in
			// the Go heap. It may also indicate a runtime
			// bug.
			//
			// TODO(austin): We could be more aggressive
			// and detect pointers to unallocated objects
			// in allocated spans.
			printlock()
			print("runtime: pointer ", hex(p))
			if s.state != mSpanInUse {
				print(" to unallocated span")
			} else {
				print(" to unused region of span")
			}
			print(" idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
			if refBase != 0 {
				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
				gcDumpObject("object", refBase, refOff)
			}
			getg().m.traceback = 2
			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
		}
		return
	}
	// If this span holds objects of a power of 2 size, just mask off the bits to
	// the interior of the object. Otherwise use the size to get the base.
	if s.baseMask != 0 {
		// optimize for power of 2 sized objects.
		base = s.base()
		base = base + (p-base)&uintptr(s.baseMask)
		objIndex = (base - s.base()) >> s.divShift
		// base = p & s.baseMask is faster for small spans,
		// but doesn't work for large spans.
		// Overall, it's faster to use the more general computation above.
	} else {
		base = s.base()
		if p-base >= s.elemsize {
			// n := (p - base) / s.elemsize, using division by multiplication
			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
			base += objIndex * s.elemsize
		}
	}
	// Now that we know the actual base, compute heapBits to return to caller.
	hbits = heapBitsForAddr(base)
	return
}

// prefetch the bits.
func (h heapBits) prefetch() {
	prefetchnta(uintptr(unsafe.Pointer((h.bitp))))
}

// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		return heapBits{h.bitp, h.shift + heapBitsShift}
	}
	return heapBits{subtract1(h.bitp), 0}
}
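
// For example, starting from shift 0, four successive calls to next visit
// shifts 0, 1, 2, 3 within one bitmap byte and then move to the byte at
// the next lower address, since the bitmap is laid out backward in memory.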

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	return heapBits{subtractb(h.bitp, n/4), uint32(n%4) * heapBitsShift}
}

// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether the object is still being described at this
// word, that is, whether this word or any remaining word in the object may
// contain a pointer. If it reports false, this word and all remaining words
// in the object are scalars.
// h must not describe the second word of the object.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}

// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}

// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
	if size == sys.PtrSize {
		return (*h.bitp>>h.shift)&bitPointer != 0
	}
	// All multiword objects are 2-word aligned,
	// so we know that the initial word's 2-bit pair
	// and the second word's 2-bit pair are in the
	// same heap bitmap byte, *h.bitp.
	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
}

// setCheckmarked sets the checkmarked bit.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
	if size == sys.PtrSize {
		atomic.Or8(h.bitp, bitPointer<<h.shift)
		return
	}
	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
}

// bulkBarrierPreWrite executes writebarrierptr_prewrite1
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.kind&kindNoPointers.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if !inheap(dst) {
		gp := getg().m.curg
		if gp != nil && gp.stack.lo <= dst && dst < gp.stack.hi {
			// Destination is our own stack. No need for barriers.
			return
		}

		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	}

	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				writebarrierptr_prewrite1(dstx, 0)
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				writebarrierptr_prewrite1(dstx, *srcx)
			}
			h = h.next()
		}
	}
}
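
// A minimal sketch of the intended calling pattern, per the doc comment
// above (typedmemmove in mbarrier.go is one such caller):
//
//	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
//	memmove(dst, src, typ.size)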

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / sys.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	for i := uintptr(0); i < size; i += sys.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * sys.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				writebarrierptr_prewrite1(dstx, 0)
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				writebarrierptr_prewrite1(dstx, *srcx)
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes writebarrierptr_prewrite for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			writebarrierptr_prewrite(dstx, *srcx)
		}
	}
}

// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.

// initSpan initializes the heap bitmap for a span.
// It clears all checkmark bits.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	size, n, total := s.layout()

	// Init the markbit structures
	s.freeindex = 0
	s.allocCache = ^uint64(0) // all 1s indicating all free.
	s.nelems = n
	s.allocBits = nil
	s.gcmarkBits = nil
	s.gcmarkBits = newMarkBits(s.nelems)
	s.allocBits = newAllocBits(s.nelems)

	// Clear bits corresponding to objects.
	if total%heapBitmapScale != 0 {
		throw("initSpan: unaligned length")
	}
	nbyte := total / heapBitmapScale
	if sys.PtrSize == 8 && size == sys.PtrSize {
		end := h.bitp
		bitp := subtractb(end, nbyte-1)
		for {
			*bitp = bitPointerAll | bitScanAll
			if bitp == end {
				break
			}
			bitp = add1(bitp)
		}
		return
	}
	memclrNoHeapPointers(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte)
}

// initCheckmarkSpan initializes a span for being checkmarked.
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
	// The sys.PtrSize == 8 test is a compile-time constant false on 32-bit systems and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must clear type bit (checkmark bit) of every word.
		// The type bit is the lower of every two-bit pair.
		bitp := h.bitp
		for i := uintptr(0); i < n; i += 4 {
			*bitp &^= bitPointerAll
			bitp = subtract1(bitp)
		}
		return
	}
	for i := uintptr(0); i < n; i++ {
		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
		h = h.forward(size / sys.PtrSize)
	}
}

// clearCheckmarkSpan undoes all the checkmarking in a span.
// The actual checkmark bits are ignored, so the only work to do
// is to fix the pointer bits. (Pointer bits are ignored by scanobject
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
	// The sys.PtrSize == 8 test is a compile-time constant false on 32-bit systems and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit system, since minimum size is 8.
		// Must set the type bit (checkmark bit) of every word back to 1.
		// The type bit is the lower of every two-bit pair.
		bitp := h.bitp
		for i := uintptr(0); i < n; i += 4 {
			*bitp |= bitPointerAll
			bitp = subtract1(bitp)
		}
	}
}

// oneBitCount is indexed by byte and produces the
// number of 1 bits in that byte. For example, 128 has 1 bit set
// and oneBitCount[128] holds 1.
var oneBitCount = [256]uint8{
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7,
	5, 6, 6, 7, 6, 7, 7, 8}
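
// For example, oneBitCount[0xA5] == 4: 0xA5 is 10100101 in binary, which
// has four bits set.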

// countAlloc returns the number of objects allocated in span s by
// scanning the allocation bitmap.
// TODO:(rlh) Use popcount intrinsic.
func (s *mspan) countAlloc() int {
	count := 0
	maxIndex := s.nelems / 8
	for i := uintptr(0); i < maxIndex; i++ {
		mrkBits := *s.gcmarkBits.bytep(i)
		count += int(oneBitCount[mrkBits])
	}
	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
		mrkBits := *s.gcmarkBits.bytep(maxIndex)
		mask := uint8((1 << bitsInLastByte) - 1)
		bits := mrkBits & mask
		count += int(oneBitCount[bits])
	}
	return count
}

// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	// dataSize is always size rounded up to the next malloc size class,
	// except in the case of allocating a defer block, in which case
	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
	// arbitrarily larger.
	//
	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
	// assume that dataSize == size without checking it explicitly.

	if sys.PtrSize == 8 && size == sys.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// Heap bitmap bits for 2-word object are only 4 bits,
	// so also shared with objects next to it.
	// This is called out as a special case primarily for 32-bit systems,
	// so that on 32-bit systems the code below can assume all objects
	// are 4-word aligned (because they're all 16-byte aligned).
	if size == 2*sys.PtrSize {
		if typ.size == sys.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element slice of pointer.
				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*sys.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := (b & 3) | bitScan
		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
		// 110011 is shifted h.shift and complemented.
		// This clears out the bits that are about to be
		// ored into *h.bitp in the next instructions.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	}
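
	// (For example, for a 2-word struct{ p *byte; n uintptr }, the case
	// above reads a ptrmask byte of binary 01, so hb == bitPointer|bitScan
	// == 0x11: word 0 is marked pointer/scan and word 1 is a scalar, with
	// its scan slot left clear because that slot is the checkmark.)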

	// Copy from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-checking mechanism
	// as the 1-bit case. Nothing above could have encountered
	// GC programs: the cases were all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
			goto Phase4
		}
		return
	}

	// Note about sizes:
	//
	// typ.size is the number of bytes in the object,
	// and typ.ptrdata is the number of bytes in the prefix
	// of the object that contains pointers. That is, the final
	// typ.size - typ.ptrdata bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.ptrdata prefix, zero padded out to a full byte
	// of bitmap. This code sets nw (below) so that heap bitmap
	// bits are only written for the typ.ptrdata prefix; if there is
	// more room in the allocated object, the next heap bitmap
	// entry is a 00, indicating that there are no more pointers
	// to scan. So only the ptrmask for the ptrdata bytes is needed.
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail does have to
	// be initialized, because there is no way to say "skip forward".
	// However, because of the possibility of a repeated type with
	// size not a multiple of 4 pointers (one heap bitmap byte),
	// the code already must handle the last ptrmask byte specially
	// by treating it as containing only the bits for endnb pointers,
	// where endnb <= 4. We represent large scalar tails that must
	// be expanded in the replication by setting endnb larger than 4.
	// This will have the effect of reading many bits out of b,
	// but once the real bits are shifted out, b will supply as many
	// zero bits as we try to read, which is exactly what we need.

	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes only a prefix of the object:
		// just the typ.ptrdata words that can contain pointers.
		const maxBits = sys.PtrSize*8 - 7
		if typ.ptrdata/sys.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read from ptrmask again.
			// This is especially important when the ptrmask has
			// fewer than 8 bits in it; otherwise the reload in the middle
			// of the Phase 2 loop would itself need to loop to gather
			// at least 8 bits.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / sys.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / sys.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/sys.PtrSize bits
			// but are pretending to have typ.size/sys.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= sys.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/sys.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / sys.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}
	if nw < 2 {
		// Must write at least 2 words, because the "no scan"
		// encoding doesn't take effect until the third word.
		nw = 2
	}

	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
	// The leading byte is special because it contains the bits for word 1,
	// which does not have the scan bit set.
	// The leading half-byte is special because it's half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		// Handle first byte of bitmap specially.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object. The scan/dead bit on the
		// second word is the checkmark, so we don't set it.
		// In all following words, we set the scan bit to
		// indicate that the object continues to the next 2-bit
		// entry in the bitmap.
		//
		// TODO: It doesn't matter if we set the checkmark, so
		// maybe this case isn't needed any more.
		hb = b & bitPointerAll
		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = subtract1(hbitp)
		b >>= 4
		nb -= 4

	case sys.PtrSize == 8 && h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		// We took care of 1-word and 2-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		// This is not noscan, so set the scan bit in the
		// first word.
		hb |= bitScan << (2 * heapBitsShift)
		b >>= 2
		nb -= 2
		// Note: no bitScan for second word because that's
		// the checkmark.
		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = subtract1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = subtract1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the number of bits in b.
				// This is important if we skipped
				// over a scalar tail, since nb could
				// be larger than the bit width of b.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = subtract1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet written to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / sys.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = subtract1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = subtract1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write first two in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: all done, but perhaps double check.
	if doubleCheck {
		end := heapBitsForAddr(x + size)
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not written, unfortunately.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / sys.PtrSize
		ndata := typ.size / sys.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
		for i := uintptr(0); i < size/sys.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				want = 0 // deadmarker
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				if i != 1 {
					want |= bitScan
				} else {
					have &^= bitScan
				}
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*sys.PtrSize, "have", have, "want", want)
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}

var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*sys.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / sys.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
	}
	endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale))
	memclrNoHeapPointers(add(endAlloc, 1), uintptr(endProg)-uintptr(endAlloc))
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/sys.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
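//
// For example, a mask of one pointer word followed by one scalar word,
// repeated ten times (20 bits in all), could be encoded as:
//
//	0x02 0x01  emit 2 bits copied from the next byte: 01 (pointer, then scalar)
//	0x82 0x09  repeat the previous 2 bits 9 more times
//	0x00       stop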

// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and writes move backward
// starting at dst, because the heap bitmap itself is laid out backward
// in memory. In this case, the caller guarantees that only whole bytes
// in dst need to be written.
//
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = subtract1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = subtract1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					//println("trailer")
					p = trailer
					trailer = nil
					continue
				}
				//println("done")
				break Run
			}
			//println("lit", n, dst)
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = subtract1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = subtract1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
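		// The varint encoding is little-endian base 128: 7 bits per
		// byte, least significant group first, with the high bit set
		// on every byte except the last (e.g. 300 encodes as AC 02).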
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
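		// (On a 64-bit system, for example, maxBits is 57, and 57
		// pattern bits plus a 7-bit partial byte exactly fill the
		// 64-bit bit buffer.)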
		src := dst
		const maxBits = sys.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = add1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = add1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
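					// (For example, a 3-bit pattern on a
					// 64-bit system doubles through 3, 6,
					// 12, 24, 48 bits; the trim below then
					// keeps 57/3*3 = 57 bits, 19 whole
					// copies.)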
					for nb <= sys.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = subtract1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = addb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = subtract1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = subtract1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = subtract1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dstStart))-uintptr(unsafe.Pointer(dst)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = subtract1(dst)
			bits >>= 4
		}
	}
	return totalBits
}

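// dumpGCProg prints a disassembly of the GC program p, one instruction
// per line, each prefixed with the number of bits emitted so far.
// It is used only for debugging.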
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

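// getgcmaskcb is the gentraceback callback used by getgcmask: it copies
// out the frame containing target.sp and returns false to stop the
// traceback there.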
func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
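// For example (illustrative), for x = new(struct{ x uintptr; p *byte }),
// the result would be []byte{0, 1}: the struct's first word is a scalar
// and its second word is a pointer.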
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / sys.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}

// getgcmask returns the GC type info for the object stored in ep, for testing.
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.data) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	var n uintptr
	var base uintptr
	if mlookup(uintptr(p), &base, &n, nil) != 0 {
		mask = make([]byte, n/sys.PtrSize)
		for i := uintptr(0); i < n; i += sys.PtrSize {
			hbits := heapBitsForAddr(base + i)
			if hbits.isPointer() {
				mask[i/sys.PtrSize] = 1
			}
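			// The high bit of the second word (i == sys.PtrSize)
			// does not encode morePointers, so skip the dead
			// check for that word.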
			if i != 1*sys.PtrSize && !hbits.morePointers() {
				mask = mask[:i/sys.PtrSize]
				break
			}
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			f := frame.fn
			targetpc := frame.continpc
			if targetpc == 0 {
				return
			}
			if targetpc != f.entry {
				targetpc--
			}
			pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, nil)
			if pcdata == -1 {
				return
			}
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				return
			}
			bv := stackmapdata(stkmap, pcdata)
			size := uintptr(bv.n) * sys.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				bitmap := bv.bytedata
				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
		}
		return
	}

	// Otherwise, not something the GC knows about.
	// Possibly read-only data, like malloc(0).
	// Such memory must not contain pointers.
	return
}