// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack.  Each function compares its stack
pointer against g->stackguard to check for overflow.  To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard.  Functions with large frames don't bother with the check and
always call morestack.  The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack.  This sequence needs to fit in the bottom
section of the stack.  On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes.  That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
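
As a concrete illustration of the three sequences (illustrative only; the
actual constants are defined below): with StackSmall = 128 and StackBig =
4096, a function with a 96-byte frame uses the first sequence, a function
with a 1024-byte frame uses the second, and a function with an 8192-byte
frame uses the third.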
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 896*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
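
// As a concrete illustration (the resulting values depend on GOOS/GOARCH and
// on sys.StackGuardMultiplier, which is 1 on most platforms): on linux/amd64,
// _StackSystem is 0, so
//
//	_FixedStack = 2048               (2048 is already a power of two)
//	_StackGuard = 896*1 + 0 = 896
//	_StackLimit = 896 - 0 - 128 = 768
//
// while on windows/amd64, _StackSystem is 512*8 = 4096, so
//
//	_FixedStack0 = 2048 + 4096 = 6144
//	_FixedStack  = 8192              (6144 rounded up to a power of two)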

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
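// For example, on a typical 64-bit Linux configuration _FixedStack is 2048
// and _NumStackOrders (defined in malloc.go) is 4, so there are free lists
// for 2 KiB, 4 KiB, 8 KiB, and 16 KiB stacks; larger stacks come from
// stackLarge or directly from the heap.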
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

//go:notinheap
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
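// For example, stacklog2(8) == 3 and stacklog2(9) == 3 (the result is
// rounded down).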
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
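// Refill moves stacks into the cache until it holds _StackCacheSize/2 bytes,
// and release drains it back down to _StackCacheSize/2, so in steady state a
// thread can absorb about half the cache's capacity of allocations or frees
// before it has to touch the global pool again.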
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
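		// For example, with _FixedStack == 2048 a request for n == 8192
		// computes order == 2, i.e. the 8 KiB size class.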
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
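// For example (illustrative addresses only), if the old stack is
// [0xc000100000, 0xc000102000) and the new stack is
// [0xc000180000, 0xc000184000), delta is 0x82000, and a stack pointer
// 0xc000101f00 is rewritten to 0xc000183f00, the same distance below the
// top of the new stack.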
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point into
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing into the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
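// For example, round2(1) == 1, round2(3) == 4, and round2(2048) == 2048.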
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
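// (For example, a goroutine whose stack eventually reaches N bytes grows
// through stacks of size _StackMin, 2*_StackMin, and so on, and the total
// number of bytes copied across all of those growths is less than N.)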
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	return gp.syscallsp == 0 && !gp.asyncSafePoint
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
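	// For example (illustrative numbers only), a 32 KiB stack is shrunk to
	// 16 KiB only if the goroutine is using less than 8 KiB by this measure.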
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var stkmap *stackmap
		stackid := pcdata
		if f.funcID != funcID_debugCallV1 {
			stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		} else {
			// debugCallV1's stack map is the register map
			// at its call site.
			callerPC := frame.lr
			caller := findfunc(callerPC)
			if !caller.valid() {
				println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
				throw("bad debugCallV1")
			}
			stackid = int32(-1)
			if callerPC != caller.entry {
				callerPC--
				stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
			}
			if stackid == -1 {
				stackid = 0 // in prologue
			}
			stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
		}
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print("      no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame".  That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off int
	typ *_type
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}