// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack.  Each function compares its stack
pointer against g->stackguard to check for overflow.  To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard.  Functions with large frames don't bother with the check and
always call morestack.  The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack.  This sequence needs to fit in the bottom
section of the stack.  On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes.  That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
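	// For example, with _StackMin = 2048 and _StackSystem = 0, _FixedStack
	// stays 2048; with a 512-byte _StackSystem, 2048+512 = 2560 rounds up to 4096.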
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard.  This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

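// stackinit initializes the global stack pools. It is called once during
// runtime startup, before the pools are used.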
func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool.  Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks.  Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		list.insert(s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool.  Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.freelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

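// stackcache_clear releases every stack cached in c back to the global pool.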
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

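// stackalloc allocates an n-byte stack plus the space for its stack barrier
// array. n must be a power of 2. It returns the usable stack bounds and an
// empty stkbar slice whose backing array occupies the top of the allocation.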
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}

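// stackfree returns the n-byte stack stk to a per-P stack cache, the global
// pool, or the heap, depending on its size and the current GC phase.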
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

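// An adjustinfo carries the parameters of a stack copy: the old stack bounds
// and the delta to add to every pointer into the old stack so that it points
// into the new one.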
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

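// gobv converts a compiler bitvector into a gobitvector whose bits can be
// indexed through a byte slice.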
func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

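// ptrbit returns bit i of bv: 1 if word i holds a live pointer, 0 if it is a scalar.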
func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.TheChar {
	case '7':
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

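// adjustctxt adjusts the scheduling context pointer (gp.sched.ctxt) if it
// points into the old stack.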
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

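// adjuststkbar adjusts the saved LR pointers of the stack barriers that have
// not yet been hit, since they point into the stack being copied.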
func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

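// fillstack sets every byte of stk to b. It is used only by the
// stackPoisonCopy debug mode to make stray references to a dead or
// not-yet-copied stack easy to spot.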
func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.TheChar == '6' || sys.TheChar == '8' {
		// The call to morestack costs a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

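// nilfunc crashes with a nil pointer dereference. gostartcallfn uses its PC
// in place of a nil fn so that starting a goroutine with a nil function
// value faults immediately and obviously.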
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.ref == 0 {
				list.remove(s)
				s.freelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}
