1// Copyright 2014 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
7import (
8	"runtime/internal/atomic"
9	"unsafe"
10)
11
12// For gccgo, use go:linkname to export compiler-called functions.
13//
14//go:linkname deferproc
15//go:linkname deferprocStack
16//go:linkname deferreturn
17//go:linkname setdeferretaddr
18//go:linkname checkdefer
19//go:linkname gopanic
20//go:linkname canrecover
21//go:linkname makefuncfficanrecover
22//go:linkname makefuncreturning
23//go:linkname gorecover
24//go:linkname deferredrecover
25//go:linkname goPanicIndex
26//go:linkname goPanicIndexU
27//go:linkname goPanicSliceAlen
28//go:linkname goPanicSliceAlenU
29//go:linkname goPanicSliceAcap
30//go:linkname goPanicSliceAcapU
31//go:linkname goPanicSliceB
32//go:linkname goPanicSliceBU
33//go:linkname goPanicSlice3Alen
34//go:linkname goPanicSlice3AlenU
35//go:linkname goPanicSlice3Acap
36//go:linkname goPanicSlice3AcapU
37//go:linkname goPanicSlice3B
38//go:linkname goPanicSlice3BU
39//go:linkname goPanicSlice3C
40//go:linkname goPanicSlice3CU
41//go:linkname panicshift
42//go:linkname panicdivide
43//go:linkname panicmem
44// Temporary for C code to call:
45//go:linkname throw
46
47// Check to make sure we can really generate a panic. If the panic
48// was generated from the runtime, or from inside malloc, then convert
49// to a throw of msg.
50// pc should be the program counter of the compiler-generated code that
51// triggered this panic.
52func panicCheck1(pc uintptr, msg string) {
53	name, _, _, _ := funcfileline(pc-1, -1, false)
54	if hasPrefix(name, "runtime.") {
55		throw(msg)
56	}
57	// TODO: is this redundant? How could we be in malloc
58	// but not in the runtime? runtime/internal/*, maybe?
59	gp := getg()
60	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
61		throw(msg)
62	}
63}
64
65// Same as above, but calling from the runtime is allowed.
66//
67// Using this function is necessary for any panic that may be
68// generated by runtime.sigpanic, since those are always called by the
69// runtime.
70func panicCheck2(err string) {
71	// panic allocates, so to avoid recursive malloc, turn panics
72	// during malloc into throws.
73	gp := getg()
74	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
75		throw(err)
76	}
77}
78
79// Many of the following panic entry-points turn into throws when they
80// happen in various runtime contexts. These should never happen in
81// the runtime, and if they do, they indicate a serious issue and
82// should not be caught by user code.
83//
84// The panic{Index,Slice,divide,shift} functions are called by
85// code generated by the compiler for out of bounds index expressions,
86// out of bounds slice expressions, division by zero, and shift by negative.
87// The panicdivide (again), panicoverflow, panicfloat, and panicmem
88// functions are called by the signal handler when a signal occurs
89// indicating the respective problem.
90//
91// Since panic{Index,Slice,shift} are never called directly, and
92// since the runtime package should never have an out of bounds slice
93// or array reference or negative shift, if we see those functions called from the
94// runtime package we turn the panic into a throw. That will dump the
95// entire runtime stack for easier debugging.
96//
97// The entry points called by the signal handler will be called from
98// runtime.sigpanic, so we can't disallow calls from the runtime to
99// these (they always look like they're called from the runtime).
100// Hence, for these, we just check for clearly bad runtime conditions.
101
102// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
103func goPanicIndex(x int, y int) {
104	panicCheck1(getcallerpc(), "index out of range")
105	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
106}
107func goPanicIndexU(x uint, y int) {
108	panicCheck1(getcallerpc(), "index out of range")
109	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
110}
111
112// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
113func goPanicSliceAlen(x int, y int) {
114	panicCheck1(getcallerpc(), "slice bounds out of range")
115	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
116}
117func goPanicSliceAlenU(x uint, y int) {
118	panicCheck1(getcallerpc(), "slice bounds out of range")
119	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
120}
121func goPanicSliceAcap(x int, y int) {
122	panicCheck1(getcallerpc(), "slice bounds out of range")
123	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
124}
125func goPanicSliceAcapU(x uint, y int) {
126	panicCheck1(getcallerpc(), "slice bounds out of range")
127	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
128}
129
130// failures in the comparisons for s[x:y], 0 <= x <= y
131func goPanicSliceB(x int, y int) {
132	panicCheck1(getcallerpc(), "slice bounds out of range")
133	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
134}
135func goPanicSliceBU(x uint, y int) {
136	panicCheck1(getcallerpc(), "slice bounds out of range")
137	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
138}
139
140// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
141func goPanicSlice3Alen(x int, y int) {
142	panicCheck1(getcallerpc(), "slice bounds out of range")
143	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
144}
145func goPanicSlice3AlenU(x uint, y int) {
146	panicCheck1(getcallerpc(), "slice bounds out of range")
147	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
148}
149func goPanicSlice3Acap(x int, y int) {
150	panicCheck1(getcallerpc(), "slice bounds out of range")
151	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
152}
153func goPanicSlice3AcapU(x uint, y int) {
154	panicCheck1(getcallerpc(), "slice bounds out of range")
155	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
156}
157
158// failures in the comparisons for s[:x:y], 0 <= x <= y
159func goPanicSlice3B(x int, y int) {
160	panicCheck1(getcallerpc(), "slice bounds out of range")
161	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
162}
163func goPanicSlice3BU(x uint, y int) {
164	panicCheck1(getcallerpc(), "slice bounds out of range")
165	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
166}
167
168// failures in the comparisons for s[x:y:], 0 <= x <= y
169func goPanicSlice3C(x int, y int) {
170	panicCheck1(getcallerpc(), "slice bounds out of range")
171	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
172}
173func goPanicSlice3CU(x uint, y int) {
174	panicCheck1(getcallerpc(), "slice bounds out of range")
175	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
176}
177
178var shiftError = error(errorString("negative shift amount"))
179
180func panicshift() {
181	panicCheck1(getcallerpc(), "negative shift amount")
182	panic(shiftError)
183}
184
185var divideError = error(errorString("integer divide by zero"))
186
187func panicdivide() {
188	panicCheck2("integer divide by zero")
189	panic(divideError)
190}
191
192var overflowError = error(errorString("integer overflow"))
193
194func panicoverflow() {
195	panicCheck2("integer overflow")
196	panic(overflowError)
197}
198
199var floatError = error(errorString("floating point error"))
200
201func panicfloat() {
202	panicCheck2("floating point error")
203	panic(floatError)
204}
205
206var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
207
208func panicmem() {
209	panicCheck2("invalid memory address or nil pointer dereference")
210	panic(memoryError)
211}
212
213// deferproc creates a new deferred function.
214// The compiler turns a defer statement into a call to this.
215// frame points into the stack frame; it is used to determine which
216// deferred functions are for the current stack frame, and whether we
217// have already deferred functions for this frame.
218// pfn is a C function pointer.
219// arg is a value to pass to pfn.
220func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
221	gp := getg()
222	d := newdefer()
223	if d._panic != nil {
224		throw("deferproc: d.panic != nil after newdefer")
225	}
226	d.link = gp._defer
227	gp._defer = d
228	d.frame = frame
229	d.panicStack = getg()._panic
230	d.pfn = pfn
231	d.arg = arg
232	d.retaddr = 0
233	d.makefunccanrecover = false
234}
235
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record, d, does not need to be initialized.
// Other arguments are the same as in deferproc.
//go:nosplit
func deferprocStack(d *_defer, frame *bool, pfn uintptr, arg unsafe.Pointer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	d.pfn = pfn
	d.retaddr = 0
	d.makefunccanrecover = false
	// d lives on the stack, so freedefer must not return it to the pool.
	d.heap = false
	// The lines below implement:
	//   d.frame = frame
	//   d.arg = arg
	//   d._panic = nil
	//   d.panicStack = gp._panic
	//   d.link = gp._defer
	// But without write barriers. They are writes to the stack so they
	// don't need a write barrier, and furthermore are to uninitialized
	// memory, so they must not use a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.frame)) = uintptr(unsafe.Pointer(frame))
	*(*uintptr)(unsafe.Pointer(&d.arg)) = uintptr(unsafe.Pointer(arg))
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.panicStack)) = uintptr(unsafe.Pointer(gp._panic))
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))

	// Push the record onto the goroutine's defer stack. This is an
	// ordinary write (gp._defer is not newly-allocated stack memory).
	gp._defer = d
}
267
268// Allocate a Defer, usually using per-P pool.
269// Each defer must be released with freedefer.
270func newdefer() *_defer {
271	var d *_defer
272	gp := getg()
273	pp := gp.m.p.ptr()
274	if len(pp.deferpool) == 0 && sched.deferpool != nil {
275		systemstack(func() {
276			lock(&sched.deferlock)
277			for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
278				d := sched.deferpool
279				sched.deferpool = d.link
280				d.link = nil
281				pp.deferpool = append(pp.deferpool, d)
282			}
283			unlock(&sched.deferlock)
284		})
285	}
286	if n := len(pp.deferpool); n > 0 {
287		d = pp.deferpool[n-1]
288		pp.deferpool[n-1] = nil
289		pp.deferpool = pp.deferpool[:n-1]
290	}
291	if d == nil {
292		systemstack(func() {
293			d = new(_defer)
294		})
295		if debugCachedWork {
296			// Duplicate the tail below so if there's a
297			// crash in checkPut we can tell if d was just
298			// allocated or came from the pool.
299			d.heap = true
300			d.link = gp._defer
301			gp._defer = d
302			return d
303		}
304	}
305	d.heap = true
306	return d
307}
308
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	// Both of these indicate misuse and throw (see the helpers below);
	// a defer must have its panic and function cleared before freeing.
	if d._panic != nil {
		freedeferpanic()
	}
	if d.pfn != 0 {
		freedeferfn()
	}
	// Stack-allocated records (deferprocStack) are not pooled.
	if !d.heap {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			// Build a chain (first..last) of the records popped
			// off the local pool, then splice it onto the central
			// list under the lock.
			// NOTE(review): assumes the loop runs at least once
			// (cap(pp.deferpool) > 0) so last != nil below — confirm
			// deferpool is always created with nonzero capacity.
			var first, last *_defer
			for len(pp.deferpool) > cap(pp.deferpool)/2 {
				n := len(pp.deferpool)
				d := pp.deferpool[n-1]
				pp.deferpool[n-1] = nil
				pp.deferpool = pp.deferpool[:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool
			sched.deferpool = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.link = nil
	d.frame = nil
	d.panicStack = nil
	d.arg = nil
	d.retaddr = 0
	d.makefunccanrecover = false
	// d._panic and d.pfn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.

	pp.deferpool = append(pp.deferpool, d)
}
367
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
//
// freedeferpanic reports misuse of freedefer: the record's _panic
// field was still set when the record was freed.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
374
// freedeferfn reports misuse of freedefer: the record's deferred
// function pointer (pfn) was still set when the record was freed.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
379
// deferreturn is called to undefer the stack.
// The compiler inserts a call to this function as a finally clause
// wrapped around the body of any function that calls defer.
// The frame argument points to the stack frame of the function.
func deferreturn(frame *bool) {
	gp := getg()
	// Run every deferred call registered for this frame, most
	// recently deferred first.
	for gp._defer != nil && gp._defer.frame == frame {
		d := gp._defer
		pfn := d.pfn
		// Clear pfn before the call; d stays linked while it runs.
		d.pfn = 0

		if pfn != 0 {
			// This is rather awkward.
			// The gc compiler does this using assembler
			// code in jmpdefer.
			//
			// Build a callable Go func value whose data word
			// points at pfn (the C function pointer), then call it.
			var fn func(unsafe.Pointer)
			*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
			gp.deferring = true
			fn(d.arg)
			gp.deferring = false
		}

		// If that was CgocallBackDone, it will have freed the
		// defer for us, since we are no longer running as Go code.
		if getg() == nil {
			*frame = true
			return
		}
		if gp.ranCgocallBackDone {
			gp.ranCgocallBackDone = false
			*frame = true
			return
		}

		// Unlink and recycle the record.
		gp._defer = d.link

		freedefer(d)

		// Since we are executing a defer function now, we
		// know that we are returning from the calling
		// function. If the calling function, or one of its
		// callees, panicked, then the defer functions would
		// be executed by panic.
		*frame = true
	}
}
426
// __builtin_extract_return_addr is a GCC intrinsic that converts an
// address returned by __builtin_return_address(0) to a real address.
// On most architectures this is a nop.
// Declared without a body; the //extern comment below binds the Go
// name to the C-level symbol.
//extern __builtin_extract_return_addr
func __builtin_extract_return_addr(uintptr) uintptr
432
433// setdeferretaddr records the address to which the deferred function
434// returns.  This is check by canrecover.  The frontend relies on this
435// function returning false.
436func setdeferretaddr(retaddr uintptr) bool {
437	gp := getg()
438	if gp._defer != nil {
439		gp._defer.retaddr = __builtin_extract_return_addr(retaddr)
440	}
441	return false
442}
443
// checkdefer is called by exception handlers used when unwinding the
// stack after a recovered panic. The exception handler is simply
//   checkdefer(frame)
//   return;
// If we have not yet reached the frame we are looking for, we
// continue unwinding.
func checkdefer(frame *bool) {
	gp := getg()
	if gp == nil {
		// We should never wind up here. Even if some other
		// language throws an exception, the cgo code
		// should ensure that g is set.
		throw("no g in checkdefer")
	} else if gp.isforeign {
		// Some other language has thrown an exception.
		// We need to run the local defer handlers.
		// If they call recover, we stop unwinding here.
		//
		// Push a synthetic foreign _panic so recover has
		// something to mark as recovered.
		var p _panic
		p.isforeign = true
		p.link = gp._panic
		gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		// Run each pending defer registered for this frame.
		for {
			d := gp._defer
			if d == nil || d.frame != frame || d.pfn == 0 {
				break
			}

			pfn := d.pfn
			gp._defer = d.link

			// Build a callable func value from the C function
			// pointer (same trick as deferreturn).
			var fn func(unsafe.Pointer)
			*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
			gp.deferring = true
			fn(d.arg)
			gp.deferring = false

			freedefer(d)

			if p.recovered {
				// The recover function caught the panic
				// thrown by some other language.
				break
			}
		}

		// Pop the synthetic panic before deciding how to proceed.
		recovered := p.recovered
		gp._panic = p.link

		if recovered {
			// Just return and continue executing Go code.
			*frame = true
			return
		}

		// We are panicking through this function.
		*frame = false
	} else if gp._defer != nil && gp._defer.pfn == 0 && gp._defer.frame == frame {
		// This is the defer function that called recover.
		// Simply return to stop the stack unwind, and let the
		// Go code continue to execute.
		d := gp._defer
		gp._defer = d.link
		freedefer(d)

		// We are returning from this function.
		*frame = true

		return
	}

	// This is some other defer function. It was already run by
	// the call to panic, or just above. Rethrow the exception.
	rethrowException()
	throw("rethrowException returned")
}
519
520// unwindStack starts unwinding the stack for a panic. We unwind
521// function calls until we reach the one which used a defer function
522// which called recover. Each function which uses a defer statement
523// will have an exception handler, as shown above for checkdefer.
524func unwindStack() {
525	// Allocate the exception type used by the unwind ABI.
526	// It would be nice to define it in runtime_sysinfo.go,
527	// but current definitions don't work because the required
528	// alignment is larger than can be represented in Go.
529	// The type never contains any Go pointers.
530	size := unwindExceptionSize()
531	usize := uintptr(unsafe.Sizeof(uintptr(0)))
532	c := (size + usize - 1) / usize
533	s := make([]uintptr, c)
534	getg().exception = unsafe.Pointer(&s[0])
535	throwException()
536}
537
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	gp.goexiting = true

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		pfn := d.pfn
		if pfn == 0 {
			// This defer was started by an earlier panic or
			// Goexit; abandon that panic and discard the record.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}
		// Clear pfn so the defer won't be run a second time.
		d.pfn = 0

		// Build a callable func value from the C function pointer
		// (same trick as deferreturn) and invoke the deferred call.
		var fn func(unsafe.Pointer)
		*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
		gp.deferring = true
		fn(d.arg)
		gp.deferring = false

		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	gp.goexiting = false
	goexit1()
}
595
596// Call all Error and String methods before freezing the world.
597// Used when crashing with panicking.
598func preprintpanics(p *_panic) {
599	defer func() {
600		if recover() != nil {
601			throw("panic while printing panic value")
602		}
603	}()
604	for p != nil {
605		switch v := p.arg.(type) {
606		case error:
607			p.arg = v.Error()
608		case stringer:
609			p.arg = v.String()
610		}
611		p = p.link
612	}
613}
614
615// Print all currently active panics. Used when crashing.
616// Should only be called after preprintpanics.
617func printpanics(p *_panic) {
618	if p.link != nil {
619		printpanics(p.link)
620		if !p.link.goexit {
621			print("\t")
622		}
623	}
624	if p.goexit {
625		return
626	}
627	print("panic: ")
628	printany(p.arg)
629	if p.recovered {
630		print(" [recovered]")
631	}
632	print("\n")
633}
634
// The implementation of the predeclared function panic.
//
// gopanic runs the goroutine's deferred functions until one of them
// recovers (in which case the stack is unwound via unwindStack) or
// none are left (in which case the process dies via fatalpanic).
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// The gc compiler allocates this new _panic struct on the
	// stack. We can't do that, because when a deferred function
	// recovers the panic we unwind the stack. We unlink this
	// entry before unwinding the stack, but that doesn't help in
	// the case where we panic, a deferred function recovers and
	// then panics itself, that panic is in turn recovered, and
	// unwinds the stack past this stack frame.

	p := &_panic{
		arg:  e,
		link: gp._panic,
	}
	gp._panic = p

	atomic.Xadd(&runningPanicDefers, 1)

	// Run deferred functions, most recently deferred first.
	for {
		d := gp._defer
		if d == nil {
			break
		}

		pfn := d.pfn

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if pfn == 0 {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		// Clear pfn so the defer won't run a second time.
		d.pfn = 0

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = p

		// Build a callable func value from the C function pointer
		// (same trick as deferreturn) and invoke the deferred call.
		var fn func(unsafe.Pointer)
		*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
		gp.deferring = true
		fn(d.arg)
		gp.deferring = false

		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		if p.recovered {
			// The deferred call recovered this panic; pop it.
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A Goexit was interrupted by this (now
				// recovered) panic; resume the Goexit.
				Goexit()
				throw("Goexit returned")
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}

			if gp._panic != nil && gp._panic.goexit {
				Goexit()
				throw("Goexit returned")
			}

			// Unwind the stack by throwing an exception.
			// The compiler has arranged to create
			// exception handlers in each function
			// that uses a defer statement.  These
			// exception handlers will check whether
			// the entry on the top of the defer stack
			// is from the current function.  If it is,
			// we have unwound the stack far enough.
			unwindStack()

			throw("unwindStack returned")
		}

		// Because we executed that defer function by a panic,
		// and it did not call recover, we know that we are
		// not returning from the calling function--we are
		// panicking through it.
		*d.frame = false

		// Deferred function did not panic. Remove d.
		// In the p.recovered case, d will be removed by checkdefer.
		gp._defer = d.link

		freedefer(d)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
777
778// currentDefer returns the top of the defer stack if it can be recovered.
779// Otherwise it returns nil.
780func currentDefer() *_defer {
781	gp := getg()
782	d := gp._defer
783	if d == nil {
784		return nil
785	}
786
787	// The panic that would be recovered is the one on the top of
788	// the panic stack. We do not want to recover it if that panic
789	// was on the top of the panic stack when this function was
790	// deferred.
791	if d.panicStack == gp._panic {
792		return nil
793	}
794
795	// The deferred thunk will call setdeferretaddr. If this has
796	// not happened, then we have not been called via defer, and
797	// we can not recover.
798	if d.retaddr == 0 {
799		return nil
800	}
801
802	return d
803}
804
// canrecover is called by a thunk to see if the real function would
// be permitted to recover a panic value. Recovering a value is
// permitted if the thunk was called directly by defer. retaddr is the
// return address of the function that is calling canrecover--that is,
// the thunk.
func canrecover(retaddr uintptr) bool {
	d := currentDefer()
	if d == nil {
		return false
	}

	// Fast path: the caller's return address is within a small
	// window of the address recorded by setdeferretaddr.
	ret := __builtin_extract_return_addr(retaddr)
	dret := d.retaddr
	if ret <= dret && ret+16 >= dret {
		return true
	}

	// On some systems, in some cases, the return address does not
	// work reliably. See http://gcc.gnu.org/PR60406. If we are
	// permitted to call recover, the call stack will look like this:
	//     runtime.gopanic, runtime.deferreturn, etc.
	//     thunk to call deferred function (calls __go_set_defer_retaddr)
	//     function that calls __go_can_recover (passing return address)
	//     runtime.canrecover
	// Calling callers will skip the thunks. So if our caller's
	// caller starts with "runtime.", then we are permitted to
	// call recover.
	var locs [16]location
	if callers(1, locs[:2]) < 2 {
		return false
	}

	name := locs[1].function
	if hasPrefix(name, "runtime.") {
		return true
	}

	// If the function calling recover was created by reflect.MakeFunc,
	// then makefuncfficanrecover will have set makefunccanrecover.
	if !d.makefunccanrecover {
		return false
	}

	// We look up the stack, ignoring libffi functions and
	// functions in the reflect package, until we find
	// reflect.makeFuncStub or reflect.ffi_callback called by FFI
	// functions.  Then we check the caller of that function.

	n := callers(2, locs[:])
	foundFFICallback := false
	i := 0
	for ; i < n; i++ {
		name = locs[i].function
		if name == "" {
			// No function name means this caller isn't Go code.
			// Assume that this is libffi.
			continue
		}

		// Ignore function in libffi.
		if hasPrefix(name, "ffi_") {
			continue
		}

		if foundFFICallback {
			break
		}

		if name == "reflect.ffi_callback" {
			foundFFICallback = true
			continue
		}

		// Ignore other functions in the reflect package.
		if hasPrefix(name, "reflect.") || hasPrefix(name, ".1reflect.") {
			continue
		}

		// We should now be looking at the real caller.
		break
	}

	// Recovery is permitted only if the real caller is the runtime
	// (i.e. the defer machinery itself).
	if i < n {
		name = locs[i].function
		if hasPrefix(name, "runtime.") {
			return true
		}
	}

	return false
}
896
897// This function is called when code is about to enter a function
898// created by the libffi version of reflect.MakeFunc. This function is
899// passed the names of the callers of the libffi code that called the
900// stub. It uses them to decide whether it is permitted to call
901// recover, and sets d.makefunccanrecover so that gorecover can make
902// the same decision.
903func makefuncfficanrecover(loc []location) {
904	d := currentDefer()
905	if d == nil {
906		return
907	}
908
909	// If we are already in a call stack of MakeFunc functions,
910	// there is nothing we can usefully check here.
911	if d.makefunccanrecover {
912		return
913	}
914
915	// loc starts with the caller of our caller. That will be a thunk.
916	// If its caller was a function function, then it was called
917	// directly by defer.
918	if len(loc) < 2 {
919		return
920	}
921
922	name := loc[1].function
923	if hasPrefix(name, "runtime.") {
924		d.makefunccanrecover = true
925	}
926}
927
928// makefuncreturning is called when code is about to exit a function
929// created by reflect.MakeFunc. It is called by the function stub used
930// by reflect.MakeFunc. It clears the makefunccanrecover field. It's
931// OK to always clear this field, because canrecover will only be
932// called by a stub created for a function that calls recover. That
933// stub will not call a function created by reflect.MakeFunc, so by
934// the time we get here any caller higher up on the call stack no
935// longer needs the information.
936func makefuncreturning() {
937	d := getg()._defer
938	if d != nil {
939		d.makefunccanrecover = false
940	}
941}
942
943// The implementation of the predeclared function recover.
944func gorecover() interface{} {
945	gp := getg()
946	p := gp._panic
947	if p != nil && !p.goexit && !p.recovered {
948		p.recovered = true
949		return p.arg
950	}
951	return nil
952}
953
954// deferredrecover is called when a call to recover is deferred.  That
955// is, something like
956//   defer recover()
957//
958// We need to handle this specially.  In gc, the recover function
959// looks up the stack frame. In particular, that means that a deferred
960// recover will not recover a panic thrown in the same function that
961// defers the recover. It will only recover a panic thrown in a
962// function that defers the deferred call to recover.
963//
964// In other words:
965//
966// func f1() {
967// 	defer recover()	// does not stop panic
968// 	panic(0)
969// }
970//
971// func f2() {
972// 	defer func() {
973// 		defer recover()	// stops panic(0)
974// 	}()
975// 	panic(0)
976// }
977//
978// func f3() {
979// 	defer func() {
980// 		defer recover()	// does not stop panic
981// 		panic(0)
982// 	}()
983// 	panic(1)
984// }
985//
986// func f4() {
987// 	defer func() {
988// 		defer func() {
989// 			defer recover()	// stops panic(0)
990// 		}()
991// 		panic(0)
992// 	}()
993// 	panic(1)
994// }
995//
996// The interesting case here is f3. As can be seen from f2, the
997// deferred recover could pick up panic(1). However, this does not
998// happen because it is blocked by the panic(0).
999//
1000// When a function calls recover, then when we invoke it we pass a
1001// hidden parameter indicating whether it should recover something.
1002// This parameter is set based on whether the function is being
1003// invoked directly from defer. The parameter winds up determining
1004// whether __go_recover or __go_deferred_recover is called at all.
1005//
1006// In the case of a deferred recover, the hidden parameter that
1007// controls the call is actually the one set up for the function that
1008// runs the defer recover() statement. That is the right thing in all
1009// the cases above except for f3. In f3 the function is permitted to
1010// call recover, but the deferred recover call is not. We address that
1011// here by checking for that specific case before calling recover. If
1012// this function was deferred when there is already a panic on the
1013// panic stack, then we can only recover that panic, not any other.
1014
1015// Note that we can get away with using a special function here
1016// because you are not permitted to take the address of a predeclared
1017// function like recover.
1018func deferredrecover() interface{} {
1019	gp := getg()
1020	if gp._defer == nil || gp._defer.panicStack != gp._panic {
1021		return nil
1022	}
1023	return gorecover()
1024}
1025
// sync_throw exposes throw to package sync, which cannot import
// runtime directly; the linkname makes it callable as sync.throw.
//
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
1030
// throw reports a fatal runtime error: it prints "fatal error: s",
// marks this M as throwing (so the runtime stack is included in the
// traceback), and terminates the process via fatalthrow.
// It never returns.
//
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}
1045
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
// While panicking > 0, main is blocked from exiting (see fatalpanic).
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
// It is acquired in startpanic_m and released in dopanic_m.
var paniclk mutex
1058
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	// Capture the caller's PC/SP first so the traceback starts at
	// the throw site.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	// Enter the dying state and freeze the world. The return value
	// (whether to print panic messages) is ignored: a throw has no
	// panic messages to print.
	startpanic_m()

	if dopanic_m(gp, pc, sp) {
		// gotraceback (consulted inside dopanic_m) requested a
		// hard crash rather than a plain exit.
		crash()
	}

	exit(2)

	*(*int)(nil) = 0 // not reached
}
1079
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	// Capture the caller's PC/SP first so the traceback starts at
	// the panic site.
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool

	if startpanic_m() && msgs != nil {
		// There were panic messages and startpanic_m
		// says it's okay to try to print them.

		// startpanic_m set panicking, which will
		// block main from exiting, so now OK to
		// decrement runningPanicDefers.
		atomic.Xadd(&runningPanicDefers, -1)

		printpanics(msgs)
	}

	docrash = dopanic_m(gp, pc, sp)

	if docrash {
		// By crashing here, after dopanic_m has returned, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
1118
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying tracks how far this M has already gotten in dying,
	// so that nested failures degrade gracefully instead of looping.
	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk) // released in dopanic_m
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		// exit does not return, but the compiler doesn't know
		// that; fall through to the default case to satisfy the
		// missing-return check.
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
1178
// throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
//
// TODO(austin): Remove this after Go 1.15 when we remove the
// mlockGsignal workaround.
var throwReportQuirk func()

// didothers records whether tracebackothers has already been run, so
// that concurrent dying Ms dump the other goroutines at most once.
var didothers bool

// deadlock is locked twice in dopanic_m to park an M forever while
// another panicking M finishes its output.
var deadlock mutex
1187
// dopanic_m prints the signal information (if any) and the stack
// traces for a dying program, then coordinates shutdown with any
// other Ms panicking at the same time. It reports whether the caller
// should crash (per gotraceback) instead of exiting.
//
// pc and sp are unused in this implementation; traceback(0) walks
// the current stack directly.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	// If gp was killed by a signal, print the signal header before
	// any traceback.
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			// Dying on a g other than the user g: include all
			// goroutines in the dump.
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(0)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// On the system g, only show the runtime stack when
			// extra verbosity is requested or we are in a throw.
			print("\nruntime stack:\n")
			traceback(0)
		}
		if !didothers && all {
			// Dump the other goroutines at most once, even if
			// several Ms reach this point.
			didothers = true
			tracebackothers(gp)
		}
	}
	// Release the output lock taken in startpanic_m so another
	// panicking M can print.
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		// Acquiring deadlock twice blocks this M permanently.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	if throwReportQuirk != nil {
		throwReportQuirk()
	}

	return docrash
}
1237
1238// canpanic returns false if a signal should throw instead of
1239// panicking.
1240//
1241//go:nosplit
1242func canpanic(gp *g) bool {
1243	// Note that g is m->gsignal, different from gp.
1244	// Note also that g->m can change at preemption, so m can go stale
1245	// if this function ever makes a function call.
1246	_g_ := getg()
1247	_m_ := _g_.m
1248
1249	// Is it okay for gp to panic instead of crashing the program?
1250	// Yes, as long as it is running Go code, not runtime code,
1251	// and not stuck in a system call.
1252	if gp == nil || gp != _m_.curg {
1253		return false
1254	}
1255	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
1256		return false
1257	}
1258	status := readgstatus(gp)
1259	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
1260		return false
1261	}
1262	return true
1263}
1264
// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
// This implementation always reports false; presumably this port has
// no runtime.abort PC to match (NOTE(review): confirm for gccgo).
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return false
}
1275