1// Copyright 2014 The Go Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style 3// license that can be found in the LICENSE file. 4 5package runtime 6 7import ( 8 "runtime/internal/atomic" 9 "unsafe" 10) 11 12// For gccgo, use go:linkname to rename compiler-called functions to 13// themselves, so that the compiler will export them. 14// 15//go:linkname deferproc runtime.deferproc 16//go:linkname deferreturn runtime.deferreturn 17//go:linkname setdeferretaddr runtime.setdeferretaddr 18//go:linkname checkdefer runtime.checkdefer 19//go:linkname gopanic runtime.gopanic 20//go:linkname canrecover runtime.canrecover 21//go:linkname makefuncfficanrecover runtime.makefuncfficanrecover 22//go:linkname makefuncreturning runtime.makefuncreturning 23//go:linkname gorecover runtime.gorecover 24//go:linkname deferredrecover runtime.deferredrecover 25//go:linkname panicmem runtime.panicmem 26// Temporary for C code to call: 27//go:linkname throw runtime.throw 28 29// Calling panic with one of the errors below will call errorString.Error 30// which will call mallocgc to concatenate strings. That will fail if 31// malloc is locked, causing a confusing error message. Throw a better 32// error message instead. 
func panicCheckMalloc(err error) {
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		// throw prints without allocating, unlike panic, which would
		// call errorString.Error and hence mallocgc.
		throw(string(err.(errorString)))
	}
}

// The error values below are preallocated so that the panic* entry
// points below need not allocate at panic time.

var indexError = error(errorString("index out of range"))

// panicindex raises an "index out of range" panic using the
// preallocated indexError value.
func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

// panicslice raises a "slice bounds out of range" panic using the
// preallocated sliceError value.
func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

// panicdivide raises an "integer divide by zero" panic using the
// preallocated divideError value.
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

// panicoverflow raises an "integer overflow" panic using the
// preallocated overflowError value.
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

// panicfloat raises a "floating point error" panic using the
// preallocated floatError value.
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem raises an "invalid memory address or nil pointer
// dereference" panic using the preallocated memoryError value.
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}

// throwinit reports a recursive call during package initialization,
// which should be impossible if the linker ordered things correctly.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// deferproc creates a new deferred function.
// The compiler turns a defer statement into a call to this.
// frame points into the stack frame; it is used to determine which
// deferred functions are for the current stack frame, and whether we
// have already deferred functions for this frame.
// pfn is a C function pointer.
// arg is a value to pass to pfn.
func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
	d := newdefer()
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.frame = frame
	// Record the panic that was active when the defer was created.
	// currentDefer/deferredrecover compare this against g._panic to
	// decide whether recovery is permitted.
	d.panicStack = getg()._panic
	d.pfn = pfn
	d.arg = arg
	d.retaddr = 0
	d.makefunccanrecover = false
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
func newdefer() *_defer {
	var d *_defer
	gp := getg()
	pp := gp.m.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		// Local pool is empty: refill it to half capacity from the
		// central pool. Run on the system stack so we do not grow
		// this goroutine's stack while holding deferlock.
		systemstack(func() {
			lock(&sched.deferlock)
			for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
				d := sched.deferpool
				sched.deferpool = d.link
				d.link = nil
				pp.deferpool = append(pp.deferpool, d)
			}
			unlock(&sched.deferlock)
		})
	}
	// Take the most recently freed defer from the local pool, if any.
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	if d == nil {
		// Pools are empty; allocate a new one. Done on the system
		// stack so the allocation does not grow this stack.
		systemstack(func() {
			d = new(_defer)
		})
	}
	// Push the new entry onto the goroutine's defer stack.
	d.link = gp._defer
	gp._defer = d
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	pp := getg().m.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			// Build a linked list of the entries popped from the
			// local pool, then splice it onto the central list.
			var first, last *_defer
			for len(pp.deferpool) > cap(pp.deferpool)/2 {
				n := len(pp.deferpool)
				d := pp.deferpool[n-1]
				pp.deferpool[n-1] = nil
				pp.deferpool = pp.deferpool[:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool
			sched.deferpool = first
			unlock(&sched.deferlock)
		})
	}

	// Zero the fields individually before returning d to the pool.
	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.link = nil
	d.frame = nil
	d.panicStack = nil
	d._panic = nil
	d.pfn = 0
	d.arg = nil
	d.retaddr = 0
	d.makefunccanrecover = false

	pp.deferpool = append(pp.deferpool, d)
}

// deferreturn is called to undefer the stack.
// The compiler inserts a call to this function as a finally clause
// wrapped around the body of any function that calls defer.
// The frame argument points to the stack frame of the function.
func deferreturn(frame *bool) {
	gp := getg()
	// Run every deferred function registered for this frame, newest
	// first, popping each from the goroutine's defer stack.
	for gp._defer != nil && gp._defer.frame == frame {
		d := gp._defer
		pfn := d.pfn
		d.pfn = 0

		if pfn != 0 {
			// This is rather awkward.
			// The gc compiler does this using assembler
			// code in jmpdefer.
			var fn func(unsafe.Pointer)
			*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
			fn(d.arg)
		}

		// If we are returning from a Go function called by a
		// C function running in a C thread, g may now be nil,
		// in which case CgocallBackDone will have cleared _defer.
		// In that case some other goroutine may already be using gp.
		if getg() == nil {
			*frame = true
			return
		}

		gp._defer = d.link

		freedefer(d)

		// Since we are executing a defer function now, we
		// know that we are returning from the calling
		// function. If the calling function, or one of its
		// callees, panicked, then the defer functions would
		// be executed by panic.
		*frame = true
	}
}

// __builtin_extract_return_addr is a GCC intrinsic that converts an
// address returned by __builtin_return_address(0) to a real address.
// On most architectures this is a nop.
//extern __builtin_extract_return_addr
func __builtin_extract_return_addr(uintptr) uintptr

// setdeferretaddr records the address to which the deferred function
// returns. This is checked by canrecover. The frontend relies on this
// function returning false.
func setdeferretaddr(retaddr uintptr) bool {
	gp := getg()
	if gp._defer != nil {
		// Normalize the raw return address via the GCC intrinsic
		// before storing it; canrecover compares against this value.
		gp._defer.retaddr = __builtin_extract_return_addr(retaddr)
	}
	return false
}

// checkdefer is called by exception handlers used when unwinding the
// stack after a recovered panic. The exception handler is simply
//	checkdefer(frame)
//	return;
// If we have not yet reached the frame we are looking for, we
// continue unwinding.
func checkdefer(frame *bool) {
	gp := getg()
	if gp == nil {
		// We should never wind up here. Even if some other
		// language throws an exception, the cgo code
		// should ensure that g is set.
		throw("no g in checkdefer")
	} else if gp.isforeign {
		// Some other language has thrown an exception.
		// We need to run the local defer handlers.
		// If they call recover, we stop unwinding here.
		var p _panic
		p.isforeign = true
		p.link = gp._panic
		gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		for {
			// Only run defers registered for this frame that still
			// have a function to call.
			d := gp._defer
			if d == nil || d.frame != frame || d.pfn == 0 {
				break
			}

			pfn := d.pfn
			gp._defer = d.link

			// Invoke the deferred function through its C function
			// pointer, as deferreturn does.
			var fn func(unsafe.Pointer)
			*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
			fn(d.arg)

			freedefer(d)

			if p.recovered {
				// The recover function caught the panic
				// thrown by some other language.
				break
			}
		}

		recovered := p.recovered
		gp._panic = p.link

		if recovered {
			// Just return and continue executing Go code.
			*frame = true
			return
		}

		// We are panicking through this function.
		*frame = false
	} else if gp._defer != nil && gp._defer.pfn == 0 && gp._defer.frame == frame {
		// This is the defer function that called recover.
		// Simply return to stop the stack unwind, and let the
		// Go code continue to execute.
		d := gp._defer
		gp._defer = d.link
		freedefer(d)

		// We are returning from this function.
		*frame = true

		return
	}

	// This is some other defer function. It was already run by
	// the call to panic, or just above. Rethrow the exception.
	rethrowException()
	throw("rethrowException returned")
}

// unwindStack starts unwinding the stack for a panic. We unwind
// function calls until we reach the one which used a defer function
// which called recover. Each function which uses a defer statement
// will have an exception handler, as shown above for checkdefer.
func unwindStack() {
	// Allocate the exception type used by the unwind ABI.
	// It would be nice to define it in runtime_sysinfo.go,
	// but current definitions don't work because the required
	// alignment is larger than can be represented in Go.
	// The type never contains any Go pointers.
	size := unwindExceptionSize()
	usize := uintptr(unsafe.Sizeof(uintptr(0)))
	// Round up to a whole number of uintptr-sized words.
	c := (size + usize - 1) / usize
	s := make([]uintptr, c)
	getg().exception = unsafe.Pointer(&s[0])
	throwException()
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}

		pfn := d.pfn
		if pfn == 0 {
			// This defer was already started by an earlier panic;
			// mark that panic aborted and discard the entry.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.pfn = 0

		var fn func(unsafe.Pointer)
		*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
		fn(d.arg)

		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	// A panic value's Error/String method may itself panic; convert
	// that into a hard crash rather than recursing.
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	// Recurse first so the oldest panic prints first, each nested
	// panic indented under its predecessor.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	// Panics in runtime-sensitive contexts cannot be recovered;
	// print the value and crash with a descriptive throw.
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// The gc compiler allocates this new _panic struct on the
	// stack. We can't do that, because when a deferred function
	// recovers the panic we unwind the stack. We unlink this
	// entry before unwinding the stack, but that doesn't help in
	// the case where we panic, a deferred function recovers and
	// then panics itself, that panic is in turn recovered, and
	// unwinds the stack past this stack frame.

	p := &_panic{
		arg:  e,
		link: gp._panic,
	}
	gp._panic = p

	atomic.Xadd(&runningPanicDefers, 1)

	// Run deferred functions, newest first, until one recovers or
	// the defer stack is exhausted.
	for {
		d := gp._defer
		if d == nil {
			break
		}

		pfn := d.pfn

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if pfn == 0 {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.pfn = 0

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = p

		// Invoke the deferred function through its C function pointer.
		var fn func(unsafe.Pointer)
		*(*uintptr)(unsafe.Pointer(&fn)) = uintptr(noescape(unsafe.Pointer(&pfn)))
		fn(d.arg)

		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link

			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}

			// Unwind the stack by throwing an exception.
			// The compiler has arranged to create
			// exception handlers in each function
			// that uses a defer statement. These
			// exception handlers will check whether
			// the entry on the top of the defer stack
			// is from the current function. If it is,
			// we have unwound the stack far enough.
			unwindStack()

			throw("unwindStack returned")
		}

		// Because we executed that defer function by a panic,
		// and it did not call recover, we know that we are
		// not returning from the calling function--we are
		// panicking through it.
		*d.frame = false

		// Deferred function did not panic. Remove d.
		// In the p.recovered case, d will be removed by checkdefer.
		gp._defer = d.link

		freedefer(d)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()

	// startpanic set panicking, which will block main from exiting,
	// so now OK to decrement runningPanicDefers.
	atomic.Xadd(&runningPanicDefers, -1)

	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}

// currentDefer returns the top of the defer stack if it can be recovered.
// Otherwise it returns nil.
func currentDefer() *_defer {
	gp := getg()
	d := gp._defer
	if d == nil {
		return nil
	}

	// The panic that would be recovered is the one on the top of
	// the panic stack. We do not want to recover it if that panic
	// was on the top of the panic stack when this function was
	// deferred.
	if d.panicStack == gp._panic {
		return nil
	}

	// The deferred thunk will call setdeferretaddr. If this has
	// not happened, then we have not been called via defer, and
	// we can not recover.
	if d.retaddr == 0 {
		return nil
	}

	return d
}

// canrecover is called by a thunk to see if the real function would
// be permitted to recover a panic value. Recovering a value is
// permitted if the thunk was called directly by defer. retaddr is the
// return address of the function that is calling canrecover--that is,
// the thunk.
func canrecover(retaddr uintptr) bool {
	d := currentDefer()
	if d == nil {
		return false
	}

	// Fast path: the caller's return address is within 16 bytes of
	// the address recorded by setdeferretaddr.
	ret := __builtin_extract_return_addr(retaddr)
	dret := d.retaddr
	if ret <= dret && ret+16 >= dret {
		return true
	}

	// On some systems, in some cases, the return address does not
	// work reliably. See http://gcc.gnu.org/PR60406. If we are
	// permitted to call recover, the call stack will look like this:
	//     runtime.gopanic, runtime.deferreturn, etc.
	//     thunk to call deferred function (calls __go_set_defer_retaddr)
	//     function that calls __go_can_recover (passing return address)
	//     runtime.canrecover
	// Calling callers will skip the thunks. So if our caller's
	// caller starts with "runtime.", then we are permitted to
	// call recover.
	var locs [16]location
	if callers(1, locs[:2]) < 2 {
		return false
	}

	name := locs[1].function
	if hasprefix(name, "runtime.") {
		return true
	}

	// If the function calling recover was created by reflect.MakeFunc,
	// then makefuncfficanrecover will have set makefunccanrecover.
	if !d.makefunccanrecover {
		return false
	}

	// We look up the stack, ignoring libffi functions and
	// functions in the reflect package, until we find
	// reflect.makeFuncStub or reflect.ffi_callback called by FFI
	// functions. Then we check the caller of that function.

	n := callers(2, locs[:])
	foundFFICallback := false
	i := 0
	for ; i < n; i++ {
		name = locs[i].function
		if name == "" {
			// No function name means this caller isn't Go code.
			// Assume that this is libffi.
			continue
		}

		// Ignore function in libffi.
		if hasprefix(name, "ffi_") {
			continue
		}

		if foundFFICallback {
			break
		}

		if name == "reflect.ffi_callback" {
			foundFFICallback = true
			continue
		}

		// Ignore other functions in the reflect package.
		if hasprefix(name, "reflect.") || hasprefix(name, ".1reflect.") {
			continue
		}

		// We should now be looking at the real caller.
		break
	}

	if i < n {
		name = locs[i].function
		if hasprefix(name, "runtime.") {
			return true
		}
	}

	return false
}

// This function is called when code is about to enter a function
// created by the libffi version of reflect.MakeFunc. This function is
// passed the names of the callers of the libffi code that called the
// stub. It uses them to decide whether it is permitted to call
// recover, and sets d.makefunccanrecover so that gorecover can make
// the same decision.
func makefuncfficanrecover(loc []location) {
	d := currentDefer()
	if d == nil {
		return
	}

	// If we are already in a call stack of MakeFunc functions,
	// there is nothing we can usefully check here.
	if d.makefunccanrecover {
		return
	}

	// loc starts with the caller of our caller. That will be a thunk.
	// If its caller was a runtime function, then it was called
	// directly by defer.
	if len(loc) < 2 {
		return
	}

	name := loc[1].function
	if hasprefix(name, "runtime.") {
		d.makefunccanrecover = true
	}
}

// makefuncreturning is called when code is about to exit a function
// created by reflect.MakeFunc. It is called by the function stub used
// by reflect.MakeFunc. It clears the makefunccanrecover field. It's
// OK to always clear this field, because canrecover will only be
// called by a stub created for a function that calls recover. That
// stub will not call a function created by reflect.MakeFunc, so by
// the time we get here any caller higher up on the call stack no
// longer needs the information.
func makefuncreturning() {
	d := getg()._defer
	if d != nil {
		d.makefunccanrecover = false
	}
}

// The implementation of the predeclared function recover.
func gorecover() interface{} {
	gp := getg()
	p := gp._panic
	// Only the topmost, not-yet-recovered panic can be recovered;
	// marking it recovered is what stops the unwind in gopanic.
	if p != nil && !p.recovered {
		p.recovered = true
		return p.arg
	}
	return nil
}

// deferredrecover is called when a call to recover is deferred. That
// is, something like
//	defer recover()
//
// We need to handle this specially. In gc, the recover function
// looks up the stack frame. In particular, that means that a deferred
// recover will not recover a panic thrown in the same function that
// defers the recover. It will only recover a panic thrown in a
// function that defers the deferred call to recover.
//
// In other words:
//
// func f1() {
//	defer recover() // does not stop panic
//	panic(0)
// }
//
// func f2() {
//	defer func() {
//		defer recover() // stops panic(0)
//	}()
//	panic(0)
// }
//
// func f3() {
//	defer func() {
//		defer recover() // does not stop panic
//		panic(0)
//	}()
//	panic(1)
// }
//
// func f4() {
//	defer func() {
//		defer func() {
//			defer recover() // stops panic(0)
//		}()
//		panic(0)
//	}()
//	panic(1)
// }
//
// The interesting case here is f3. As can be seen from f2, the
// deferred recover could pick up panic(1). However, this does not
// happen because it is blocked by the panic(0).
//
// When a function calls recover, then when we invoke it we pass a
// hidden parameter indicating whether it should recover something.
// This parameter is set based on whether the function is being
// invoked directly from defer. The parameter winds up determining
// whether __go_recover or __go_deferred_recover is called at all.
//
// In the case of a deferred recover, the hidden parameter that
// controls the call is actually the one set up for the function that
// runs the defer recover() statement. That is the right thing in all
// the cases above except for f3. In f3 the function is permitted to
// call recover, but the deferred recover call is not. We address that
// here by checking for that specific case before calling recover. If
// this function was deferred when there is already a panic on the
// panic stack, then we can only recover that panic, not any other.

// Note that we can get away with using a special function here
// because you are not permitted to take the address of a predeclared
// function like recover.
func deferredrecover() interface{} {
	gp := getg()
	// Refuse to recover if the top defer was created while the
	// current panic was already active (the f3 case above).
	if gp._defer == nil || gp._defer.panicStack != gp._panic {
		return nil
	}
	return gorecover()
}

// sync_throw is the runtime entry point that lets package sync crash
// the program via throw.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

// throw crashes the program with a fatal error message. It does not
// allocate and must not be recovered; it never returns.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// startpanic prepares for an unrecoverable panic.
//
// It can have write barriers because the write barrier explicitly
// ignores writes once dying > 0.
//
//go:yeswritebarrierrec
func startpanic() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// m.dying tracks how far the crash has progressed, so that a
	// failure while panicking still produces some output.
	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic(). Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
	}
}

var didothers bool
var deadlock mutex

// dopanic prints the signal (if any) and stack traces for the crash,
// then exits the process. It does not return.
func dopanic(unused int) {
	gp := getg()
	if gp.sig != 0 {
		// Describe the signal that triggered the crash, by name if known.
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(0)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(0)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}

// canpanic returns false if a signal should throw instead of
// panicking.
939// 940//go:nosplit 941func canpanic(gp *g) bool { 942 // Note that g is m->gsignal, different from gp. 943 // Note also that g->m can change at preemption, so m can go stale 944 // if this function ever makes a function call. 945 _g_ := getg() 946 _m_ := _g_.m 947 948 // Is it okay for gp to panic instead of crashing the program? 949 // Yes, as long as it is running Go code, not runtime code, 950 // and not stuck in a system call. 951 if gp == nil || gp != _m_.curg { 952 return false 953 } 954 if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 { 955 return false 956 } 957 status := readgstatus(gp) 958 if status&^_Gscan != _Grunning || gp.syscallsp != 0 { 959 return false 960 } 961 return true 962} 963