1// Copyright 2009 The Go Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style 3// license that can be found in the LICENSE file. 4 5package runtime 6 7import ( 8 "internal/cpu" 9 "runtime/internal/atomic" 10 "runtime/internal/sys" 11 "unsafe" 12) 13 14// defined constants 15const ( 16 // G status 17 // 18 // Beyond indicating the general state of a G, the G status 19 // acts like a lock on the goroutine's stack (and hence its 20 // ability to execute user code). 21 // 22 // If you add to this list, add to the list 23 // of "okay during garbage collection" status 24 // in mgcmark.go too. 25 // 26 // TODO(austin): The _Gscan bit could be much lighter-weight. 27 // For example, we could choose not to run _Gscanrunnable 28 // goroutines found in the run queue, rather than CAS-looping 29 // until they become _Grunnable. And transitions like 30 // _Gscanwaiting -> _Gscanrunnable are actually okay because 31 // they don't affect stack ownership. 32 33 // _Gidle means this goroutine was just allocated and has not 34 // yet been initialized. 35 _Gidle = iota // 0 36 37 // _Grunnable means this goroutine is on a run queue. It is 38 // not currently executing user code. The stack is not owned. 39 _Grunnable // 1 40 41 // _Grunning means this goroutine may execute user code. The 42 // stack is owned by this goroutine. It is not on a run queue. 43 // It is assigned an M and a P (g.m and g.m.p are valid). 44 _Grunning // 2 45 46 // _Gsyscall means this goroutine is executing a system call. 47 // It is not executing user code. The stack is owned by this 48 // goroutine. It is not on a run queue. It is assigned an M. 49 _Gsyscall // 3 50 51 // _Gwaiting means this goroutine is blocked in the runtime. 52 // It is not executing user code. It is not on a run queue, 53 // but should be recorded somewhere (e.g., a channel wait 54 // queue) so it can be ready()d when necessary. 
The stack is 55 // not owned *except* that a channel operation may read or 56 // write parts of the stack under the appropriate channel 57 // lock. Otherwise, it is not safe to access the stack after a 58 // goroutine enters _Gwaiting (e.g., it may get moved). 59 _Gwaiting // 4 60 61 // _Gmoribund_unused is currently unused, but hardcoded in gdb 62 // scripts. 63 _Gmoribund_unused // 5 64 65 // _Gdead means this goroutine is currently unused. It may be 66 // just exited, on a free list, or just being initialized. It 67 // is not executing user code. It may or may not have a stack 68 // allocated. The G and its stack (if any) are owned by the M 69 // that is exiting the G or that obtained the G from the free 70 // list. 71 _Gdead // 6 72 73 // _Genqueue_unused is currently unused. 74 _Genqueue_unused // 7 75 76 // _Gcopystack means this goroutine's stack is being moved. It 77 // is not executing user code and is not on a run queue. The 78 // stack is owned by the goroutine that put it in _Gcopystack. 79 _Gcopystack // 8 80 81 // _Gpreempted means this goroutine stopped itself for a 82 // suspendG preemption. It is like _Gwaiting, but nothing is 83 // yet responsible for ready()ing it. Some suspendG must CAS 84 // the status to _Gwaiting to take responsibility for 85 // ready()ing this G. 86 _Gpreempted // 9 87 88 // _Gexitingsyscall means this goroutine is exiting from a 89 // system call. This is like _Gsyscall, but the GC should not 90 // scan its stack. Currently this is only used in exitsyscall0 91 // as a transient state when it drops the G. 92 _Gexitingsyscall // 10 93 94 // _Gscan combined with one of the above states other than 95 // _Grunning indicates that GC is scanning the stack. The 96 // goroutine is not executing user code and the stack is owned 97 // by the goroutine that set the _Gscan bit. 98 // 99 // _Gscanrunning is different: it is used to briefly block 100 // state transitions while GC signals the G to scan its own 101 // stack. 
This is otherwise like _Grunning. 102 // 103 // atomicstatus&~Gscan gives the state the goroutine will 104 // return to when the scan completes. 105 _Gscan = 0x1000 106 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 107 _Gscanrunning = _Gscan + _Grunning // 0x1002 108 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 109 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 110 _Gscanpreempted = _Gscan + _Gpreempted // 0x1009 111) 112 113const ( 114 // P status 115 116 // _Pidle means a P is not being used to run user code or the 117 // scheduler. Typically, it's on the idle P list and available 118 // to the scheduler, but it may just be transitioning between 119 // other states. 120 // 121 // The P is owned by the idle list or by whatever is 122 // transitioning its state. Its run queue is empty. 123 _Pidle = iota 124 125 // _Prunning means a P is owned by an M and is being used to 126 // run user code or the scheduler. Only the M that owns this P 127 // is allowed to change the P's status from _Prunning. The M 128 // may transition the P to _Pidle (if it has no more work to 129 // do), _Psyscall (when entering a syscall), or _Pgcstop (to 130 // halt for the GC). The M may also hand ownership of the P 131 // off directly to another M (e.g., to schedule a locked G). 132 _Prunning 133 134 // _Psyscall means a P is not running user code. It has 135 // affinity to an M in a syscall but is not owned by it and 136 // may be stolen by another M. This is similar to _Pidle but 137 // uses lightweight transitions and maintains M affinity. 138 // 139 // Leaving _Psyscall must be done with a CAS, either to steal 140 // or retake the P. Note that there's an ABA hazard: even if 141 // an M successfully CASes its original P back to _Prunning 142 // after a syscall, it must understand the P may have been 143 // used by another M in the interim. 144 _Psyscall 145 146 // _Pgcstop means a P is halted for STW and owned by the M 147 // that stopped the world. 
The M that stopped the world 148 // continues to use its P, even in _Pgcstop. Transitioning 149 // from _Prunning to _Pgcstop causes an M to release its P and 150 // park. 151 // 152 // The P retains its run queue and startTheWorld will restart 153 // the scheduler on Ps with non-empty run queues. 154 _Pgcstop 155 156 // _Pdead means a P is no longer used (GOMAXPROCS shrank). We 157 // reuse Ps if GOMAXPROCS increases. A dead P is mostly 158 // stripped of its resources, though a few things remain 159 // (e.g., trace buffers). 160 _Pdead 161) 162 163// Mutual exclusion locks. In the uncontended case, 164// as fast as spin locks (just a few user-level instructions), 165// but on the contention path they sleep in the kernel. 166// A zeroed Mutex is unlocked (no need to initialize each lock). 167type mutex struct { 168 // Futex-based impl treats it as uint32 key, 169 // while sema-based impl as M* waitm. 170 // Used to be a union, but unions break precise GC. 171 key uintptr 172} 173 174// sleep and wakeup on one-time events. 175// before any calls to notesleep or notewakeup, 176// must call noteclear to initialize the Note. 177// then, exactly one thread can call notesleep 178// and exactly one thread can call notewakeup (once). 179// once notewakeup has been called, the notesleep 180// will return. future notesleep will return immediately. 181// subsequent noteclear must be called only after 182// previous notesleep has returned, e.g. it's disallowed 183// to call noteclear straight after notewakeup. 184// 185// notetsleep is like notesleep but wakes up after 186// a given number of nanoseconds even if the event 187// has not yet happened. if a goroutine uses notetsleep to 188// wake up early, it must wait to call noteclear until it 189// can be sure that no other goroutine is calling 190// notewakeup. 191// 192// notesleep/notetsleep are generally called on g0, 193// notetsleepg is similar to notetsleep but is called on user g. 
194type note struct { 195 // Futex-based impl treats it as uint32 key, 196 // while sema-based impl as M* waitm. 197 // Used to be a union, but unions break precise GC. 198 key uintptr 199} 200 201type funcval struct { 202 fn uintptr 203 // variable-size, fn-specific data here 204} 205 206// The representation of a non-empty interface. 207// See comment in iface.go for more details on this struct. 208type iface struct { 209 tab unsafe.Pointer 210 data unsafe.Pointer 211} 212 213// The representation of an empty interface. 214// See comment in iface.go for more details on this struct. 215type eface struct { 216 _type *_type 217 data unsafe.Pointer 218} 219 220func efaceOf(ep *interface{}) *eface { 221 return (*eface)(unsafe.Pointer(ep)) 222} 223 224// The guintptr, muintptr, and puintptr are all used to bypass write barriers. 225// It is particularly important to avoid write barriers when the current P has 226// been released, because the GC thinks the world is stopped, and an 227// unexpected write barrier would not be synchronized with the GC, 228// which can lead to a half-executed write barrier that has marked the object 229// but not queued it. If the GC skips the object and completes before the 230// queuing can occur, it will incorrectly free the object. 231// 232// We tried using special assignment functions invoked only when not 233// holding a running P, but then some updates to a particular memory 234// word went through write barriers and some did not. This breaks the 235// write barrier shadow checking mode, and it is also scary: better to have 236// a word that is completely ignored by the GC than to have one for which 237// only a few updates are ignored. 238// 239// Gs and Ps are always reachable via true pointers in the 240// allgs and allp lists or (during allocation before they reach those lists) 241// from stack variables. 242// 243// Ms are always reachable via true pointers either from allm or 244// freem. 
Unlike Gs and Ps we do free Ms, so it's important that 245// nothing ever hold an muintptr across a safe point. 246 247// A guintptr holds a goroutine pointer, but typed as a uintptr 248// to bypass write barriers. It is used in the Gobuf goroutine state 249// and in scheduling lists that are manipulated without a P. 250// 251// The Gobuf.g goroutine pointer is almost always updated by assembly code. 252// In one of the few places it is updated by Go code - func save - it must be 253// treated as a uintptr to avoid a write barrier being emitted at a bad time. 254// Instead of figuring out how to emit the write barriers missing in the 255// assembly manipulation, we change the type of the field to uintptr, 256// so that it does not require write barriers at all. 257// 258// Goroutine structs are published in the allg list and never freed. 259// That will keep the goroutine structs from being collected. 260// There is never a time that Gobuf.g's contain the only references 261// to a goroutine: the publishing of the goroutine in allg comes first. 262// Goroutine pointers are also kept in non-GC-visible places like TLS, 263// so I can't see them ever moving. If we did want to start moving data 264// in the GC, we'd need to allocate the goroutine structs from an 265// alternate arena. Using guintptr doesn't make that problem any worse. 266type guintptr uintptr 267 268//go:nosplit 269func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) } 270 271//go:nosplit 272func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) } 273 274//go:nosplit 275func (gp *guintptr) cas(old, new guintptr) bool { 276 return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new)) 277} 278 279// setGNoWB performs *gp = new without a write barrier. 280// For times when it's impractical to use a guintptr. 
281//go:nosplit 282//go:nowritebarrier 283func setGNoWB(gp **g, new *g) { 284 (*guintptr)(unsafe.Pointer(gp)).set(new) 285} 286 287type puintptr uintptr 288 289//go:nosplit 290func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) } 291 292//go:nosplit 293func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) } 294 295// muintptr is a *m that is not tracked by the garbage collector. 296// 297// Because we do free Ms, there are some additional constrains on 298// muintptrs: 299// 300// 1. Never hold an muintptr locally across a safe point. 301// 302// 2. Any muintptr in the heap must be owned by the M itself so it can 303// ensure it is not in use when the last true *m is released. 304type muintptr uintptr 305 306//go:nosplit 307func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) } 308 309//go:nosplit 310func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) } 311 312// setMNoWB performs *mp = new without a write barrier. 313// For times when it's impractical to use an muintptr. 314//go:nosplit 315//go:nowritebarrier 316func setMNoWB(mp **m, new *m) { 317 (*muintptr)(unsafe.Pointer(mp)).set(new) 318} 319 320// sudog represents a g in a wait list, such as for sending/receiving 321// on a channel. 322// 323// sudog is necessary because the g ↔ synchronization object relation 324// is many-to-many. A g can be on many wait lists, so there may be 325// many sudogs for one g; and many gs may be waiting on the same 326// synchronization object, so there may be many sudogs for one object. 327// 328// sudogs are allocated from a special pool. Use acquireSudog and 329// releaseSudog to allocate and free them. 330type sudog struct { 331 // The following fields are protected by the hchan.lock of the 332 // channel this sudog is blocking on. shrinkstack depends on 333 // this for sudogs involved in channel ops. 
334 335 g *g 336 337 // isSelect indicates g is participating in a select, so 338 // g.selectDone must be CAS'd to win the wake-up race. 339 isSelect bool 340 next *sudog 341 prev *sudog 342 elem unsafe.Pointer // data element (may point to stack) 343 344 // The following fields are never accessed concurrently. 345 // For channels, waitlink is only accessed by g. 346 // For semaphores, all fields (including the ones above) 347 // are only accessed when holding a semaRoot lock. 348 349 acquiretime int64 350 releasetime int64 351 ticket uint32 352 parent *sudog // semaRoot binary tree 353 waitlink *sudog // g.waiting list or semaRoot 354 waittail *sudog // semaRoot 355 c *hchan // channel 356} 357 358/* 359Not used by gccgo. 360 361type libcall struct { 362 fn uintptr 363 n uintptr // number of parameters 364 args uintptr // parameters 365 r1 uintptr // return values 366 r2 uintptr 367 err uintptr // error number 368} 369 370*/ 371 372/* 373Not used by gccgo. 374 375// describes how to handle callback 376type wincallbackcontext struct { 377 gobody unsafe.Pointer // go function to call 378 argsize uintptr // callback arguments size (in bytes) 379 restorestack uintptr // adjust stack on return by (in bytes) (386 only) 380 cleanstack bool 381} 382*/ 383 384/* 385Not used by gccgo. 386 387// Stack describes a Go execution stack. 388// The bounds of the stack are exactly [lo, hi), 389// with no implicit data structures on either side. 390type stack struct { 391 lo uintptr 392 hi uintptr 393} 394*/ 395 396type g struct { 397 // Stack parameters. 398 // stack describes the actual stack memory: [stack.lo, stack.hi). 399 // stackguard0 is the stack pointer compared in the Go stack growth prologue. 400 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption. 401 // stackguard1 is the stack pointer compared in the C stack growth prologue. 402 // It is stack.lo+StackGuard on g0 and gsignal stacks. 
403 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash). 404 // Not for gccgo: stack stack // offset known to runtime/cgo 405 // Not for gccgo: stackguard0 uintptr // offset known to liblink 406 // Not for gccgo: stackguard1 uintptr // offset known to liblink 407 408 _panic *_panic // innermost panic - offset known to liblink 409 _defer *_defer // innermost defer 410 m *m // current m; offset known to arm liblink 411 // Not for gccgo: sched gobuf 412 syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc 413 syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc 414 // Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback 415 param unsafe.Pointer // passed parameter on wakeup 416 atomicstatus uint32 417 // Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus 418 goid int64 419 schedlink guintptr 420 waitsince int64 // approx time when the g become blocked 421 waitreason waitReason // if status==Gwaiting 422 preempt bool // preemption signal, duplicates stackguard0 = stackpreempt 423 preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule 424 // Not for gccgo: preemptShrink bool // shrink stack at synchronous safe point 425 // asyncSafePoint is set if g is stopped at an asynchronous 426 // safe point. This means there are frames on the stack 427 // without precise pointer information. 428 asyncSafePoint bool 429 430 paniconfault bool // panic (instead of crash) on unexpected fault address 431 preemptscan bool // preempted g does scan for gc 432 gcscandone bool // g has scanned stack; protected by _Gscan bit in status 433 throwsplit bool // must not split stack 434 435 gcScannedSyscallStack bool // gccgo specific; see scanSyscallStack 436 437 // activeStackChans indicates that there are unlocked channels 438 // pointing into this goroutine's stack. 
If true, stack 439 // copying needs to acquire channel locks to protect these 440 // areas of the stack. 441 activeStackChans bool 442 443 raceignore int8 // ignore race detection events 444 sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine 445 sysexitticks int64 // cputicks when syscall has returned (for tracing) 446 traceseq uint64 // trace event sequencer 447 tracelastp puintptr // last P emitted an event for this goroutine 448 lockedm muintptr 449 sig uint32 450 writebuf []byte 451 sigcode0 uintptr 452 sigcode1 uintptr 453 sigpc uintptr 454 gopc uintptr // pc of go statement that created this goroutine 455 ancestors *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors) 456 startpc uintptr // pc of goroutine function 457 // Not for gccgo: racectx uintptr 458 waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order 459 // Not for gccgo: cgoCtxt []uintptr // cgo traceback context 460 labels unsafe.Pointer // profiler labels 461 timer *timer // cached timer for time.Sleep 462 selectDone uint32 // are we participating in a select and did someone win the race? 463 464 // Per-G GC state 465 466 // gcAssistBytes is this G's GC assist credit in terms of 467 // bytes allocated. If this is positive, then the G has credit 468 // to allocate gcAssistBytes bytes without assisting. If this 469 // is negative, then the G must correct this by performing 470 // scan work. We track this in bytes to make it fast to update 471 // and check for debt in the malloc hot path. The assist ratio 472 // determines how this corresponds to scan work debt. 473 gcAssistBytes int64 474 475 // Remaining fields are specific to gccgo. 
476 477 exception unsafe.Pointer // current exception being thrown 478 isforeign bool // whether current exception is not from Go 479 480 // When using split-stacks, these fields holds the results of 481 // __splitstack_find while executing a syscall. These are used 482 // by the garbage collector to scan the goroutine's stack. 483 // 484 // When not using split-stacks, g0 stacks are allocated by the 485 // libc and other goroutine stacks are allocated by malg. 486 // gcstack: unused (sometimes cleared) 487 // gcstacksize: g0: 0; others: size of stack 488 // gcnextsegment: unused 489 // gcnextsp: current SP while executing a syscall 490 // gcinitialsp: g0: top of stack; others: start of stack memory 491 // gcnextsp2: current secondary stack pointer (if present) 492 // gcinitialsp2: start of secondary stack (if present) 493 gcstack uintptr 494 gcstacksize uintptr 495 gcnextsegment uintptr 496 gcnextsp uintptr 497 gcinitialsp unsafe.Pointer 498 gcnextsp2 uintptr 499 gcinitialsp2 unsafe.Pointer 500 501 // gcregs holds the register values while executing a syscall. 502 // This is set by getcontext and scanned by the garbage collector. 
503 gcregs g_ucontext_t 504 505 entry func(unsafe.Pointer) // goroutine function to run 506 entryfn uintptr // function address passed to __go_go 507 entrysp uintptr // the stack pointer of the outermost Go frame 508 fromgogo bool // whether entered from gogo function 509 510 scanningself bool // whether goroutine is scanning its own stack 511 512 scang uintptr // the g that wants to scan this g's stack (uintptr to avoid write barrier) 513 scangcw uintptr // gc worker for scanning stack (uintptr to avoid write barrier) 514 515 isSystemGoroutine bool // whether goroutine is a "system" goroutine 516 isFinalizerGoroutine bool // whether goroutine is the finalizer goroutine 517 518 deferring bool // whether we are running a deferred function 519 goexiting bool // whether we are running Goexit 520 ranCgocallBackDone bool // whether we deferred CgocallBackDone 521 522 traceback uintptr // stack traceback buffer 523 524 context g_ucontext_t // saved context for setcontext 525 stackcontext [10]uintptr // split-stack context 526} 527 528type m struct { 529 g0 *g // goroutine with scheduling stack 530 // Not for gccgo: morebuf gobuf // gobuf arg to morestack 531 // Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink 532 533 // Fields not known to debuggers. 
534 procid uint64 // for debuggers, but offset not hard-coded 535 gsignal *g // signal-handling g 536 // Not for gccgo: goSigStack gsignalStack // Go-allocated signal handling stack 537 sigmask sigset // storage for saved signal mask 538 // Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register) 539 mstartfn func() 540 curg *g // current running goroutine 541 caughtsig guintptr // goroutine running during fatal signal 542 p puintptr // attached p for executing go code (nil if not executing go code) 543 nextp puintptr 544 oldp puintptr // the p that was attached before executing a syscall 545 id int64 546 mallocing int32 547 throwing int32 548 preemptoff string // if != "", keep curg running on this m 549 locks int32 550 softfloat int32 551 dying int32 552 profilehz int32 553 spinning bool // m is out of work and is actively looking for work 554 blocked bool // m is blocked on a note 555 newSigstack bool // minit on C thread called sigaltstack 556 printlock int8 557 incgo bool // m is executing a cgo call 558 freeWait uint32 // if == 0, safe to free g0 and delete m (atomic) 559 fastrand [2]uint32 560 needextram bool 561 traceback uint8 562 ncgocall uint64 // number of cgo calls in total 563 ncgo int32 // number of cgo calls currently in progress 564 // Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily 565 // Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call 566 park note 567 alllink *m // on allm 568 schedlink muintptr 569 mcache *mcache 570 lockedg guintptr 571 createstack [32]location // stack that created this thread. 
572 lockedExt uint32 // tracking for external LockOSThread 573 lockedInt uint32 // tracking for internal lockOSThread 574 nextwaitm muintptr // next m waiting for lock 575 waitunlockf func(*g, unsafe.Pointer) bool 576 waitlock unsafe.Pointer 577 waittraceev byte 578 waittraceskip int 579 startingtrace bool 580 syscalltick uint32 581 freelink *m // on sched.freem 582 583 // these are here because they are too large to be on the stack 584 // of low-level NOSPLIT functions. 585 // Not for gccgo: libcall libcall 586 // Not for gccgo: libcallpc uintptr // for cpu profiler 587 // Not for gccgo: libcallsp uintptr 588 // Not for gccgo: libcallg guintptr 589 // Not for gccgo: syscall libcall // stores syscall parameters on windows 590 591 // preemptGen counts the number of completed preemption 592 // signals. This is used to detect when a preemption is 593 // requested, but fails. Accessed atomically. 594 preemptGen uint32 595 596 // Whether this is a pending preemption signal on this M. 597 // Accessed atomically. 598 signalPending uint32 599 600 dlogPerM 601 602 mOS 603 604 // Remaining fields are specific to gccgo. 605 606 gsignalstack unsafe.Pointer // stack for gsignal 607 gsignalstacksize uintptr 608 609 dropextram bool // drop after call is done 610 exiting bool // thread is exiting 611 612 scannote note // synchonization for signal-based stack scanning 613} 614 615type p struct { 616 id int32 617 status uint32 // one of pidle/prunning/... 618 link puintptr 619 schedtick uint32 // incremented on every scheduler call 620 syscalltick uint32 // incremented on every system call 621 sysmontick sysmontick // last tick observed by sysmon 622 m muintptr // back-link to associated m (nil if idle) 623 mcache *mcache 624 pcache pageCache 625 raceprocctx uintptr 626 627 // gccgo has only one size of defer. 628 deferpool []*_defer 629 deferpoolbuf [32]*_defer 630 631 // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen. 
632 goidcache uint64 633 goidcacheend uint64 634 635 // Queue of runnable goroutines. Accessed without lock. 636 runqhead uint32 637 runqtail uint32 638 runq [256]guintptr 639 // runnext, if non-nil, is a runnable G that was ready'd by 640 // the current G and should be run next instead of what's in 641 // runq if there's time remaining in the running G's time 642 // slice. It will inherit the time left in the current time 643 // slice. If a set of goroutines is locked in a 644 // communicate-and-wait pattern, this schedules that set as a 645 // unit and eliminates the (potentially large) scheduling 646 // latency that otherwise arises from adding the ready'd 647 // goroutines to the end of the run queue. 648 runnext guintptr 649 650 // Available G's (status == Gdead) 651 gFree struct { 652 gList 653 n int32 654 } 655 656 sudogcache []*sudog 657 sudogbuf [128]*sudog 658 659 // Cache of mspan objects from the heap. 660 mspancache struct { 661 // We need an explicit length here because this field is used 662 // in allocation codepaths where write barriers are not allowed, 663 // and eliminating the write barrier/keeping it eliminated from 664 // slice updates is tricky, moreso than just managing the length 665 // ourselves. 666 len int 667 buf [128]*mspan 668 } 669 670 tracebuf traceBufPtr 671 672 // traceSweep indicates the sweep events should be traced. 673 // This is used to defer the sweep start event until a span 674 // has actually been swept. 675 traceSweep bool 676 // traceSwept and traceReclaimed track the number of bytes 677 // swept and reclaimed by sweeping in the current sweep loop. 678 traceSwept, traceReclaimed uintptr 679 680 palloc persistentAlloc // per-P to avoid mutex 681 682 _ uint32 // Alignment for atomic fields below 683 684 // The when field of the first entry on the timer heap. 685 // This is updated using atomic functions. 686 // This is 0 if the timer heap is empty. 
687 timer0When uint64 688 689 // Per-P GC state 690 gcAssistTime int64 // Nanoseconds in assistAlloc 691 gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic) 692 gcBgMarkWorker guintptr // (atomic) 693 gcMarkWorkerMode gcMarkWorkerMode 694 695 // gcMarkWorkerStartTime is the nanotime() at which this mark 696 // worker started. 697 gcMarkWorkerStartTime int64 698 699 // gcw is this P's GC work buffer cache. The work buffer is 700 // filled by write barriers, drained by mutator assists, and 701 // disposed on certain GC state transitions. 702 gcw gcWork 703 704 // wbBuf is this P's GC write barrier buffer. 705 // 706 // TODO: Consider caching this in the running G. 707 wbBuf wbBuf 708 709 runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point 710 711 // Lock for timers. We normally access the timers while running 712 // on this P, but the scheduler can also do it from a different P. 713 timersLock mutex 714 715 // Actions to take at some time. This is used to implement the 716 // standard library's time package. 717 // Must hold timersLock to access. 718 timers []*timer 719 720 // Number of timers in P's heap. 721 // Modified using atomic instructions. 722 numTimers uint32 723 724 // Number of timerModifiedEarlier timers on P's heap. 725 // This should only be modified while holding timersLock, 726 // or while the timer status is in a transient state 727 // such as timerModifying. 728 adjustTimers uint32 729 730 // Number of timerDeleted timers in P's heap. 731 // Modified using atomic instructions. 732 deletedTimers uint32 733 734 // Race context used while executing timer functions. 735 // Not for gccgo: timerRaceCtx uintptr 736 737 // preempt is set to indicate that this P should be enter the 738 // scheduler ASAP (regardless of what G is running on it). 739 preempt bool 740 741 pad cpu.CacheLinePad 742} 743 744type schedt struct { 745 // accessed atomically. keep at top to ensure alignment on 32-bit systems. 
746 goidgen uint64 747 lastpoll uint64 // time of last network poll, 0 if currently polling 748 pollUntil uint64 // time to which current poll is sleeping 749 750 lock mutex 751 752 // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be 753 // sure to call checkdead(). 754 755 midle muintptr // idle m's waiting for work 756 nmidle int32 // number of idle m's waiting for work 757 nmidlelocked int32 // number of locked m's waiting for work 758 mnext int64 // number of m's that have been created and next M ID 759 maxmcount int32 // maximum number of m's allowed (or die) 760 nmsys int32 // number of system m's not counted for deadlock 761 nmfreed int64 // cumulative number of freed m's 762 763 ngsys uint32 // number of system goroutines; updated atomically 764 765 pidle puintptr // idle p's 766 npidle uint32 767 nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go. 768 769 // Global runnable queue. 770 runq gQueue 771 runqsize int32 772 773 // disable controls selective disabling of the scheduler. 774 // 775 // Use schedEnableUser to control this. 776 // 777 // disable is protected by sched.lock. 778 disable struct { 779 // user disables scheduling of user goroutines. 780 user bool 781 runnable gQueue // pending runnable Gs 782 n int32 // length of runnable 783 } 784 785 // Global cache of dead G's. 786 gFree struct { 787 lock mutex 788 list gList // Gs 789 n int32 790 } 791 792 // Central cache of sudog structs. 793 sudoglock mutex 794 sudogcache *sudog 795 796 // Central pool of available defer structs. 797 deferlock mutex 798 deferpool *_defer 799 800 // freem is the list of m's waiting to be freed when their 801 // m.exited is set. Linked through m.freelink. 802 freem *m 803 804 gcwaiting uint32 // gc is waiting to run 805 stopwait int32 806 stopnote note 807 sysmonwait uint32 808 sysmonnote note 809 810 // safepointFn should be called on each P at the next GC 811 // safepoint if p.runSafePointFn is set. 
812 safePointFn func(*p) 813 safePointWait int32 814 safePointNote note 815 816 profilehz int32 // cpu profiling rate 817 818 procresizetime int64 // nanotime() of last change to gomaxprocs 819 totaltime int64 // ∫gomaxprocs dt up to procresizetime 820} 821 822// Values for the flags field of a sigTabT. 823const ( 824 _SigNotify = 1 << iota // let signal.Notify have signal, even if from kernel 825 _SigKill // if signal.Notify doesn't take it, exit quietly 826 _SigThrow // if signal.Notify doesn't take it, exit loudly 827 _SigPanic // if the signal is from the kernel, panic 828 _SigDefault // if the signal isn't explicitly requested, don't monitor it 829 _SigGoExit // cause all runtime procs to exit (only used on Plan 9). 830 _SigSetStack // add SA_ONSTACK to libc handler 831 _SigUnblock // always unblock; see blockableSig 832 _SigIgn // _SIG_DFL action is to ignore the signal 833) 834 835// Lock-free stack node. 836// Also known to export_test.go. 837type lfnode struct { 838 next uint64 839 pushcnt uintptr 840} 841 842type forcegcstate struct { 843 lock mutex 844 g *g 845 idle uint32 846} 847 848// startup_random_data holds random bytes initialized at startup. These come from 849// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go). 850var startupRandomData []byte 851 852// extendRandom extends the random numbers in r[:n] to the whole slice r. 853// Treats n<0 as n==0. 854func extendRandom(r []byte, n int) { 855 if n < 0 { 856 n = 0 857 } 858 for n < len(r) { 859 // Extend random bits using hash function & time seed 860 w := n 861 if w > 16 { 862 w = 16 863 } 864 h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w)) 865 for i := 0; i < sys.PtrSize && n < len(r); i++ { 866 r[n] = byte(h) 867 n++ 868 h >>= 8 869 } 870 } 871} 872 873// A _defer holds an entry on the list of deferred calls. 874// If you add a field here, add code to clear it in freedefer. 
// This struct must match the code in Defer_statement::defer_struct_type
// in the compiler.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	// The next entry in the stack.
	link *_defer

	// The stack variable for the function which called this defer
	// statement. This is set to true if we are returning from
	// that function, false if we are panicking through it.
	frame *bool

	// The value of the panic stack when this function is
	// deferred. This function can not recover this value from
	// the panic stack. This can happen if a deferred function
	// has a defer statement itself.
	panicStack *_panic

	// The panic that caused the defer to run. This is used to
	// discard panics that have already been handled.
	_panic *_panic

	// The function to call.
	pfn uintptr

	// The argument to pass to the function.
	arg unsafe.Pointer

	// The return address that a recover thunk matches against.
	// This is set by __go_set_defer_retaddr which is called by
	// the thunks created by defer statements.
	retaddr uintptr

	// Set to true if a function created by reflect.MakeFunc is
	// permitted to recover. The return address of such a
	// function will be somewhere in libffi, so __retaddr
	// is not useful.
	makefunccanrecover bool

	// Whether the _defer is heap allocated.
	heap bool
}

// panics
// This is the gccgo version.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
//go:notinheap
type _panic struct {
	// The next entry in the stack.
	link *_panic

	// The value associated with this panic.
	arg interface{}

	// Whether this panic has been recovered.
	recovered bool

	// Whether this panic was pushed on the stack because of an
	// exception thrown in some other language.
	isforeign bool

	// Whether this panic was already seen by a deferred function
	// which called panic again.
	aborted bool

	// Whether this panic was created for goexit.
	goexit bool
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid int64     // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

// Flags controlling what a traceback includes.
const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback.
const _TracebackMaxFrames = 100

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
968type waitReason uint8 969 970const ( 971 waitReasonZero waitReason = iota // "" 972 waitReasonGCAssistMarking // "GC assist marking" 973 waitReasonIOWait // "IO wait" 974 waitReasonChanReceiveNilChan // "chan receive (nil chan)" 975 waitReasonChanSendNilChan // "chan send (nil chan)" 976 waitReasonDumpingHeap // "dumping heap" 977 waitReasonGarbageCollection // "garbage collection" 978 waitReasonGarbageCollectionScan // "garbage collection scan" 979 waitReasonPanicWait // "panicwait" 980 waitReasonSelect // "select" 981 waitReasonSelectNoCases // "select (no cases)" 982 waitReasonGCAssistWait // "GC assist wait" 983 waitReasonGCSweepWait // "GC sweep wait" 984 waitReasonGCScavengeWait // "GC scavenge wait" 985 waitReasonChanReceive // "chan receive" 986 waitReasonChanSend // "chan send" 987 waitReasonFinalizerWait // "finalizer wait" 988 waitReasonForceGGIdle // "force gc (idle)" 989 waitReasonSemacquire // "semacquire" 990 waitReasonSleep // "sleep" 991 waitReasonSyncCondWait // "sync.Cond.Wait" 992 waitReasonTimerGoroutineIdle // "timer goroutine (idle)" 993 waitReasonTraceReaderBlocked // "trace reader (blocked)" 994 waitReasonWaitForGCCycle // "wait for GC cycle" 995 waitReasonGCWorkerIdle // "GC worker (idle)" 996 waitReasonPreempted // "preempted" 997) 998 999var waitReasonStrings = [...]string{ 1000 waitReasonZero: "", 1001 waitReasonGCAssistMarking: "GC assist marking", 1002 waitReasonIOWait: "IO wait", 1003 waitReasonChanReceiveNilChan: "chan receive (nil chan)", 1004 waitReasonChanSendNilChan: "chan send (nil chan)", 1005 waitReasonDumpingHeap: "dumping heap", 1006 waitReasonGarbageCollection: "garbage collection", 1007 waitReasonGarbageCollectionScan: "garbage collection scan", 1008 waitReasonPanicWait: "panicwait", 1009 waitReasonSelect: "select", 1010 waitReasonSelectNoCases: "select (no cases)", 1011 waitReasonGCAssistWait: "GC assist wait", 1012 waitReasonGCSweepWait: "GC sweep wait", 1013 waitReasonGCScavengeWait: "GC scavenge wait", 1014 
waitReasonChanReceive: "chan receive", 1015 waitReasonChanSend: "chan send", 1016 waitReasonFinalizerWait: "finalizer wait", 1017 waitReasonForceGGIdle: "force gc (idle)", 1018 waitReasonSemacquire: "semacquire", 1019 waitReasonSleep: "sleep", 1020 waitReasonSyncCondWait: "sync.Cond.Wait", 1021 waitReasonTimerGoroutineIdle: "timer goroutine (idle)", 1022 waitReasonTraceReaderBlocked: "trace reader (blocked)", 1023 waitReasonWaitForGCCycle: "wait for GC cycle", 1024 waitReasonGCWorkerIdle: "GC worker (idle)", 1025 waitReasonPreempted: "preempted", 1026} 1027 1028func (w waitReason) String() string { 1029 if w < 0 || w >= waitReason(len(waitReasonStrings)) { 1030 return "unknown wait reason" 1031 } 1032 return waitReasonStrings[w] 1033} 1034 1035var ( 1036 allglen uintptr 1037 allm *m 1038 allp []*p // len(allp) == gomaxprocs; may change at safe points, otherwise immutable 1039 allpLock mutex // Protects P-less reads of allp and all writes 1040 gomaxprocs int32 1041 ncpu int32 1042 forcegc forcegcstate 1043 sched schedt 1044 newprocs int32 1045 1046 support_aes bool 1047) 1048 1049// Set by the linker so the runtime can determine the buildmode. 1050var ( 1051 islibrary bool // -buildmode=c-shared 1052 isarchive bool // -buildmode=c-archive 1053) 1054 1055// Types that are only used by gccgo. 1056 1057// g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext. 1058// _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>. 1059// On some systems getcontext and friends require a value that is 1060// aligned to a 16-byte boundary. We implement this by increasing the 1061// required size and picking an appropriate offset when we use the 1062// array. 1063type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(uintptr(0))]uintptr 1064 1065// sigset is the Go version of the C type sigset_t. 1066// _sigset_t is defined by the Makefile from <signal.h>. 
type sigset _sigset_t

// getMemstats returns a pointer to the internal memstats variable,
// for C code. The go:linkname directive makes the symbol accessible
// outside this package despite the unexported name.
//go:linkname getMemstats
func getMemstats() *mstats {
	return &memstats
}