1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
7import (
8	"runtime/internal/atomic"
9	"runtime/internal/sys"
10	"unsafe"
11)
12
// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan is a bit flag, combined (OR'd) with one of the above
	// states other than _Grunning to indicate that GC is scanning
	// the stack. The goroutine is not executing user code and the
	// stack is owned by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~_Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)
90
const (
	// P status
	//
	// _Pidle: not in use, typically on the idle P list.
	// _Prunning: owned by an M and being used to run user code or
	//   scheduler work.
	// _Psyscall: not running user code; its M is in a syscall.
	// _Pgcstop: halted for the garbage collector's stop-the-world.
	// _Pdead: no longer in use (GOMAXPROCS shrank).
	_Pidle    = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)
99
// Mutual exclusion locks.  In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// The futex-based impl treats key as a uint32 key,
	// while the sema-based impl treats it as an M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
110
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return.  future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened.  if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// The futex-based impl treats key as a uint32 key,
	// while the sema-based impl treats it as an M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
137
// funcval is the runtime representation of a function value:
// a code pointer, optionally followed by closure data.
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}
142
// The representation of a non-empty interface.
// See comment in iface.go for more details on this struct.
type iface struct {
	tab  unsafe.Pointer // type/method table for the dynamic type
	data unsafe.Pointer // the interface's dynamic value
}
149
// The representation of an empty interface.
// See comment in iface.go for more details on this struct.
type eface struct {
	_type *_type         // descriptor of the dynamic type
	data  unsafe.Pointer // the interface's dynamic value
}
156
// efaceOf returns the runtime representation of the empty interface
// value *ep, reinterpreting it in place (no copy is made).
func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
160
161// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
162// It is particularly important to avoid write barriers when the current P has
163// been released, because the GC thinks the world is stopped, and an
164// unexpected write barrier would not be synchronized with the GC,
165// which can lead to a half-executed write barrier that has marked the object
166// but not queued it. If the GC skips the object and completes before the
167// queuing can occur, it will incorrectly free the object.
168//
169// We tried using special assignment functions invoked only when not
170// holding a running P, but then some updates to a particular memory
171// word went through write barriers and some did not. This breaks the
172// write barrier shadow checking mode, and it is also scary: better to have
173// a word that is completely ignored by the GC than to have one for which
174// only a few updates are ignored.
175//
176// Gs and Ps are always reachable via true pointers in the
177// allgs and allp lists or (during allocation before they reach those lists)
178// from stack variables.
179//
180// Ms are always reachable via true pointers either from allm or
181// freem. Unlike Gs and Ps we do free Ms, so it's important that
182// nothing ever hold an muintptr across a safe point.
183
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so they cannot be expected to move. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

// ptr converts the guintptr back into a *g for dereferencing.
//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

// set stores g in *gp. Because *gp is a uintptr, no write barrier
// is emitted.
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

// cas atomically compares-and-swaps *gp from old to new and reports
// whether the swap took place. No write barrier is emitted.
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
223
// A puintptr is a *p typed as a uintptr to bypass write barriers;
// see the discussion above guintptr for why this is safe for Ps.
type puintptr uintptr

// ptr converts the puintptr back into a *p for dereferencing.
//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

// set stores p in *pp without a write barrier.
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
231
// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

// ptr converts the muintptr back into an *m for dereferencing.
//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

// set stores m in *mp without a write barrier.
//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
256
// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g // the goroutine this sudog represents

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}
294
295/*
296Not used by gccgo.
297
298type libcall struct {
299	fn   uintptr
300	n    uintptr // number of parameters
301	args uintptr // parameters
302	r1   uintptr // return values
303	r2   uintptr
304	err  uintptr // error number
305}
306
307*/
308
309/*
310Not used by gccgo.
311
312// describes how to handle callback
313type wincallbackcontext struct {
314	gobody       unsafe.Pointer // go function to call
315	argsize      uintptr        // callback arguments size (in bytes)
316	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
317	cleanstack   bool
318}
319*/
320
321/*
322Not used by gccgo.
323
324// Stack describes a Go execution stack.
325// The bounds of the stack are exactly [lo, hi),
326// with no implicit data structures on either side.
327type stack struct {
328	lo uintptr
329	hi uintptr
330}
331*/
332
// A g is the runtime representation of a goroutine. Dead gs are
// recycled via free lists rather than being released (see _Gdead).
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	// Not for gccgo: stack       stack   // offset known to runtime/cgo
	// Not for gccgo: stackguard0 uintptr // offset known to liblink
	// Not for gccgo: stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	// Not for gccgo: sched          gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	// Not for gccgo: stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	// Not for gccgo: stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool     // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool     // panic (instead of crash) on unexpected fault address
	preemptscan    bool     // preempted g does scan for gc
	gcscandone     bool     // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool     // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
	throwsplit     bool     // must not split stack
	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	// Not for gccgo: racectx        uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: cgoCtxt        []uintptr      // cgo traceback context
	labels     unsafe.Pointer // profiler labels
	timer      *timer         // cached timer for time.Sleep
	selectDone uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// Remaining fields are specific to gccgo.

	exception unsafe.Pointer // current exception being thrown
	isforeign bool           // whether current exception is not from Go

	// When using split-stacks, these fields hold the results of
	// __splitstack_find while executing a syscall. These are used
	// by the garbage collector to scan the goroutine's stack.
	//
	// When not using split-stacks, g0 stacks are allocated by the
	// libc and other goroutine stacks are allocated by malg.
	// gcstack: unused (sometimes cleared)
	// gcstacksize: g0: 0; others: size of stack
	// gcnextsegment: unused
	// gcnextsp: current SP while executing a syscall
	// gcinitialsp: g0: top of stack; others: start of stack memory
	// gcnextsp2: current secondary stack pointer (if present)
	// gcinitialsp2: start of secondary stack (if present)
	gcstack       uintptr
	gcstacksize   uintptr
	gcnextsegment uintptr
	gcnextsp      uintptr
	gcinitialsp   unsafe.Pointer
	gcnextsp2     uintptr
	gcinitialsp2  unsafe.Pointer

	// gcregs holds the register values while executing a syscall.
	// This is set by getcontext and scanned by the garbage collector.
	gcregs g_ucontext_t

	entry    func(unsafe.Pointer) // goroutine function to run
	entryfn  uintptr              // function address passed to __go_go
	fromgogo bool                 // whether entered from gogo function

	scanningself bool // whether goroutine is scanning its own stack

	isSystemGoroutine bool // whether goroutine is a "system" goroutine

	traceback *tracebackg // stack traceback buffer

	context      g_ucontext_t // saved context for setcontext
	stackcontext [10]uintptr  // split-stack context
}
439
// An m is the runtime representation of an operating-system thread.
type m struct {
	g0 *g // goroutine with scheduling stack
	// Not for gccgo: morebuf gobuf  // gobuf arg to morestack
	// Not for gccgo: divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid  uint64 // for debuggers, but offset not hard-coded
	gsignal *g     // signal-handling g
	// Not for gccgo: goSigStack    gsignalStack // Go-allocated signal handling stack
	sigmask sigset // storage for saved signal mask
	// Not for gccgo: tls           [6]uintptr   // thread-local storage (for x86 extern register)
	mstartfn    func()
	curg        *g       // current running goroutine
	caughtsig   guintptr // goroutine running during fatal signal
	p           puintptr // attached p for executing go code (nil if not executing go code)
	nextp       puintptr
	id          int64
	mallocing   int32
	throwing    int32
	preemptoff  string // if != "", keep curg running on this m
	locks       int32
	softfloat   int32
	dying       int32
	profilehz   int32
	helpgc      int32
	spinning    bool // m is out of work and is actively looking for work
	blocked     bool // m is blocked on a note
	inwb        bool // m is executing a write barrier
	newSigstack bool // minit on C thread called sigaltstack
	printlock   int8
	incgo       bool   // m is executing a cgo call
	freeWait    uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand    [2]uint32
	needextram  bool
	traceback   uint8
	ncgocall    uint64 // number of cgo calls in total
	ncgo        int32  // number of cgo calls currently in progress
	// Not for gccgo: cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	// Not for gccgo: cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park        note
	alllink     *m // on allm
	schedlink   muintptr
	mcache      *mcache
	lockedg     guintptr
	createstack [32]location // stack that created this thread.
	// Not for gccgo: freglo        [16]uint32     // d[i] lsb and f[i]
	// Not for gccgo: freghi        [16]uint32     // d[i] msb and f[i+16]
	// Not for gccgo: fflag         uint32         // floating point compare flags
	lockedExt     uint32         // tracking for external LockOSThread
	lockedInt     uint32         // tracking for internal lockOSThread
	nextwaitm     muintptr       // next m waiting for lock
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	// Not for gccgo: thread        uintptr // thread handle
	freelink *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	// Not for gccgo: libcall   libcall
	// Not for gccgo: libcallpc uintptr // for cpu profiler
	// Not for gccgo: libcallsp uintptr
	// Not for gccgo: libcallg  guintptr
	// Not for gccgo: syscall   libcall // stores syscall parameters on windows

	mos mOS

	// Remaining fields are specific to gccgo.

	gsignalstack     unsafe.Pointer // stack for gsignal
	gsignalstacksize uintptr

	dropextram bool // drop after call is done
	exiting    bool // thread is exiting

	gcing int32
}
520
// A p represents a "processor": the resources (run queue, caches)
// that an m must hold in order to execute user Go code.
type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	racectx     uintptr

	// gccgo has only one size of defer.
	deferpool    []*_defer
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64 // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker
	gcBgMarkWorker       guintptr
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// pad prevents false sharing between adjacent ps.
	pad [sys.CacheLineSize]byte
}
600
// schedt holds global scheduler state. There is a single instance,
// the package-level variable sched.
type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}
664
// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9)
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)
677
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}
684
// forcegcstate holds the state used to coordinate forced (periodic)
// garbage collections; see the package-level forcegc variable.
type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}
690
// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
694
695// extendRandom extends the random numbers in r[:n] to the whole slice r.
696// Treats n<0 as n==0.
697func extendRandom(r []byte, n int) {
698	if n < 0 {
699		n = 0
700	}
701	for n < len(r) {
702		// Extend random bits using hash function & time seed
703		w := n
704		if w > 16 {
705			w = 16
706		}
707		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
708		for i := 0; i < sys.PtrSize && n < len(r); i++ {
709			r[n] = byte(h)
710			n++
711			h >>= 8
712		}
713	}
714}
715
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
type _defer struct {
	// The next entry in the stack.
	link *_defer

	// The stack variable for the function which called this defer
	// statement.  This is set to true if we are returning from
	// that function, false if we are panicking through it.
	frame *bool

	// The value of the panic stack when this function is
	// deferred.  This function can not recover this value from
	// the panic stack.  This can happen if a deferred function
	// has a defer statement itself.
	panicStack *_panic

	// The panic that caused the defer to run. This is used to
	// discard panics that have already been handled.
	_panic *_panic

	// The function to call.
	pfn uintptr

	// The argument to pass to the function.
	arg unsafe.Pointer

	// The return address that a recover thunk matches against.
	// This is set by __go_set_defer_retaddr which is called by
	// the thunks created by defer statements.
	retaddr uintptr

	// Set to true if a function created by reflect.MakeFunc is
	// permitted to recover.  The return address of such a
	// function will be somewhere in libffi, so __retaddr
	// is not useful.
	makefunccanrecover bool
}
754
// A _panic holds information about an active panic.
//
// This is the gccgo version.
type _panic struct {
	// The next entry in the stack.
	link *_panic

	// The value associated with this panic.
	arg interface{}

	// Whether this panic has been recovered.
	recovered bool

	// Whether this panic was pushed on the stack because of an
	// exception thrown in some other language.
	isforeign bool

	// Whether this panic was already seen by a deferred function
	// which called panic again.
	aborted bool
}
775
const (
	// Flags controlling how stack tracebacks are generated.
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)
781
// _TracebackMaxFrames is the maximum number of frames we print for a
// traceback.
const _TracebackMaxFrames = 100
784
// Global scheduler and machine state. The commented-out entries are
// present in the gc runtime but not used by this (gccgo) port.
var (
	allglen    uintptr
	allm       *m
	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
	allpLock   mutex // Protects P-less reads of allp and all writes
	gomaxprocs int32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	cpuid_ecx   uint32
	support_aes bool

	// cpuid_edx         uint32
	// cpuid_ebx7        uint32
	// lfenceBeforeRdtsc bool
	// support_avx       bool
	// support_avx2      bool
	// support_bmi1      bool
	// support_bmi2      bool

//	goarm                uint8 // set by cmd/link on arm systems
//	framepointer_enabled bool  // set by cmd/link
)
814
// Set by the linker so the runtime can determine the buildmode.
// Both are false for ordinary (non-library) builds.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)
820
// Types that are only used by gccgo.

// g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext.
// _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>.
// On some systems getcontext and friends require a value that is
// aligned to a 16-byte boundary.  We implement this by increasing the
// required size and picking an appropriate offset when we use the
// array: the +15 below provides the slack needed to realign the
// address within the array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(uintptr(0))]uintptr

// sigset is the Go version of the C type sigset_t.
// _sigset_t is defined by the Makefile from <signal.h>.
type sigset _sigset_t
834
// getMemstats returns a pointer to the internal memstats variable,
// for C code. The go:linkname directive pins the symbol name
// runtime.getMemstats so the C parts of the runtime can call it.
//go:linkname getMemstats runtime.getMemstats
func getMemstats() *mstats {
	return &memstats
}
841