// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "config.h"

#include "go-assert.h"
#include <complex.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <semaphore.h>
#include <ucontext.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "interface.h"
#include "go-alloc.h"

#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)

/* This file supports C files copied from the 6g runtime library.
   This is a version of the 6g runtime.h rewritten for gccgo's version
   of the code.  */

typedef signed int   int8    __attribute__ ((mode (QI)));
typedef unsigned int uint8   __attribute__ ((mode (QI)));
typedef signed int   int16   __attribute__ ((mode (HI)));
typedef unsigned int uint16  __attribute__ ((mode (HI)));
typedef signed int   int32   __attribute__ ((mode (SI)));
typedef unsigned int uint32  __attribute__ ((mode (SI)));
typedef signed int   int64   __attribute__ ((mode (DI)));
typedef unsigned int uint64  __attribute__ ((mode (DI)));
typedef float        float32 __attribute__ ((mode (SF)));
typedef double       float64 __attribute__ ((mode (DF)));
typedef signed int   intptr __attribute__ ((mode (pointer)));
typedef unsigned int uintptr __attribute__ ((mode (pointer)));

typedef intptr		intgo; // Go's int
typedef uintptr		uintgo; // Go's uint

typedef uintptr		uintreg;

/* Defined types.  */

typedef	uint8			bool;
typedef	uint8			byte;
typedef	struct	Func		Func;
typedef	struct	G		G;
typedef	struct	Lock		Lock;
typedef	struct	M		M;
typedef	struct	P		P;
typedef	struct	Note		Note;
typedef	struct	String		String;
typedef	struct	FuncVal		FuncVal;
typedef	struct	SigTab		SigTab;
typedef	struct	MCache		MCache;
typedef struct	FixAlloc	FixAlloc;
typedef	struct	Hchan		Hchan;
typedef	struct	Timers		Timers;
typedef	struct	Timer		Timer;
typedef	struct	GCStats		GCStats;
typedef	struct	LFNode		LFNode;
typedef	struct	ParFor		ParFor;
typedef	struct	ParForThread	ParForThread;
typedef	struct	CgoMal		CgoMal;
typedef	struct	PollDesc	PollDesc;
typedef	struct	DebugVars	DebugVars;

typedef	struct	__go_open_array		Slice;
typedef struct	__go_interface		Iface;
typedef	struct	__go_empty_interface	Eface;
typedef	struct	__go_type_descriptor	Type;
typedef	struct	__go_defer_stack	Defer;
typedef	struct	__go_panic_stack	Panic;

typedef struct	__go_ptr_type		PtrType;
typedef struct	__go_func_type		FuncType;
typedef struct	__go_interface_type	InterfaceType;
typedef struct	__go_map_type		MapType;
typedef struct	__go_channel_type	ChanType;

typedef struct	Traceback	Traceback;

typedef struct	Location	Location;

/*
 * Per-CPU declaration.
 */
extern M*	runtime_m(void);
extern G*	runtime_g(void);

extern M	runtime_m0;
extern G	runtime_g0;

/*
 * defined constants
 */
enum
{
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" statuses
	// in mgc0.c too.
	Gidle,
	Grunnable,
	Grunning,
	Gsyscall,
	Gwaiting,
	Gmoribund_unused,  // currently unused, but hardcoded in gdb scripts
	Gdead,
};
enum
{
	// P status
	Pidle,
	Prunning,
	Psyscall,
	Pgcstop,
	Pdead,
};
enum
{
	true	= 1,
	false	= 0,
};
enum
{
	PtrSize = sizeof(void*),
};
enum
{
	// Per-M stack segment cache size.
	StackCacheSize = 32,
	// Global <-> per-M stack segment cache transfer batch size.
	StackCacheBatch = 16,
};
/*
 * structures
 */
struct	Lock
{
	// The futex-based impl treats it as a uint32 key,
	// while the sema-based impl treats it as an M* waitm.
	// Used to be a union, but unions break precise GC.
	uintptr	key __attribute__((aligned(4)));
};
struct	Note
{
	// The futex-based impl treats it as a uint32 key,
	// while the sema-based impl treats it as an M* waitm.
	// Used to be a union, but unions break precise GC.
	uintptr	key __attribute__((aligned(4)));
};
struct String
{
	const byte*	str;
	intgo		len;
};
struct FuncVal
{
	void	(*fn)(void);
	// variable-size, fn-specific data here
};
struct	GCStats
{
	// the struct must consist of only uint64's,
	// because it is cast to uint64[].
	uint64	nhandoff;
	uint64	nhandoffcnt;
	uint64	nprocyield;
	uint64	nosyield;
	uint64	nsleep;
};
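
/* Illustrative sketch only (not part of the runtime API): because GCStats
   consists solely of uint64 fields, per-M stats can be accumulated by
   treating the struct as a flat uint64 array, e.g.:

	static void
	addstats(GCStats *dst, GCStats *src)
	{
		uint64 *d = (uint64*)dst;
		uint64 *s = (uint64*)src;
		uintptr i;

		for(i = 0; i < sizeof(GCStats)/sizeof(uint64); i++)
			d[i] += s[i];
	}
*/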

// A location in the program, used for backtraces.
struct	Location
{
	uintptr	pc;
	String	filename;
	String	function;
	intgo	lineno;
};
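
/* Illustrative sketch only: runtime_callers (declared below) fills an
   array of Locations with the current call stack; this assumes the
   skip/buffer/max calling convention of that declaration and the
   runtime printf verbs (%S for String, %D for int64).

	Location locs[32];
	int32 n, i;

	n = runtime_callers(1, locs, nelem(locs), false);
	for(i = 0; i < n; i++)
		runtime_printf("%S:%D %S\n", locs[i].filename,
			       (int64)locs[i].lineno, locs[i].function);
*/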

struct	G
{
	Defer*	defer;
	Panic*	panic;
	void*	exception;	// current exception being thrown
	bool	is_foreign;	// whether current exception from other language
	void	*gcstack;	// if status==Gsyscall, gcstack = stackbase to use during gc
	size_t	gcstack_size;
	void*	gcnext_segment;
	void*	gcnext_sp;
	void*	gcinitial_sp;
	ucontext_t gcregs;
	byte*	entry;		// initial function
	void*	param;		// passed parameter on wakeup
	bool	fromgogo;	// reached from gogo
	int16	status;
	uint32	selgen;		// valid sudog pointer
	int64	goid;
	int64	waitsince;	// approx time when the G became blocked
	const char*	waitreason;	// if status==Gwaiting
	G*	schedlink;
	bool	ispanic;
	bool	issystem;	// do not output in stack dump
	bool	isbackground;	// ignore in deadlock detector
	bool	paniconfault;	// panic (instead of crash) on unexpected fault address
	M*	m;		// for debuggers, but offset not hard-coded
	M*	lockedm;
	int32	sig;
	int32	writenbuf;
	byte*	writebuf;
	uintptr	sigcode0;
	uintptr	sigcode1;
	// uintptr	sigpc;
	uintptr	gopc;	// pc of go statement that created this goroutine

	int32	ncgo;
	CgoMal*	cgomal;

	Traceback* traceback;

	ucontext_t	context;
	void*		stack_context[10];
};

struct	M
{
	G*	g0;		// goroutine with scheduling stack
	G*	gsignal;	// signal-handling G
	byte*	gsignalstack;
	size_t	gsignalstacksize;
	void	(*mstartfn)(void);
	G*	curg;		// current running goroutine
	G*	caughtsig;	// goroutine running during fatal signal
	P*	p;		// attached P for executing Go code (nil if not executing Go code)
	P*	nextp;
	int32	id;
	int32	mallocing;
	int32	throwing;
	int32	gcing;
	int32	locks;
	int32	softfloat;
	int32	dying;
	int32	profilehz;
	int32	helpgc;
	bool	spinning;	// M is out of work and is actively looking for work
	bool	blocked;	// M is blocked on a Note
	uint32	fastrand;
	uint64	ncgocall;	// number of cgo calls in total
	int32	ncgo;		// number of cgo calls currently in progress
	CgoMal*	cgomal;
	Note	park;
	M*	alllink;	// on allm
	M*	schedlink;
	MCache	*mcache;
	G*	lockedg;
	Location createstack[32];	// Stack that created this thread.
	uint32	locked;	// tracking for LockOSThread
	M*	nextwaitm;	// next M waiting for lock
	uintptr	waitsema;	// semaphore for parking on locks
	uint32	waitsemacount;
	uint32	waitsemalock;
	GCStats	gcstats;
	bool	needextram;
	bool	dropextram;	// for gccgo: drop after call is done.
	uint8	traceback;
	bool	(*waitunlockf)(G*, void*);
	void*	waitlock;
	uintptr	end[];
};

struct P
{
	Lock;

	int32	id;
	uint32	status;		// one of Pidle/Prunning/...
	P*	link;
	uint32	schedtick;	// incremented on every scheduler call
	uint32	syscalltick;	// incremented on every system call
	M*	m;		// back-link to associated M (nil if idle)
	MCache*	mcache;
	Defer*	deferpool;	// pool of available Defer structs (see panic.c)

	// Cache of goroutine ids, amortizes accesses to runtime_sched.goidgen.
	uint64	goidcache;
	uint64	goidcacheend;

	// Queue of runnable goroutines.
	uint32	runqhead;
	uint32	runqtail;
	G*	runq[256];

	// Available G's (status == Gdead)
	G*	gfree;
	int32	gfreecnt;

	byte	pad[64];
};

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
enum
{
	LockExternal = 1,
	LockInternal = 2,
};
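
/* Example encoding (illustrative only): one active LockOSThread call plus
   two nested lockOSThread calls yields

	m->locked == LockExternal + 2*LockInternal	// == 1 + 2*2 == 5
*/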

struct	SigTab
{
	int32	sig;
	int32	flags;
	void*	fwdsig;
};
enum
{
	SigNotify = 1<<0,	// let signal.Notify have signal, even if from kernel
	SigKill = 1<<1,		// if signal.Notify doesn't take it, exit quietly
	SigThrow = 1<<2,	// if signal.Notify doesn't take it, exit loudly
	SigPanic = 1<<3,	// if the signal is from the kernel, panic
	SigDefault = 1<<4,	// if the signal isn't explicitly requested, don't monitor it
	SigHandling = 1<<5,	// our signal handler is registered
	SigGoExit = 1<<6,	// cause all runtime procs to exit (only used on Plan 9).
};

// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker and with ../../libmach/sym.c
// and with package debug/gosym.
struct	Func
{
	String	name;
	uintptr	entry;	// entry pc
};

#ifdef GOOS_nacl
enum {
	NaCl = 1,
};
#else
enum {
	NaCl = 0,
};
#endif

#ifdef GOOS_windows
enum {
	Windows = 1,
};
#else
enum {
	Windows = 0,
};
#endif

#ifdef GOOS_solaris
enum {
	Solaris = 1,
};
#else
enum {
	Solaris = 0,
};
#endif

struct	Timers
{
	Lock;
	G	*timerproc;
	bool		sleeping;
	bool		rescheduling;
	Note	waitnote;
	Timer	**t;
	int32	len;
	int32	cap;
};

// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
struct	Timer
{
	intgo	i;	// heap index

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(now, arg) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	int64	when;
	int64	period;
	FuncVal	*fv;
	Eface	arg;
	uintptr	seq;
};
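
/* Illustrative sketch only: schedule a one-shot timer roughly one second
   from now using runtime_nanotime and runtime_addtimer (declared below).
   The exact calling convention of fv->fn is defined by the timer code;
   the callback must be well behaved and must not block.

	static void tick(void) { }	// placeholder callback
	static FuncVal	tickfv = { tick };
	static Timer	t;

	t.when = runtime_nanotime() + 1000000000LL;	// absolute time in ns
	t.period = 0;					// 0 means no repeat
	t.fv = &tickfv;
	runtime_addtimer(&t);
*/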

// Lock-free stack node.
struct LFNode
{
	LFNode	*next;
	uintptr	pushcnt;
};

// Parallel for descriptor.
struct ParFor
{
	const FuncVal *body;		// executed for each element
	uint32 done;			// number of idle threads
	uint32 nthr;			// total number of threads
	uint32 nthrmax;			// maximum number of threads
	uint32 thrseq;			// thread id sequencer
	uint32 cnt;			// iteration space [0, cnt)
	bool wait;			// if true, wait until all threads finish processing,
					// otherwise parfor may return while other threads are still working
	ParForThread *thr;		// array of thread descriptors
	// stats
	uint64 nsteal __attribute__((aligned(8))); // force alignment for m68k
	uint64 nstealcnt;
	uint64 nprocyield;
	uint64 nosyield;
	uint64 nsleep;
};

// Track memory allocated by non-Go code during a cgo call,
// so that the garbage collector can see those allocations.
struct CgoMal
{
	CgoMal	*next;
	void	*alloc;
};

// Holds variables parsed from GODEBUG env var.
struct DebugVars
{
	int32	allocfreetrace;
	int32	cgocheck;
	int32	efence;
	int32	gccheckmark;
	int32	gcpacertrace;
	int32	gcshrinkstackoff;
	int32	gcstackbarrieroff;
	int32	gcstackbarrierall;
	int32	gcstoptheworld;
	int32	gctrace;
	int32	gcdead;
	int32	invalidptr;
	int32	sbrk;
	int32	scavenge;
	int32	scheddetail;
	int32	schedtrace;
	int32	wbshadow;
};

extern bool runtime_precisestack;
extern bool runtime_copystack;

/*
 * defined macros
 *    you need super-gopher-guru privilege
 *    to add to this list.
 */
#define	nelem(x)	(sizeof(x)/sizeof((x)[0]))
#define	nil		((void*)0)
#define USED(v)		((void) v)
#define	ROUND(x, n)	(((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
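/* For example, ROUND(13, 8) == 16 and ROUND(16, 8) == 16; n must be a
   power of two, and because the macro evaluates n twice, the n argument
   must not have side effects. */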

byte*	runtime_startup_random_data;
uint32	runtime_startup_random_data_len;
void	runtime_get_random_data(byte**, int32*);

enum {
	// hashinit wants this many random bytes
	HashRandomBytes = 32
};
void	runtime_hashinit(void);

void	runtime_traceback(void);
void	runtime_tracebackothers(G*);
enum
{
	// The maximum number of frames we print for a traceback
	TracebackMaxFrames = 100,
};

/*
 * external data
 */
extern	uintptr runtime_zerobase;
extern	G**	runtime_allg;
extern	uintptr runtime_allglen;
extern	G*	runtime_lastg;
extern	M*	runtime_allm;
extern	P**	runtime_allp;
extern	int32	runtime_gomaxprocs;
extern	uint32	runtime_needextram;
extern	uint32	runtime_panicking;
extern	int8*	runtime_goos;
extern	int32	runtime_ncpu;
extern	void	(*runtime_sysargs)(int32, uint8**);
extern	uint32	runtime_Hchansize;
extern	DebugVars	runtime_debug;
extern	uintptr	runtime_maxstacksize;

extern	bool	runtime_isstarted;
extern	bool	runtime_isarchive;

/*
 * common functions and data
 */
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
#define runtime_strncmp(s1, s2, n) __builtin_strncmp((s1), (s2), (n))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
intgo	runtime_findnull(const byte*);
intgo	runtime_findnullw(const uint16*);
void	runtime_dump(byte*, int32);

void	runtime_gogo(G*);
struct __go_func_type;
void	runtime_args(int32, byte**);
void	runtime_osinit(void);
void	runtime_goargs(void);
void	runtime_goenvs(void);
void	runtime_goenvs_unix(void);
void	runtime_throw(const char*) __attribute__ ((noreturn));
void	runtime_panicstring(const char*) __attribute__ ((noreturn));
bool	runtime_canpanic(G*);
void	runtime_prints(const char*);
void	runtime_printf(const char*, ...);
int32	runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void*	runtime_mal(uintptr);
String	runtime_gostring(const byte*);
String	runtime_gostringnocopy(const byte*);
void	runtime_schedinit(void);
void	runtime_initsig(bool);
void	runtime_sigenable(uint32 sig);
void	runtime_sigdisable(uint32 sig);
void	runtime_sigignore(uint32 sig);
int32	runtime_gotraceback(bool *crash);
void	runtime_goroutineheader(G*);
void	runtime_printtrace(Location*, int32, bool);
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
void	runtime_ready(G*);
String	runtime_getenv(const char*);
int32	runtime_atoi(const byte*, intgo);
void*	runtime_mstart(void*);
G*	runtime_malg(int32, byte**, size_t*);
void	runtime_mpreinit(M*);
void	runtime_minit(void);
void	runtime_unminit(void);
void	runtime_needm(void);
void	runtime_dropm(void);
void	runtime_signalstack(byte*, int32);
MCache*	runtime_allocmcache(void);
void	runtime_freemcache(MCache*);
void	runtime_mallocinit(void);
void	runtime_mprofinit(void);
#define runtime_malloc(s) __go_alloc(s)
#define runtime_free(p) __go_free(p)
#define runtime_getcallersp(p) __builtin_frame_address(1)
int32	runtime_mcount(void);
int32	runtime_gcount(void);
void	runtime_mcall(void(*)(G*));
uint32	runtime_fastrand1(void);
int32	runtime_timediv(int64, int32, int32*);
int32	runtime_round2(int32 x); // round x up to a power of 2.

// atomic operations
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
// Don't confuse with the x86 XADD instruction;
// this one is actually 'addx', that is, add-and-fetch.
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
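
/* Illustrative sketch only: a typical lock-free update loop built from
   these wrappers (which expand to the GCC __sync/__atomic builtins).

	static void
	counter_add(uint32 *p, uint32 delta)
	{
		uint32 old;

		for(;;) {
			old = runtime_atomicload(p);
			if(runtime_cas(p, old, old + delta))
				break;
		}
	}

   For a plain counter, runtime_xadd(p, delta) does the same in one call;
   the CAS loop form is useful when the update is more than an add.
*/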

void runtime_setmg(M*, G*);
void runtime_newextram(void);
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
void	runtime_gosched(void);
void	runtime_gosched0(G*);
void	runtime_schedtrace(bool);
void	runtime_park(bool(*)(G*, void*), void*, const char*);
void	runtime_parkunlock(Lock*, const char*);
void	runtime_tsleep(int64, const char*);
M*	runtime_newm(void);
void	runtime_goexit(void);
void	runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
void	runtime_entersyscallblock(void);
void	runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
G*	__go_go(void (*pfn)(void*), void*);
void	siginit(void);
bool	__go_sigsend(int32 sig);
int32	runtime_callers(int32, Location*, int32, bool keep_callers);
int64	runtime_nanotime(void);	// monotonic time
int64	runtime_unixnanotime(void); // real time, can jump when the clock is adjusted
void	runtime_dopanic(int32) __attribute__ ((noreturn));
void	runtime_startpanic(void);
void	runtime_freezetheworld(void);
void	runtime_unwindstack(G*, byte*);
void	runtime_sigprof(void);
void	runtime_resetcpuprofiler(int32);
void	runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void	runtime_usleep(uint32);
int64	runtime_cputicks(void);
int64	runtime_tickspersecond(void);
void	runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
void	runtime_addtimer(Timer*);
bool	runtime_deltimer(Timer*);
G*	runtime_netpoll(bool);
void	runtime_netpollinit(void);
int32	runtime_netpollopen(uintptr, PollDesc*);
int32	runtime_netpollclose(uintptr);
void	runtime_netpollready(G**, PollDesc*, int32);
uintptr	runtime_netpollfd(PollDesc*);
void	runtime_netpollarm(PollDesc*, int32);
void**	runtime_netpolluser(PollDesc*);
bool	runtime_netpollclosing(PollDesc*);
void	runtime_netpolllock(PollDesc*);
void	runtime_netpollunlock(PollDesc*);
void	runtime_crash(void);
void	runtime_parsedebugvars(void);
void	_rt0_go(void);
void*	runtime_funcdata(Func*, int32);
int32	runtime_setmaxthreads(int32);
G*	runtime_timejump(void);
void	runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));

void	runtime_stoptheworld(void);
void	runtime_starttheworld(void);
extern uint32 runtime_worldsema;

/*
 * mutual exclusion locks.  in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Lock is unlocked (no need to initialize each lock).
 */
void	runtime_lock(Lock*);
void	runtime_unlock(Lock*);
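
/* Illustrative sketch only: a zeroed Lock needs no initialization, so a
   static lock can simply be declared and used.

	static Lock	examplelock;	// zeroed => unlocked
	static int64	examplecount;

	static void
	bump(void)
	{
		runtime_lock(&examplelock);
		examplecount++;
		runtime_unlock(&examplelock);
	}
*/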

/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return.  future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened.  if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
void	runtime_noteclear(Note*);
void	runtime_notesleep(Note*);
void	runtime_notewakeup(Note*);
bool	runtime_notetsleep(Note*, int64);  // false - timeout
bool	runtime_notetsleepg(Note*, int64);  // false - timeout
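
/* Illustrative sketch only, following the protocol described above: one
   thread sleeps on a Note until another thread wakes it, exactly once
   per noteclear.

	static Note	done;

	// setup, before either thread touches the note:
	runtime_noteclear(&done);

	// waiting thread (on g0): blocks until notewakeup has been called
	runtime_notesleep(&done);

	// signalling thread: wakes the current or future sleeper
	runtime_notewakeup(&done);
*/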

/*
 * low-level synchronization for implementing the above
 */
uintptr	runtime_semacreate(void);
int32	runtime_semasleep(int64);
void	runtime_semawakeup(M*);
// or
void	runtime_futexsleep(uint32*, uint32, int64);
void	runtime_futexwakeup(uint32*, uint32);

/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
void	runtime_lfstackpush(uint64 *head, LFNode *node)
  __asm__ (GOSYM_PREFIX "runtime.lfstackpush");
LFNode*	runtime_lfstackpop(uint64 *head);
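
/* Illustrative sketch only: nodes embed an LFNode as their first field (a
   convention assumed here so the casts are valid), and the head is just a
   zero-initialized uint64.

	typedef struct MyNode MyNode;
	struct MyNode
	{
		LFNode	lf;	// must be first for the cast below
		int32	value;
	};

	static uint64	stackhead;	// zeroed => empty stack

	static void
	example(MyNode *n)
	{
		MyNode *m;

		runtime_lfstackpush(&stackhead, &n->lf);
		m = (MyNode*)runtime_lfstackpop(&stackhead);	// nil when empty
	}
*/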

/*
 * Parallel for over [0, n).
 * body() is executed for each iteration.
 * nthr - total number of worker threads.
 * if wait=true, threads return from parfor() when all work is done;
 * otherwise, threads can return while other threads are still finishing processing.
 */
ParFor*	runtime_parforalloc(uint32 nthrmax);
void	runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, bool wait, const FuncVal *body);
void	runtime_parfordo(ParFor *desc);
void	runtime_parforiters(ParFor*, uintptr, uintptr*, uintptr*);
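
/* Illustrative sketch only of the setup/run sequence; the body FuncVal is
   invoked by the parfor machinery for each iteration, and its exact
   calling convention is defined by the implementation.

	static void body(void) { }	// placeholder body
	static FuncVal	bodyfv = { body };
	ParFor	*desc;
	uint32	nthr = 4;	// worker threads
	uint32	n = 1000;	// iteration space [0, n)

	desc = runtime_parforalloc(nthr);
	runtime_parforsetup(desc, nthr, n, false, &bodyfv);
	// each of the nthr worker threads then calls:
	runtime_parfordo(desc);
*/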

/*
 * low level C-called
 */
#define runtime_mmap mmap
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
#define runtime_getcallerpc(p) __builtin_return_address(0)

#ifdef __rtems__
void __wrap_rtems_task_variable_add(void **);
#endif

/*
 * Names generated by gccgo.
 */
#define runtime_printbool	__go_print_bool
#define runtime_printfloat	__go_print_double
#define runtime_printint	__go_print_int64
#define runtime_printiface	__go_print_interface
#define runtime_printeface	__go_print_empty_interface
#define runtime_printstring	__go_print_string
#define runtime_printpointer	__go_print_pointer
#define runtime_printuint	__go_print_uint64
#define runtime_printslice	__go_print_slice
#define runtime_printcomplex	__go_print_complex

/*
 * runtime go-called
 */
void	runtime_printbool(_Bool);
void	runtime_printbyte(int8);
void	runtime_printfloat(double);
void	runtime_printint(int64);
void	runtime_printiface(Iface);
void	runtime_printeface(Eface);
void	runtime_printstring(String);
void	runtime_printpc(void*);
void	runtime_printpointer(void*);
void	runtime_printuint(uint64);
void	runtime_printhex(uint64);
void	runtime_printslice(Slice);
void	runtime_printcomplex(complex double);
void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
		  void **, void **)
  __asm__ (GOSYM_PREFIX "reflect.call");
#define runtime_panic __go_panic

/*
 * runtime c-called (but written in Go)
 */
void	runtime_printany(Eface)
     __asm__ (GOSYM_PREFIX "runtime.Printany");
void	runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
     __asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void	runtime_newErrorCString(const char*, Eface*)
     __asm__ (GOSYM_PREFIX "runtime.NewErrorCString");

/*
 * wrapped for go users
 */
void	runtime_semacquire(uint32 volatile *, bool);
void	runtime_semrelease(uint32 volatile *);
int32	runtime_gomaxprocsfunc(int32 n);
void	runtime_procyield(uint32);
void	runtime_osyield(void);
void	runtime_lockOSThread(void);
void	runtime_unlockOSThread(void);
bool	runtime_lockedOSThread(void);

bool	runtime_showframe(String, bool);
void	runtime_printcreatedby(G*);

uintptr	runtime_memlimit(void);

#define ISNAN(f) __builtin_isnan(f)

enum
{
	UseSpanType = 1,
};

#define runtime_setitimer setitimer

void	runtime_check(void);

// A list of global variables that the garbage collector must scan.
struct root_list {
	struct root_list *next;
	struct root {
		void *decl;
		size_t size;
	} roots[];
};

void	__go_register_gc_roots(struct root_list*);
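
/* Illustrative sketch only: registering one global with the collector.
   Compiler-generated lists end the roots array with a { nil, 0 } entry;
   that sentinel convention is assumed here.

	static int64 myglobal[4];

	static struct {
		struct root_list *next;
		struct root roots[2];
	} myroots = {
		nil,
		{
			{ &myglobal[0], sizeof(myglobal) },
			{ nil, 0 },
		},
	};

	// at startup:
	__go_register_gc_roots((struct root_list *) &myroots);
*/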

// Size of stack space allocated using Go's allocator.
// This will be 0 when using split stacks, as in that case
// the stacks are allocated by the splitstack library.
extern uintptr runtime_stacks_sys;

struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, String*, String*, intgo *);
extern byte* runtime_progname(void);
extern void runtime_main(void*);
extern uint32 runtime_in_callers;

int32 getproccount(void);

#define PREFETCH(p) __builtin_prefetch(p)

bool	runtime_gcwaiting(void);
void	runtime_badsignal(int);
Defer*	runtime_newdefer(void);
void	runtime_freedefer(Defer*);

struct time_now_ret
{
  int64_t sec;
  int32_t nsec;
};

struct time_now_ret now(void) __asm__ (GOSYM_PREFIX "time.now")
  __attribute__ ((no_split_stack));

extern void _cgo_wait_runtime_init_done (void);
extern void _cgo_notify_runtime_init_done (void);
extern _Bool runtime_iscgo;
extern _Bool runtime_cgoHasExtraM;
extern Hchan *runtime_main_init_done;
extern uintptr __go_end __attribute__ ((weak));