1// Copyright 2014 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime
6
7import (
8	"runtime/internal/sys"
9	"unsafe"
10)
11
// add returns the pointer p advanced by x bytes.
// Should be a built-in for unsafe.Pointer?
//
// nosplit because it is used in contexts (e.g. during stack
// manipulation) where a stack split must not occur.
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
17
// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
// There is deliberately no Go body: this is a compiler intrinsic.
func getg() *g
22
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
// (Implemented outside Go; no body here.)
func mcall(fn func(*g))
38
// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		// Already on a system stack; just run fn directly.
		fn()
	} else if gp == mp.curg {
		// On the normal goroutine stack: switch to g0 via mcall,
		// run fn there, then jump back to the original goroutine.
		fn1 := func(origg *g) {
			fn()
			gogo(origg)
		}
		// Launder fn1 through noescape so it is not forced to the
		// heap. This looks like it violates mcall's no-noescape
		// rule, but fn1 cannot outlive this call: gogo(origg)
		// resumes the original goroutine before mcall "returns".
		mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
	} else {
		// Neither g0, gsignal, nor curg: something is badly wrong.
		badsystemstack()
	}
}
71
// badsystemstack aborts the process when systemstack is invoked from
// a goroutine that is neither g0, gsignal, nor the current user g.
func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}
75
// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// 1. *ptr is initialized memory and its type is pointer-free.
//
// 2. *ptr is uninitialized memory (e.g., memory that's being reused
//    for a new allocation) and hence contains only "junk".
//
// Implemented in assembly: in memclr_*.s
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
90
// reflect_memclrNoHeapPointers is exposed to package reflect via
// go:linkname; it simply forwards to memclrNoHeapPointers.
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}
95
// memmove copies n bytes from "from" to "to".
// The regions may overlap. Implemented outside Go.
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)
99
// reflect_memmove is exposed to package reflect via go:linkname;
// it simply forwards to memmove.
//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}
104
// memcmp compares size bytes at a and b, with C memcmp semantics:
// the result is negative, zero, or positive. Bound directly to the
// compiler builtin for gccgo.
//go:noescape
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32
108
// hashLoad is the map load factor (loadFactorNum/loadFactorDen),
// exported value for testing.
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
111
// fastrand returns a pseudo-random uint32 using per-m generator state
// (mp.fastrand), so no locking is needed.
//go:nosplit
func fastrand() uint32 {
	mp := getg().m
	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	s1, s0 := mp.fastrand[0], mp.fastrand[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	// Swap-and-store advances the state for the next call.
	mp.fastrand[0], mp.fastrand[1] = s0, s1
	return s0 + s1
}
126
127//go:nosplit
128func fastrandn(n uint32) uint32 {
129	// This is similar to fastrand() % n, but faster.
130	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
131	return uint32(uint64(fastrand()) * uint64(n) >> 32)
132}
133
// sync_fastrand is exposed to package sync via go:linkname.
//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }
136
// memequal reports whether the size bytes at a and b are identical.
// Implemented in assembly: in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool
140
// noescape hides a pointer from escape analysis.  noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input.  noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	// The xor with 0 is a no-op at runtime but breaks the data-flow
	// link that escape analysis would otherwise follow.
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
151
// jmpdefer jumps to the deferred function fv with argument pointer argp.
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)

// exit1 exits the process with the given status code.
func exit1(code int32)

// setg sets the current g.
func setg(gg *g)

// breakpoint triggers a debugger trap via the compiler builtin.
//extern __builtin_trap
func breakpoint()
159
160func asminit() {}
161
// reflectcall invokes fn (of type fntype) with the given params,
// storing output in results. Exposed to package reflect via go:linkname.
//go:linkname reflectcall reflect.call
//go:noescape
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)
165
166func procyield(cycles uint32)
167
168type neverCallThisFunction struct{}
169
// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)
181
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
198
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture. The implementation may be a compiler
// intrinsic; there is not necessarily code implementing this
// on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc() uintptr

//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr
234
// asmcgocall is not supported in this runtime; calling it is a fatal
// error. It exists only to satisfy references to the name.
func asmcgocall(fn, arg unsafe.Pointer) int32 {
	throw("asmcgocall")
	return 0
}
239
// _NoArgs is the argp used in Defer structs when there is no argp.
// It is all-ones so it cannot collide with a real stack address.
const _NoArgs = ^uintptr(0)
242
// prefetch hints the CPU to load addr into cache, via the compiler
// builtin. rw selects read (0) vs. write (1) intent; locality ranges
// from 0 (no temporal locality) to 3 (high temporal locality).
//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)
245
// prefetcht0 prefetches addr for reading with highest temporal locality.
func prefetcht0(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 3)
}
249
// prefetcht1 prefetches addr for reading with moderate temporal locality.
func prefetcht1(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 2)
}
253
// prefetcht2 prefetches addr for reading with low temporal locality.
func prefetcht2(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 1)
}
257
// prefetchnta prefetches addr for reading with no temporal locality
// (non-temporal access).
func prefetchnta(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 0)
}
261
// round returns n rounded up to the next multiple of a.
// a must be a power of 2.
func round(n, a uintptr) uintptr {
	mask := a - 1
	return (n + mask) &^ mask
}
266
// checkASM returns whether assembly runtime checks have passed.
// There are no assembly checks here, so this is always true.
func checkASM() bool {
	return true
}
271
272func eqstring(x, y string) bool {
273	a := stringStructOf(&x)
274	b := stringStructOf(&y)
275	if a.len != b.len {
276		return false
277	}
278	if a.str == b.str {
279		return true
280	}
281	return memequal(a.str, b.str, uintptr(a.len))
282}
283
// osyield yields the processor to the OS scheduler.
// For gccgo this is in the C code.
func osyield()
286
// syscall invokes the raw system call trap with up to six arguments.
// For gccgo this can be called directly (bound to the C syscall symbol).
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr
290
// setIsCgo records that cgo is in use.
// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo runtime.setIsCgo
func setIsCgo() {
	iscgo = true
}
296
// setCpuidECX records the CPUID ECX feature bits.
// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
	cpuid_ecx = v
}
302
// setSupportAES records whether the CPU supports AES instructions.
// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES runtime.setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}
308
// errno returns the current C errno value. Here for gccgo.
func errno() int
311
// entersyscall and entersyscallblock mark the current goroutine as
// entering a (possibly blocking) system call.
// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)
315
// getMstats returns a pointer to the global memstats.
// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}
322
// getSigtramp returns the signal trampoline address. Written in C.
func getSigtramp() uintptr
325
// getSigactionHandler and setSigactionHandler read and write the
// handler of a sigaction structure.
// The sa_handler field is generally hidden in a union, so use C accessors.
//go:noescape
func getSigactionHandler(*_sigaction) uintptr

//go:noescape
func setSigactionHandler(*_sigaction, uintptr)
332
// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns the signal address and, if available, the PC where the
// signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)
337
// dumpregs prints machine registers from a signal context.
// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)
340
// getsched returns a pointer to the global scheduler state.
// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
	return &sched
}
346
// getCgoHasExtraM returns a pointer to the cgoHasExtraM flag.
// Temporary for gccgo until we port proc.go.
//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
func getCgoHasExtraM() *bool {
	return &cgoHasExtraM
}
352
// getAllP returns a pointer to the first element of the allp array.
// Temporary for gccgo until we port proc.go.
//go:linkname getAllP runtime.getAllP
func getAllP() **p {
	return &allp[0]
}
358
// allocg allocates a new, zeroed g.
// Temporary for gccgo until we port proc.go.
//go:linkname allocg runtime.allocg
func allocg() *g {
	return new(g)
}
364
// getallglen returns the number of entries in allgs.
// Temporary for gccgo until we port the garbage collector.
//go:linkname getallglen runtime.getallglen
func getallglen() uintptr {
	return allglen
}
370
// getallg returns the i'th entry of allgs.
// Temporary for gccgo until we port the garbage collector.
//go:linkname getallg runtime.getallg
func getallg(i int) *g {
	return allgs[i]
}
376
// getallm returns the head of the allm list.
// Temporary for gccgo until we port the garbage collector.
//go:linkname getallm runtime.getallm
func getallm() *m {
	return allm
}
382
// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr
390
// getPanicking returns the panicking counter.
// Temporary for gccgo until C code no longer needs it.
//go:nosplit
//go:linkname getPanicking runtime.getPanicking
func getPanicking() uint32 {
	return panicking
}
397
// setncpu records the number of CPUs. Called by C code.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}
403
// setpagesize records the physical page size. Called by C code.
// Only the first call takes effect; later calls are ignored.
//go:linkname setpagesize runtime.setpagesize
func setpagesize(s uintptr) {
	if physPageSize == 0 {
		physPageSize = s
	}
}
411
// runtime_m0 returns a pointer to the bootstrap m.
// Called by C code during library initialization.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
	return &m0
}
417
// runtime_g0 returns a pointer to the bootstrap g.
// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_g0 runtime.runtime_g0
func runtime_g0() *g {
	return &g0
}
423
424const uintptrMask = 1<<(8*sys.PtrSize) - 1
425
// bitvector is a simple bit vector backed by raw bytes.
type bitvector struct {
	n        int32  // # of bits
	bytedata *uint8 // pointer to the underlying bit data
}
430
// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	r := 0
	if x {
		r = 1
	}
	return r
}
438