// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

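// callbacks records the Go functions that have been registered as Windows
// callbacks by compileCallback, up to cb_max of them.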
type callbacks struct {
	lock mutex
	ctxt [cb_max]*wincallbackcontext
	n    int
}

func (c *wincallbackcontext) isCleanstack() bool {
	return c.cleanstack
}

func (c *wincallbackcontext) setCleanstack(cleanstack bool) {
	c.cleanstack = cleanstack
}

var (
	cbs     callbacks
	cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s
)

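// callbackasm is implemented in assembly. It is a table of short entry
// stubs, one per possible callback; callbackasmAddr returns the address of
// entry i.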
func callbackasm()

// callbackasmAddr returns the address of the runtime.callbackasm
// function adjusted by i.
// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
// and we want the callback to arrive at the corresponding CALL instruction
// instead of at the start of runtime.callbackasm.
// On ARM, runtime.callbackasm is a series of mov and branch instructions.
// R12 is loaded with the callback index. Each entry is two instructions,
// hence 8 bytes.
func callbackasmAddr(i int) uintptr {
	var entrySize int
	switch GOARCH {
	default:
		panic("unsupported architecture")
	case "386", "amd64":
		entrySize = 5
	case "arm":
		// On ARM, each entry is a MOV instruction
		// followed by a branch instruction
		entrySize = 8
	}
	return funcPC(callbackasm) + uintptr(i*entrySize)
}

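// compileCallback converts the Go function fn into a function pointer that
// can be called by Windows code. fn must take arguments that are each at
// most uintptr-sized and return a single uintptr-sized result. cleanstack
// reports whether the generated callback pops its own arguments from the
// stack on return (see restorestack below), as stdcall callbacks do on 386.
//
// A sketch of typical use, through the syscall.NewCallback wrapper (the
// callback signature here is only an illustration):
//
//	cb := syscall.NewCallback(func(hwnd, lparam uintptr) uintptr {
//		// ... inspect hwnd ...
//		return 1 // nonzero: keep enumerating
//	})
//	// cb can now be passed to a Windows API such as EnumWindows.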
//go:linkname compileCallback syscall.compileCallback
func compileCallback(fn eface, cleanstack bool) (code uintptr) {
	if fn._type == nil || (fn._type.kind&kindMask) != kindFunc {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	ft := (*functype)(unsafe.Pointer(fn._type))
	if len(ft.out()) != 1 {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	uintptrSize := unsafe.Sizeof(uintptr(0))
	if ft.out()[0].size != uintptrSize {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	argsize := uintptr(0)
	for _, t := range ft.in() {
		if t.size > uintptrSize {
			panic("compileCallback: argument size is larger than uintptr")
		}
		argsize += uintptrSize
	}

	lock(&cbs.lock) // We don't unlock this in a defer because this is used from the system stack.

	n := cbs.n
	for i := 0; i < n; i++ {
		if cbs.ctxt[i].gobody == fn.data && cbs.ctxt[i].isCleanstack() == cleanstack {
			r := callbackasmAddr(i)
			unlock(&cbs.lock)
			return r
		}
	}
	if n >= cb_max {
		unlock(&cbs.lock)
		throw("too many callback functions")
	}

	c := new(wincallbackcontext)
	c.gobody = fn.data
	c.argsize = argsize
	c.setCleanstack(cleanstack)
	if cleanstack && argsize != 0 {
		c.restorestack = argsize
	} else {
		c.restorestack = 0
	}
	cbs.ctxt[n] = c
	cbs.n++

	r := callbackasmAddr(n)
	unlock(&cbs.lock)
	return r
}

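// _LOAD_LIBRARY_SEARCH_SYSTEM32 is the LoadLibraryEx flag that restricts
// the DLL search to the System32 directory.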
const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800

// syscall_loadsystemlibrary will, when available, use LoadLibraryEx with the
// filename parameter and the important LOAD_LIBRARY_SEARCH_SYSTEM32 flag.
// On systems that do not have that option, absoluteFilepath should contain a
// fallback full path inside System32 for use with vanilla LoadLibrary.
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
//go:nosplit
func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall

	if useLoadLibraryEx {
		c.fn = getLoadLibraryEx()
		c.n = 3
		args := struct {
			lpFileName *uint16
			hFile      uintptr // always 0
			flags      uint32
		}{filename, 0, _LOAD_LIBRARY_SEARCH_SYSTEM32}
		c.args = uintptr(noescape(unsafe.Pointer(&args)))
	} else {
		c.fn = getLoadLibrary()
		c.n = 1
		c.args = uintptr(noescape(unsafe.Pointer(&absoluteFilepath)))
	}

	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	unlockOSThread() // not defer'd after the lockOSThread above to save stack frame size.
	return
}

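// syscall_loadlibrary is the runtime half of syscall.loadlibrary; it invokes
// LoadLibrary through asmstdcall and returns the module handle, or the
// Windows error if the handle is 0.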
//go:linkname syscall_loadlibrary syscall.loadlibrary
//go:nosplit
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getLoadLibrary()
	c.n = 1
	c.args = uintptr(noescape(unsafe.Pointer(&filename)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	return
}

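// syscall_getprocaddress is the runtime half of syscall.getprocaddress; it
// invokes GetProcAddress through asmstdcall and returns the procedure
// address, or the Windows error if the lookup fails.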
//go:linkname syscall_getprocaddress syscall.getprocaddress
//go:nosplit
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getGetProcAddress()
	c.n = 2
	c.args = uintptr(noescape(unsafe.Pointer(&handle)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	outhandle = c.r1
	if outhandle == 0 {
		err = c.err
	}
	return
}

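// syscall_Syscall is the runtime half of syscall.Syscall. It, and the
// Syscall6/9/12/15/18 variants below, differ only in how many arguments they
// accept; each one stores fn, the argument count, and a pointer to the
// argument block in the per-M syscall descriptor and hands it to asmstdcall.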
//go:linkname syscall_Syscall syscall.Syscall
//go:nosplit
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall6 syscall.Syscall6
//go:nosplit
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall9 syscall.Syscall9
//go:nosplit
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall12 syscall.Syscall12
//go:nosplit
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall15 syscall.Syscall15
//go:nosplit
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}

//go:linkname syscall_Syscall18 syscall.Syscall18
//go:nosplit
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = fn
	c.n = nargs
	c.args = uintptr(noescape(unsafe.Pointer(&a1)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}