/* xref: /freebsd/sys/cddl/dev/kinst/trampoline.c (revision 4e8d558c) */
1 /*
2  * SPDX-License-Identifier: CDDL 1.0
3  *
4  * Copyright 2022 Christos Margiolis <christos@FreeBSD.org>
5  * Copyright 2022 Mark Johnston <markj@FreeBSD.org>
6  */
7 
8 #include <sys/param.h>
9 #include <sys/bitset.h>
10 #include <sys/cred.h>
11 #include <sys/eventhandler.h>
12 #include <sys/kernel.h>
13 #include <sys/lock.h>
14 #include <sys/malloc.h>
15 #include <sys/proc.h>
16 #include <sys/queue.h>
17 #include <sys/sx.h>
18 
19 #include <vm/vm.h>
20 #include <vm/vm_param.h>
21 #include <vm/pmap.h>
22 #include <vm/vm_map.h>
23 #include <vm/vm_kern.h>
24 #include <vm/vm_object.h>
25 
26 #include <cddl/dev/dtrace/dtrace_cddl.h>
27 
28 #include "kinst.h"
29 #include "kinst_isa.h"
30 
/*
 * Pattern used to fill unpopulated trampoline space: one ISA-specific patch
 * value (e.g. a breakpoint/illegal instruction), expressed as a compound
 * literal array so it can be passed to memcpy().
 */
#define KINST_TRAMP_FILL_PATTERN	((kinst_patchval_t []){KINST_PATCHVAL})
#define KINST_TRAMP_FILL_SIZE		sizeof(kinst_patchval_t)

/* Number of fixed-size trampolines carved out of one chunk. */
#define KINST_TRAMPS_PER_CHUNK		(KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)

/*
 * Tracker for one executable chunk of KINST_TRAMPCHUNK_SIZE bytes, divided
 * into KINST_TRAMPS_PER_CHUNK trampolines.
 */
struct trampchunk {
	TAILQ_ENTRY(trampchunk) next;
	uint8_t *addr;		/* base of the chunk's kernel VA mapping */
	/* 0 -> allocated, 1 -> free */
	BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
};

/* All live chunks; the list and each chunk's bitmap are protected by
 * kinst_tramp_sx. */
static TAILQ_HEAD(, trampchunk)	kinst_trampchunks =
    TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
static struct sx		kinst_tramp_sx;
SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
/* Tags for the per-thread ctor/dtor hooks registered in
 * kinst_trampoline_init() and removed in kinst_trampoline_deinit(). */
static eventhandler_tag		kinst_thread_ctor_handler;
static eventhandler_tag		kinst_thread_dtor_handler;
49 
50 /*
51  * Fill the trampolines with KINST_TRAMP_FILL_PATTERN so that the kernel will
52  * crash cleanly if things somehow go wrong.
53  */
54 static void
55 kinst_trampoline_fill(uint8_t *addr, int size)
56 {
57 	int i;
58 
59 	for (i = 0; i < size; i += KINST_TRAMP_FILL_SIZE) {
60 		memcpy(&addr[i], KINST_TRAMP_FILL_PATTERN,
61 		    KINST_TRAMP_FILL_SIZE);
62 	}
63 }
64 
/*
 * Allocate, map and initialize a new executable trampoline chunk and insert
 * it at the head of kinst_trampchunks.  Returns NULL if the address-space
 * reservation fails.  May sleep (M_WAITOK); the caller must hold
 * kinst_tramp_sx exclusively.
 */
static struct trampchunk *
kinst_trampchunk_alloc(void)
{
	struct trampchunk *chunk;
	vm_offset_t trampaddr;
	int error __diagused;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

#ifdef __amd64__
	/*
	 * To simplify population of trampolines, we follow the amd64 kernel's
	 * code model and allocate them above KERNBASE, i.e., in the top 2GB of
	 * the kernel's virtual address space (not the case for other
	 * platforms).
	 */
	trampaddr = KERNBASE;
#else
	trampaddr = VM_MIN_KERNEL_ADDRESS;
#endif
	/*
	 * Allocate virtual memory for the trampoline chunk. The returned
	 * address is saved in "trampaddr". Trampolines must be executable so
	 * max_prot must include VM_PROT_EXECUTE.
	 */
	error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
	    KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    0);
	if (error != KERN_SUCCESS) {
		KINST_LOG("trampoline chunk allocation failed: %d", error);
		return (NULL);
	}

	/* Back the reservation with pages; M_WAITOK means this cannot fail. */
	error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
	    M_WAITOK | M_EXEC);
	KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

	/* Poison the whole chunk so unused trampolines trap if executed. */
	kinst_trampoline_fill((uint8_t *)trampaddr, KINST_TRAMPCHUNK_SIZE);

	/* Allocate a tracker for this chunk. */
	chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
	chunk->addr = (void *)trampaddr;
	/* All trampolines start out free. */
	BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

	TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

	return (chunk);
}
113 
114 static void
115 kinst_trampchunk_free(struct trampchunk *chunk)
116 {
117 	sx_assert(&kinst_tramp_sx, SX_XLOCKED);
118 
119 	TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
120 	kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
121 	    KINST_TRAMPCHUNK_SIZE);
122 	(void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
123 	    (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
124 	free(chunk, M_KINST);
125 }
126 
/*
 * Hand out one free trampoline, allocating a fresh chunk if every existing
 * chunk is full.  Returns NULL on failure; with M_NOWAIT, failure simply
 * means no free trampoline was available without sleeping.  The caller must
 * hold kinst_tramp_sx exclusively.
 */
static uint8_t *
kinst_trampoline_alloc_locked(int how)
{
	struct trampchunk *chunk;
	uint8_t *tramp;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		/* All trampolines from this chunk are already allocated. */
		if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
			continue;
		/* BIT_FFS() returns indices starting at 1 instead of 0. */
		off--;
		break;
	}
	/* chunk == NULL here means the loop above found no free slot. */
	if (chunk == NULL) {
		/* Chunk allocation sleeps, which M_NOWAIT forbids. */
		if ((how & M_NOWAIT) != 0)
			return (NULL);

		/*
		 * We didn't find any free trampoline in the current list,
		 * allocate a new one.  If that fails the provider will no
		 * longer be reliable, so try to warn the user.
		 */
		if ((chunk = kinst_trampchunk_alloc()) == NULL) {
			/* Warn only once to avoid spamming the console. */
			static bool once = true;

			if (once) {
				once = false;
				KINST_LOG(
				    "kinst: failed to allocate trampoline, "
				    "probes may not fire");
			}
			return (NULL);
		}
		/* A fresh chunk is entirely free; take its first slot. */
		off = 0;
	}
	/* Mark the slot allocated (bit clear == allocated). */
	BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
	tramp = chunk->addr + off * KINST_TRAMP_SIZE;
	return (tramp);
}
170 
171 uint8_t *
172 kinst_trampoline_alloc(int how)
173 {
174 	uint8_t *tramp;
175 
176 	sx_xlock(&kinst_tramp_sx);
177 	tramp = kinst_trampoline_alloc_locked(how);
178 	sx_xunlock(&kinst_tramp_sx);
179 	return (tramp);
180 }
181 
/*
 * Return "tramp" to its owning chunk's free set, re-poisoning it first.
 * If "freechunks" is true and the chunk becomes entirely free, the chunk
 * itself is released.  NULL is a no-op; a pointer that belongs to no chunk
 * is a fatal caller bug.  The caller must hold kinst_tramp_sx exclusively.
 */
static void
kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
{
	struct trampchunk *chunk;
	int off;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	if (tramp == NULL)
		return;

	/* Linear search: find the chunk/slot that "tramp" came from. */
	TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
		for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
			if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
				/* Re-poison so stale jumps trap cleanly. */
				kinst_trampoline_fill(tramp, KINST_TRAMP_SIZE);
				BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
				    &chunk->free);
				if (freechunks &&
				    BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
				    &chunk->free))
					kinst_trampchunk_free(chunk);
				return;
			}
		}
	}
	panic("%s: did not find trampoline chunk for %p", __func__, tramp);
}
209 
210 void
211 kinst_trampoline_dealloc(uint8_t *tramp)
212 {
213 	sx_xlock(&kinst_tramp_sx);
214 	kinst_trampoline_dealloc_locked(tramp, true);
215 	sx_xunlock(&kinst_tramp_sx);
216 }
217 
/*
 * thread_ctor event hook: give every newly constructed thread its own
 * trampoline.  M_WAITOK may still yield NULL if chunk allocation fails;
 * kinst_trampoline_dealloc() tolerates a NULL t_kinst.
 */
static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
	td->t_kinst = kinst_trampoline_alloc(M_WAITOK);
}
223 
224 static void
225 kinst_thread_dtor(void *arg __unused, struct thread *td)
226 {
227 	void *tramp;
228 
229 	tramp = td->t_kinst;
230 	td->t_kinst = NULL;
231 
232 	/*
233 	 * This assumes that the thread_dtor event permits sleeping, which
234 	 * appears to be true for the time being.
235 	 */
236 	kinst_trampoline_dealloc(tramp);
237 }
238 
/*
 * Module initialization: register the per-thread ctor/dtor hooks and then
 * retroactively give every existing thread in the system a trampoline.
 * Returns 0 on success or ENOMEM if a trampoline could not be allocated,
 * in which case kinst_trampoline_deinit() is expected to clean up.
 */
int
kinst_trampoline_init(void)
{
	struct proc *p;
	struct thread *td;
	void *tramp;
	int error;

	/* From here on, new threads get trampolines automatically. */
	kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
	    kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
	kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
	    kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

	error = 0;
	/* "tramp" carries a spare trampoline across retries of a process. */
	tramp = NULL;

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
retry:
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/* Already populated by the ctor hook or a retry. */
			if (td->t_kinst != NULL)
				continue;
			if (tramp == NULL) {
				/*
				 * Try to allocate a trampoline without dropping
				 * the process lock.  If all chunks are fully
				 * utilized, we must release the lock and try
				 * again.
				 */
				tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
				if (tramp == NULL) {
					PROC_UNLOCK(p);
					/* Sleeping alloc; proc lock dropped. */
					tramp = kinst_trampoline_alloc_locked(
					    M_WAITOK);
					if (tramp == NULL) {
						/*
						 * Let the unload handler clean
						 * up.
						 */
						error = ENOMEM;
						goto out;
					} else
						goto retry;
				}
			}
			td->t_kinst = tramp;
			tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
out:
	sx_xunlock(&kinst_tramp_sx);
	sx_sunlock(&allproc_lock);
	return (error);
}
296 
/*
 * Module teardown: unhook the thread ctor/dtor events, strip the trampoline
 * from every thread in the system, then release all chunks in one pass
 * (per-trampoline deallocation deliberately does not free chunks here).
 * Always returns 0.
 */
int
kinst_trampoline_deinit(void)
{
	struct trampchunk *chunk, *tmp;
	struct proc *p;
	struct thread *td;

	/* Stop new threads from acquiring trampolines. */
	EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
	EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/* freechunks == false: chunks are freed below. */
			kinst_trampoline_dealloc_locked(td->t_kinst, false);
			td->t_kinst = NULL;
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	/* SAFE variant: kinst_trampchunk_free() unlinks as we iterate. */
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);

	return (0);
}
324