// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
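
/*
 * For illustration: with a compile-time-constant size, the hardened
 * usercopy check is skipped entirely, since check_object_size() only
 * calls __check_object_size() when !__builtin_constant_p(n):
 *
 *	copy_to_user(ptr, buf, 32);           // check elided at build time
 *	copy_to_user(ptr, buf, unconst + 32); // forces the runtime check
 *
 * "unconst" is never assigned, so it always reads as 0, but its
 * volatile qualifier keeps the optimizer from folding it away.
 */
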
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}
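
/*
 * For example, returning "buf" directly from do_usercopy_stack_callee()
 * below would typically trip GCC's -Wreturn-local-addr warning, and
 * newer GCC versions may even replace the returned address with NULL;
 * the extra noinline hop hides the escaping pointer from that analysis.
 */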

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}
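
	/*
	 * In the !bad_frame case, copying sizeof(good_stack) bytes from
	 * bad_stack runs past the end of the thread stack, so the
	 * generic stack-extent check (rather than the frame-walking
	 * check) should be what trips for the STACK_BEYOND test.
	 */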

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
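	/*
	 * vm_mmap() returns a negative errno cast to unsigned long on
	 * failure, which is why failure is detected here as a value at
	 * or above TASK_SIZE rather than with IS_ERR().
	 */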
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_slab_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;
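
	/*
	 * "one" points to a 1024-byte slab object, so the "too large"
	 * copies below (size bytes starting 16 bytes in) overrun the
	 * object by 16 bytes, which the whole-object size check should
	 * reject.
	 */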

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_slab_size() tests will pass too.
 */
static void do_usercopy_slab_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer that contains a whitelisted window.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;
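
	/*
	 * These mirror the useroffset/usersize values given to
	 * kmem_cache_create_usercopy() in lkdtm_usercopy_init(); the
	 * "bad" copies below start one byte before the window, which is
	 * enough to leave the whitelisted region.
	 */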

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
	do_usercopy_slab_size(true);
}

static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
	do_usercopy_slab_size(false);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
	do_usercopy_slab_whitelist(true);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
	do_usercopy_slab_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}
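
/*
 * The FRAME tests pass bad_frame = true, exercising the frame-walking
 * check (arch_within_stack_frames(), only available on architectures
 * that implement it), while STACK_BEYOND uses a pointer near the very
 * end of the thread stack to exercise the generic stack-extent check.
 */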

static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

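	/*
	 * Copying from the address of vm_mmap() itself is a read of
	 * kernel text; hardened usercopy should refuse any usercopy
	 * that overlaps the kernel text mapping.
	 */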
	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This expects "kaddr" to point to a PAGE_SIZE allocation, so a more
 * complete test that also exercised copy_from_user() would risk memory
 * corruption. Just test copy_to_user() here, as that covers almost
 * exactly the same code paths.
 */
static void do_usercopy_page_span(const char *name, void *kaddr)
{
	unsigned long uaddr;

	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (uaddr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	/* Initialize contents. */
	memset(kaddr, 0xAA, PAGE_SIZE);

	/* Bump the kaddr forward to detect a page-spanning overflow. */
	kaddr += PAGE_SIZE / 2;
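
	/*
	 * From the page midpoint, the PAGE_SIZE-long "bad" copy below
	 * extends halfway into the next page; unless both pages belong
	 * to the same allocation, the cross-page checks should refuse
	 * it.
	 */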

	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr,
			 unconst + (PAGE_SIZE / 2))) {
		pr_err("copy_to_user() failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
		goto free_user;
	}

	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(uaddr, PAGE_SIZE);
}

static void lkdtm_USERCOPY_VMALLOC(void)
{
	void *addr;

	addr = vmalloc(PAGE_SIZE);
	if (!addr) {
		pr_err("vmalloc() failed!?\n");
		return;
	}
	do_usercopy_page_span("vmalloc", addr);
	vfree(addr);
}

static void lkdtm_USERCOPY_FOLIO(void)
{
	struct folio *folio;
	void *addr;

	/*
	 * FIXME: Folio checking currently misses 0-order allocations, so
	 * allocate and bump forward to the last page.
	 */
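	/*
	 * An order-1 folio spans two pages, so testing from the second
	 * page makes the half-page overflow cross the folio boundary.
	 */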
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio) {
		pr_err("folio_alloc() failed!?\n");
		return;
	}
	addr = folio_address(folio);
	if (addr)
		do_usercopy_page_span("folio", addr + PAGE_SIZE);
	else
		pr_err("folio_address() failed?!\n");
	folio_put(folio);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache with a usercopy whitelist window. */
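	/*
	 * With the default cache_size of 1024, useroffset = 256 and
	 * usersize = 64 below, giving a whitelist window covering bytes
	 * 256-319 of each object; this is what the WHITELIST tests
	 * probe.
	 */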
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}

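/*
 * CRASHTYPE() pairs each test name with its lkdtm_<name>() handler so
 * the lkdtm core can trigger these tests at runtime, e.g. via
 * "echo USERCOPY_FOLIO > /sys/kernel/debug/provoke-crash/DIRECT".
 */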
static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_VMALLOC),
	CRASHTYPE(USERCOPY_FOLIO),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};