// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
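
/*
 * Illustrative usage (not part of the build): the sample interval can be set
 * at boot via "kfence.sample_interval=<ms>" on the kernel command line, and --
 * given MODULE_PARAM_PREFIX and the 0600 permissions above -- adjusted or set
 * to 0 (disable) at runtime via
 * /sys/module/kfence/parameters/sample_interval. Re-enabling after a runtime
 * disable is rejected by param_set_sample_interval().
 */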

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}
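
/*
 * Worked example of the mapping above (illustrative, assuming 4 KiB pages):
 * the pool is laid out as
 *
 *   page 0    page 1    page 2    page 3    page 4    page 5
 *   [guard]   [guard]   [obj 0]   [guard]   [obj 1]   [guard]  ...
 *
 * so an address in page 2 or 3 gives (addr - __kfence_pool) / (2 * PAGE_SIZE)
 * == 1 and therefore index 0, while an address in the two leading guard pages
 * gives index -1 and is rejected.
 */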

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
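
/*
 * E.g. (illustrative): meta == &kfence_metadata[0] yields offset
 * (0 + 1) * 2 * PAGE_SIZE, i.e. the third pool page -- the inverse of the
 * addr_to_metadata() calculation above.
 */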

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
					   enum kfence_object_state next)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	/*
	 * Skip over 1 (this) function; noinline ensures we do not accidentally
	 * skip over the caller by never inlining.
	 */
	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	track->pid = task_pid_nr(current);

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}
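
/*
 * Example (illustrative): for a 72-byte object placed at the start of its
 * page, the canary covers no bytes on the left and bytes
 * [meta->addr + 72, pageaddr + PAGE_SIZE) on the right; for a right-aligned
 * object it is the mirror image. The pattern itself is derived from the low
 * address bits (see KFENCE_CANARY_PATTERN() in kfence.h), so corrupted data
 * that was merely shifted or copied does not accidentally match.
 */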

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta)
		return NULL;

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, prandom_u32_max()
	 * will always return zero. We still benefit from enabling KFENCE as early
	 * as possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside is
	 * that the out-of-bounds accesses detected are deterministic for such
	 * allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}
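
	/*
	 * Illustrative example: with PAGE_SIZE == 4096, size == 72 and
	 * cache->align == 8, the "right" placement yields
	 * meta->addr = pageaddr + 4024, so an out-of-bounds write past the
	 * object hits the protected guard page after at most align - 1 bytes
	 * of alignment slack; the "left" placement (pageaddr itself) instead
	 * catches underflows immediately.
	 */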

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
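
/*
 * Example output of /sys/kernel/debug/kfence/stats (values illustrative only):
 *
 *   enabled: 1
 *   currently allocated: 4
 *   total allocations: 3641
 *   total frees: 3637
 *   zombie allocations: 0
 *   total bugs: 0
 */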

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
				   sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_power_efficient_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
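
/*
 * Summary of one sample period with CONFIG_KFENCE_STATIC_KEYS (as implemented
 * above): the delayed work opens the gate (kfence_allocation_gate = 0) and
 * enables the static key; the first allocation that reaches __kfence_alloc()
 * closes the gate and wakes this worker via irq_work; the worker then disables
 * the static key and re-queues itself after kfence_sample_interval
 * milliseconds. Without static keys, the work simply re-arms the gate on every
 * interval.
 */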

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_power_efficient_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}
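
/*
 * For scale (illustrative): the pool set up in kfence_init_pool() spans
 * 2 * (CONFIG_KFENCE_NUM_OBJECTS + 1) pages, so with the Kconfig default of
 * 255 objects and 4 KiB pages the pr_info() above would report
 * "using 2097152 bytes for 255 objects" (2 MiB).
 */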

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	/*
	 * allocation_gate only needs to become non-zero, so it doesn't make
	 * sense to continue writing to it and pay the associated contention
	 * cost, in case we have a large number of concurrent allocations.
	 */
	if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	if (size > PAGE_SIZE)
		return NULL;

	return kfence_guarded_alloc(s, size, flags);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

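	/*
	 * Page-index parity identifies the page type: given the layout
	 * [guard][guard][obj 0][guard][obj 1][guard]..., odd indices are
	 * guard/redzone pages (out-of-bounds access next to a neighbouring
	 * object) and even indices >= 2 are object pages (use-after-free or
	 * access to a not-yet-allocated object).
	 */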
	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}