xref: /linux/arch/riscv/include/asm/kfence.h (revision 2da68a77)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef _ASM_RISCV_KFENCE_H
4 #define _ASM_RISCV_KFENCE_H
5 
6 #include <linux/kfence.h>
7 #include <linux/pfn.h>
8 #include <asm-generic/pgalloc.h>
9 #include <asm/pgtable.h>
10 
/*
 * Split the leaf (huge) PMD mapping covering @addr into a table of
 * PAGE_SIZE PTEs, so that KFENCE can later flip the present bit on
 * individual pages within the region.
 *
 * Returns 0 on success, -ENOMEM if the PTE page cannot be allocated.
 */
static inline int split_pmd_page(unsigned long addr)
{
	int i;
	/* PFN of the first page in the PMD-aligned region containing addr. */
	unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
	pmd_t *pmd = pmd_off_k(addr);
	pte_t *pte = pte_alloc_one_kernel(&init_mm);

	if (!pte)
		return -ENOMEM;

	/*
	 * Populate every PTE with the identical mapping the leaf PMD
	 * provided, *before* installing the table into the PMD, so no
	 * walker ever observes a partially filled PTE page.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
	/* Replace the leaf entry with a pointer to the new PTE table. */
	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));

	/* Drop any stale huge-page TLB entries for the whole PMD range. */
	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	return 0;
}
28 
29 static inline bool arch_kfence_init_pool(void)
30 {
31 	int ret;
32 	unsigned long addr;
33 	pmd_t *pmd;
34 
35 	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
36 	     addr += PAGE_SIZE) {
37 		pmd = pmd_off_k(addr);
38 
39 		if (pmd_leaf(*pmd)) {
40 			ret = split_pmd_page(addr);
41 			if (ret)
42 				return false;
43 		}
44 	}
45 
46 	return true;
47 }
48 
49 static inline bool kfence_protect_page(unsigned long addr, bool protect)
50 {
51 	pte_t *pte = virt_to_kpte(addr);
52 
53 	if (protect)
54 		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
55 	else
56 		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
57 
58 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
59 
60 	return true;
61 }
62 
63 #endif /* _ASM_RISCV_KFENCE_H */
64