xref: /linux/arch/x86/mm/kasan_init_64.c (revision f86fd32d)
// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

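/*
 * Allocate naturally aligned, zeroed memory from memblock, above
 * MAX_DMA_ADDRESS and preferably on the given node. Panics on failure
 * unless the caller passed should_panic == false.
 */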
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

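/*
 * Populate shadow memory for [addr, end) within a single PMD: use a 2M
 * large page when PSE is available and the range spans a whole, aligned
 * PMD; otherwise map the range with 4K PTEs.
 */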
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

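/*
 * Populate shadow memory for [addr, end) within a single PUD: use a 1G
 * page when gbpages are supported and the range spans a whole, aligned
 * PUD; otherwise descend into kasan_populate_pmd() for each PMD.
 */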
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

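/* Fill an empty p4d entry and populate every non-huge PUD in [addr, end). */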
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

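/* Fill an empty pgd entry and populate every p4d in [addr, end). */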
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

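/*
 * Map real shadow pages for [addr, end): page-align the range outward,
 * then walk it one pgd entry at a time.
 */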
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

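/*
 * Populate shadow memory covering the direct mapping of one pfn_mapped[]
 * range, allocating from the node that owns the range's first pfn.
 */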
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

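/*
 * Unmap the early shadow from [start, end). Full pgd entries are cleared
 * first (via p4d_clear() when the p4d level is folded), then the tail that
 * shares the last pgd entry with other mappings is cleared p4d by p4d.
 */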
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With a folded p4d level, pgd_clear() is a nop; use
		 * p4d_clear() instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

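/*
 * Look up the p4d entry for @addr. This runs before the direct mapping is
 * usable, so the p4d table's virtual address is derived from the pgd value
 * through the kernel image mapping (__START_KERNEL_map - phys_base) rather
 * than __va().
 */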
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

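/*
 * Point every empty p4d entry in [addr, end) at the early shadow pud table
 * (and, with 5-level paging, an empty pgd entry at the early shadow p4d
 * table).
 */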
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

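/*
 * Map the entire KASAN shadow region in @pgd to the early shadow page
 * tables. The start address is rounded down to a pgd boundary; see the
 * comment in kasan_init().
 */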
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

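/*
 * Allocate page tables for any empty p4d entries in [addr, end) so the
 * upper levels exist without mapping actual shadow pages.
 */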
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
					       unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

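/*
 * For CONFIG_KASAN_VMALLOC: allocate only the top levels (pgds/p4ds) of the
 * shadow for the vmalloc area so they get synced into every page table;
 * the lower levels are populated lazily as vmalloc memory is shadowed.
 */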
static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * The p4ds need to be populated here so that they get synced
		 * into other page tables when running in four-level mode;
		 * see sync_global_pgds_l4().
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}

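/*
 * Set up the single early shadow page and the page table levels that point
 * at it, then map the whole shadow region in both early_top_pgt and
 * init_top_pgt so every memory access has backing shadow during early boot.
 */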
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

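/*
 * Build the real shadow memory: switch to early_top_pgt, tear down the
 * early shadow, populate shadow for the direct mapping, the CPU entry
 * area, vmalloc (shallow or early shadow) and the kernel image, then
 * switch back to init_top_pgt, write-protect the early shadow page and
 * enable reporting.
 */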
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to a PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share its PGD with anything else,
	 * so we claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with a
	 * bunch of things such as kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write-protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that the write protection has taken effect. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
437