// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

struct ctlreg __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm			(*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir		vmlinux.swapper_pg_dir_off
#define invalid_pg_dir		vmlinux.invalid_pg_dir_off

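/*
 * Modes for pgtable_populate(). All modes create the required page-table
 * hierarchy; except for POPULATE_NONE they also map pages, each applying
 * the physical address translation its name implies (see _pa() below).
 * The KASAN modes control how the KASAN shadow is populated.
 */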
enum populate_mode {
	POPULATE_NONE,
	POPULATE_DIRECT,
	POPULATE_ABS_LOWCORE,
	POPULATE_IDENTITY,
	POPULATE_KERNEL,
#ifdef CONFIG_KASAN
	POPULATE_KASAN_MAP_SHADOW,
	POPULATE_KASAN_ZERO_SHADOW,
	POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page	vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte	((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd	((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud	((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d	((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)		((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

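/*
 * Populate the KASAN shadow for the [start, end) address range. The shadow
 * addresses are derived with kasan_mem_to_shadow() and aligned outward to
 * full pages, since the 1:8 shadow mapping may yield unaligned bounds.
 */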
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

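/*
 * Initialize the early shadow page tables (a single read-only zero page
 * backing whole regions via shared tables) and populate shadow memory:
 * real shadow for usable physmem ranges and the kernel image, zero shadow
 * for memory gaps and untracked regions, and, with KASAN_VMALLOC, only a
 * shallow top-level population for the vmalloc and modules area.
 */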
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
	unsigned long memgap_start = 0;
	unsigned long untracked_end;
	unsigned long start, end;
	int i;

	pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
	if (!machine.has_nx)
		pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
	crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
	__arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat(kasan_early_shadow_pte, 1);

	for_each_physmem_usable_range(i, &start, &end) {
		kasan_populate((unsigned long)__identity_va(start),
			       (unsigned long)__identity_va(end),
			       POPULATE_KASAN_MAP_SHADOW);
		if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260) {
			kasan_populate((unsigned long)__identity_va(memgap_start),
				       (unsigned long)__identity_va(start),
				       POPULATE_KASAN_ZERO_SHADOW);
		}
		memgap_start = end;
	}
	kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
	kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
		       POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

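/*
 * The kasan_*_populate_zero_shadow() helpers short-circuit zero shadow
 * population: if the current entry covers a fully aligned region that is
 * to be zero-shadowed, point it at the shared early shadow table one
 * level down (or at pte_z at pte level) instead of allocating new tables.
 * They return true if the entry was handled.
 */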
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
		pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
		return true;
	}
	return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
		p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
		return true;
	}
	return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
		pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
		return true;
	}
	return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
					   unsigned long end, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW &&
	    IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
		pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
		return true;
	}
	return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
		set_pte(pte, pte_z);
		return true;
	}
	return false;
}
#else
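
/* Stubs so the common page-table walkers compile without CONFIG_KASAN. */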
static inline void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
						  unsigned long end, enum populate_mode mode)
{
	return false;
}

static inline bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
	return false;
}

#endif

/*
 * Mimic virt_to_kpte() for lack of an init_mm symbol; unlike that
 * helper, skip the pmd NULL check.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
	return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

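/*
 * Allocate and initialize a crst table (region or segment table). These
 * tables span PAGE_SIZE << CRST_ALLOC_ORDER bytes; every entry is preset
 * to @val, the empty/invalid entry for the next lower level.
 */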
static void *boot_crst_alloc(unsigned long val)
{
	unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
	unsigned long *table;

	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

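/*
 * Allocate a 2KB page table. s390 page tables are half a page, so a full
 * page is allocated, the upper half is handed out, and the lower half is
 * kept for the next request.
 */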
static pte_t *boot_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	/*
	 * Handling pte_leftover this way helps to avoid memory fragmentation
	 * during POPULATE_KASAN_MAP_SHADOW when EDAT is off.
	 */
	if (!pte_leftover) {
		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
		__arch_set_page_dat(pte, 1);
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}

	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

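/*
 * Translate the virtual address @addr into the physical address to map,
 * according to @mode. For POPULATE_KASAN_MAP_SHADOW, fresh zeroed memory
 * is allocated instead, since shadow pages have no fixed physical
 * counterpart. Returns -1 for modes that do not map anything.
 */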
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
	switch (mode) {
	case POPULATE_NONE:
		return -1;
	case POPULATE_DIRECT:
		return addr;
	case POPULATE_ABS_LOWCORE:
		return __abs_lowcore_pa(addr);
	case POPULATE_KERNEL:
		return __kernel_pa(addr);
	case POPULATE_IDENTITY:
		return __identity_pa(addr);
#ifdef CONFIG_KASAN
	case POPULATE_KASAN_MAP_SHADOW:
		addr = physmem_alloc_top_down(RR_VMEM, size, size);
		memset((void *)addr, 0, size);
		return addr;
#endif
	default:
		return -1;
	}
}

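/*
 * Large mappings are only used for the direct, identity and kernel
 * mappings, and additionally require the matching EDAT facility (EDAT1
 * for segment, EDAT2 for region third entries) as well as virtual and
 * physical addresses aligned to the mapping size.
 */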
static bool large_allowed(enum populate_mode mode)
{
	return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
{
	unsigned long size = end - addr;

	return machine.has_edat2 && large_allowed(mode) &&
	       IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
	       IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
			  enum populate_mode mode)
{
	unsigned long size = end - addr;

	return machine.has_edat1 && large_allowed(mode) &&
	       IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
	       IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}

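/*
 * Each pgtable_*_populate() function walks one level of the page-table
 * hierarchy for [addr, end): empty entries are either short-circuited
 * with KASAN zero shadow, mapped by a large entry when possible, or
 * given a newly allocated lower-level table before descending; existing
 * leaf entries are skipped.
 */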
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long pages = 0;
	pte_t *pte, entry;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (pte_none(*pte)) {
			if (kasan_pte_populate_zero_shadow(pte, mode))
				continue;
			entry = __pte(_pa(addr, PAGE_SIZE, mode));
			entry = set_pte_bit(entry, PAGE_KERNEL);
			if (!machine.has_nx)
				entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
			set_pte(pte, entry);
			pages++;
		}
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
				continue;
			if (can_large_pmd(pmd, addr, next, mode)) {
				entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL);
				if (!machine.has_nx)
					entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
				set_pmd(pmd, entry);
				pages++;
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next, pages = 0;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
				continue;
			if (can_large_pud(pud, addr, next, mode)) {
				entry = __pud(_pa(addr, _REGION3_SIZE, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL);
				if (!machine.has_nx)
					entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
				set_pud(pud, entry);
				pages++;
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
	if (mode == POPULATE_DIRECT)
		update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
				continue;
			pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
	}
}

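/*
 * Create page tables (and mappings, depending on @mode) for the
 * [addr, end) virtual address range, starting from the top-level pgd.
 * POPULATE_KASAN_SHALLOW deliberately stops after the top level.
 */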
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(&init_mm, addr);
	for (; addr < end; addr = next, pgd++) {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgd)) {
			if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
				continue;
			p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pgd, p4d);
		}
#ifdef CONFIG_KASAN
		if (mode == POPULATE_KASAN_SHALLOW)
			continue;
#endif
		pgtable_p4d_populate(pgd, addr, next, mode);
	}
}

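/*
 * Create the boot page tables: the identity mapping for all usable
 * memory, the kernel and amode31 mappings, the lowcore, abs_lowcore and
 * memcpy_real areas, and the KASAN shadow; then load the resulting ASCEs
 * into control registers 1, 7 and 13.
 */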
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	pgd_t *init_mm_pgd;
	int i;

	/*
	 * Mark whole memory as no-dat. This must be done before any
	 * page tables are allocated, otherwise kernel image builtin
	 * pages would be marked as dat tables.
	 */
	for_each_physmem_online_range(i, &start, &end)
		__arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);

	/*
	 * init_mm->pgd contains the virtual address of swapper_pg_dir.
	 * It is unusable at this stage since DAT is still off. Swap it
	 * for the physical address of swapper_pg_dir and restore the
	 * virtual address after all page tables are created.
	 */
	init_mm_pgd = init_mm.pgd;
	init_mm.pgd = (pgd_t *)swapper_pg_dir;

	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
	__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);

	/*
	 * To allow prefixing, the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0, first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
	for_each_physmem_usable_range(i, &start, &end) {
		pgtable_populate((unsigned long)__identity_va(start),
				 (unsigned long)__identity_va(end),
				 POPULATE_IDENTITY);
	}
	pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
	pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));

	kasan_populate_shadow(kernel_start, kernel_end);

	S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

	local_ctl_load(1, &S390_lowcore.kernel_asce);
	local_ctl_load(7, &S390_lowcore.user_asce);
	local_ctl_load(13, &S390_lowcore.kernel_asce);

	init_mm.context.asce = S390_lowcore.kernel_asce.val;
	init_mm.pgd = init_mm_pgd;
}