1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Page table handling routines for radix page table.
4 *
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6 */
7
8 #define pr_fmt(fmt) "radix-mmu: " fmt
9
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
14 #include <linux/of_fdt.h>
15 #include <linux/mm.h>
16 #include <linux/hugetlb.h>
17 #include <linux/string_helpers.h>
18 #include <linux/memory.h>
19
20 #include <asm/pgalloc.h>
21 #include <asm/mmu_context.h>
22 #include <asm/dma.h>
23 #include <asm/machdep.h>
24 #include <asm/mmu.h>
25 #include <asm/firmware.h>
26 #include <asm/powernv.h>
27 #include <asm/sections.h>
28 #include <asm/smp.h>
29 #include <asm/trace.h>
30 #include <asm/uaccess.h>
31 #include <asm/ultravisor.h>
32
33 #include <trace/events/thp.h>
34
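/*
* mmu_pid_bits: number of PID bits supported by the MMU, read from the
* device tree or defaulted in radix_init_pgtable().
* mmu_base_pid: lowest PID handed to the PID allocator; PIDs below it
* (e.g. the init_mm guard PID) are reserved.
* radix_mem_block_size: maximum mapping size used for the linear map, so
* that memory blocks can later be removed without splitting mappings.
*/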
35 unsigned int mmu_pid_bits;
36 unsigned int mmu_base_pid;
37 unsigned long radix_mem_block_size __ro_after_init;
38
39 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
40 unsigned long region_start, unsigned long region_end)
41 {
42 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
43 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
44 void *ptr;
45
46 if (region_start)
47 min_addr = region_start;
48 if (region_end)
49 max_addr = region_end;
50
51 ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
52
53 if (!ptr)
54 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
55 __func__, size, size, nid, &min_addr, &max_addr);
56
57 return ptr;
58 }
59
60 /*
61 * When allocating pud or pmd pointers, we allocate a complete page
62 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
63 * is to ensure that the page obtained from the memblock allocator
64 * can be completely used as a page table page and can be freed
65 * correctly when the page table entries are removed.
66 */
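/*
* Version of map_kernel_page() used before the slab allocator is up:
* intermediate page-table pages come straight from memblock via
* early_alloc_pgtable().
*/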
67 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
68 pgprot_t flags,
69 unsigned int map_page_size,
70 int nid,
71 unsigned long region_start, unsigned long region_end)
72 {
73 unsigned long pfn = pa >> PAGE_SHIFT;
74 pgd_t *pgdp;
75 p4d_t *p4dp;
76 pud_t *pudp;
77 pmd_t *pmdp;
78 pte_t *ptep;
79
80 pgdp = pgd_offset_k(ea);
81 p4dp = p4d_offset(pgdp, ea);
82 if (p4d_none(*p4dp)) {
83 pudp = early_alloc_pgtable(PAGE_SIZE, nid,
84 region_start, region_end);
85 p4d_populate(&init_mm, p4dp, pudp);
86 }
87 pudp = pud_offset(p4dp, ea);
88 if (map_page_size == PUD_SIZE) {
89 ptep = (pte_t *)pudp;
90 goto set_the_pte;
91 }
92 if (pud_none(*pudp)) {
93 pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
94 region_end);
95 pud_populate(&init_mm, pudp, pmdp);
96 }
97 pmdp = pmd_offset(pudp, ea);
98 if (map_page_size == PMD_SIZE) {
99 ptep = pmdp_ptep(pmdp);
100 goto set_the_pte;
101 }
102 if (!pmd_present(*pmdp)) {
103 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
104 region_start, region_end);
105 pmd_populate_kernel(&init_mm, pmdp, ptep);
106 }
107 ptep = pte_offset_kernel(pmdp, ea);
108
109 set_the_pte:
110 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
111 asm volatile("ptesync": : :"memory");
112 return 0;
113 }
114
115 /*
116 * nid, region_start, and region_end are hints to try to place the page
117 * table memory in the same node or region.
118 */
119 static int __map_kernel_page(unsigned long ea, unsigned long pa,
120 pgprot_t flags,
121 unsigned int map_page_size,
122 int nid,
123 unsigned long region_start, unsigned long region_end)
124 {
125 unsigned long pfn = pa >> PAGE_SHIFT;
126 pgd_t *pgdp;
127 p4d_t *p4dp;
128 pud_t *pudp;
129 pmd_t *pmdp;
130 pte_t *ptep;
131 /*
132 * Make sure the task size is correct as per the max address
133 */
134 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
135
136 #ifdef CONFIG_PPC_64K_PAGES
137 BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
138 #endif
139
140 if (unlikely(!slab_is_available()))
141 return early_map_kernel_page(ea, pa, flags, map_page_size,
142 nid, region_start, region_end);
143
144 /*
145 * The page table allocation functions should be taught to take a
146 * node, so that kernel page tables can be placed on the right
147 * nodes after boot.
148 */
149 pgdp = pgd_offset_k(ea);
150 p4dp = p4d_offset(pgdp, ea);
151 pudp = pud_alloc(&init_mm, p4dp, ea);
152 if (!pudp)
153 return -ENOMEM;
154 if (map_page_size == PUD_SIZE) {
155 ptep = (pte_t *)pudp;
156 goto set_the_pte;
157 }
158 pmdp = pmd_alloc(&init_mm, pudp, ea);
159 if (!pmdp)
160 return -ENOMEM;
161 if (map_page_size == PMD_SIZE) {
162 ptep = pmdp_ptep(pmdp);
163 goto set_the_pte;
164 }
165 ptep = pte_alloc_kernel(pmdp, ea);
166 if (!ptep)
167 return -ENOMEM;
168
169 set_the_pte:
170 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
171 asm volatile("ptesync": : :"memory");
172 return 0;
173 }
174
175 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
176 pgprot_t flags,
177 unsigned int map_page_size)
178 {
179 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
180 }
181
182 #ifdef CONFIG_STRICT_KERNEL_RWX
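/*
* Walk the existing kernel mapping for [start, end) and clear the given
* PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on each page, then flush the
* kernel TLB for that range.
*/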
183 static void radix__change_memory_range(unsigned long start, unsigned long end,
184 unsigned long clear)
185 {
186 unsigned long idx;
187 pgd_t *pgdp;
188 p4d_t *p4dp;
189 pud_t *pudp;
190 pmd_t *pmdp;
191 pte_t *ptep;
192
193 start = ALIGN_DOWN(start, PAGE_SIZE);
194 end = PAGE_ALIGN(end); // aligns up
195
196 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
197 start, end, clear);
198
199 for (idx = start; idx < end; idx += PAGE_SIZE) {
200 pgdp = pgd_offset_k(idx);
201 p4dp = p4d_offset(pgdp, idx);
202 pudp = pud_alloc(&init_mm, p4dp, idx);
203 if (!pudp)
204 continue;
205 if (pud_is_leaf(*pudp)) {
206 ptep = (pte_t *)pudp;
207 goto update_the_pte;
208 }
209 pmdp = pmd_alloc(&init_mm, pudp, idx);
210 if (!pmdp)
211 continue;
212 if (pmd_is_leaf(*pmdp)) {
213 ptep = pmdp_ptep(pmdp);
214 goto update_the_pte;
215 }
216 ptep = pte_alloc_kernel(pmdp, idx);
217 if (!ptep)
218 continue;
219 update_the_pte:
220 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
221 }
222
223 radix__flush_tlb_kernel_range(start, end);
224 }
225
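/*
* Make the kernel text and rodata read-only by dropping _PAGE_WRITE from
* the _stext..__init_begin mapping.
*/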
226 void radix__mark_rodata_ro(void)
227 {
228 unsigned long start, end;
229
230 start = (unsigned long)_stext;
231 end = (unsigned long)__init_begin;
232
233 radix__change_memory_range(start, end, _PAGE_WRITE);
234 }
235
236 void radix__mark_initmem_nx(void)
237 {
238 unsigned long start = (unsigned long)__init_begin;
239 unsigned long end = (unsigned long)__init_end;
240
241 radix__change_memory_range(start, end, _PAGE_EXEC);
242 }
243 #endif /* CONFIG_STRICT_KERNEL_RWX */
244
245 static inline void __meminit
246 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
247 {
248 char buf[10];
249
250 if (end <= start)
251 return;
252
253 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
254
255 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
256 exec ? " (exec)" : "");
257 }
258
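/*
* With STRICT_KERNEL_RWX, stop a mapping at __init_begin so that the
* text/rodata region and the init region end up in separate mappings and
* their permissions can later be changed independently.
*/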
259 static unsigned long next_boundary(unsigned long addr, unsigned long end)
260 {
261 #ifdef CONFIG_STRICT_KERNEL_RWX
262 if (addr < __pa_symbol(__init_begin))
263 return __pa_symbol(__init_begin);
264 #endif
265 return end;
266 }
267
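/*
* Map the physical range [start, end) into the kernel linear mapping,
* using the largest page size (1G, 2M or the base page size) that the
* alignment, the remaining gap and max_mapping_size allow. Ranges that
* overlap kernel or interrupt-vector text are mapped executable.
*/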
268 static int __meminit create_physical_mapping(unsigned long start,
269 unsigned long end,
270 unsigned long max_mapping_size,
271 int nid, pgprot_t _prot)
272 {
273 unsigned long vaddr, addr, mapping_size = 0;
274 bool prev_exec, exec = false;
275 pgprot_t prot;
276 int psize;
277
278 start = ALIGN(start, PAGE_SIZE);
279 end = ALIGN_DOWN(end, PAGE_SIZE);
280 for (addr = start; addr < end; addr += mapping_size) {
281 unsigned long gap, previous_size;
282 int rc;
283
284 gap = next_boundary(addr, end) - addr;
285 if (gap > max_mapping_size)
286 gap = max_mapping_size;
287 previous_size = mapping_size;
288 prev_exec = exec;
289
290 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
291 mmu_psize_defs[MMU_PAGE_1G].shift) {
292 mapping_size = PUD_SIZE;
293 psize = MMU_PAGE_1G;
294 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
295 mmu_psize_defs[MMU_PAGE_2M].shift) {
296 mapping_size = PMD_SIZE;
297 psize = MMU_PAGE_2M;
298 } else {
299 mapping_size = PAGE_SIZE;
300 psize = mmu_virtual_psize;
301 }
302
303 vaddr = (unsigned long)__va(addr);
304
305 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
306 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
307 prot = PAGE_KERNEL_X;
308 exec = true;
309 } else {
310 prot = _prot;
311 exec = false;
312 }
313
314 if (mapping_size != previous_size || exec != prev_exec) {
315 print_mapping(start, addr, previous_size, prev_exec);
316 start = addr;
317 }
318
319 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
320 if (rc)
321 return rc;
322
323 update_page_count(psize, 1);
324 }
325
326 print_mapping(start, addr, mapping_size, exec);
327 return 0;
328 }
329
330 static void __init radix_init_pgtable(void)
331 {
332 unsigned long rts_field;
333 phys_addr_t start, end;
334 u64 i;
335
336 /* We don't support the SLB with radix */
337 mmu_slb_size = 0;
338
339 /*
340 * Create the linear mapping
341 */
342 for_each_mem_range(i, &start, &end) {
343 /*
344 * The memblock allocator is up at this point, so the
345 * page tables will be allocated within the range. No
346 * need for a node (which we don't have yet).
347 */
348
349 if (end >= RADIX_VMALLOC_START) {
350 pr_warn("Outside the supported range\n");
351 continue;
352 }
353
354 WARN_ON(create_physical_mapping(start, end,
355 radix_mem_block_size,
356 -1, PAGE_KERNEL));
357 }
358
359 /* Find out how many PID bits are supported */
360 if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
361 if (!mmu_pid_bits)
362 mmu_pid_bits = 20;
363 mmu_base_pid = 1;
364 } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
365 if (!mmu_pid_bits)
366 mmu_pid_bits = 20;
367 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
368 /*
369 * When KVM is possible, we only use the top half of the
370 * PID space to avoid collisions between host and guest PIDs
371 * which can cause problems due to prefetch when exiting the
372 * guest with AIL=3
373 */
374 mmu_base_pid = 1 << (mmu_pid_bits - 1);
375 #else
376 mmu_base_pid = 1;
377 #endif
378 } else {
379 /* The guest uses the bottom half of the PID space */
380 if (!mmu_pid_bits)
381 mmu_pid_bits = 19;
382 mmu_base_pid = 1;
383 }
384
385 /*
386 * Allocate Partition table and process table for the
387 * host.
388 */
389 BUG_ON(PRTB_SIZE_SHIFT > 36);
390 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
391 /*
392 * Fill in the process table.
393 */
394 rts_field = radix__get_tree_size();
395 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
396
397 /*
398 * The init_mm context is given the first available (non-zero) PID,
399 * which is the "guard PID" and contains no page table. PIDR should
400 * never be set to zero because that duplicates the kernel address
401 * space at the 0x0... offset (quadrant 0)!
402 *
403 * An arbitrary PID that may later be allocated by the PID allocator
404 * for userspace processes must not be used either, because that
405 * would cause stale user mappings for that PID on CPUs outside of
406 * the TLB invalidation scheme (because it won't be in mm_cpumask).
407 *
408 * So permanently carve out one PID for the purpose of a guard PID.
409 */
410 init_mm.context.id = mmu_base_pid;
411 mmu_base_pid++;
412 }
413
414 static void __init radix_init_partition_table(void)
415 {
416 unsigned long rts_field, dw0, dw1;
417
418 mmu_partition_table_init();
419 rts_field = radix__get_tree_size();
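/*
* Partition table entry for LPID 0: dword 0 holds the host radix tree
* base and size plus PATB_HR (host radix), dword 1 holds the process
* table base and size plus PATB_GR (process/guest table is radix).
*/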
420 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
421 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
422 mmu_partition_table_set_entry(0, dw0, dw1, false);
423
424 pr_info("Initializing Radix MMU\n");
425 }
426
427 static int __init get_idx_from_shift(unsigned int shift)
428 {
429 int idx = -1;
430
431 switch (shift) {
432 case 0xc:
433 idx = MMU_PAGE_4K;
434 break;
435 case 0x10:
436 idx = MMU_PAGE_64K;
437 break;
438 case 0x15:
439 idx = MMU_PAGE_2M;
440 break;
441 case 0x1e:
442 idx = MMU_PAGE_1G;
443 break;
444 }
445 return idx;
446 }
447
448 static int __init radix_dt_scan_page_sizes(unsigned long node,
449 const char *uname, int depth,
450 void *data)
451 {
452 int size = 0;
453 int shift, idx;
454 unsigned int ap;
455 const __be32 *prop;
456 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
457
458 /* We are scanning "cpu" nodes only */
459 if (type == NULL || strcmp(type, "cpu") != 0)
460 return 0;
461
462 /* Find MMU PID size */
463 prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
464 if (prop && size == 4)
465 mmu_pid_bits = be32_to_cpup(prop);
466
467 /* Grab page size encodings */
468 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
469 if (!prop)
470 return 0;
471
472 pr_info("Page sizes from device-tree:\n");
473 for (; size >= 4; size -= 4, ++prop) {
474
475 struct mmu_psize_def *def;
476
477 /* top 3 bits are the AP encoding */
478 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
479 ap = be32_to_cpu(prop[0]) >> 29;
480 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
481
482 idx = get_idx_from_shift(shift);
483 if (idx < 0)
484 continue;
485
486 def = &mmu_psize_defs[idx];
487 def->shift = shift;
488 def->ap = ap;
489 }
490
491 /* needed ? */
492 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
493 return 1;
494 }
495
496 #ifdef CONFIG_MEMORY_HOTPLUG
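/*
* Find the LMB size advertised under ibm,dynamic-reconfiguration-memory.
* It becomes the upper bound on the linear-mapping page size, so that
* memory blocks can be removed without having to split mappings.
*/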
497 static int __init probe_memory_block_size(unsigned long node, const char *uname, int
498 depth, void *data)
499 {
500 unsigned long *mem_block_size = (unsigned long *)data;
501 const __be32 *prop;
502 int len;
503
504 if (depth != 1)
505 return 0;
506
507 if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
508 return 0;
509
510 prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
511
512 if (!prop || len < dt_root_size_cells * sizeof(__be32))
513 /*
514 * Nothing in the device tree
515 */
516 *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
517 else
518 *mem_block_size = of_read_number(prop, dt_root_size_cells);
519 return 1;
520 }
521
522 static unsigned long radix_memory_block_size(void)
523 {
524 unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
525
526 /*
527 * The OPAL firmware feature is set by now, so it is safe
528 * to test for it here.
529 */
530 if (firmware_has_feature(FW_FEATURE_OPAL))
531 mem_block_size = 1UL * 1024 * 1024 * 1024;
532 else
533 of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
534
535 return mem_block_size;
536 }
537
538 #else /* CONFIG_MEMORY_HOTPLUG */
539
540 static unsigned long radix_memory_block_size(void)
541 {
542 return 1UL * 1024 * 1024 * 1024;
543 }
544
545 #endif /* CONFIG_MEMORY_HOTPLUG */
546
547
548 void __init radix__early_init_devtree(void)
549 {
550 int rc;
551
552 /*
553 * Try to find the available page sizes in the device-tree
554 */
555 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
556 if (!rc) {
557 /*
558 * No page size details found in device tree.
559 * Let's assume we have 4k and 64k page support
560 */
561 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
562 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
563
564 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
565 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
566 }
567
568 /*
569 * Max mapping size used when mapping pages. We don't use
570 * ppc_md.memory_block_size() here because this gets called
571 * early, before the machine probe has run. Also, the
572 * pseries implementation only checks for ibm,lmb-size.
573 * All hypervisors supporting radix do expose that device
574 * tree node.
575 */
576 radix_mem_block_size = radix_memory_block_size();
577 return;
578 }
579
580 static void radix_init_amor(void)
581 {
582 /*
583 * In HV mode, we init AMOR (Authority Mask Override Register) so that
584 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
585 * Register), enable key 0 and set it to 1.
586 *
587 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
588 */
589 mtspr(SPRN_AMOR, (3ul << 62));
590 }
591
592 void __init radix__early_init_mmu(void)
593 {
594 unsigned long lpcr;
595
596 #ifdef CONFIG_PPC_64K_PAGES
597 /* PAGE_SIZE mappings */
598 mmu_virtual_psize = MMU_PAGE_64K;
599 #else
600 mmu_virtual_psize = MMU_PAGE_4K;
601 #endif
602
603 #ifdef CONFIG_SPARSEMEM_VMEMMAP
604 /* vmemmap mapping */
605 if (mmu_psize_defs[MMU_PAGE_2M].shift) {
606 /*
607 * map vmemmap using 2M if available
608 */
609 mmu_vmemmap_psize = MMU_PAGE_2M;
610 } else
611 mmu_vmemmap_psize = mmu_virtual_psize;
612 #endif
613 /*
614 * initialize page table size
615 */
616 __pte_index_size = RADIX_PTE_INDEX_SIZE;
617 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
618 __pud_index_size = RADIX_PUD_INDEX_SIZE;
619 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
620 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
621 __pte_table_size = RADIX_PTE_TABLE_SIZE;
622 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
623 __pud_table_size = RADIX_PUD_TABLE_SIZE;
624 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
625
626 __pmd_val_bits = RADIX_PMD_VAL_BITS;
627 __pud_val_bits = RADIX_PUD_VAL_BITS;
628 __pgd_val_bits = RADIX_PGD_VAL_BITS;
629
630 __kernel_virt_start = RADIX_KERN_VIRT_START;
631 __vmalloc_start = RADIX_VMALLOC_START;
632 __vmalloc_end = RADIX_VMALLOC_END;
633 __kernel_io_start = RADIX_KERN_IO_START;
634 __kernel_io_end = RADIX_KERN_IO_END;
635 vmemmap = (struct page *)RADIX_VMEMMAP_START;
636 ioremap_bot = IOREMAP_BASE;
637
638 #ifdef CONFIG_PCI
639 pci_io_base = ISA_IO_BASE;
640 #endif
641 __pte_frag_nr = RADIX_PTE_FRAG_NR;
642 __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
643 __pmd_frag_nr = RADIX_PMD_FRAG_NR;
644 __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
645
646 radix_init_pgtable();
647
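/*
* Bare metal: this CPU is the hypervisor, so enable radix host
* translation (LPCR[UPRT|HR]) and set up the partition table itself.
* Under an LPAR the hypervisor owns those, so just register the process
* table with it via radix_init_pseries().
*/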
648 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
649 lpcr = mfspr(SPRN_LPCR);
650 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
651 radix_init_partition_table();
652 radix_init_amor();
653 } else {
654 radix_init_pseries();
655 }
656
657 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
658
659 /* Switch to the guard PID before turning on MMU */
660 radix__switch_mmu_context(NULL, &init_mm);
661 tlbiel_all();
662 }
663
664 void radix__early_init_mmu_secondary(void)
665 {
666 unsigned long lpcr;
667 /*
668 * Update the partition table control register (PTCR) and enable UPRT/HR
669 */
670 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
671 lpcr = mfspr(SPRN_LPCR);
672 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
673
674 set_ptcr_when_no_uv(__pa(partition_tb) |
675 (PATB_SIZE_SHIFT - 12));
676
677 radix_init_amor();
678 }
679
680 radix__switch_mmu_context(NULL, &init_mm);
681 tlbiel_all();
682
683 /* Make sure userspace can't change the AMR */
684 mtspr(SPRN_UAMOR, 0);
685 }
686
687 void radix__mmu_cleanup_all(void)
688 {
689 unsigned long lpcr;
690
691 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
692 lpcr = mfspr(SPRN_LPCR);
693 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
694 set_ptcr_when_no_uv(0);
695 powernv_set_nmmu_ptcr(0);
696 radix__flush_tlb_all();
697 }
698 }
699
700 #ifdef CONFIG_MEMORY_HOTPLUG
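/*
* The free_*_table() helpers below release a page-table page only when
* every entry in it is empty, and then clear the entry in the parent
* table that pointed to it.
*/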
701 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
702 {
703 pte_t *pte;
704 int i;
705
706 for (i = 0; i < PTRS_PER_PTE; i++) {
707 pte = pte_start + i;
708 if (!pte_none(*pte))
709 return;
710 }
711
712 pte_free_kernel(&init_mm, pte_start);
713 pmd_clear(pmd);
714 }
715
716 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
717 {
718 pmd_t *pmd;
719 int i;
720
721 for (i = 0; i < PTRS_PER_PMD; i++) {
722 pmd = pmd_start + i;
723 if (!pmd_none(*pmd))
724 return;
725 }
726
727 pmd_free(&init_mm, pmd_start);
728 pud_clear(pud);
729 }
730
731 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
732 {
733 pud_t *pud;
734 int i;
735
736 for (i = 0; i < PTRS_PER_PUD; i++) {
737 pud = pud_start + i;
738 if (!pud_none(*pud))
739 return;
740 }
741
742 pud_free(&init_mm, pud_start);
743 p4d_clear(p4d);
744 }
745
746 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
747 unsigned long end)
748 {
749 unsigned long next;
750 pte_t *pte;
751
752 pte = pte_start + pte_index(addr);
753 for (; addr < end; addr = next, pte++) {
754 next = (addr + PAGE_SIZE) & PAGE_MASK;
755 if (next > end)
756 next = end;
757
758 if (!pte_present(*pte))
759 continue;
760
761 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
762 /*
763 * The vmemmap_free() and remove_section_mapping()
764 * codepaths call us with aligned addresses.
765 */
766 WARN_ONCE(1, "%s: unaligned range\n", __func__);
767 continue;
768 }
769
770 pte_clear(&init_mm, addr, pte);
771 }
772 }
773
774 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
775 unsigned long end)
776 {
777 unsigned long next;
778 pte_t *pte_base;
779 pmd_t *pmd;
780
781 pmd = pmd_start + pmd_index(addr);
782 for (; addr < end; addr = next, pmd++) {
783 next = pmd_addr_end(addr, end);
784
785 if (!pmd_present(*pmd))
786 continue;
787
788 if (pmd_is_leaf(*pmd)) {
789 if (!IS_ALIGNED(addr, PMD_SIZE) ||
790 !IS_ALIGNED(next, PMD_SIZE)) {
791 WARN_ONCE(1, "%s: unaligned range\n", __func__);
792 continue;
793 }
794 pte_clear(&init_mm, addr, (pte_t *)pmd);
795 continue;
796 }
797
798 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
799 remove_pte_table(pte_base, addr, next);
800 free_pte_table(pte_base, pmd);
801 }
802 }
803
804 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
805 unsigned long end)
806 {
807 unsigned long next;
808 pmd_t *pmd_base;
809 pud_t *pud;
810
811 pud = pud_start + pud_index(addr);
812 for (; addr < end; addr = next, pud++) {
813 next = pud_addr_end(addr, end);
814
815 if (!pud_present(*pud))
816 continue;
817
818 if (pud_is_leaf(*pud)) {
819 if (!IS_ALIGNED(addr, PUD_SIZE) ||
820 !IS_ALIGNED(next, PUD_SIZE)) {
821 WARN_ONCE(1, "%s: unaligned range\n", __func__);
822 continue;
823 }
824 pte_clear(&init_mm, addr, (pte_t *)pud);
825 continue;
826 }
827
828 pmd_base = (pmd_t *)pud_page_vaddr(*pud);
829 remove_pmd_table(pmd_base, addr, next);
830 free_pmd_table(pmd_base, pud);
831 }
832 }
833
834 static void __meminit remove_pagetable(unsigned long start, unsigned long end)
835 {
836 unsigned long addr, next;
837 pud_t *pud_base;
838 pgd_t *pgd;
839 p4d_t *p4d;
840
841 spin_lock(&init_mm.page_table_lock);
842
843 for (addr = start; addr < end; addr = next) {
844 next = pgd_addr_end(addr, end);
845
846 pgd = pgd_offset_k(addr);
847 p4d = p4d_offset(pgd, addr);
848 if (!p4d_present(*p4d))
849 continue;
850
851 if (p4d_is_leaf(*p4d)) {
852 if (!IS_ALIGNED(addr, P4D_SIZE) ||
853 !IS_ALIGNED(next, P4D_SIZE)) {
854 WARN_ONCE(1, "%s: unaligned range\n", __func__);
855 continue;
856 }
857
858 pte_clear(&init_mm, addr, (pte_t *)pgd);
859 continue;
860 }
861
862 pud_base = (pud_t *)p4d_page_vaddr(*p4d);
863 remove_pud_table(pud_base, addr, next);
864 free_pud_table(pud_base, p4d);
865 }
866
867 spin_unlock(&init_mm.page_table_lock);
868 radix__flush_tlb_kernel_range(start, end);
869 }
870
871 int __meminit radix__create_section_mapping(unsigned long start,
872 unsigned long end, int nid,
873 pgprot_t prot)
874 {
875 if (end >= RADIX_VMALLOC_START) {
876 pr_warn("Outside the supported range\n");
877 return -1;
878 }
879
880 return create_physical_mapping(__pa(start), __pa(end),
881 radix_mem_block_size, nid, prot);
882 }
883
884 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
885 {
886 remove_pagetable(start, end);
887 return 0;
888 }
889 #endif /* CONFIG_MEMORY_HOTPLUG */
890
891 #ifdef CONFIG_SPARSEMEM_VMEMMAP
892 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
893 pgprot_t flags, unsigned int map_page_size,
894 int nid)
895 {
896 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
897 }
898
899 int __meminit radix__vmemmap_create_mapping(unsigned long start,
900 unsigned long page_size,
901 unsigned long phys)
902 {
903 /* Create a PTE encoding */
904 unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
905 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
906 int ret;
907
908 if ((start + page_size) >= RADIX_VMEMMAP_END) {
909 pr_warn("Outside the supported range\n");
910 return -1;
911 }
912
913 ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
914 BUG_ON(ret);
915
916 return 0;
917 }
918
919 #ifdef CONFIG_MEMORY_HOTPLUG
920 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
921 {
922 remove_pagetable(start, start + page_size);
923 }
924 #endif
925 #endif
926
927 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
928
929 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
930 pmd_t *pmdp, unsigned long clr,
931 unsigned long set)
932 {
933 unsigned long old;
934
935 #ifdef CONFIG_DEBUG_VM
936 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
937 assert_spin_locked(pmd_lockptr(mm, pmdp));
938 #endif
939
940 old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
941 trace_hugepage_update(addr, old, clr, set);
942
943 return old;
944 }
945
946 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
947 pmd_t *pmdp)
948
949 {
950 pmd_t pmd;
951
952 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
953 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
954 VM_BUG_ON(pmd_devmap(*pmdp));
955 /*
956 * khugepaged calls this for a normal (non-huge) pmd
957 */
958 pmd = *pmdp;
959 pmd_clear(pmdp);
960
961 /*
962 * pmdp_collapse_flush needs to ensure that there are no parallel gup
963 * walks after this call. This is needed so that we can have a stable
964 * page refcount when collapsing a page. We don't allow collapsing a page
965 * if gup has taken a reference on it. We can ensure that by sending an
966 * IPI, because gup walks happen with IRQs disabled.
967 */
968 serialize_against_pte_lookup(vma->vm_mm);
969
970 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
971
972 return pmd;
973 }
974
975 /*
976 * For us pgtable_t is pte_t *. In order to save the deposited
977 * page table, we consider the allocated page table as a list
978 * head. On withdraw we need to make sure we zero out the used
979 * list_head memory area.
980 */
981 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
982 pgtable_t pgtable)
983 {
984 struct list_head *lh = (struct list_head *) pgtable;
985
986 assert_spin_locked(pmd_lockptr(mm, pmdp));
987
988 /* FIFO */
989 if (!pmd_huge_pte(mm, pmdp))
990 INIT_LIST_HEAD(lh);
991 else
992 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
993 pmd_huge_pte(mm, pmdp) = pgtable;
994 }
995
996 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
997 {
998 pte_t *ptep;
999 pgtable_t pgtable;
1000 struct list_head *lh;
1001
1002 assert_spin_locked(pmd_lockptr(mm, pmdp));
1003
1004 /* FIFO */
1005 pgtable = pmd_huge_pte(mm, pmdp);
1006 lh = (struct list_head *) pgtable;
1007 if (list_empty(lh))
1008 pmd_huge_pte(mm, pmdp) = NULL;
1009 else {
1010 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1011 list_del(lh);
1012 }
1013 ptep = (pte_t *) pgtable;
1014 *ptep = __pte(0);
1015 ptep++;
1016 *ptep = __pte(0);
1017 return pgtable;
1018 }
1019
1020 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1021 unsigned long addr, pmd_t *pmdp)
1022 {
1023 pmd_t old_pmd;
1024 unsigned long old;
1025
1026 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1027 old_pmd = __pmd(old);
1028 return old_pmd;
1029 }
1030
1031 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1032
1033 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1034 pte_t entry, unsigned long address, int psize)
1035 {
1036 struct mm_struct *mm = vma->vm_mm;
1037 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
1038 _PAGE_RW | _PAGE_EXEC);
1039
1040 unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1041 /*
1042 * To avoid an NMMU hang while relaxing access, we need to mark
1043 * the pte invalid in between.
1044 */
1045 if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
1046 unsigned long old_pte, new_pte;
1047
1048 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1049 /*
1050 * new value of pte
1051 */
1052 new_pte = old_pte | set;
1053 radix__flush_tlb_page_psize(mm, address, psize);
1054 __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1055 } else {
1056 __radix_pte_update(ptep, 0, set);
1057 /*
1058 * Book3S does not require a TLB flush when relaxing access
1059 * restrictions when the address space is not attached to a
1060 * NMMU, because the core MMU will reload the pte after taking
1061 * an access fault, which is defined by the architecture.
1062 */
1063 }
1064 /* See ptesync comment in radix__set_pte_at */
1065 }
1066
1067 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1068 unsigned long addr, pte_t *ptep,
1069 pte_t old_pte, pte_t pte)
1070 {
1071 struct mm_struct *mm = vma->vm_mm;
1072
1073 /*
1074 * To avoid an NMMU hang while relaxing access we need to flush the TLB
1075 * before we set the new value. We need to do this only for radix, because
1076 * hash translation already flushes when updating the Linux pte.
1077 */
1078 if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1079 (atomic_read(&mm->context.copros) > 0))
1080 radix__flush_tlb_page(vma, addr);
1081
1082 set_pte_at(mm, addr, ptep, pte);
1083 }
1084
1085 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1086 {
1087 pte_t *ptep = (pte_t *)pud;
1088 pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1089
1090 if (!radix_enabled())
1091 return 0;
1092
1093 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1094
1095 return 1;
1096 }
1097
1098 int pud_clear_huge(pud_t *pud)
1099 {
1100 if (pud_huge(*pud)) {
1101 pud_clear(pud);
1102 return 1;
1103 }
1104
1105 return 0;
1106 }
1107
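/*
* Tear down the PMD (and any PTE) tables under this PUD; used by the
* generic huge vmap/ioremap code before it installs a huge PUD over a
* region that still has lower-level tables.
*/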
1108 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1109 {
1110 pmd_t *pmd;
1111 int i;
1112
1113 pmd = (pmd_t *)pud_page_vaddr(*pud);
1114 pud_clear(pud);
1115
1116 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1117
1118 for (i = 0; i < PTRS_PER_PMD; i++) {
1119 if (!pmd_none(pmd[i])) {
1120 pte_t *pte;
1121 pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1122
1123 pte_free_kernel(&init_mm, pte);
1124 }
1125 }
1126
1127 pmd_free(&init_mm, pmd);
1128
1129 return 1;
1130 }
1131
1132 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1133 {
1134 pte_t *ptep = (pte_t *)pmd;
1135 pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1136
1137 if (!radix_enabled())
1138 return 0;
1139
1140 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1141
1142 return 1;
1143 }
1144
1145 int pmd_clear_huge(pmd_t *pmd)
1146 {
1147 if (pmd_huge(*pmd)) {
1148 pmd_clear(pmd);
1149 return 1;
1150 }
1151
1152 return 0;
1153 }
1154
1155 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1156 {
1157 pte_t *pte;
1158
1159 pte = (pte_t *)pmd_page_vaddr(*pmd);
1160 pmd_clear(pmd);
1161
1162 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1163
1164 pte_free_kernel(&init_mm, pte);
1165
1166 return 1;
1167 }
1168