// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
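
/*
 * The CONT PTE and CONT PMD columns follow from the number of entries
 * that may share the contiguous hint bit at each level: e.g. with 4K
 * base pages, 16 contiguous ptes make a 64K mapping and 16 contiguous
 * pmds make a 32M mapping.
 */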

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

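	/*
	 * Order of the largest supported huge page: e.g. with 4K base
	 * pages and PUD sections, PUD_SHIFT - PAGE_SHIFT = 30 - 12 =
	 * order 18 (1G); with 16K base pages and contiguous pmds,
	 * CONT_PMD_SHIFT - PAGE_SHIFT = 30 - 14 = order 16 (also 1G).
	 */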
	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages, which cannot be allocated via the
	 * page allocator. Just warn if that assumption is
	 * ever broken.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

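/*
 * Work out how many page-table entries the contiguous mapping of @addr
 * spans, by checking which level @ptep points into: a @ptep that
 * aliases the pmd entry denotes a set of CONT_PMDS block mappings;
 * anything else is treated as CONT_PTES contiguous ptes.
 */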
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

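/*
 * For a contiguous mapping the hardware may have set the dirty or
 * accessed bit on any entry in the set, so fold those bits from every
 * entry into the value returned for the head pte.
 */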
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		ptep_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
{
	VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));

	return page_folio(pfn_to_page(swp_offset_pfn(entry)));
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	if (!pte_present(pte)) {
		struct folio *folio;

		folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
		ncontig = num_contig_ptes(folio_size(folio), &pgsize);

		for (i = 0; i < ncontig; i++, ptep++)
			set_pte_at(mm, addr, ptep, pte);
		return;
	}

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

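	/* Break the old contiguous set before making the new one. */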
	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

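/*
 * Find or allocate the entry that will map a huge page of size @sz at
 * @addr: the pud itself for PUD_SIZE, a pmd for PMD_SIZE (sharing
 * another process's pmd table where possible) and for CONT_PMD_SIZE,
 * and a pte for CONT_PTE_SIZE.
 */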
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

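/*
 * Reported to the generic hugetlb walkers so that, when the entry at
 * the level above is empty, they can skip straight to the last huge
 * page it would cover rather than probing every page: e.g. a missing
 * pud lets a PMD_SIZE walk advance by up to PUD_SIZE - PMD_SIZE in one
 * step.
 */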
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission need only be
 * checked on the first pte in the set; the dirty and young bits,
 * however, must be compared against every contiguous pte, since the
 * hardware may have updated any one of them.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

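/*
 * Called by the core hugetlb code to vet huge page sizes requested on
 * the command line, e.g. via hugepagesz=.
 */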
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}