/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

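/*
 * No hugepage-only address ranges exist on s390, page table freeing
 * works with the generic helper, and hugepage support depends on the
 * enhanced DAT facility (EDAT1), which provides 1 MB segment pages.
 */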
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range
#define hugepages_supported()			(MACHINE_HAS_EDAT1)

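/*
 * Implemented in arch/s390/mm/hugetlbpage.c: a "huge pte" is really a
 * segment table entry (1 MB, EDAT1) or a region third table entry
 * (2 GB, EDAT2), so these helpers convert between the pte_t format
 * seen by common code and the hardware entry format.
 */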
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

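/*
 * Illustrative numbers, not from the source: with HPAGE_SIZE = 1 MB,
 * ~HPAGE_MASK is 0xfffff, so addr = 0x12345000 leaves 0x45000 after
 * the AND in prepare_hugepage_range() and draws -EINVAL, while
 * addr = 0x12300000 with len = 0x200000 passes both checks.
 */

/*
 * On s390, PG_arch_1 records that the storage keys of the page have
 * already been initialized; clearing it when a huge page is released
 * makes sure the keys are set up again before the page is reused.
 */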
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

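/*
 * A cleared "huge pte" is not simply zero: region third and segment
 * table entries have distinct empty/invalid bit patterns, so the
 * entry type of *ptep decides which one is written.
 */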
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
	else
		pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}

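/*
 * huge_ptep_get_and_clear() performs the TLB invalidation as part of
 * the exchange, so the flush variant can simply reuse it without an
 * extra flush of its own.
 */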
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

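/*
 * A valid, attached entry must be invalidated (which includes the TLB
 * flush) before it may be replaced, so updating the access bits means
 * clear-then-reinstall rather than an in-place modification.
 *
 * Minimal usage sketch (hypothetical caller, loosely modeled on the
 * generic hugetlb write-fault path; vma, addr and ptep are assumed to
 * describe an existing huge mapping):
 *
 *	pte_t entry = huge_pte_mkdirty(huge_pte_mkwrite(huge_ptep_get(ptep)));
 *	int changed = huge_ptep_set_access_flags(vma, addr, ptep, entry, 1);
 *
 * The return value tells the caller whether the entry was rewritten.
 */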
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	int changed = !pte_same(huge_ptep_get(ptep), pte);

	if (changed) {
		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	return changed;
}

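/*
 * Write protection follows the same clear-then-reinstall pattern as
 * huge_ptep_set_access_flags() above.
 */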
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);

	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

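/*
 * huge_ptep_get() hands huge entries to common code in pte_t format,
 * so the remaining helpers can simply delegate to the regular pte
 * primitives.
 */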
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

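/*
 * Gigantic (2 GB) pages can be allocated from and freed back to the
 * hugetlb pool at runtime on s390, hence unconditionally true.
 */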
static inline bool gigantic_page_runtime_supported(void)
{
	return true;
}

#endif /* _ASM_S390_HUGETLB_H */