/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
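
/*
 * Illustrative sketch (not part of this header): a device driver's
 * ->huge_fault() handler can use vmf_insert_pfn_pmd() to map a whole
 * PMD-sized pfn range at once, falling back when the fault is not
 * PMD-sized. my_drv_huge_fault() and my_drv_addr_to_pfn() are
 * hypothetical names standing in for the driver's own code:
 *
 *	static vm_fault_t my_drv_huge_fault(struct vm_fault *vmf,
 *					    enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		pfn = my_drv_addr_to_pfn(vmf);
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */
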
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs that are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;
	/*
	 * For dax vmas, try to always use hugepage mappings. If the kernel
	 * does not support hugepages, fsdax mappings will fall back to
	 * PAGE_SIZE mappings, and device-dax namespaces, which try to
	 * guarantee a given mapping size, will fail to enable.
	 */
	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
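
/*
 * Illustrative sketch: a fault path would typically combine the two
 * checks above before attempting a PMD-sized mapping (loosely modelled
 * on the anonymous-fault path; error handling omitted):
 *
 *	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *
 *	if (__transparent_hugepage_enabled(vma) &&
 *	    transhuge_vma_suitable(vma, haddr))
 *		ret = do_huge_pmd_anonymous_page(vmf);
 *	else
 *		ret = VM_FAULT_FALLBACK;
 */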

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
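
/*
 * Illustrative sketch: the caller of split_huge_page() must hold the
 * page lock; a non-zero return means the split failed (e.g. the page
 * still had extra pins) and the page remains huge. keep_huge is a
 * hypothetical label in the caller:
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 *	if (ret)
 *		goto keep_huge;
 */
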
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

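/*
 * Illustrative sketch: callers that want to operate at pte granularity
 * simply invoke the macro unconditionally before walking the ptes; it
 * is a no-op when the pmd is not huge (loosely modelled on callers
 * such as the mprotect/mremap paths):
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	// pmd no longer maps a huge page here
 */
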
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
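
/*
 * Typical usage (illustrative sketch): a non-NULL return means the
 * page table lock is held and the pmd is guaranteed to stay a huge
 * (or huge swap/devmap) entry until the caller drops the lock.
 * do_something_with_huge_pmd() and do_the_pte_level_walk() are
 * hypothetical stand-ins for the caller's own logic:
 *
 *	spinlock_t *ptl;
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		do_something_with_huge_pmd(pmd);
 *		spin_unlock(ptl);
 *	} else {
 *		do_the_pte_level_walk();
 *	}
 */
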
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
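
/*
 * Illustrative sketch: fault paths build the huge entry with
 * mk_huge_pmd() before installing it (loosely modelled on the
 * anonymous THP fault path; haddr is the PMD-aligned fault address):
 *
 *	pmd_t entry = mk_huge_pmd(page, vma->vm_page_prot);
 *
 *	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 *	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
 */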

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The (global or memcg) deferred list lives in the second tail
	 * page: the corresponding space in the first tail page is
	 * occupied by compound_head and the other compound metadata.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */