/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
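
/*
 * As a worked example (assuming 4K base pages, so PMD_ORDER == 9):
 *
 *	THP_ORDERS_ALL_ANON == (BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3fc
 *
 * i.e. orders 2 (16K) through 9 (2M) are candidate anonymous THP sizes.
 */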

/*
 * Mask of all large folio orders supported for file THP.
 */
#define THP_ORDERS_ALL_FILE	(BIT(PMD_ORDER) | BIT(PUD_ORDER))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
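
/*
 * Under the same illustrative assumption (4K pages, four-level tables, so
 * PUD_ORDER == 18), THP_ORDERS_ALL_FILE == BIT(9) | BIT(18): file THP is
 * limited to PMD-sized (2M) and PUD-sized (1G) mappings here.
 */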

#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
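
/*
 * A minimal usage sketch (assuming a fault-path caller, hence in_pf and
 * enforce_sysfs both true): asking whether a PMD-sized THP may be used for
 * a vma looks roughly like
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags, false, true, true,
 *				    PMD_ORDER))
 *		... try to install a PMD-mapped THP ...
 */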

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline bool hugepage_flags_enabled(void)
{
	/*
	 * We cover both the anon and the file-backed case here; we must return
	 * true if globally enabled, even when all anon sizes are set to never.
	 * So we don't need to look at huge_anon_orders_inherit.
	 */
	return hugepage_global_enabled() ||
	       huge_anon_orders_always ||
	       huge_anon_orders_madvise;
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
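
/*
 * Example traversal from highest to lowest order (illustrative), starting
 * from orders == BIT(9) | BIT(4) | BIT(2):
 *
 *	order = highest_order(orders);		-> 9
 *	order = next_order(&orders, order);	-> 4, orders == BIT(4) | BIT(2)
 *	order = next_order(&orders, order);	-> 2, orders == BIT(2)
 *	order = next_order(&orders, order);	-> -1, orders == 0
 */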

/*
 * Do the following checks:
 *   - For file vma, check if the linear page offset of vma is
 *     order-aligned within the file.  The hugepage is
 *     guaranteed to be order-aligned within the file, but we must
 *     check that the order-aligned addresses in the VMA map to
 *     order-aligned offsets within the file, else the hugepage will
 *     not be mappable.
 *   - For all vmas, check if the haddr is in an aligned hugepage
 *     area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
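
/*
 * For instance (illustrative values, 4K pages assumed): a file-backed VMA
 * with vm_start == 0x400000 and vm_pgoff == 0x200 places file offset
 * 0x200000 at address 0x400000.  For order == 9 the check is
 * IS_ALIGNED(0x400 - 0x200, 512), which holds, so PMD-aligned addresses
 * map to PMD-aligned file offsets and the order is suitable, provided the
 * aligned range also fits inside [vm_start, vm_end).
 */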

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}
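
/*
 * E.g. with a hypothetical input of orders == BIT(9) | BIT(4) | BIT(3),
 * if only the order-9 alignment check fails for addr, the loop stops at
 * order 4 and BIT(4) | BIT(3) is returned; if no order is suitable, the
 * result is 0.
 */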

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @smaps: whether answer will be used for smaps file
 * @in_pf: whether answer will be used by page fault handler
 * @enforce_sysfs: whether sysfs config should be taken into account
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags, bool smaps,
				       bool in_pf, bool enforce_sysfs,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if (enforce_sysfs && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
					  enforce_sysfs, orders);
}
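
/*
 * Sketch of how an anonymous-fault caller might combine this with the
 * suitability filter above (illustrative, not a prescription):
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true,
 *					  true, THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 *	... allocate the largest folio order left in orders ...
 */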

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}
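
/*
 * Note: a pmd that is neither none nor present encodes a non-present
 * (swap-style) entry, typically a PMD migration entry, which is why the
 * lock helper below also treats is_swap_pmd() pmds as "huge".
 */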

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
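
/*
 * The usual calling pattern (a minimal sketch; mmap_lock already held):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge or swap pmd ...
 *		spin_unlock(ptl);
 *	}
 *
 * A NULL return means the pmd is not huge and the caller should fall back
 * to the pte-level path.
 */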

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags, bool smaps,
					bool in_pf, bool enforce_sysfs,
					unsigned long orders)
{
	return 0;
}

static inline void folio_prep_large_rmappable(struct folio *folio) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP
 * swapping due to implementation limitations (e.g. arm64 MTE) can
 * override this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
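
/*
 * For example, an architecture override would live in its own asm/pgtable.h
 * roughly as follows (sketch modelled on arm64, where MTE tags cannot be
 * preserved across THP swap-out):
 *
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 */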

#endif /* _LINUX_HUGE_MM_H */