/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
#include <linux/shmem_fs.h>


#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* khugepaged interface; the implementations live outside this header. */
extern struct attribute_group khugepaged_attr_group;

extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
/* Without CONFIG_SHMEM there is nothing to collapse; compile to a no-op. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif
28 
/* True if THP is enabled either system-wide or for madvise()d regions. */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
/* True if THP is enabled system-wide ("always"). */
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
/* True if THP is enabled only for MADV_HUGEPAGE regions ("madvise"). */
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
/* True if khugepaged may enter direct reclaim/compaction (defrag). */
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
42 
khugepaged_fork(struct mm_struct * mm,struct mm_struct * oldmm)43 static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
44 {
45 	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
46 		return __khugepaged_enter(mm);
47 	return 0;
48 }
49 
khugepaged_exit(struct mm_struct * mm)50 static inline void khugepaged_exit(struct mm_struct *mm)
51 {
52 	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
53 		__khugepaged_exit(mm);
54 }
55 
khugepaged_enter(struct vm_area_struct * vma,unsigned long vm_flags)56 static inline int khugepaged_enter(struct vm_area_struct *vma,
57 				   unsigned long vm_flags)
58 {
59 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
60 		if ((khugepaged_always() ||
61 		     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
62 		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
63 		    !(vm_flags & VM_NOHUGEPAGE) &&
64 		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
65 			if (__khugepaged_enter(vma->vm_mm))
66 				return -ENOMEM;
67 	return 0;
68 }
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* !CONFIG_TRANSPARENT_HUGEPAGE: khugepaged does not exist; always succeed. */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: nothing to unregister. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: registration is a successful no-op. */
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: vma-merge registration is a no-op. */
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
/* !CONFIG_TRANSPARENT_HUGEPAGE: nothing to collapse. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
91 
/* !CONFIG_TRANSPARENT_HUGEPAGE: no watermark tracking needed. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */
98