xref: /linux/include/linux/migrate.h (revision 52338415)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
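
/*
 * Illustrative sketch (not part of this header): a filesystem whose pages
 * carry no private state can implement ->migratepage() by pointing it at
 * migrate_page(), declared below under CONFIG_MIGRATION (shmem does this).
 * The aops name is hypothetical.
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * On success the callback returns MIGRATEPAGE_SUCCESS (0); a negative errno
 * tells the core to keep using the old page.
 */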

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}
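
/*
 * Illustrative sketch (not part of this header): new_page_nodemask() is the
 * usual allocation helper behind a new_page_t callback handed to
 * migrate_pages(). The callback name below is hypothetical; here the target
 * node is smuggled through the private argument.
 *
 *	static struct page *alloc_migration_target(struct page *page,
 *						   unsigned long private)
 *	{
 *		int nid = (int)private;
 *
 *		return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
 *	}
 */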

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */
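
/*
 * Illustrative sketch (not part of this header): the typical migration loop,
 * roughly what in-tree callers such as memory hotplug follow. The callback
 * name and target_nid are hypothetical; a NULL free_page_t is allowed.
 *
 *	LIST_HEAD(pagelist);
 *	int ret;
 *
 *	migrate_prep();
 *	(isolate candidate pages onto &pagelist, e.g. via isolate_lru_page())
 *
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    target_nid, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */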

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
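
/*
 * Illustrative sketch (not part of this header): a driver whose non-LRU pages
 * should be migratable (as zsmalloc and virtio-balloon do) supplies an
 * address_space whose a_ops implement isolate_page/migratepage/putback_page
 * and then marks each page with __SetPageMovable(). Names prefixed with
 * example_ are hypothetical.
 *
 *	static const struct address_space_operations example_aops = {
 *		.isolate_page	= example_isolate,
 *		.migratepage	= example_migratepage,
 *		.putback_page	= example_putback,
 *	};
 *
 *	example_mapping->a_ops = &example_aops;
 *	__SetPageMovable(page, example_mapping);
 *	...
 *	__ClearPageMovable(page);	(before the driver frees the page)
 */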

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for 32-bit PAE architectures: there an unsigned long may not have
 * enough bits to store both the physical address and the flags below. So far
 * we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
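
/*
 * Illustrative sketch (not part of this header): how a migrate PFN entry is
 * encoded for a destination page and decoded for a source entry. dpage and
 * src_mpfn are hypothetical locals.
 *
 *	unsigned long dst_mpfn = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *
 *	struct page *spage = migrate_pfn_to_page(src_mpfn);
 *	if (spage && (src_mpfn & MIGRATE_PFN_MIGRATE))
 *		(copy data from spage to dpage)
 */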

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the dst array must not be modified
	 * after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
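
/*
 * Illustrative sketch (not part of this header): the three-step device
 * migration flow used by HMM-style drivers (e.g. nouveau). src_pfns and
 * dst_pfns are hypothetical caller-provided arrays; error handling is elided.
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.start	= start,
 *		.end	= end,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	(allocate destination pages, fill args.dst, copy the data)
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */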

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */