xref: /linux/mm/mempolicy.c (revision dd093fb0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy an process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * preferred many Try a set of nodes first before normal fallback. This is
35  *                similar to preferred without the special case.
36  *
37  * default        Allocate on the local node first, or when on a VMA
38  *                use the process policy. This is what Linux always did
39  *		  in a NUMA aware kernel and still does by, ahem, default.
40  *
41  * The process policy is applied for most non interrupt memory allocations
42  * in that process' context. Interrupts ignore the policies and always
43  * try to allocate on the local CPU. The VMA policy is only applied for memory
44  * allocations for a VMA in the VM.
45  *
46  * Currently there are a few corner cases in swapping where the policy
47  * is not applied, but the majority should be handled. When process policy
48  * is used it is not remembered over swap outs/swap ins.
49  *
50  * Only the highest zone in the zone hierarchy gets policied. Allocations
51  * requesting a lower zone just use default policy. This implies that
52  * on systems with highmem kernel lowmem allocation don't get policied.
53  * Same with GFP_DMA allocations.
54  *
55  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56  * all users and remembered even when nobody has memory mapped.
57  */
58 
59 /* Notebook:
60    fix mmap readahead to honour policy and enable policy for any page cache
61    object
62    statistics for bigpages
63    global policy for page cache? currently it uses process policy. Requires
64    first item above.
65    handle mremap for shared memory (currently ignored for the policy)
66    grows down?
67    make bind policy root only? It can trigger oom much faster and the
68    kernel is not always grateful with that.
69 */
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/mempolicy.h>
74 #include <linux/pagewalk.h>
75 #include <linux/highmem.h>
76 #include <linux/hugetlb.h>
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/sched/mm.h>
80 #include <linux/sched/numa_balancing.h>
81 #include <linux/sched/task.h>
82 #include <linux/nodemask.h>
83 #include <linux/cpuset.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/export.h>
87 #include <linux/nsproxy.h>
88 #include <linux/interrupt.h>
89 #include <linux/init.h>
90 #include <linux/compat.h>
91 #include <linux/ptrace.h>
92 #include <linux/swap.h>
93 #include <linux/seq_file.h>
94 #include <linux/proc_fs.h>
95 #include <linux/migrate.h>
96 #include <linux/ksm.h>
97 #include <linux/rmap.h>
98 #include <linux/security.h>
99 #include <linux/syscalls.h>
100 #include <linux/ctype.h>
101 #include <linux/mm_inline.h>
102 #include <linux/mmu_notifier.h>
103 #include <linux/printk.h>
104 #include <linux/swapops.h>
105 
106 #include <asm/tlbflush.h>
107 #include <asm/tlb.h>
108 #include <linux/uaccess.h>
109 
110 #include "internal.h"
111 
112 /* Internal flags */
113 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
114 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115 
116 static struct kmem_cache *policy_cache;
117 static struct kmem_cache *sn_cache;
118 
119 /* Highest zone. An specific allocation for a zone below that is not
120    policied. */
121 enum zone_type policy_zone = 0;
122 
123 /*
124  * run-time system-wide default policy => local allocation
125  */
126 static struct mempolicy default_policy = {
127 	.refcnt = ATOMIC_INIT(1), /* never free it */
128 	.mode = MPOL_LOCAL,
129 };
130 
131 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
132 
133 /**
134  * numa_map_to_online_node - Find closest online node
135  * @node: Node id to start the search
136  *
137  * Lookup the next closest node by distance if @nid is not online.
138  *
139  * Return: this @node if it is online, otherwise the closest node by distance
140  */
141 int numa_map_to_online_node(int node)
142 {
143 	int min_dist = INT_MAX, dist, n, min_node;
144 
145 	if (node == NUMA_NO_NODE || node_online(node))
146 		return node;
147 
148 	min_node = node;
149 	for_each_online_node(n) {
150 		dist = node_distance(node, n);
151 		if (dist < min_dist) {
152 			min_dist = dist;
153 			min_node = n;
154 		}
155 	}
156 
157 	return min_node;
158 }
159 EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160 
161 struct mempolicy *get_task_policy(struct task_struct *p)
162 {
163 	struct mempolicy *pol = p->mempolicy;
164 	int node;
165 
166 	if (pol)
167 		return pol;
168 
169 	node = numa_node_id();
170 	if (node != NUMA_NO_NODE) {
171 		pol = &preferred_node_policy[node];
172 		/* preferred_node_policy is not initialised early in boot */
173 		if (pol->mode)
174 			return pol;
175 	}
176 
177 	return &default_policy;
178 }
179 
180 static const struct mempolicy_operations {
181 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
183 } mpol_ops[MPOL_MAX];
184 
185 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186 {
187 	return pol->flags & MPOL_MODE_FLAGS;
188 }
189 
190 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
191 				   const nodemask_t *rel)
192 {
193 	nodemask_t tmp;
194 	nodes_fold(tmp, *orig, nodes_weight(*rel));
195 	nodes_onto(*ret, tmp, *rel);
196 }
197 
198 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
199 {
200 	if (nodes_empty(*nodes))
201 		return -EINVAL;
202 	pol->nodes = *nodes;
203 	return 0;
204 }
205 
206 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
207 {
208 	if (nodes_empty(*nodes))
209 		return -EINVAL;
210 
211 	nodes_clear(pol->nodes);
212 	node_set(first_node(*nodes), pol->nodes);
213 	return 0;
214 }
215 
216 /*
217  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
218  * any, for the new policy.  mpol_new() has already validated the nodes
219  * parameter with respect to the policy mode and flags.
220  *
221  * Must be called holding task's alloc_lock to protect task's mems_allowed
222  * and mempolicy.  May also be called holding the mmap_lock for write.
223  */
224 static int mpol_set_nodemask(struct mempolicy *pol,
225 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
226 {
227 	int ret;
228 
229 	/*
230 	 * Default (pol==NULL) resp. local memory policies are not a
231 	 * subject of any remapping. They also do not need any special
232 	 * constructor.
233 	 */
234 	if (!pol || pol->mode == MPOL_LOCAL)
235 		return 0;
236 
237 	/* Check N_MEMORY */
238 	nodes_and(nsc->mask1,
239 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
240 
241 	VM_BUG_ON(!nodes);
242 
243 	if (pol->flags & MPOL_F_RELATIVE_NODES)
244 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
245 	else
246 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
247 
248 	if (mpol_store_user_nodemask(pol))
249 		pol->w.user_nodemask = *nodes;
250 	else
251 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
252 
253 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
254 	return ret;
255 }
256 
257 /*
258  * This function just creates a new policy, does some check and simple
259  * initialization. You must invoke mpol_set_nodemask() to set nodes.
260  */
261 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262 				  nodemask_t *nodes)
263 {
264 	struct mempolicy *policy;
265 
266 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
267 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268 
269 	if (mode == MPOL_DEFAULT) {
270 		if (nodes && !nodes_empty(*nodes))
271 			return ERR_PTR(-EINVAL);
272 		return NULL;
273 	}
274 	VM_BUG_ON(!nodes);
275 
276 	/*
277 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
278 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
279 	 * All other modes require a valid pointer to a non-empty nodemask.
280 	 */
281 	if (mode == MPOL_PREFERRED) {
282 		if (nodes_empty(*nodes)) {
283 			if (((flags & MPOL_F_STATIC_NODES) ||
284 			     (flags & MPOL_F_RELATIVE_NODES)))
285 				return ERR_PTR(-EINVAL);
286 
287 			mode = MPOL_LOCAL;
288 		}
289 	} else if (mode == MPOL_LOCAL) {
290 		if (!nodes_empty(*nodes) ||
291 		    (flags & MPOL_F_STATIC_NODES) ||
292 		    (flags & MPOL_F_RELATIVE_NODES))
293 			return ERR_PTR(-EINVAL);
294 	} else if (nodes_empty(*nodes))
295 		return ERR_PTR(-EINVAL);
296 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
297 	if (!policy)
298 		return ERR_PTR(-ENOMEM);
299 	atomic_set(&policy->refcnt, 1);
300 	policy->mode = mode;
301 	policy->flags = flags;
302 	policy->home_node = NUMA_NO_NODE;
303 
304 	return policy;
305 }
306 
307 /* Slow path of a mpol destructor. */
308 void __mpol_put(struct mempolicy *p)
309 {
310 	if (!atomic_dec_and_test(&p->refcnt))
311 		return;
312 	kmem_cache_free(policy_cache, p);
313 }
314 
315 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
316 {
317 }
318 
319 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
320 {
321 	nodemask_t tmp;
322 
323 	if (pol->flags & MPOL_F_STATIC_NODES)
324 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 	else {
328 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329 								*nodes);
330 		pol->w.cpuset_mems_allowed = *nodes;
331 	}
332 
333 	if (nodes_empty(tmp))
334 		tmp = *nodes;
335 
336 	pol->nodes = tmp;
337 }
338 
339 static void mpol_rebind_preferred(struct mempolicy *pol,
340 						const nodemask_t *nodes)
341 {
342 	pol->w.cpuset_mems_allowed = *nodes;
343 }
344 
345 /*
346  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347  *
348  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349  * policies are protected by task->mems_allowed_seq to prevent a premature
350  * OOM/allocation failure due to parallel nodemask modification.
351  */
352 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
353 {
354 	if (!pol || pol->mode == MPOL_LOCAL)
355 		return;
356 	if (!mpol_store_user_nodemask(pol) &&
357 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
358 		return;
359 
360 	mpol_ops[pol->mode].rebind(pol, newmask);
361 }
362 
363 /*
364  * Wrapper for mpol_rebind_policy() that just requires task
365  * pointer, and updates task mempolicy.
366  *
367  * Called with task's alloc_lock held.
368  */
369 
370 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
371 {
372 	mpol_rebind_policy(tsk->mempolicy, new);
373 }
374 
375 /*
376  * Rebind each vma in mm to new nodemask.
377  *
378  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
379  */
380 
381 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
382 {
383 	struct vm_area_struct *vma;
384 	VMA_ITERATOR(vmi, mm, 0);
385 
386 	mmap_write_lock(mm);
387 	for_each_vma(vmi, vma)
388 		mpol_rebind_policy(vma->vm_policy, new);
389 	mmap_write_unlock(mm);
390 }
391 
392 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
393 	[MPOL_DEFAULT] = {
394 		.rebind = mpol_rebind_default,
395 	},
396 	[MPOL_INTERLEAVE] = {
397 		.create = mpol_new_nodemask,
398 		.rebind = mpol_rebind_nodemask,
399 	},
400 	[MPOL_PREFERRED] = {
401 		.create = mpol_new_preferred,
402 		.rebind = mpol_rebind_preferred,
403 	},
404 	[MPOL_BIND] = {
405 		.create = mpol_new_nodemask,
406 		.rebind = mpol_rebind_nodemask,
407 	},
408 	[MPOL_LOCAL] = {
409 		.rebind = mpol_rebind_default,
410 	},
411 	[MPOL_PREFERRED_MANY] = {
412 		.create = mpol_new_nodemask,
413 		.rebind = mpol_rebind_preferred,
414 	},
415 };
416 
417 static int migrate_page_add(struct page *page, struct list_head *pagelist,
418 				unsigned long flags);
419 
420 struct queue_pages {
421 	struct list_head *pagelist;
422 	unsigned long flags;
423 	nodemask_t *nmask;
424 	unsigned long start;
425 	unsigned long end;
426 	struct vm_area_struct *first;
427 };
428 
429 /*
430  * Check if the page's nid is in qp->nmask.
431  *
432  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
433  * in the invert of qp->nmask.
434  */
435 static inline bool queue_pages_required(struct page *page,
436 					struct queue_pages *qp)
437 {
438 	int nid = page_to_nid(page);
439 	unsigned long flags = qp->flags;
440 
441 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
442 }
443 
444 /*
445  * queue_pages_pmd() has three possible return values:
446  * 0 - pages are placed on the right node or queued successfully, or
447  *     special page is met, i.e. huge zero page.
448  * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449  *     specified.
450  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
451  *        existing page was already on a node that does not follow the
452  *        policy.
453  */
454 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455 				unsigned long end, struct mm_walk *walk)
456 	__releases(ptl)
457 {
458 	int ret = 0;
459 	struct page *page;
460 	struct queue_pages *qp = walk->private;
461 	unsigned long flags;
462 
463 	if (unlikely(is_pmd_migration_entry(*pmd))) {
464 		ret = -EIO;
465 		goto unlock;
466 	}
467 	page = pmd_page(*pmd);
468 	if (is_huge_zero_page(page)) {
469 		walk->action = ACTION_CONTINUE;
470 		goto unlock;
471 	}
472 	if (!queue_pages_required(page, qp))
473 		goto unlock;
474 
475 	flags = qp->flags;
476 	/* go to thp migration */
477 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478 		if (!vma_migratable(walk->vma) ||
479 		    migrate_page_add(page, qp->pagelist, flags)) {
480 			ret = 1;
481 			goto unlock;
482 		}
483 	} else
484 		ret = -EIO;
485 unlock:
486 	spin_unlock(ptl);
487 	return ret;
488 }
489 
490 /*
491  * Scan through pages checking if pages follow certain conditions,
492  * and move them to the pagelist if they do.
493  *
494  * queue_pages_pte_range() has three possible return values:
495  * 0 - pages are placed on the right node or queued successfully, or
496  *     special page is met, i.e. zero page.
497  * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498  *     specified.
499  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
500  *        on a node that does not follow the policy.
501  */
502 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
503 			unsigned long end, struct mm_walk *walk)
504 {
505 	struct vm_area_struct *vma = walk->vma;
506 	struct page *page;
507 	struct queue_pages *qp = walk->private;
508 	unsigned long flags = qp->flags;
509 	bool has_unmovable = false;
510 	pte_t *pte, *mapped_pte;
511 	spinlock_t *ptl;
512 
513 	ptl = pmd_trans_huge_lock(pmd, vma);
514 	if (ptl)
515 		return queue_pages_pmd(pmd, ptl, addr, end, walk);
516 
517 	if (pmd_trans_unstable(pmd))
518 		return 0;
519 
520 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
521 	for (; addr != end; pte++, addr += PAGE_SIZE) {
522 		if (!pte_present(*pte))
523 			continue;
524 		page = vm_normal_page(vma, addr, *pte);
525 		if (!page || is_zone_device_page(page))
526 			continue;
527 		/*
528 		 * vm_normal_page() filters out zero pages, but there might
529 		 * still be PageReserved pages to skip, perhaps in a VDSO.
530 		 */
531 		if (PageReserved(page))
532 			continue;
533 		if (!queue_pages_required(page, qp))
534 			continue;
535 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
536 			/* MPOL_MF_STRICT must be specified if we get here */
537 			if (!vma_migratable(vma)) {
538 				has_unmovable = true;
539 				break;
540 			}
541 
542 			/*
543 			 * Do not abort immediately since there may be
544 			 * temporary off LRU pages in the range.  Still
545 			 * need migrate other LRU pages.
546 			 */
547 			if (migrate_page_add(page, qp->pagelist, flags))
548 				has_unmovable = true;
549 		} else
550 			break;
551 	}
552 	pte_unmap_unlock(mapped_pte, ptl);
553 	cond_resched();
554 
555 	if (has_unmovable)
556 		return 1;
557 
558 	return addr != end ? -EIO : 0;
559 }
560 
561 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
562 			       unsigned long addr, unsigned long end,
563 			       struct mm_walk *walk)
564 {
565 	int ret = 0;
566 #ifdef CONFIG_HUGETLB_PAGE
567 	struct queue_pages *qp = walk->private;
568 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
569 	struct page *page;
570 	spinlock_t *ptl;
571 	pte_t entry;
572 
573 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
574 	entry = huge_ptep_get(pte);
575 	if (!pte_present(entry))
576 		goto unlock;
577 	page = pte_page(entry);
578 	if (!queue_pages_required(page, qp))
579 		goto unlock;
580 
581 	if (flags == MPOL_MF_STRICT) {
582 		/*
583 		 * STRICT alone means only detecting misplaced page and no
584 		 * need to further check other vma.
585 		 */
586 		ret = -EIO;
587 		goto unlock;
588 	}
589 
590 	if (!vma_migratable(walk->vma)) {
591 		/*
592 		 * Must be STRICT with MOVE*, otherwise .test_walk() have
593 		 * stopped walking current vma.
594 		 * Detecting misplaced page but allow migrating pages which
595 		 * have been queued.
596 		 */
597 		ret = 1;
598 		goto unlock;
599 	}
600 
601 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
602 	if (flags & (MPOL_MF_MOVE_ALL) ||
603 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
604 	     !hugetlb_pmd_shared(pte))) {
605 		if (isolate_hugetlb(page, qp->pagelist) &&
606 			(flags & MPOL_MF_STRICT))
607 			/*
608 			 * Failed to isolate page but allow migrating pages
609 			 * which have been queued.
610 			 */
611 			ret = 1;
612 	}
613 unlock:
614 	spin_unlock(ptl);
615 #else
616 	BUG();
617 #endif
618 	return ret;
619 }
620 
621 #ifdef CONFIG_NUMA_BALANCING
622 /*
623  * This is used to mark a range of virtual addresses to be inaccessible.
624  * These are later cleared by a NUMA hinting fault. Depending on these
625  * faults, pages may be migrated for better NUMA placement.
626  *
627  * This is assuming that NUMA faults are handled using PROT_NONE. If
628  * an architecture makes a different choice, it will need further
629  * changes to the core.
630  */
631 unsigned long change_prot_numa(struct vm_area_struct *vma,
632 			unsigned long addr, unsigned long end)
633 {
634 	struct mmu_gather tlb;
635 	int nr_updated;
636 
637 	tlb_gather_mmu(&tlb, vma->vm_mm);
638 
639 	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
640 				       MM_CP_PROT_NUMA);
641 	if (nr_updated)
642 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
643 
644 	tlb_finish_mmu(&tlb);
645 
646 	return nr_updated;
647 }
648 #else
649 static unsigned long change_prot_numa(struct vm_area_struct *vma,
650 			unsigned long addr, unsigned long end)
651 {
652 	return 0;
653 }
654 #endif /* CONFIG_NUMA_BALANCING */
655 
656 static int queue_pages_test_walk(unsigned long start, unsigned long end,
657 				struct mm_walk *walk)
658 {
659 	struct vm_area_struct *next, *vma = walk->vma;
660 	struct queue_pages *qp = walk->private;
661 	unsigned long endvma = vma->vm_end;
662 	unsigned long flags = qp->flags;
663 
664 	/* range check first */
665 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
666 
667 	if (!qp->first) {
668 		qp->first = vma;
669 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
670 			(qp->start < vma->vm_start))
671 			/* hole at head side of range */
672 			return -EFAULT;
673 	}
674 	next = find_vma(vma->vm_mm, vma->vm_end);
675 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
676 		((vma->vm_end < qp->end) &&
677 		(!next || vma->vm_end < next->vm_start)))
678 		/* hole at middle or tail of range */
679 		return -EFAULT;
680 
681 	/*
682 	 * Need check MPOL_MF_STRICT to return -EIO if possible
683 	 * regardless of vma_migratable
684 	 */
685 	if (!vma_migratable(vma) &&
686 	    !(flags & MPOL_MF_STRICT))
687 		return 1;
688 
689 	if (endvma > end)
690 		endvma = end;
691 
692 	if (flags & MPOL_MF_LAZY) {
693 		/* Similar to task_numa_work, skip inaccessible VMAs */
694 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
695 			!(vma->vm_flags & VM_MIXEDMAP))
696 			change_prot_numa(vma, start, endvma);
697 		return 1;
698 	}
699 
700 	/* queue pages from current vma */
701 	if (flags & MPOL_MF_VALID)
702 		return 0;
703 	return 1;
704 }
705 
706 static const struct mm_walk_ops queue_pages_walk_ops = {
707 	.hugetlb_entry		= queue_pages_hugetlb,
708 	.pmd_entry		= queue_pages_pte_range,
709 	.test_walk		= queue_pages_test_walk,
710 };
711 
712 /*
713  * Walk through page tables and collect pages to be migrated.
714  *
715  * If pages found in a given range are on a set of nodes (determined by
716  * @nodes and @flags,) it's isolated and queued to the pagelist which is
717  * passed via @private.
718  *
719  * queue_pages_range() has three possible return values:
720  * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
721  *     specified.
722  * 0 - queue pages successfully or no misplaced page.
723  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
724  *         memory range specified by nodemask and maxnode points outside
725  *         your accessible address space (-EFAULT)
726  */
727 static int
728 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
729 		nodemask_t *nodes, unsigned long flags,
730 		struct list_head *pagelist)
731 {
732 	int err;
733 	struct queue_pages qp = {
734 		.pagelist = pagelist,
735 		.flags = flags,
736 		.nmask = nodes,
737 		.start = start,
738 		.end = end,
739 		.first = NULL,
740 	};
741 
742 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
743 
744 	if (!qp.first)
745 		/* whole range in hole */
746 		err = -EFAULT;
747 
748 	return err;
749 }
750 
751 /*
752  * Apply policy to a single VMA
753  * This must be called with the mmap_lock held for writing.
754  */
755 static int vma_replace_policy(struct vm_area_struct *vma,
756 						struct mempolicy *pol)
757 {
758 	int err;
759 	struct mempolicy *old;
760 	struct mempolicy *new;
761 
762 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
763 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
764 		 vma->vm_ops, vma->vm_file,
765 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
766 
767 	new = mpol_dup(pol);
768 	if (IS_ERR(new))
769 		return PTR_ERR(new);
770 
771 	if (vma->vm_ops && vma->vm_ops->set_policy) {
772 		err = vma->vm_ops->set_policy(vma, new);
773 		if (err)
774 			goto err_out;
775 	}
776 
777 	old = vma->vm_policy;
778 	vma->vm_policy = new; /* protected by mmap_lock */
779 	mpol_put(old);
780 
781 	return 0;
782  err_out:
783 	mpol_put(new);
784 	return err;
785 }
786 
787 /* Step 2: apply policy to a range and do splits. */
788 static int mbind_range(struct mm_struct *mm, unsigned long start,
789 		       unsigned long end, struct mempolicy *new_pol)
790 {
791 	MA_STATE(mas, &mm->mm_mt, start, start);
792 	struct vm_area_struct *prev;
793 	struct vm_area_struct *vma;
794 	int err = 0;
795 	pgoff_t pgoff;
796 
797 	prev = mas_prev(&mas, 0);
798 	if (unlikely(!prev))
799 		mas_set(&mas, start);
800 
801 	vma = mas_find(&mas, end - 1);
802 	if (WARN_ON(!vma))
803 		return 0;
804 
805 	if (start > vma->vm_start)
806 		prev = vma;
807 
808 	for (; vma; vma = mas_next(&mas, end - 1)) {
809 		unsigned long vmstart = max(start, vma->vm_start);
810 		unsigned long vmend = min(end, vma->vm_end);
811 
812 		if (mpol_equal(vma_policy(vma), new_pol))
813 			goto next;
814 
815 		pgoff = vma->vm_pgoff +
816 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
817 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
818 				 vma->anon_vma, vma->vm_file, pgoff,
819 				 new_pol, vma->vm_userfaultfd_ctx,
820 				 anon_vma_name(vma));
821 		if (prev) {
822 			/* vma_merge() invalidated the mas */
823 			mas_pause(&mas);
824 			vma = prev;
825 			goto replace;
826 		}
827 		if (vma->vm_start != vmstart) {
828 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
829 			if (err)
830 				goto out;
831 			/* split_vma() invalidated the mas */
832 			mas_pause(&mas);
833 		}
834 		if (vma->vm_end != vmend) {
835 			err = split_vma(vma->vm_mm, vma, vmend, 0);
836 			if (err)
837 				goto out;
838 			/* split_vma() invalidated the mas */
839 			mas_pause(&mas);
840 		}
841 replace:
842 		err = vma_replace_policy(vma, new_pol);
843 		if (err)
844 			goto out;
845 next:
846 		prev = vma;
847 	}
848 
849 out:
850 	return err;
851 }
852 
853 /* Set the process memory policy */
854 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
855 			     nodemask_t *nodes)
856 {
857 	struct mempolicy *new, *old;
858 	NODEMASK_SCRATCH(scratch);
859 	int ret;
860 
861 	if (!scratch)
862 		return -ENOMEM;
863 
864 	new = mpol_new(mode, flags, nodes);
865 	if (IS_ERR(new)) {
866 		ret = PTR_ERR(new);
867 		goto out;
868 	}
869 
870 	task_lock(current);
871 	ret = mpol_set_nodemask(new, nodes, scratch);
872 	if (ret) {
873 		task_unlock(current);
874 		mpol_put(new);
875 		goto out;
876 	}
877 
878 	old = current->mempolicy;
879 	current->mempolicy = new;
880 	if (new && new->mode == MPOL_INTERLEAVE)
881 		current->il_prev = MAX_NUMNODES-1;
882 	task_unlock(current);
883 	mpol_put(old);
884 	ret = 0;
885 out:
886 	NODEMASK_SCRATCH_FREE(scratch);
887 	return ret;
888 }
889 
890 /*
891  * Return nodemask for policy for get_mempolicy() query
892  *
893  * Called with task's alloc_lock held
894  */
895 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
896 {
897 	nodes_clear(*nodes);
898 	if (p == &default_policy)
899 		return;
900 
901 	switch (p->mode) {
902 	case MPOL_BIND:
903 	case MPOL_INTERLEAVE:
904 	case MPOL_PREFERRED:
905 	case MPOL_PREFERRED_MANY:
906 		*nodes = p->nodes;
907 		break;
908 	case MPOL_LOCAL:
909 		/* return empty node mask for local allocation */
910 		break;
911 	default:
912 		BUG();
913 	}
914 }
915 
916 static int lookup_node(struct mm_struct *mm, unsigned long addr)
917 {
918 	struct page *p = NULL;
919 	int ret;
920 
921 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
922 	if (ret > 0) {
923 		ret = page_to_nid(p);
924 		put_page(p);
925 	}
926 	return ret;
927 }
928 
929 /* Retrieve NUMA policy */
930 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
931 			     unsigned long addr, unsigned long flags)
932 {
933 	int err;
934 	struct mm_struct *mm = current->mm;
935 	struct vm_area_struct *vma = NULL;
936 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
937 
938 	if (flags &
939 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
940 		return -EINVAL;
941 
942 	if (flags & MPOL_F_MEMS_ALLOWED) {
943 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
944 			return -EINVAL;
945 		*policy = 0;	/* just so it's initialized */
946 		task_lock(current);
947 		*nmask  = cpuset_current_mems_allowed;
948 		task_unlock(current);
949 		return 0;
950 	}
951 
952 	if (flags & MPOL_F_ADDR) {
953 		/*
954 		 * Do NOT fall back to task policy if the
955 		 * vma/shared policy at addr is NULL.  We
956 		 * want to return MPOL_DEFAULT in this case.
957 		 */
958 		mmap_read_lock(mm);
959 		vma = vma_lookup(mm, addr);
960 		if (!vma) {
961 			mmap_read_unlock(mm);
962 			return -EFAULT;
963 		}
964 		if (vma->vm_ops && vma->vm_ops->get_policy)
965 			pol = vma->vm_ops->get_policy(vma, addr);
966 		else
967 			pol = vma->vm_policy;
968 	} else if (addr)
969 		return -EINVAL;
970 
971 	if (!pol)
972 		pol = &default_policy;	/* indicates default behavior */
973 
974 	if (flags & MPOL_F_NODE) {
975 		if (flags & MPOL_F_ADDR) {
976 			/*
977 			 * Take a refcount on the mpol, because we are about to
978 			 * drop the mmap_lock, after which only "pol" remains
979 			 * valid, "vma" is stale.
980 			 */
981 			pol_refcount = pol;
982 			vma = NULL;
983 			mpol_get(pol);
984 			mmap_read_unlock(mm);
985 			err = lookup_node(mm, addr);
986 			if (err < 0)
987 				goto out;
988 			*policy = err;
989 		} else if (pol == current->mempolicy &&
990 				pol->mode == MPOL_INTERLEAVE) {
991 			*policy = next_node_in(current->il_prev, pol->nodes);
992 		} else {
993 			err = -EINVAL;
994 			goto out;
995 		}
996 	} else {
997 		*policy = pol == &default_policy ? MPOL_DEFAULT :
998 						pol->mode;
999 		/*
1000 		 * Internal mempolicy flags must be masked off before exposing
1001 		 * the policy to userspace.
1002 		 */
1003 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1004 	}
1005 
1006 	err = 0;
1007 	if (nmask) {
1008 		if (mpol_store_user_nodemask(pol)) {
1009 			*nmask = pol->w.user_nodemask;
1010 		} else {
1011 			task_lock(current);
1012 			get_policy_nodemask(pol, nmask);
1013 			task_unlock(current);
1014 		}
1015 	}
1016 
1017  out:
1018 	mpol_cond_put(pol);
1019 	if (vma)
1020 		mmap_read_unlock(mm);
1021 	if (pol_refcount)
1022 		mpol_put(pol_refcount);
1023 	return err;
1024 }
1025 
1026 #ifdef CONFIG_MIGRATION
1027 /*
1028  * page migration, thp tail pages can be passed.
1029  */
1030 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1031 				unsigned long flags)
1032 {
1033 	struct page *head = compound_head(page);
1034 	/*
1035 	 * Avoid migrating a page that is shared with others.
1036 	 */
1037 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1038 		if (!isolate_lru_page(head)) {
1039 			list_add_tail(&head->lru, pagelist);
1040 			mod_node_page_state(page_pgdat(head),
1041 				NR_ISOLATED_ANON + page_is_file_lru(head),
1042 				thp_nr_pages(head));
1043 		} else if (flags & MPOL_MF_STRICT) {
1044 			/*
1045 			 * Non-movable page may reach here.  And, there may be
1046 			 * temporary off LRU pages or non-LRU movable pages.
1047 			 * Treat them as unmovable pages since they can't be
1048 			 * isolated, so they can't be moved at the moment.  It
1049 			 * should return -EIO for this case too.
1050 			 */
1051 			return -EIO;
1052 		}
1053 	}
1054 
1055 	return 0;
1056 }
1057 
1058 /*
1059  * Migrate pages from one node to a target node.
1060  * Returns error or the number of pages not migrated.
1061  */
1062 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1063 			   int flags)
1064 {
1065 	nodemask_t nmask;
1066 	struct vm_area_struct *vma;
1067 	LIST_HEAD(pagelist);
1068 	int err = 0;
1069 	struct migration_target_control mtc = {
1070 		.nid = dest,
1071 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1072 	};
1073 
1074 	nodes_clear(nmask);
1075 	node_set(source, nmask);
1076 
1077 	/*
1078 	 * This does not "check" the range but isolates all pages that
1079 	 * need migration.  Between passing in the full user address
1080 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1081 	 */
1082 	vma = find_vma(mm, 0);
1083 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1084 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1085 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1086 
1087 	if (!list_empty(&pagelist)) {
1088 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1089 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1090 		if (err)
1091 			putback_movable_pages(&pagelist);
1092 	}
1093 
1094 	return err;
1095 }
1096 
1097 /*
1098  * Move pages between the two nodesets so as to preserve the physical
1099  * layout as much as possible.
1100  *
1101  * Returns the number of page that could not be moved.
1102  */
1103 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1104 		     const nodemask_t *to, int flags)
1105 {
1106 	int busy = 0;
1107 	int err = 0;
1108 	nodemask_t tmp;
1109 
1110 	lru_cache_disable();
1111 
1112 	mmap_read_lock(mm);
1113 
1114 	/*
1115 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1116 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1117 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1118 	 * The pair of nodemasks 'to' and 'from' define the map.
1119 	 *
1120 	 * If no pair of bits is found that way, fallback to picking some
1121 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1122 	 * 'source' and 'dest' bits are the same, this represents a node
1123 	 * that will be migrating to itself, so no pages need move.
1124 	 *
1125 	 * If no bits are left in 'tmp', or if all remaining bits left
1126 	 * in 'tmp' correspond to the same bit in 'to', return false
1127 	 * (nothing left to migrate).
1128 	 *
1129 	 * This lets us pick a pair of nodes to migrate between, such that
1130 	 * if possible the dest node is not already occupied by some other
1131 	 * source node, minimizing the risk of overloading the memory on a
1132 	 * node that would happen if we migrated incoming memory to a node
1133 	 * before migrating outgoing memory source that same node.
1134 	 *
1135 	 * A single scan of tmp is sufficient.  As we go, we remember the
1136 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1137 	 * that not only moved, but what's better, moved to an empty slot
1138 	 * (d is not set in tmp), then we break out then, with that pair.
1139 	 * Otherwise when we finish scanning from_tmp, we at least have the
1140 	 * most recent <s, d> pair that moved.  If we get all the way through
1141 	 * the scan of tmp without finding any node that moved, much less
1142 	 * moved to an empty node, then there is nothing left worth migrating.
1143 	 */
1144 
1145 	tmp = *from;
1146 	while (!nodes_empty(tmp)) {
1147 		int s, d;
1148 		int source = NUMA_NO_NODE;
1149 		int dest = 0;
1150 
1151 		for_each_node_mask(s, tmp) {
1152 
1153 			/*
1154 			 * do_migrate_pages() tries to maintain the relative
1155 			 * node relationship of the pages established between
1156 			 * threads and memory areas.
1157                          *
1158 			 * However if the number of source nodes is not equal to
1159 			 * the number of destination nodes we can not preserve
1160 			 * this node relative relationship.  In that case, skip
1161 			 * copying memory from a node that is in the destination
1162 			 * mask.
1163 			 *
1164 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1165 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1166 			 */
1167 
1168 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1169 						(node_isset(s, *to)))
1170 				continue;
1171 
1172 			d = node_remap(s, *from, *to);
1173 			if (s == d)
1174 				continue;
1175 
1176 			source = s;	/* Node moved. Memorize */
1177 			dest = d;
1178 
1179 			/* dest not in remaining from nodes? */
1180 			if (!node_isset(dest, tmp))
1181 				break;
1182 		}
1183 		if (source == NUMA_NO_NODE)
1184 			break;
1185 
1186 		node_clear(source, tmp);
1187 		err = migrate_to_node(mm, source, dest, flags);
1188 		if (err > 0)
1189 			busy += err;
1190 		if (err < 0)
1191 			break;
1192 	}
1193 	mmap_read_unlock(mm);
1194 
1195 	lru_cache_enable();
1196 	if (err < 0)
1197 		return err;
1198 	return busy;
1199 
1200 }
1201 
1202 /*
1203  * Allocate a new page for page migration based on vma policy.
1204  * Start by assuming the page is mapped by the same vma as contains @start.
1205  * Search forward from there, if not.  N.B., this assumes that the
1206  * list of pages handed to migrate_pages()--which is how we get here--
1207  * is in virtual address order.
1208  */
1209 static struct page *new_page(struct page *page, unsigned long start)
1210 {
1211 	struct folio *dst, *src = page_folio(page);
1212 	struct vm_area_struct *vma;
1213 	unsigned long address;
1214 	VMA_ITERATOR(vmi, current->mm, start);
1215 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
1216 
1217 	for_each_vma(vmi, vma) {
1218 		address = page_address_in_vma(page, vma);
1219 		if (address != -EFAULT)
1220 			break;
1221 	}
1222 
1223 	if (folio_test_hugetlb(src))
1224 		return alloc_huge_page_vma(page_hstate(&src->page),
1225 				vma, address);
1226 
1227 	if (folio_test_large(src))
1228 		gfp = GFP_TRANSHUGE;
1229 
1230 	/*
1231 	 * if !vma, vma_alloc_folio() will use task or system default policy
1232 	 */
1233 	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1234 			folio_test_large(src));
1235 	return &dst->page;
1236 }
1237 #else
1238 
1239 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1240 				unsigned long flags)
1241 {
1242 	return -EIO;
1243 }
1244 
1245 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1246 		     const nodemask_t *to, int flags)
1247 {
1248 	return -ENOSYS;
1249 }
1250 
1251 static struct page *new_page(struct page *page, unsigned long start)
1252 {
1253 	return NULL;
1254 }
1255 #endif
1256 
1257 static long do_mbind(unsigned long start, unsigned long len,
1258 		     unsigned short mode, unsigned short mode_flags,
1259 		     nodemask_t *nmask, unsigned long flags)
1260 {
1261 	struct mm_struct *mm = current->mm;
1262 	struct mempolicy *new;
1263 	unsigned long end;
1264 	int err;
1265 	int ret;
1266 	LIST_HEAD(pagelist);
1267 
1268 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1269 		return -EINVAL;
1270 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1271 		return -EPERM;
1272 
1273 	if (start & ~PAGE_MASK)
1274 		return -EINVAL;
1275 
1276 	if (mode == MPOL_DEFAULT)
1277 		flags &= ~MPOL_MF_STRICT;
1278 
1279 	len = PAGE_ALIGN(len);
1280 	end = start + len;
1281 
1282 	if (end < start)
1283 		return -EINVAL;
1284 	if (end == start)
1285 		return 0;
1286 
1287 	new = mpol_new(mode, mode_flags, nmask);
1288 	if (IS_ERR(new))
1289 		return PTR_ERR(new);
1290 
1291 	if (flags & MPOL_MF_LAZY)
1292 		new->flags |= MPOL_F_MOF;
1293 
1294 	/*
1295 	 * If we are using the default policy then operation
1296 	 * on discontinuous address spaces is okay after all
1297 	 */
1298 	if (!new)
1299 		flags |= MPOL_MF_DISCONTIG_OK;
1300 
1301 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1302 		 start, start + len, mode, mode_flags,
1303 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1304 
1305 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1306 
1307 		lru_cache_disable();
1308 	}
1309 	{
1310 		NODEMASK_SCRATCH(scratch);
1311 		if (scratch) {
1312 			mmap_write_lock(mm);
1313 			err = mpol_set_nodemask(new, nmask, scratch);
1314 			if (err)
1315 				mmap_write_unlock(mm);
1316 		} else
1317 			err = -ENOMEM;
1318 		NODEMASK_SCRATCH_FREE(scratch);
1319 	}
1320 	if (err)
1321 		goto mpol_out;
1322 
1323 	ret = queue_pages_range(mm, start, end, nmask,
1324 			  flags | MPOL_MF_INVERT, &pagelist);
1325 
1326 	if (ret < 0) {
1327 		err = ret;
1328 		goto up_out;
1329 	}
1330 
1331 	err = mbind_range(mm, start, end, new);
1332 
1333 	if (!err) {
1334 		int nr_failed = 0;
1335 
1336 		if (!list_empty(&pagelist)) {
1337 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1338 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1339 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1340 			if (nr_failed)
1341 				putback_movable_pages(&pagelist);
1342 		}
1343 
1344 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1345 			err = -EIO;
1346 	} else {
1347 up_out:
1348 		if (!list_empty(&pagelist))
1349 			putback_movable_pages(&pagelist);
1350 	}
1351 
1352 	mmap_write_unlock(mm);
1353 mpol_out:
1354 	mpol_put(new);
1355 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1356 		lru_cache_enable();
1357 	return err;
1358 }
1359 
1360 /*
1361  * User space interface with variable sized bitmaps for nodelists.
1362  */
1363 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1364 		      unsigned long maxnode)
1365 {
1366 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1367 	int ret;
1368 
1369 	if (in_compat_syscall())
1370 		ret = compat_get_bitmap(mask,
1371 					(const compat_ulong_t __user *)nmask,
1372 					maxnode);
1373 	else
1374 		ret = copy_from_user(mask, nmask,
1375 				     nlongs * sizeof(unsigned long));
1376 
1377 	if (ret)
1378 		return -EFAULT;
1379 
1380 	if (maxnode % BITS_PER_LONG)
1381 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1382 
1383 	return 0;
1384 }
1385 
1386 /* Copy a node mask from user space. */
1387 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1388 		     unsigned long maxnode)
1389 {
1390 	--maxnode;
1391 	nodes_clear(*nodes);
1392 	if (maxnode == 0 || !nmask)
1393 		return 0;
1394 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1395 		return -EINVAL;
1396 
1397 	/*
1398 	 * When the user specified more nodes than supported just check
1399 	 * if the non supported part is all zero, one word at a time,
1400 	 * starting at the end.
1401 	 */
1402 	while (maxnode > MAX_NUMNODES) {
1403 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1404 		unsigned long t;
1405 
1406 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1407 			return -EFAULT;
1408 
1409 		if (maxnode - bits >= MAX_NUMNODES) {
1410 			maxnode -= bits;
1411 		} else {
1412 			maxnode = MAX_NUMNODES;
1413 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1414 		}
1415 		if (t)
1416 			return -EINVAL;
1417 	}
1418 
1419 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1420 }
1421 
1422 /* Copy a kernel node mask to user space */
1423 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1424 			      nodemask_t *nodes)
1425 {
1426 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1427 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1428 	bool compat = in_compat_syscall();
1429 
1430 	if (compat)
1431 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1432 
1433 	if (copy > nbytes) {
1434 		if (copy > PAGE_SIZE)
1435 			return -EINVAL;
1436 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1437 			return -EFAULT;
1438 		copy = nbytes;
1439 		maxnode = nr_node_ids;
1440 	}
1441 
1442 	if (compat)
1443 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1444 					 nodes_addr(*nodes), maxnode);
1445 
1446 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1447 }
1448 
1449 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1450 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1451 {
1452 	*flags = *mode & MPOL_MODE_FLAGS;
1453 	*mode &= ~MPOL_MODE_FLAGS;
1454 
1455 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1456 		return -EINVAL;
1457 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1458 		return -EINVAL;
1459 	if (*flags & MPOL_F_NUMA_BALANCING) {
1460 		if (*mode != MPOL_BIND)
1461 			return -EINVAL;
1462 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1463 	}
1464 	return 0;
1465 }
1466 
1467 static long kernel_mbind(unsigned long start, unsigned long len,
1468 			 unsigned long mode, const unsigned long __user *nmask,
1469 			 unsigned long maxnode, unsigned int flags)
1470 {
1471 	unsigned short mode_flags;
1472 	nodemask_t nodes;
1473 	int lmode = mode;
1474 	int err;
1475 
1476 	start = untagged_addr(start);
1477 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1478 	if (err)
1479 		return err;
1480 
1481 	err = get_nodes(&nodes, nmask, maxnode);
1482 	if (err)
1483 		return err;
1484 
1485 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1486 }
1487 
1488 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1489 		unsigned long, home_node, unsigned long, flags)
1490 {
1491 	struct mm_struct *mm = current->mm;
1492 	struct vm_area_struct *vma;
1493 	struct mempolicy *new;
1494 	unsigned long vmstart;
1495 	unsigned long vmend;
1496 	unsigned long end;
1497 	int err = -ENOENT;
1498 	VMA_ITERATOR(vmi, mm, start);
1499 
1500 	start = untagged_addr(start);
1501 	if (start & ~PAGE_MASK)
1502 		return -EINVAL;
1503 	/*
1504 	 * flags is used for future extension if any.
1505 	 */
1506 	if (flags != 0)
1507 		return -EINVAL;
1508 
1509 	/*
1510 	 * Check home_node is online to avoid accessing uninitialized
1511 	 * NODE_DATA.
1512 	 */
1513 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1514 		return -EINVAL;
1515 
1516 	len = PAGE_ALIGN(len);
1517 	end = start + len;
1518 
1519 	if (end < start)
1520 		return -EINVAL;
1521 	if (end == start)
1522 		return 0;
1523 	mmap_write_lock(mm);
1524 	for_each_vma_range(vmi, vma, end) {
1525 		vmstart = max(start, vma->vm_start);
1526 		vmend   = min(end, vma->vm_end);
1527 		new = mpol_dup(vma_policy(vma));
1528 		if (IS_ERR(new)) {
1529 			err = PTR_ERR(new);
1530 			break;
1531 		}
1532 		/*
1533 		 * Only update home node if there is an existing vma policy
1534 		 */
1535 		if (!new)
1536 			continue;
1537 
1538 		/*
1539 		 * If any vma in the range got policy other than MPOL_BIND
1540 		 * or MPOL_PREFERRED_MANY we return error. We don't reset
1541 		 * the home node for vmas we already updated before.
1542 		 */
1543 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
1544 			mpol_put(new);
1545 			err = -EOPNOTSUPP;
1546 			break;
1547 		}
1548 
1549 		new->home_node = home_node;
1550 		err = mbind_range(mm, vmstart, vmend, new);
1551 		mpol_put(new);
1552 		if (err)
1553 			break;
1554 	}
1555 	mmap_write_unlock(mm);
1556 	return err;
1557 }
1558 
1559 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1560 		unsigned long, mode, const unsigned long __user *, nmask,
1561 		unsigned long, maxnode, unsigned int, flags)
1562 {
1563 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1564 }
1565 
1566 /* Set the process memory policy */
1567 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1568 				 unsigned long maxnode)
1569 {
1570 	unsigned short mode_flags;
1571 	nodemask_t nodes;
1572 	int lmode = mode;
1573 	int err;
1574 
1575 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1576 	if (err)
1577 		return err;
1578 
1579 	err = get_nodes(&nodes, nmask, maxnode);
1580 	if (err)
1581 		return err;
1582 
1583 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1584 }
1585 
1586 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1587 		unsigned long, maxnode)
1588 {
1589 	return kernel_set_mempolicy(mode, nmask, maxnode);
1590 }
1591 
1592 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1593 				const unsigned long __user *old_nodes,
1594 				const unsigned long __user *new_nodes)
1595 {
1596 	struct mm_struct *mm = NULL;
1597 	struct task_struct *task;
1598 	nodemask_t task_nodes;
1599 	int err;
1600 	nodemask_t *old;
1601 	nodemask_t *new;
1602 	NODEMASK_SCRATCH(scratch);
1603 
1604 	if (!scratch)
1605 		return -ENOMEM;
1606 
1607 	old = &scratch->mask1;
1608 	new = &scratch->mask2;
1609 
1610 	err = get_nodes(old, old_nodes, maxnode);
1611 	if (err)
1612 		goto out;
1613 
1614 	err = get_nodes(new, new_nodes, maxnode);
1615 	if (err)
1616 		goto out;
1617 
1618 	/* Find the mm_struct */
1619 	rcu_read_lock();
1620 	task = pid ? find_task_by_vpid(pid) : current;
1621 	if (!task) {
1622 		rcu_read_unlock();
1623 		err = -ESRCH;
1624 		goto out;
1625 	}
1626 	get_task_struct(task);
1627 
1628 	err = -EINVAL;
1629 
1630 	/*
1631 	 * Check if this process has the right to modify the specified process.
1632 	 * Use the regular "ptrace_may_access()" checks.
1633 	 */
1634 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1635 		rcu_read_unlock();
1636 		err = -EPERM;
1637 		goto out_put;
1638 	}
1639 	rcu_read_unlock();
1640 
1641 	task_nodes = cpuset_mems_allowed(task);
1642 	/* Is the user allowed to access the target nodes? */
1643 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1644 		err = -EPERM;
1645 		goto out_put;
1646 	}
1647 
1648 	task_nodes = cpuset_mems_allowed(current);
1649 	nodes_and(*new, *new, task_nodes);
1650 	if (nodes_empty(*new))
1651 		goto out_put;
1652 
1653 	err = security_task_movememory(task);
1654 	if (err)
1655 		goto out_put;
1656 
1657 	mm = get_task_mm(task);
1658 	put_task_struct(task);
1659 
1660 	if (!mm) {
1661 		err = -EINVAL;
1662 		goto out;
1663 	}
1664 
1665 	err = do_migrate_pages(mm, old, new,
1666 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1667 
1668 	mmput(mm);
1669 out:
1670 	NODEMASK_SCRATCH_FREE(scratch);
1671 
1672 	return err;
1673 
1674 out_put:
1675 	put_task_struct(task);
1676 	goto out;
1677 
1678 }
1679 
1680 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1681 		const unsigned long __user *, old_nodes,
1682 		const unsigned long __user *, new_nodes)
1683 {
1684 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1685 }
1686 
1687 
1688 /* Retrieve NUMA policy */
1689 static int kernel_get_mempolicy(int __user *policy,
1690 				unsigned long __user *nmask,
1691 				unsigned long maxnode,
1692 				unsigned long addr,
1693 				unsigned long flags)
1694 {
1695 	int err;
1696 	int pval;
1697 	nodemask_t nodes;
1698 
1699 	if (nmask != NULL && maxnode < nr_node_ids)
1700 		return -EINVAL;
1701 
1702 	addr = untagged_addr(addr);
1703 
1704 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1705 
1706 	if (err)
1707 		return err;
1708 
1709 	if (policy && put_user(pval, policy))
1710 		return -EFAULT;
1711 
1712 	if (nmask)
1713 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1714 
1715 	return err;
1716 }
1717 
1718 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1719 		unsigned long __user *, nmask, unsigned long, maxnode,
1720 		unsigned long, addr, unsigned long, flags)
1721 {
1722 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1723 }
1724 
1725 bool vma_migratable(struct vm_area_struct *vma)
1726 {
1727 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1728 		return false;
1729 
1730 	/*
1731 	 * DAX device mappings require predictable access latency, so avoid
1732 	 * incurring periodic faults.
1733 	 */
1734 	if (vma_is_dax(vma))
1735 		return false;
1736 
1737 	if (is_vm_hugetlb_page(vma) &&
1738 		!hugepage_migration_supported(hstate_vma(vma)))
1739 		return false;
1740 
1741 	/*
1742 	 * Migration allocates pages in the highest zone. If we cannot
1743 	 * do so then migration (at least from node to node) is not
1744 	 * possible.
1745 	 */
1746 	if (vma->vm_file &&
1747 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1748 			< policy_zone)
1749 		return false;
1750 	return true;
1751 }
1752 
1753 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1754 						unsigned long addr)
1755 {
1756 	struct mempolicy *pol = NULL;
1757 
1758 	if (vma) {
1759 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1760 			pol = vma->vm_ops->get_policy(vma, addr);
1761 		} else if (vma->vm_policy) {
1762 			pol = vma->vm_policy;
1763 
1764 			/*
1765 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1766 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1767 			 * count on these policies which will be dropped by
1768 			 * mpol_cond_put() later
1769 			 */
1770 			if (mpol_needs_cond_ref(pol))
1771 				mpol_get(pol);
1772 		}
1773 	}
1774 
1775 	return pol;
1776 }
1777 
1778 /*
1779  * get_vma_policy(@vma, @addr)
1780  * @vma: virtual memory area whose policy is sought
1781  * @addr: address in @vma for shared policy lookup
1782  *
1783  * Returns effective policy for a VMA at specified address.
1784  * Falls back to current->mempolicy or system default policy, as necessary.
1785  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1786  * count--added by the get_policy() vm_op, as appropriate--to protect against
1787  * freeing by another task.  It is the caller's responsibility to free the
1788  * extra reference for shared policies.
1789  */
1790 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1791 						unsigned long addr)
1792 {
1793 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1794 
1795 	if (!pol)
1796 		pol = get_task_policy(current);
1797 
1798 	return pol;
1799 }
1800 
1801 bool vma_policy_mof(struct vm_area_struct *vma)
1802 {
1803 	struct mempolicy *pol;
1804 
1805 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1806 		bool ret = false;
1807 
1808 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1809 		if (pol && (pol->flags & MPOL_F_MOF))
1810 			ret = true;
1811 		mpol_cond_put(pol);
1812 
1813 		return ret;
1814 	}
1815 
1816 	pol = vma->vm_policy;
1817 	if (!pol)
1818 		pol = get_task_policy(current);
1819 
1820 	return pol->flags & MPOL_F_MOF;
1821 }
1822 
1823 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1824 {
1825 	enum zone_type dynamic_policy_zone = policy_zone;
1826 
1827 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1828 
1829 	/*
1830 	 * if policy->nodes has movable memory only,
1831 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1832 	 *
1833 	 * policy->nodes is intersect with node_states[N_MEMORY].
1834 	 * so if the following test fails, it implies
1835 	 * policy->nodes has movable memory only.
1836 	 */
1837 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1838 		dynamic_policy_zone = ZONE_MOVABLE;
1839 
1840 	return zone >= dynamic_policy_zone;
1841 }
1842 
1843 /*
1844  * Return a nodemask representing a mempolicy for filtering nodes for
1845  * page allocation
1846  */
1847 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1848 {
1849 	int mode = policy->mode;
1850 
1851 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1852 	if (unlikely(mode == MPOL_BIND) &&
1853 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1854 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1855 		return &policy->nodes;
1856 
1857 	if (mode == MPOL_PREFERRED_MANY)
1858 		return &policy->nodes;
1859 
1860 	return NULL;
1861 }
1862 
1863 /*
1864  * Return the  preferred node id for 'prefer' mempolicy, and return
1865  * the given id for all other policies.
1866  *
1867  * policy_node() is always coupled with policy_nodemask(), which
1868  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1869  */
1870 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1871 {
1872 	if (policy->mode == MPOL_PREFERRED) {
1873 		nd = first_node(policy->nodes);
1874 	} else {
1875 		/*
1876 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1877 		 * because we might easily break the expectation to stay on the
1878 		 * requested node and not break the policy.
1879 		 */
1880 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1881 	}
1882 
1883 	if ((policy->mode == MPOL_BIND ||
1884 	     policy->mode == MPOL_PREFERRED_MANY) &&
1885 	    policy->home_node != NUMA_NO_NODE)
1886 		return policy->home_node;
1887 
1888 	return nd;
1889 }
1890 
1891 /* Do dynamic interleaving for a process */
1892 static unsigned interleave_nodes(struct mempolicy *policy)
1893 {
1894 	unsigned next;
1895 	struct task_struct *me = current;
1896 
1897 	next = next_node_in(me->il_prev, policy->nodes);
1898 	if (next < MAX_NUMNODES)
1899 		me->il_prev = next;
1900 	return next;
1901 }
1902 
1903 /*
1904  * Depending on the memory policy provide a node from which to allocate the
1905  * next slab entry.
1906  */
1907 unsigned int mempolicy_slab_node(void)
1908 {
1909 	struct mempolicy *policy;
1910 	int node = numa_mem_id();
1911 
1912 	if (!in_task())
1913 		return node;
1914 
1915 	policy = current->mempolicy;
1916 	if (!policy)
1917 		return node;
1918 
1919 	switch (policy->mode) {
1920 	case MPOL_PREFERRED:
1921 		return first_node(policy->nodes);
1922 
1923 	case MPOL_INTERLEAVE:
1924 		return interleave_nodes(policy);
1925 
1926 	case MPOL_BIND:
1927 	case MPOL_PREFERRED_MANY:
1928 	{
1929 		struct zoneref *z;
1930 
1931 		/*
1932 		 * Follow bind policy behavior and start allocation at the
1933 		 * first node.
1934 		 */
1935 		struct zonelist *zonelist;
1936 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1937 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1938 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1939 							&policy->nodes);
1940 		return z->zone ? zone_to_nid(z->zone) : node;
1941 	}
1942 	case MPOL_LOCAL:
1943 		return node;
1944 
1945 	default:
1946 		BUG();
1947 	}
1948 }
1949 
1950 /*
1951  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1952  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1953  * number of present nodes.
1954  */
1955 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1956 {
1957 	nodemask_t nodemask = pol->nodes;
1958 	unsigned int target, nnodes;
1959 	int i;
1960 	int nid;
1961 	/*
1962 	 * The barrier will stabilize the nodemask in a register or on
1963 	 * the stack so that it will stop changing under the code.
1964 	 *
1965 	 * Between first_node() and next_node(), pol->nodes could be changed
1966 	 * by other threads. So we put pol->nodes in a local stack.
1967 	 */
1968 	barrier();
1969 
1970 	nnodes = nodes_weight(nodemask);
1971 	if (!nnodes)
1972 		return numa_node_id();
1973 	target = (unsigned int)n % nnodes;
1974 	nid = first_node(nodemask);
1975 	for (i = 0; i < target; i++)
1976 		nid = next_node(nid, nodemask);
1977 	return nid;
1978 }
1979 
1980 /* Determine a node number for interleave */
1981 static inline unsigned interleave_nid(struct mempolicy *pol,
1982 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1983 {
1984 	if (vma) {
1985 		unsigned long off;
1986 
1987 		/*
1988 		 * for small pages, there is no difference between
1989 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1990 		 * for huge pages, since vm_pgoff is in units of small
1991 		 * pages, we need to shift off the always 0 bits to get
1992 		 * a useful offset.
1993 		 */
1994 		BUG_ON(shift < PAGE_SHIFT);
1995 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1996 		off += (addr - vma->vm_start) >> shift;
1997 		return offset_il_node(pol, off);
1998 	} else
1999 		return interleave_nodes(pol);
2000 }
2001 
2002 #ifdef CONFIG_HUGETLBFS
2003 /*
2004  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2005  * @vma: virtual memory area whose policy is sought
2006  * @addr: address in @vma for shared policy lookup and interleave policy
2007  * @gfp_flags: for requested zone
2008  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2010  *
2011  * Returns a nid suitable for a huge page allocation and a pointer
2012  * to the struct mempolicy for conditional unref after allocation.
2013  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2014  * to the mempolicy's @nodemask for filtering the zonelist.
2015  *
2016  * Must be protected by read_mems_allowed_begin()
2017  */
2018 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2019 				struct mempolicy **mpol, nodemask_t **nodemask)
2020 {
2021 	int nid;
2022 	int mode;
2023 
2024 	*mpol = get_vma_policy(vma, addr);
2025 	*nodemask = NULL;
2026 	mode = (*mpol)->mode;
2027 
2028 	if (unlikely(mode == MPOL_INTERLEAVE)) {
2029 		nid = interleave_nid(*mpol, vma, addr,
2030 					huge_page_shift(hstate_vma(vma)));
2031 	} else {
2032 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2033 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2034 			*nodemask = &(*mpol)->nodes;
2035 	}
2036 	return nid;
2037 }
2038 
2039 /*
2040  * init_nodemask_of_mempolicy
2041  *
2042  * If the current task's mempolicy is "default" [NULL], return 'false'
2043  * to indicate default policy.  Otherwise, extract the policy nodemask
2044  * for 'bind' or 'interleave' policy into the argument nodemask, or
2045  * initialize the argument nodemask to contain the single node for
2046  * 'preferred' or 'local' policy and return 'true' to indicate presence
2047  * of non-default mempolicy.
2048  *
2049  * We don't bother with reference counting the mempolicy [mpol_get/put]
2050  * because the current task is examining it's own mempolicy and a task's
2051  * mempolicy is only ever changed by the task itself.
2052  *
2053  * N.B., it is the caller's responsibility to free a returned nodemask.
2054  */
2055 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2056 {
2057 	struct mempolicy *mempolicy;
2058 
2059 	if (!(mask && current->mempolicy))
2060 		return false;
2061 
2062 	task_lock(current);
2063 	mempolicy = current->mempolicy;
2064 	switch (mempolicy->mode) {
2065 	case MPOL_PREFERRED:
2066 	case MPOL_PREFERRED_MANY:
2067 	case MPOL_BIND:
2068 	case MPOL_INTERLEAVE:
2069 		*mask = mempolicy->nodes;
2070 		break;
2071 
2072 	case MPOL_LOCAL:
2073 		init_nodemask_of_node(mask, numa_node_id());
2074 		break;
2075 
2076 	default:
2077 		BUG();
2078 	}
2079 	task_unlock(current);
2080 
2081 	return true;
2082 }
2083 #endif
2084 
2085 /*
2086  * mempolicy_in_oom_domain
2087  *
2088  * If tsk's mempolicy is "bind", check for intersection between mask and
2089  * the policy nodemask. Otherwise, return true for all other policies
2090  * including "interleave", as a tsk with "interleave" policy may have
2091  * memory allocated from all nodes in system.
2092  *
2093  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2094  */
2095 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2096 					const nodemask_t *mask)
2097 {
2098 	struct mempolicy *mempolicy;
2099 	bool ret = true;
2100 
2101 	if (!mask)
2102 		return ret;
2103 
2104 	task_lock(tsk);
2105 	mempolicy = tsk->mempolicy;
2106 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2107 		ret = nodes_intersects(mempolicy->nodes, *mask);
2108 	task_unlock(tsk);
2109 
2110 	return ret;
2111 }
2112 
2113 /* Allocate a page in interleaved policy.
2114    Own path because it needs to do special accounting. */
2115 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2116 					unsigned nid)
2117 {
2118 	struct page *page;
2119 
2120 	page = __alloc_pages(gfp, order, nid, NULL);
2121 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2122 	if (!static_branch_likely(&vm_numa_stat_key))
2123 		return page;
2124 	if (page && page_to_nid(page) == nid) {
2125 		preempt_disable();
2126 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2127 		preempt_enable();
2128 	}
2129 	return page;
2130 }
2131 
2132 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2133 						int nid, struct mempolicy *pol)
2134 {
2135 	struct page *page;
2136 	gfp_t preferred_gfp;
2137 
2138 	/*
2139 	 * This is a two pass approach. The first pass will only try the
2140 	 * preferred nodes but skip the direct reclaim and allow the
2141 	 * allocation to fail, while the second pass will try all the
2142 	 * nodes in system.
2143 	 */
2144 	preferred_gfp = gfp | __GFP_NOWARN;
2145 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2146 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2147 	if (!page)
2148 		page = __alloc_pages(gfp, order, nid, NULL);
2149 
2150 	return page;
2151 }
2152 
2153 /**
2154  * vma_alloc_folio - Allocate a folio for a VMA.
2155  * @gfp: GFP flags.
2156  * @order: Order of the folio.
2157  * @vma: Pointer to VMA or NULL if not available.
2158  * @addr: Virtual address of the allocation.  Must be inside @vma.
2159  * @hugepage: For hugepages try only the preferred node if possible.
2160  *
2161  * Allocate a folio for a specific address in @vma, using the appropriate
2162  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2163  * of the mm_struct of the VMA to prevent it from going away.  Should be
2164  * used for all allocations for folios that will be mapped into user space.
2165  *
2166  * Return: The folio on success or NULL if allocation fails.
2167  */
2168 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2169 		unsigned long addr, bool hugepage)
2170 {
2171 	struct mempolicy *pol;
2172 	int node = numa_node_id();
2173 	struct folio *folio;
2174 	int preferred_nid;
2175 	nodemask_t *nmask;
2176 
2177 	pol = get_vma_policy(vma, addr);
2178 
2179 	if (pol->mode == MPOL_INTERLEAVE) {
2180 		struct page *page;
2181 		unsigned nid;
2182 
2183 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2184 		mpol_cond_put(pol);
2185 		gfp |= __GFP_COMP;
2186 		page = alloc_page_interleave(gfp, order, nid);
2187 		if (page && order > 1)
2188 			prep_transhuge_page(page);
2189 		folio = (struct folio *)page;
2190 		goto out;
2191 	}
2192 
2193 	if (pol->mode == MPOL_PREFERRED_MANY) {
2194 		struct page *page;
2195 
2196 		node = policy_node(gfp, pol, node);
2197 		gfp |= __GFP_COMP;
2198 		page = alloc_pages_preferred_many(gfp, order, node, pol);
2199 		mpol_cond_put(pol);
2200 		if (page && order > 1)
2201 			prep_transhuge_page(page);
2202 		folio = (struct folio *)page;
2203 		goto out;
2204 	}
2205 
2206 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2207 		int hpage_node = node;
2208 
2209 		/*
2210 		 * For hugepage allocation and non-interleave policy which
2211 		 * allows the current node (or other explicitly preferred
2212 		 * node) we only try to allocate from the current/preferred
2213 		 * node and don't fall back to other nodes, as the cost of
2214 		 * remote accesses would likely offset THP benefits.
2215 		 *
2216 		 * If the policy is interleave or does not allow the current
2217 		 * node in its nodemask, we allocate the standard way.
2218 		 */
2219 		if (pol->mode == MPOL_PREFERRED)
2220 			hpage_node = first_node(pol->nodes);
2221 
2222 		nmask = policy_nodemask(gfp, pol);
2223 		if (!nmask || node_isset(hpage_node, *nmask)) {
2224 			mpol_cond_put(pol);
2225 			/*
2226 			 * First, try to allocate THP only on local node, but
2227 			 * don't reclaim unnecessarily, just compact.
2228 			 */
2229 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2230 					__GFP_NORETRY, order, hpage_node);
2231 
2232 			/*
2233 			 * If hugepage allocations are configured to always
2234 			 * synchronous compact or the vma has been madvised
2235 			 * to prefer hugepage backing, retry allowing remote
2236 			 * memory with both reclaim and compact as well.
2237 			 */
2238 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2239 				folio = __folio_alloc(gfp, order, hpage_node,
2240 						      nmask);
2241 
2242 			goto out;
2243 		}
2244 	}
2245 
2246 	nmask = policy_nodemask(gfp, pol);
2247 	preferred_nid = policy_node(gfp, pol, node);
2248 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2249 	mpol_cond_put(pol);
2250 out:
2251 	return folio;
2252 }
2253 EXPORT_SYMBOL(vma_alloc_folio);
2254 
2255 /**
2256  * alloc_pages - Allocate pages.
2257  * @gfp: GFP flags.
2258  * @order: Power of two of number of pages to allocate.
2259  *
2260  * Allocate 1 << @order contiguous pages.  The physical address of the
2261  * first page is naturally aligned (eg an order-3 allocation will be aligned
2262  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2263  * process is honoured when in process context.
2264  *
2265  * Context: Can be called from any context, providing the appropriate GFP
2266  * flags are used.
2267  * Return: The page on success or NULL if allocation fails.
2268  */
2269 struct page *alloc_pages(gfp_t gfp, unsigned order)
2270 {
2271 	struct mempolicy *pol = &default_policy;
2272 	struct page *page;
2273 
2274 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2275 		pol = get_task_policy(current);
2276 
2277 	/*
2278 	 * No reference counting needed for current->mempolicy
2279 	 * nor system default_policy
2280 	 */
2281 	if (pol->mode == MPOL_INTERLEAVE)
2282 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2283 	else if (pol->mode == MPOL_PREFERRED_MANY)
2284 		page = alloc_pages_preferred_many(gfp, order,
2285 				  policy_node(gfp, pol, numa_node_id()), pol);
2286 	else
2287 		page = __alloc_pages(gfp, order,
2288 				policy_node(gfp, pol, numa_node_id()),
2289 				policy_nodemask(gfp, pol));
2290 
2291 	return page;
2292 }
2293 EXPORT_SYMBOL(alloc_pages);
2294 
2295 struct folio *folio_alloc(gfp_t gfp, unsigned order)
2296 {
2297 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2298 
2299 	if (page && order > 1)
2300 		prep_transhuge_page(page);
2301 	return (struct folio *)page;
2302 }
2303 EXPORT_SYMBOL(folio_alloc);
2304 
2305 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2306 		struct mempolicy *pol, unsigned long nr_pages,
2307 		struct page **page_array)
2308 {
2309 	int nodes;
2310 	unsigned long nr_pages_per_node;
2311 	int delta;
2312 	int i;
2313 	unsigned long nr_allocated;
2314 	unsigned long total_allocated = 0;
2315 
2316 	nodes = nodes_weight(pol->nodes);
2317 	nr_pages_per_node = nr_pages / nodes;
2318 	delta = nr_pages - nodes * nr_pages_per_node;
2319 
2320 	for (i = 0; i < nodes; i++) {
2321 		if (delta) {
2322 			nr_allocated = __alloc_pages_bulk(gfp,
2323 					interleave_nodes(pol), NULL,
2324 					nr_pages_per_node + 1, NULL,
2325 					page_array);
2326 			delta--;
2327 		} else {
2328 			nr_allocated = __alloc_pages_bulk(gfp,
2329 					interleave_nodes(pol), NULL,
2330 					nr_pages_per_node, NULL, page_array);
2331 		}
2332 
2333 		page_array += nr_allocated;
2334 		total_allocated += nr_allocated;
2335 	}
2336 
2337 	return total_allocated;
2338 }
2339 
2340 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2341 		struct mempolicy *pol, unsigned long nr_pages,
2342 		struct page **page_array)
2343 {
2344 	gfp_t preferred_gfp;
2345 	unsigned long nr_allocated = 0;
2346 
2347 	preferred_gfp = gfp | __GFP_NOWARN;
2348 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2349 
2350 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2351 					   nr_pages, NULL, page_array);
2352 
2353 	if (nr_allocated < nr_pages)
2354 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2355 				nr_pages - nr_allocated, NULL,
2356 				page_array + nr_allocated);
2357 	return nr_allocated;
2358 }
2359 
2360 /* alloc pages bulk and mempolicy should be considered at the
2361  * same time in some situation such as vmalloc.
2362  *
2363  * It can accelerate memory allocation especially interleaving
2364  * allocate memory.
2365  */
2366 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2367 		unsigned long nr_pages, struct page **page_array)
2368 {
2369 	struct mempolicy *pol = &default_policy;
2370 
2371 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2372 		pol = get_task_policy(current);
2373 
2374 	if (pol->mode == MPOL_INTERLEAVE)
2375 		return alloc_pages_bulk_array_interleave(gfp, pol,
2376 							 nr_pages, page_array);
2377 
2378 	if (pol->mode == MPOL_PREFERRED_MANY)
2379 		return alloc_pages_bulk_array_preferred_many(gfp,
2380 				numa_node_id(), pol, nr_pages, page_array);
2381 
2382 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2383 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2384 				  page_array);
2385 }
2386 
2387 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2388 {
2389 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2390 
2391 	if (IS_ERR(pol))
2392 		return PTR_ERR(pol);
2393 	dst->vm_policy = pol;
2394 	return 0;
2395 }
2396 
2397 /*
2398  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2399  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2400  * with the mems_allowed returned by cpuset_mems_allowed().  This
2401  * keeps mempolicies cpuset relative after its cpuset moves.  See
2402  * further kernel/cpuset.c update_nodemask().
2403  *
2404  * current's mempolicy may be rebinded by the other task(the task that changes
2405  * cpuset's mems), so we needn't do rebind work for current task.
2406  */
2407 
2408 /* Slow path of a mempolicy duplicate */
2409 struct mempolicy *__mpol_dup(struct mempolicy *old)
2410 {
2411 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2412 
2413 	if (!new)
2414 		return ERR_PTR(-ENOMEM);
2415 
2416 	/* task's mempolicy is protected by alloc_lock */
2417 	if (old == current->mempolicy) {
2418 		task_lock(current);
2419 		*new = *old;
2420 		task_unlock(current);
2421 	} else
2422 		*new = *old;
2423 
2424 	if (current_cpuset_is_being_rebound()) {
2425 		nodemask_t mems = cpuset_mems_allowed(current);
2426 		mpol_rebind_policy(new, &mems);
2427 	}
2428 	atomic_set(&new->refcnt, 1);
2429 	return new;
2430 }
2431 
2432 /* Slow path of a mempolicy comparison */
2433 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2434 {
2435 	if (!a || !b)
2436 		return false;
2437 	if (a->mode != b->mode)
2438 		return false;
2439 	if (a->flags != b->flags)
2440 		return false;
2441 	if (a->home_node != b->home_node)
2442 		return false;
2443 	if (mpol_store_user_nodemask(a))
2444 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2445 			return false;
2446 
2447 	switch (a->mode) {
2448 	case MPOL_BIND:
2449 	case MPOL_INTERLEAVE:
2450 	case MPOL_PREFERRED:
2451 	case MPOL_PREFERRED_MANY:
2452 		return !!nodes_equal(a->nodes, b->nodes);
2453 	case MPOL_LOCAL:
2454 		return true;
2455 	default:
2456 		BUG();
2457 		return false;
2458 	}
2459 }
2460 
2461 /*
2462  * Shared memory backing store policy support.
2463  *
2464  * Remember policies even when nobody has shared memory mapped.
2465  * The policies are kept in Red-Black tree linked from the inode.
2466  * They are protected by the sp->lock rwlock, which should be held
2467  * for any accesses to the tree.
2468  */
2469 
2470 /*
2471  * lookup first element intersecting start-end.  Caller holds sp->lock for
2472  * reading or for writing
2473  */
2474 static struct sp_node *
2475 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2476 {
2477 	struct rb_node *n = sp->root.rb_node;
2478 
2479 	while (n) {
2480 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2481 
2482 		if (start >= p->end)
2483 			n = n->rb_right;
2484 		else if (end <= p->start)
2485 			n = n->rb_left;
2486 		else
2487 			break;
2488 	}
2489 	if (!n)
2490 		return NULL;
2491 	for (;;) {
2492 		struct sp_node *w = NULL;
2493 		struct rb_node *prev = rb_prev(n);
2494 		if (!prev)
2495 			break;
2496 		w = rb_entry(prev, struct sp_node, nd);
2497 		if (w->end <= start)
2498 			break;
2499 		n = prev;
2500 	}
2501 	return rb_entry(n, struct sp_node, nd);
2502 }
2503 
2504 /*
2505  * Insert a new shared policy into the list.  Caller holds sp->lock for
2506  * writing.
2507  */
2508 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2509 {
2510 	struct rb_node **p = &sp->root.rb_node;
2511 	struct rb_node *parent = NULL;
2512 	struct sp_node *nd;
2513 
2514 	while (*p) {
2515 		parent = *p;
2516 		nd = rb_entry(parent, struct sp_node, nd);
2517 		if (new->start < nd->start)
2518 			p = &(*p)->rb_left;
2519 		else if (new->end > nd->end)
2520 			p = &(*p)->rb_right;
2521 		else
2522 			BUG();
2523 	}
2524 	rb_link_node(&new->nd, parent, p);
2525 	rb_insert_color(&new->nd, &sp->root);
2526 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2527 		 new->policy ? new->policy->mode : 0);
2528 }
2529 
2530 /* Find shared policy intersecting idx */
2531 struct mempolicy *
2532 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2533 {
2534 	struct mempolicy *pol = NULL;
2535 	struct sp_node *sn;
2536 
2537 	if (!sp->root.rb_node)
2538 		return NULL;
2539 	read_lock(&sp->lock);
2540 	sn = sp_lookup(sp, idx, idx+1);
2541 	if (sn) {
2542 		mpol_get(sn->policy);
2543 		pol = sn->policy;
2544 	}
2545 	read_unlock(&sp->lock);
2546 	return pol;
2547 }
2548 
2549 static void sp_free(struct sp_node *n)
2550 {
2551 	mpol_put(n->policy);
2552 	kmem_cache_free(sn_cache, n);
2553 }
2554 
2555 /**
2556  * mpol_misplaced - check whether current page node is valid in policy
2557  *
2558  * @page: page to be checked
2559  * @vma: vm area where page mapped
2560  * @addr: virtual address where page mapped
2561  *
2562  * Lookup current policy node id for vma,addr and "compare to" page's
2563  * node id.  Policy determination "mimics" alloc_page_vma().
2564  * Called from fault path where we know the vma and faulting address.
2565  *
2566  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2567  * policy, or a suitable node ID to allocate a replacement page from.
2568  */
2569 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2570 {
2571 	struct mempolicy *pol;
2572 	struct zoneref *z;
2573 	int curnid = page_to_nid(page);
2574 	unsigned long pgoff;
2575 	int thiscpu = raw_smp_processor_id();
2576 	int thisnid = cpu_to_node(thiscpu);
2577 	int polnid = NUMA_NO_NODE;
2578 	int ret = NUMA_NO_NODE;
2579 
2580 	pol = get_vma_policy(vma, addr);
2581 	if (!(pol->flags & MPOL_F_MOF))
2582 		goto out;
2583 
2584 	switch (pol->mode) {
2585 	case MPOL_INTERLEAVE:
2586 		pgoff = vma->vm_pgoff;
2587 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2588 		polnid = offset_il_node(pol, pgoff);
2589 		break;
2590 
2591 	case MPOL_PREFERRED:
2592 		if (node_isset(curnid, pol->nodes))
2593 			goto out;
2594 		polnid = first_node(pol->nodes);
2595 		break;
2596 
2597 	case MPOL_LOCAL:
2598 		polnid = numa_node_id();
2599 		break;
2600 
2601 	case MPOL_BIND:
2602 		/* Optimize placement among multiple nodes via NUMA balancing */
2603 		if (pol->flags & MPOL_F_MORON) {
2604 			if (node_isset(thisnid, pol->nodes))
2605 				break;
2606 			goto out;
2607 		}
2608 		fallthrough;
2609 
2610 	case MPOL_PREFERRED_MANY:
2611 		/*
2612 		 * use current page if in policy nodemask,
2613 		 * else select nearest allowed node, if any.
2614 		 * If no allowed nodes, use current [!misplaced].
2615 		 */
2616 		if (node_isset(curnid, pol->nodes))
2617 			goto out;
2618 		z = first_zones_zonelist(
2619 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2620 				gfp_zone(GFP_HIGHUSER),
2621 				&pol->nodes);
2622 		polnid = zone_to_nid(z->zone);
2623 		break;
2624 
2625 	default:
2626 		BUG();
2627 	}
2628 
2629 	/* Migrate the page towards the node whose CPU is referencing it */
2630 	if (pol->flags & MPOL_F_MORON) {
2631 		polnid = thisnid;
2632 
2633 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2634 			goto out;
2635 	}
2636 
2637 	if (curnid != polnid)
2638 		ret = polnid;
2639 out:
2640 	mpol_cond_put(pol);
2641 
2642 	return ret;
2643 }
2644 
2645 /*
2646  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2647  * dropped after task->mempolicy is set to NULL so that any allocation done as
2648  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2649  * policy.
2650  */
2651 void mpol_put_task_policy(struct task_struct *task)
2652 {
2653 	struct mempolicy *pol;
2654 
2655 	task_lock(task);
2656 	pol = task->mempolicy;
2657 	task->mempolicy = NULL;
2658 	task_unlock(task);
2659 	mpol_put(pol);
2660 }
2661 
2662 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2663 {
2664 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2665 	rb_erase(&n->nd, &sp->root);
2666 	sp_free(n);
2667 }
2668 
2669 static void sp_node_init(struct sp_node *node, unsigned long start,
2670 			unsigned long end, struct mempolicy *pol)
2671 {
2672 	node->start = start;
2673 	node->end = end;
2674 	node->policy = pol;
2675 }
2676 
2677 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2678 				struct mempolicy *pol)
2679 {
2680 	struct sp_node *n;
2681 	struct mempolicy *newpol;
2682 
2683 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2684 	if (!n)
2685 		return NULL;
2686 
2687 	newpol = mpol_dup(pol);
2688 	if (IS_ERR(newpol)) {
2689 		kmem_cache_free(sn_cache, n);
2690 		return NULL;
2691 	}
2692 	newpol->flags |= MPOL_F_SHARED;
2693 	sp_node_init(n, start, end, newpol);
2694 
2695 	return n;
2696 }
2697 
2698 /* Replace a policy range. */
2699 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2700 				 unsigned long end, struct sp_node *new)
2701 {
2702 	struct sp_node *n;
2703 	struct sp_node *n_new = NULL;
2704 	struct mempolicy *mpol_new = NULL;
2705 	int ret = 0;
2706 
2707 restart:
2708 	write_lock(&sp->lock);
2709 	n = sp_lookup(sp, start, end);
2710 	/* Take care of old policies in the same range. */
2711 	while (n && n->start < end) {
2712 		struct rb_node *next = rb_next(&n->nd);
2713 		if (n->start >= start) {
2714 			if (n->end <= end)
2715 				sp_delete(sp, n);
2716 			else
2717 				n->start = end;
2718 		} else {
2719 			/* Old policy spanning whole new range. */
2720 			if (n->end > end) {
2721 				if (!n_new)
2722 					goto alloc_new;
2723 
2724 				*mpol_new = *n->policy;
2725 				atomic_set(&mpol_new->refcnt, 1);
2726 				sp_node_init(n_new, end, n->end, mpol_new);
2727 				n->end = start;
2728 				sp_insert(sp, n_new);
2729 				n_new = NULL;
2730 				mpol_new = NULL;
2731 				break;
2732 			} else
2733 				n->end = start;
2734 		}
2735 		if (!next)
2736 			break;
2737 		n = rb_entry(next, struct sp_node, nd);
2738 	}
2739 	if (new)
2740 		sp_insert(sp, new);
2741 	write_unlock(&sp->lock);
2742 	ret = 0;
2743 
2744 err_out:
2745 	if (mpol_new)
2746 		mpol_put(mpol_new);
2747 	if (n_new)
2748 		kmem_cache_free(sn_cache, n_new);
2749 
2750 	return ret;
2751 
2752 alloc_new:
2753 	write_unlock(&sp->lock);
2754 	ret = -ENOMEM;
2755 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2756 	if (!n_new)
2757 		goto err_out;
2758 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2759 	if (!mpol_new)
2760 		goto err_out;
2761 	atomic_set(&mpol_new->refcnt, 1);
2762 	goto restart;
2763 }
2764 
2765 /**
2766  * mpol_shared_policy_init - initialize shared policy for inode
2767  * @sp: pointer to inode shared policy
2768  * @mpol:  struct mempolicy to install
2769  *
2770  * Install non-NULL @mpol in inode's shared policy rb-tree.
2771  * On entry, the current task has a reference on a non-NULL @mpol.
2772  * This must be released on exit.
2773  * This is called at get_inode() calls and we can use GFP_KERNEL.
2774  */
2775 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2776 {
2777 	int ret;
2778 
2779 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2780 	rwlock_init(&sp->lock);
2781 
2782 	if (mpol) {
2783 		struct vm_area_struct pvma;
2784 		struct mempolicy *new;
2785 		NODEMASK_SCRATCH(scratch);
2786 
2787 		if (!scratch)
2788 			goto put_mpol;
2789 		/* contextualize the tmpfs mount point mempolicy */
2790 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2791 		if (IS_ERR(new))
2792 			goto free_scratch; /* no valid nodemask intersection */
2793 
2794 		task_lock(current);
2795 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2796 		task_unlock(current);
2797 		if (ret)
2798 			goto put_new;
2799 
2800 		/* Create pseudo-vma that contains just the policy */
2801 		vma_init(&pvma, NULL);
2802 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2803 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2804 
2805 put_new:
2806 		mpol_put(new);			/* drop initial ref */
2807 free_scratch:
2808 		NODEMASK_SCRATCH_FREE(scratch);
2809 put_mpol:
2810 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2811 	}
2812 }
2813 
2814 int mpol_set_shared_policy(struct shared_policy *info,
2815 			struct vm_area_struct *vma, struct mempolicy *npol)
2816 {
2817 	int err;
2818 	struct sp_node *new = NULL;
2819 	unsigned long sz = vma_pages(vma);
2820 
2821 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2822 		 vma->vm_pgoff,
2823 		 sz, npol ? npol->mode : -1,
2824 		 npol ? npol->flags : -1,
2825 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2826 
2827 	if (npol) {
2828 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2829 		if (!new)
2830 			return -ENOMEM;
2831 	}
2832 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2833 	if (err && new)
2834 		sp_free(new);
2835 	return err;
2836 }
2837 
2838 /* Free a backing policy store on inode delete. */
2839 void mpol_free_shared_policy(struct shared_policy *p)
2840 {
2841 	struct sp_node *n;
2842 	struct rb_node *next;
2843 
2844 	if (!p->root.rb_node)
2845 		return;
2846 	write_lock(&p->lock);
2847 	next = rb_first(&p->root);
2848 	while (next) {
2849 		n = rb_entry(next, struct sp_node, nd);
2850 		next = rb_next(&n->nd);
2851 		sp_delete(p, n);
2852 	}
2853 	write_unlock(&p->lock);
2854 }
2855 
2856 #ifdef CONFIG_NUMA_BALANCING
2857 static int __initdata numabalancing_override;
2858 
2859 static void __init check_numabalancing_enable(void)
2860 {
2861 	bool numabalancing_default = false;
2862 
2863 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2864 		numabalancing_default = true;
2865 
2866 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2867 	if (numabalancing_override)
2868 		set_numabalancing_state(numabalancing_override == 1);
2869 
2870 	if (num_online_nodes() > 1 && !numabalancing_override) {
2871 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2872 			numabalancing_default ? "Enabling" : "Disabling");
2873 		set_numabalancing_state(numabalancing_default);
2874 	}
2875 }
2876 
2877 static int __init setup_numabalancing(char *str)
2878 {
2879 	int ret = 0;
2880 	if (!str)
2881 		goto out;
2882 
2883 	if (!strcmp(str, "enable")) {
2884 		numabalancing_override = 1;
2885 		ret = 1;
2886 	} else if (!strcmp(str, "disable")) {
2887 		numabalancing_override = -1;
2888 		ret = 1;
2889 	}
2890 out:
2891 	if (!ret)
2892 		pr_warn("Unable to parse numa_balancing=\n");
2893 
2894 	return ret;
2895 }
2896 __setup("numa_balancing=", setup_numabalancing);
2897 #else
2898 static inline void __init check_numabalancing_enable(void)
2899 {
2900 }
2901 #endif /* CONFIG_NUMA_BALANCING */
2902 
2903 /* assumes fs == KERNEL_DS */
2904 void __init numa_policy_init(void)
2905 {
2906 	nodemask_t interleave_nodes;
2907 	unsigned long largest = 0;
2908 	int nid, prefer = 0;
2909 
2910 	policy_cache = kmem_cache_create("numa_policy",
2911 					 sizeof(struct mempolicy),
2912 					 0, SLAB_PANIC, NULL);
2913 
2914 	sn_cache = kmem_cache_create("shared_policy_node",
2915 				     sizeof(struct sp_node),
2916 				     0, SLAB_PANIC, NULL);
2917 
2918 	for_each_node(nid) {
2919 		preferred_node_policy[nid] = (struct mempolicy) {
2920 			.refcnt = ATOMIC_INIT(1),
2921 			.mode = MPOL_PREFERRED,
2922 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2923 			.nodes = nodemask_of_node(nid),
2924 		};
2925 	}
2926 
2927 	/*
2928 	 * Set interleaving policy for system init. Interleaving is only
2929 	 * enabled across suitably sized nodes (default is >= 16MB), or
2930 	 * fall back to the largest node if they're all smaller.
2931 	 */
2932 	nodes_clear(interleave_nodes);
2933 	for_each_node_state(nid, N_MEMORY) {
2934 		unsigned long total_pages = node_present_pages(nid);
2935 
2936 		/* Preserve the largest node */
2937 		if (largest < total_pages) {
2938 			largest = total_pages;
2939 			prefer = nid;
2940 		}
2941 
2942 		/* Interleave this node? */
2943 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2944 			node_set(nid, interleave_nodes);
2945 	}
2946 
2947 	/* All too small, use the largest */
2948 	if (unlikely(nodes_empty(interleave_nodes)))
2949 		node_set(prefer, interleave_nodes);
2950 
2951 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2952 		pr_err("%s: interleaving failed\n", __func__);
2953 
2954 	check_numabalancing_enable();
2955 }
2956 
2957 /* Reset policy of current process to default */
2958 void numa_default_policy(void)
2959 {
2960 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2961 }
2962 
2963 /*
2964  * Parse and format mempolicy from/to strings
2965  */
2966 
2967 static const char * const policy_modes[] =
2968 {
2969 	[MPOL_DEFAULT]    = "default",
2970 	[MPOL_PREFERRED]  = "prefer",
2971 	[MPOL_BIND]       = "bind",
2972 	[MPOL_INTERLEAVE] = "interleave",
2973 	[MPOL_LOCAL]      = "local",
2974 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2975 };
2976 
2977 
2978 #ifdef CONFIG_TMPFS
2979 /**
2980  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2981  * @str:  string containing mempolicy to parse
2982  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2983  *
2984  * Format of input:
2985  *	<mode>[=<flags>][:<nodelist>]
2986  *
2987  * Return: %0 on success, else %1
2988  */
2989 int mpol_parse_str(char *str, struct mempolicy **mpol)
2990 {
2991 	struct mempolicy *new = NULL;
2992 	unsigned short mode_flags;
2993 	nodemask_t nodes;
2994 	char *nodelist = strchr(str, ':');
2995 	char *flags = strchr(str, '=');
2996 	int err = 1, mode;
2997 
2998 	if (flags)
2999 		*flags++ = '\0';	/* terminate mode string */
3000 
3001 	if (nodelist) {
3002 		/* NUL-terminate mode or flags string */
3003 		*nodelist++ = '\0';
3004 		if (nodelist_parse(nodelist, nodes))
3005 			goto out;
3006 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3007 			goto out;
3008 	} else
3009 		nodes_clear(nodes);
3010 
3011 	mode = match_string(policy_modes, MPOL_MAX, str);
3012 	if (mode < 0)
3013 		goto out;
3014 
3015 	switch (mode) {
3016 	case MPOL_PREFERRED:
3017 		/*
3018 		 * Insist on a nodelist of one node only, although later
3019 		 * we use first_node(nodes) to grab a single node, so here
3020 		 * nodelist (or nodes) cannot be empty.
3021 		 */
3022 		if (nodelist) {
3023 			char *rest = nodelist;
3024 			while (isdigit(*rest))
3025 				rest++;
3026 			if (*rest)
3027 				goto out;
3028 			if (nodes_empty(nodes))
3029 				goto out;
3030 		}
3031 		break;
3032 	case MPOL_INTERLEAVE:
3033 		/*
3034 		 * Default to online nodes with memory if no nodelist
3035 		 */
3036 		if (!nodelist)
3037 			nodes = node_states[N_MEMORY];
3038 		break;
3039 	case MPOL_LOCAL:
3040 		/*
3041 		 * Don't allow a nodelist;  mpol_new() checks flags
3042 		 */
3043 		if (nodelist)
3044 			goto out;
3045 		break;
3046 	case MPOL_DEFAULT:
3047 		/*
3048 		 * Insist on a empty nodelist
3049 		 */
3050 		if (!nodelist)
3051 			err = 0;
3052 		goto out;
3053 	case MPOL_PREFERRED_MANY:
3054 	case MPOL_BIND:
3055 		/*
3056 		 * Insist on a nodelist
3057 		 */
3058 		if (!nodelist)
3059 			goto out;
3060 	}
3061 
3062 	mode_flags = 0;
3063 	if (flags) {
3064 		/*
3065 		 * Currently, we only support two mutually exclusive
3066 		 * mode flags.
3067 		 */
3068 		if (!strcmp(flags, "static"))
3069 			mode_flags |= MPOL_F_STATIC_NODES;
3070 		else if (!strcmp(flags, "relative"))
3071 			mode_flags |= MPOL_F_RELATIVE_NODES;
3072 		else
3073 			goto out;
3074 	}
3075 
3076 	new = mpol_new(mode, mode_flags, &nodes);
3077 	if (IS_ERR(new))
3078 		goto out;
3079 
3080 	/*
3081 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3082 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3083 	 */
3084 	if (mode != MPOL_PREFERRED) {
3085 		new->nodes = nodes;
3086 	} else if (nodelist) {
3087 		nodes_clear(new->nodes);
3088 		node_set(first_node(nodes), new->nodes);
3089 	} else {
3090 		new->mode = MPOL_LOCAL;
3091 	}
3092 
3093 	/*
3094 	 * Save nodes for contextualization: this will be used to "clone"
3095 	 * the mempolicy in a specific context [cpuset] at a later time.
3096 	 */
3097 	new->w.user_nodemask = nodes;
3098 
3099 	err = 0;
3100 
3101 out:
3102 	/* Restore string for error message */
3103 	if (nodelist)
3104 		*--nodelist = ':';
3105 	if (flags)
3106 		*--flags = '=';
3107 	if (!err)
3108 		*mpol = new;
3109 	return err;
3110 }
3111 #endif /* CONFIG_TMPFS */
3112 
3113 /**
3114  * mpol_to_str - format a mempolicy structure for printing
3115  * @buffer:  to contain formatted mempolicy string
3116  * @maxlen:  length of @buffer
3117  * @pol:  pointer to mempolicy to be formatted
3118  *
3119  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3120  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3121  * longest flag, "relative", and to display at least a few node ids.
3122  */
3123 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3124 {
3125 	char *p = buffer;
3126 	nodemask_t nodes = NODE_MASK_NONE;
3127 	unsigned short mode = MPOL_DEFAULT;
3128 	unsigned short flags = 0;
3129 
3130 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3131 		mode = pol->mode;
3132 		flags = pol->flags;
3133 	}
3134 
3135 	switch (mode) {
3136 	case MPOL_DEFAULT:
3137 	case MPOL_LOCAL:
3138 		break;
3139 	case MPOL_PREFERRED:
3140 	case MPOL_PREFERRED_MANY:
3141 	case MPOL_BIND:
3142 	case MPOL_INTERLEAVE:
3143 		nodes = pol->nodes;
3144 		break;
3145 	default:
3146 		WARN_ON_ONCE(1);
3147 		snprintf(p, maxlen, "unknown");
3148 		return;
3149 	}
3150 
3151 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3152 
3153 	if (flags & MPOL_MODE_FLAGS) {
3154 		p += snprintf(p, buffer + maxlen - p, "=");
3155 
3156 		/*
3157 		 * Currently, the only defined flags are mutually exclusive
3158 		 */
3159 		if (flags & MPOL_F_STATIC_NODES)
3160 			p += snprintf(p, buffer + maxlen - p, "static");
3161 		else if (flags & MPOL_F_RELATIVE_NODES)
3162 			p += snprintf(p, buffer + maxlen - p, "relative");
3163 	}
3164 
3165 	if (!nodes_empty(nodes))
3166 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3167 			       nodemask_pr_args(&nodes));
3168 }
3169