xref: /linux/mm/khugepaged.c (revision 52338415)
1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/mm.h>
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
21 
22 #include <asm/tlb.h>
23 #include <asm/pgalloc.h>
24 #include "internal.h"
25 
26 enum scan_result {
27 	SCAN_FAIL,
28 	SCAN_SUCCEED,
29 	SCAN_PMD_NULL,
30 	SCAN_EXCEED_NONE_PTE,
31 	SCAN_PTE_NON_PRESENT,
32 	SCAN_PAGE_RO,
33 	SCAN_LACK_REFERENCED_PAGE,
34 	SCAN_PAGE_NULL,
35 	SCAN_SCAN_ABORT,
36 	SCAN_PAGE_COUNT,
37 	SCAN_PAGE_LRU,
38 	SCAN_PAGE_LOCK,
39 	SCAN_PAGE_ANON,
40 	SCAN_PAGE_COMPOUND,
41 	SCAN_ANY_PROCESS,
42 	SCAN_VMA_NULL,
43 	SCAN_VMA_CHECK,
44 	SCAN_ADDRESS_RANGE,
45 	SCAN_SWAP_CACHE_PAGE,
46 	SCAN_DEL_PAGE_LRU,
47 	SCAN_ALLOC_HUGE_PAGE_FAIL,
48 	SCAN_CGROUP_CHARGE_FAIL,
49 	SCAN_EXCEED_SWAP_PTE,
50 	SCAN_TRUNCATED,
51 	SCAN_PAGE_HAS_PRIVATE,
52 };
53 
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/huge_memory.h>
56 
57 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
58 static unsigned int khugepaged_pages_to_scan __read_mostly;
59 static unsigned int khugepaged_pages_collapsed;
60 static unsigned int khugepaged_full_scans;
61 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
62 /* during fragmentation poll the hugepage allocator once every minute */
63 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
64 static unsigned long khugepaged_sleep_expire;
65 static DEFINE_SPINLOCK(khugepaged_mm_lock);
66 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
67 /*
68  * By default, collapse hugepages if there is at least one pte mapped,
69  * just as would have happened had the vma been large enough at
70  * page-fault time.
71  */
72 static unsigned int khugepaged_max_ptes_none __read_mostly;
73 static unsigned int khugepaged_max_ptes_swap __read_mostly;
74 
75 #define MM_SLOTS_HASH_BITS 10
76 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
77 
78 static struct kmem_cache *mm_slot_cache __read_mostly;
79 
80 #define MAX_PTE_MAPPED_THP 8
81 
82 /**
83  * struct mm_slot - hash lookup from mm to mm_slot
84  * @hash: hash collision list
85  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
86  * @mm: the mm that this information is valid for
87  */
88 struct mm_slot {
89 	struct hlist_node hash;
90 	struct list_head mm_node;
91 	struct mm_struct *mm;
92 
93 	/* pte-mapped THP in this mm */
94 	int nr_pte_mapped_thp;
95 	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
96 };
97 
98 /**
99  * struct khugepaged_scan - cursor for scanning
100  * @mm_head: the head of the mm list to scan
101  * @mm_slot: the current mm_slot we are scanning
102  * @address: the next address inside that mm to be scanned
103  *
104  * There is only one khugepaged_scan instance of this cursor structure.
105  */
106 struct khugepaged_scan {
107 	struct list_head mm_head;
108 	struct mm_slot *mm_slot;
109 	unsigned long address;
110 };
111 
112 static struct khugepaged_scan khugepaged_scan = {
113 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
114 };
115 
116 #ifdef CONFIG_SYSFS
117 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
118 					 struct kobj_attribute *attr,
119 					 char *buf)
120 {
121 	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
122 }
123 
124 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
125 					  struct kobj_attribute *attr,
126 					  const char *buf, size_t count)
127 {
128 	unsigned long msecs;
129 	int err;
130 
131 	err = kstrtoul(buf, 10, &msecs);
132 	if (err || msecs > UINT_MAX)
133 		return -EINVAL;
134 
135 	khugepaged_scan_sleep_millisecs = msecs;
136 	khugepaged_sleep_expire = 0;
137 	wake_up_interruptible(&khugepaged_wait);
138 
139 	return count;
140 }
141 static struct kobj_attribute scan_sleep_millisecs_attr =
142 	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
143 	       scan_sleep_millisecs_store);
144 
145 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
146 					  struct kobj_attribute *attr,
147 					  char *buf)
148 {
149 	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
150 }
151 
152 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
153 					   struct kobj_attribute *attr,
154 					   const char *buf, size_t count)
155 {
156 	unsigned long msecs;
157 	int err;
158 
159 	err = kstrtoul(buf, 10, &msecs);
160 	if (err || msecs > UINT_MAX)
161 		return -EINVAL;
162 
163 	khugepaged_alloc_sleep_millisecs = msecs;
164 	khugepaged_sleep_expire = 0;
165 	wake_up_interruptible(&khugepaged_wait);
166 
167 	return count;
168 }
169 static struct kobj_attribute alloc_sleep_millisecs_attr =
170 	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
171 	       alloc_sleep_millisecs_store);
172 
173 static ssize_t pages_to_scan_show(struct kobject *kobj,
174 				  struct kobj_attribute *attr,
175 				  char *buf)
176 {
177 	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
178 }
179 static ssize_t pages_to_scan_store(struct kobject *kobj,
180 				   struct kobj_attribute *attr,
181 				   const char *buf, size_t count)
182 {
183 	int err;
184 	unsigned long pages;
185 
186 	err = kstrtoul(buf, 10, &pages);
187 	if (err || !pages || pages > UINT_MAX)
188 		return -EINVAL;
189 
190 	khugepaged_pages_to_scan = pages;
191 
192 	return count;
193 }
194 static struct kobj_attribute pages_to_scan_attr =
195 	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
196 	       pages_to_scan_store);
197 
198 static ssize_t pages_collapsed_show(struct kobject *kobj,
199 				    struct kobj_attribute *attr,
200 				    char *buf)
201 {
202 	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
203 }
204 static struct kobj_attribute pages_collapsed_attr =
205 	__ATTR_RO(pages_collapsed);
206 
207 static ssize_t full_scans_show(struct kobject *kobj,
208 			       struct kobj_attribute *attr,
209 			       char *buf)
210 {
211 	return sprintf(buf, "%u\n", khugepaged_full_scans);
212 }
213 static struct kobj_attribute full_scans_attr =
214 	__ATTR_RO(full_scans);
215 
216 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
217 				      struct kobj_attribute *attr, char *buf)
218 {
219 	return single_hugepage_flag_show(kobj, attr, buf,
220 				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
221 }
222 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
223 				       struct kobj_attribute *attr,
224 				       const char *buf, size_t count)
225 {
226 	return single_hugepage_flag_store(kobj, attr, buf, count,
227 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
228 }
229 static struct kobj_attribute khugepaged_defrag_attr =
230 	__ATTR(defrag, 0644, khugepaged_defrag_show,
231 	       khugepaged_defrag_store);
232 
233 /*
234  * max_ptes_none controls whether khugepaged should collapse hugepages
235  * over unmapped ptes, in turn potentially increasing the memory
236  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
237  * reduce the available free memory in the system as it
238  * runs. Increasing max_ptes_none will instead potentially reduce the
239  * free memory in the system during the khugepaged scan.
240  */
241 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
242 					     struct kobj_attribute *attr,
243 					     char *buf)
244 {
245 	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
246 }
247 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
248 					      struct kobj_attribute *attr,
249 					      const char *buf, size_t count)
250 {
251 	int err;
252 	unsigned long max_ptes_none;
253 
254 	err = kstrtoul(buf, 10, &max_ptes_none);
255 	if (err || max_ptes_none > HPAGE_PMD_NR-1)
256 		return -EINVAL;
257 
258 	khugepaged_max_ptes_none = max_ptes_none;
259 
260 	return count;
261 }
262 static struct kobj_attribute khugepaged_max_ptes_none_attr =
263 	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
264 	       khugepaged_max_ptes_none_store);
265 
266 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
267 					     struct kobj_attribute *attr,
268 					     char *buf)
269 {
270 	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
271 }
272 
273 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
274 					      struct kobj_attribute *attr,
275 					      const char *buf, size_t count)
276 {
277 	int err;
278 	unsigned long max_ptes_swap;
279 
280 	err  = kstrtoul(buf, 10, &max_ptes_swap);
281 	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
282 		return -EINVAL;
283 
284 	khugepaged_max_ptes_swap = max_ptes_swap;
285 
286 	return count;
287 }
288 
289 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
290 	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
291 	       khugepaged_max_ptes_swap_store);
292 
293 static struct attribute *khugepaged_attr[] = {
294 	&khugepaged_defrag_attr.attr,
295 	&khugepaged_max_ptes_none_attr.attr,
296 	&pages_to_scan_attr.attr,
297 	&pages_collapsed_attr.attr,
298 	&full_scans_attr.attr,
299 	&scan_sleep_millisecs_attr.attr,
300 	&alloc_sleep_millisecs_attr.attr,
301 	&khugepaged_max_ptes_swap_attr.attr,
302 	NULL,
303 };
304 
305 struct attribute_group khugepaged_attr_group = {
306 	.attrs = khugepaged_attr,
307 	.name = "khugepaged",
308 };
309 #endif /* CONFIG_SYSFS */
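
/*
 * A minimal usage sketch for the sysfs group defined above, assuming the
 * usual sysfs mount point: the group is exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, so the tunables in this
 * file can be read and adjusted from a shell, e.g.:
 *
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 4096  > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 10000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */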
310 
311 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
312 
313 int hugepage_madvise(struct vm_area_struct *vma,
314 		     unsigned long *vm_flags, int advice)
315 {
316 	switch (advice) {
317 	case MADV_HUGEPAGE:
318 #ifdef CONFIG_S390
319 		/*
320 		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
321 		 * can't handle this properly after s390_enable_sie, so we simply
322 		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
323 		 */
324 		if (mm_has_pgste(vma->vm_mm))
325 			return 0;
326 #endif
327 		*vm_flags &= ~VM_NOHUGEPAGE;
328 		*vm_flags |= VM_HUGEPAGE;
329 		/*
330 		 * If the vma becomes suitable for khugepaged to scan,
331 		 * register it here without waiting for a page fault that
332 		 * may not happen any time soon.
333 		 */
334 		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
335 				khugepaged_enter_vma_merge(vma, *vm_flags))
336 			return -ENOMEM;
337 		break;
338 	case MADV_NOHUGEPAGE:
339 		*vm_flags &= ~VM_HUGEPAGE;
340 		*vm_flags |= VM_NOHUGEPAGE;
341 		/*
342 		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
343 		 * this vma, even if the mm stays registered in khugepaged (it
344 		 * may have been registered before VM_NOHUGEPAGE was set).
345 		 */
346 		break;
347 	}
348 
349 	return 0;
350 }
351 
352 int __init khugepaged_init(void)
353 {
354 	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
355 					  sizeof(struct mm_slot),
356 					  __alignof__(struct mm_slot), 0, NULL);
357 	if (!mm_slot_cache)
358 		return -ENOMEM;
359 
360 	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
361 	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
362 	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
363 
364 	return 0;
365 }
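
/*
 * A worked example of the defaults chosen above, assuming 4KB base pages
 * and 2MB PMD-sized huge pages so that HPAGE_PMD_NR == 512 (the numbers
 * differ on other page-size configurations):
 *
 *   khugepaged_pages_to_scan = 512 * 8 = 4096   ptes scanned per wakeup
 *   khugepaged_max_ptes_none = 512 - 1 = 511    collapse even if only one
 *                                               pte in the range is mapped
 *   khugepaged_max_ptes_swap = 512 / 8 = 64     at most 64 ptes swapped in
 *                                               per collapse attempt
 */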
366 
367 void __init khugepaged_destroy(void)
368 {
369 	kmem_cache_destroy(mm_slot_cache);
370 }
371 
372 static inline struct mm_slot *alloc_mm_slot(void)
373 {
374 	if (!mm_slot_cache)	/* initialization failed */
375 		return NULL;
376 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
377 }
378 
379 static inline void free_mm_slot(struct mm_slot *mm_slot)
380 {
381 	kmem_cache_free(mm_slot_cache, mm_slot);
382 }
383 
384 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
385 {
386 	struct mm_slot *mm_slot;
387 
388 	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
389 		if (mm == mm_slot->mm)
390 			return mm_slot;
391 
392 	return NULL;
393 }
394 
395 static void insert_to_mm_slots_hash(struct mm_struct *mm,
396 				    struct mm_slot *mm_slot)
397 {
398 	mm_slot->mm = mm;
399 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
400 }
401 
402 static inline int khugepaged_test_exit(struct mm_struct *mm)
403 {
404 	return atomic_read(&mm->mm_users) == 0;
405 }
406 
407 static bool hugepage_vma_check(struct vm_area_struct *vma,
408 			       unsigned long vm_flags)
409 {
410 	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
411 	    (vm_flags & VM_NOHUGEPAGE) ||
412 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
413 		return false;
414 
415 	if (shmem_file(vma->vm_file) ||
416 	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
417 	     vma->vm_file &&
418 	     (vm_flags & VM_DENYWRITE))) {
419 		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
420 			return false;
421 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
422 				HPAGE_PMD_NR);
423 	}
424 	if (!vma->anon_vma || vma->vm_ops)
425 		return false;
426 	if (is_vma_temporary_stack(vma))
427 		return false;
428 	return !(vm_flags & VM_NO_KHUGEPAGED);
429 }
430 
431 int __khugepaged_enter(struct mm_struct *mm)
432 {
433 	struct mm_slot *mm_slot;
434 	int wakeup;
435 
436 	mm_slot = alloc_mm_slot();
437 	if (!mm_slot)
438 		return -ENOMEM;
439 
440 	/* __khugepaged_exit() must not run from under us */
441 	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
442 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
443 		free_mm_slot(mm_slot);
444 		return 0;
445 	}
446 
447 	spin_lock(&khugepaged_mm_lock);
448 	insert_to_mm_slots_hash(mm, mm_slot);
449 	/*
450 	 * Insert just behind the scanning cursor, to let the area settle
451 	 * down a little.
452 	 */
453 	wakeup = list_empty(&khugepaged_scan.mm_head);
454 	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
455 	spin_unlock(&khugepaged_mm_lock);
456 
457 	mmgrab(mm);
458 	if (wakeup)
459 		wake_up_interruptible(&khugepaged_wait);
460 
461 	return 0;
462 }
463 
464 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
465 			       unsigned long vm_flags)
466 {
467 	unsigned long hstart, hend;
468 
469 	/*
470 	 * For non-shmem files, khugepaged only supports read-only mappings.
471 	 * khugepaged does not yet work on special mappings. And
472 	 * file-private shmem THP is not supported.
473 	 */
474 	if (!hugepage_vma_check(vma, vm_flags))
475 		return 0;
476 
477 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
478 	hend = vma->vm_end & HPAGE_PMD_MASK;
479 	if (hstart < hend)
480 		return khugepaged_enter(vma, vm_flags);
481 	return 0;
482 }
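
/*
 * A small worked example of the hstart/hend rounding above, assuming
 * HPAGE_PMD_SIZE == 2MB (0x200000); the addresses are made up for
 * illustration only:
 *
 *   vm_start = 0x1234000  ->  hstart = round up to 2MB   = 0x1400000
 *   vm_end   = 0x1834000  ->  hend   = round down to 2MB = 0x1800000
 *
 * Since hstart < hend, the vma fully covers at least one PMD-sized range
 * and is worth registering with khugepaged; otherwise it is skipped.
 */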
483 
484 void __khugepaged_exit(struct mm_struct *mm)
485 {
486 	struct mm_slot *mm_slot;
487 	int free = 0;
488 
489 	spin_lock(&khugepaged_mm_lock);
490 	mm_slot = get_mm_slot(mm);
491 	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
492 		hash_del(&mm_slot->hash);
493 		list_del(&mm_slot->mm_node);
494 		free = 1;
495 	}
496 	spin_unlock(&khugepaged_mm_lock);
497 
498 	if (free) {
499 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
500 		free_mm_slot(mm_slot);
501 		mmdrop(mm);
502 	} else if (mm_slot) {
503 		/*
504 		 * This is required to serialize against
505 		 * khugepaged_test_exit() (which is guaranteed to run
506 		 * under mmap_sem read mode). Stop here (after we
507 		 * return, all pagetables will be destroyed) until
508 		 * khugepaged has finished working on the pagetables
509 		 * under the mmap_sem.
510 		 */
511 		down_write(&mm->mmap_sem);
512 		up_write(&mm->mmap_sem);
513 	}
514 }
515 
516 static void release_pte_page(struct page *page)
517 {
518 	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
519 	unlock_page(page);
520 	putback_lru_page(page);
521 }
522 
523 static void release_pte_pages(pte_t *pte, pte_t *_pte)
524 {
525 	while (--_pte >= pte) {
526 		pte_t pteval = *_pte;
527 		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
528 			release_pte_page(pte_page(pteval));
529 	}
530 }
531 
532 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
533 					unsigned long address,
534 					pte_t *pte)
535 {
536 	struct page *page = NULL;
537 	pte_t *_pte;
538 	int none_or_zero = 0, result = 0, referenced = 0;
539 	bool writable = false;
540 
541 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
542 	     _pte++, address += PAGE_SIZE) {
543 		pte_t pteval = *_pte;
544 		if (pte_none(pteval) || (pte_present(pteval) &&
545 				is_zero_pfn(pte_pfn(pteval)))) {
546 			if (!userfaultfd_armed(vma) &&
547 			    ++none_or_zero <= khugepaged_max_ptes_none) {
548 				continue;
549 			} else {
550 				result = SCAN_EXCEED_NONE_PTE;
551 				goto out;
552 			}
553 		}
554 		if (!pte_present(pteval)) {
555 			result = SCAN_PTE_NON_PRESENT;
556 			goto out;
557 		}
558 		page = vm_normal_page(vma, address, pteval);
559 		if (unlikely(!page)) {
560 			result = SCAN_PAGE_NULL;
561 			goto out;
562 		}
563 
564 		/* TODO: teach khugepaged to collapse THP mapped with pte */
565 		if (PageCompound(page)) {
566 			result = SCAN_PAGE_COMPOUND;
567 			goto out;
568 		}
569 
570 		VM_BUG_ON_PAGE(!PageAnon(page), page);
571 
572 		/*
573 		 * We can do it before isolate_lru_page because the
574 		 * page can't be freed from under us. NOTE: PG_lock
575 		 * is needed to serialize against split_huge_page
576 		 * when invoked from the VM.
577 		 */
578 		if (!trylock_page(page)) {
579 			result = SCAN_PAGE_LOCK;
580 			goto out;
581 		}
582 
583 		/*
584 		 * We cannot use mapcount: we can't collapse if there's a gup pin.
585 		 * The page must only be referenced by the scanned process
586 		 * and the swap cache.
587 		 */
588 		if (page_count(page) != 1 + PageSwapCache(page)) {
589 			unlock_page(page);
590 			result = SCAN_PAGE_COUNT;
591 			goto out;
592 		}
593 		if (pte_write(pteval)) {
594 			writable = true;
595 		} else {
596 			if (PageSwapCache(page) &&
597 			    !reuse_swap_page(page, NULL)) {
598 				unlock_page(page);
599 				result = SCAN_SWAP_CACHE_PAGE;
600 				goto out;
601 			}
602 			/*
603 			 * Page is not in the swap cache. It can be collapsed
604 			 * into a THP.
605 			 */
606 		}
607 
608 		/*
609 		 * Isolate the page to avoid collapsing a hugepage
610 		 * currently in use by the VM.
611 		 */
612 		if (isolate_lru_page(page)) {
613 			unlock_page(page);
614 			result = SCAN_DEL_PAGE_LRU;
615 			goto out;
616 		}
617 		inc_node_page_state(page,
618 				NR_ISOLATED_ANON + page_is_file_cache(page));
619 		VM_BUG_ON_PAGE(!PageLocked(page), page);
620 		VM_BUG_ON_PAGE(PageLRU(page), page);
621 
622 		/* There should be enough young ptes to collapse the page */
623 		if (pte_young(pteval) ||
624 		    page_is_young(page) || PageReferenced(page) ||
625 		    mmu_notifier_test_young(vma->vm_mm, address))
626 			referenced++;
627 	}
628 	if (likely(writable)) {
629 		if (likely(referenced)) {
630 			result = SCAN_SUCCEED;
631 			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
632 							    referenced, writable, result);
633 			return 1;
634 		}
635 	} else {
636 		result = SCAN_PAGE_RO;
637 	}
638 
639 out:
640 	release_pte_pages(pte, _pte);
641 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
642 					    referenced, writable, result);
643 	return 0;
644 }
645 
646 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
647 				      struct vm_area_struct *vma,
648 				      unsigned long address,
649 				      spinlock_t *ptl)
650 {
651 	pte_t *_pte;
652 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
653 				_pte++, page++, address += PAGE_SIZE) {
654 		pte_t pteval = *_pte;
655 		struct page *src_page;
656 
657 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
658 			clear_user_highpage(page, address);
659 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
660 			if (is_zero_pfn(pte_pfn(pteval))) {
661 				/*
662 				 * ptl mostly unnecessary.
663 				 */
664 				spin_lock(ptl);
665 				/*
666 				 * paravirt calls inside pte_clear here are
667 				 * superfluous.
668 				 */
669 				pte_clear(vma->vm_mm, address, _pte);
670 				spin_unlock(ptl);
671 			}
672 		} else {
673 			src_page = pte_page(pteval);
674 			copy_user_highpage(page, src_page, address, vma);
675 			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
676 			release_pte_page(src_page);
677 			/*
678 			 * ptl mostly unnecessary, but preempt has to
679 			 * be disabled to update the per-cpu stats
680 			 * inside page_remove_rmap().
681 			 */
682 			spin_lock(ptl);
683 			/*
684 			 * paravirt calls inside pte_clear here are
685 			 * superfluous.
686 			 */
687 			pte_clear(vma->vm_mm, address, _pte);
688 			page_remove_rmap(src_page, false);
689 			spin_unlock(ptl);
690 			free_page_and_swap_cache(src_page);
691 		}
692 	}
693 }
694 
695 static void khugepaged_alloc_sleep(void)
696 {
697 	DEFINE_WAIT(wait);
698 
699 	add_wait_queue(&khugepaged_wait, &wait);
700 	freezable_schedule_timeout_interruptible(
701 		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
702 	remove_wait_queue(&khugepaged_wait, &wait);
703 }
704 
705 static int khugepaged_node_load[MAX_NUMNODES];
706 
707 static bool khugepaged_scan_abort(int nid)
708 {
709 	int i;
710 
711 	/*
712 	 * If node_reclaim_mode is disabled, then no extra effort is made to
713 	 * allocate memory locally.
714 	 */
715 	if (!node_reclaim_mode)
716 		return false;
717 
718 	/* If there is a count for this node already, it must be acceptable */
719 	if (khugepaged_node_load[nid])
720 		return false;
721 
722 	for (i = 0; i < MAX_NUMNODES; i++) {
723 		if (!khugepaged_node_load[i])
724 			continue;
725 		if (node_distance(nid, i) > node_reclaim_distance)
726 			return true;
727 	}
728 	return false;
729 }
730 
731 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
732 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
733 {
734 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
735 }
736 
737 #ifdef CONFIG_NUMA
738 static int khugepaged_find_target_node(void)
739 {
740 	static int last_khugepaged_target_node = NUMA_NO_NODE;
741 	int nid, target_node = 0, max_value = 0;
742 
743 	/* find the first node with the max normal pages hit */
744 	for (nid = 0; nid < MAX_NUMNODES; nid++)
745 		if (khugepaged_node_load[nid] > max_value) {
746 			max_value = khugepaged_node_load[nid];
747 			target_node = nid;
748 		}
749 
750 	/* do some balancing if several nodes have the same hit record */
751 	if (target_node <= last_khugepaged_target_node)
752 		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
753 				nid++)
754 			if (max_value == khugepaged_node_load[nid]) {
755 				target_node = nid;
756 				break;
757 			}
758 
759 	last_khugepaged_target_node = target_node;
760 	return target_node;
761 }
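
/*
 * A hypothetical example of the balancing above: with
 * khugepaged_node_load = { [0] = 300, [1] = 300 } and node 0 as the
 * previous target, both nodes share the maximum hit count, so the second
 * loop walks past last_khugepaged_target_node and picks node 1 this time,
 * spreading collapse allocations across equally loaded nodes.
 */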
762 
763 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
764 {
765 	if (IS_ERR(*hpage)) {
766 		if (!*wait)
767 			return false;
768 
769 		*wait = false;
770 		*hpage = NULL;
771 		khugepaged_alloc_sleep();
772 	} else if (*hpage) {
773 		put_page(*hpage);
774 		*hpage = NULL;
775 	}
776 
777 	return true;
778 }
779 
780 static struct page *
781 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
782 {
783 	VM_BUG_ON_PAGE(*hpage, *hpage);
784 
785 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
786 	if (unlikely(!*hpage)) {
787 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
788 		*hpage = ERR_PTR(-ENOMEM);
789 		return NULL;
790 	}
791 
792 	prep_transhuge_page(*hpage);
793 	count_vm_event(THP_COLLAPSE_ALLOC);
794 	return *hpage;
795 }
796 #else
797 static int khugepaged_find_target_node(void)
798 {
799 	return 0;
800 }
801 
802 static inline struct page *alloc_khugepaged_hugepage(void)
803 {
804 	struct page *page;
805 
806 	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
807 			   HPAGE_PMD_ORDER);
808 	if (page)
809 		prep_transhuge_page(page);
810 	return page;
811 }
812 
813 static struct page *khugepaged_alloc_hugepage(bool *wait)
814 {
815 	struct page *hpage;
816 
817 	do {
818 		hpage = alloc_khugepaged_hugepage();
819 		if (!hpage) {
820 			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
821 			if (!*wait)
822 				return NULL;
823 
824 			*wait = false;
825 			khugepaged_alloc_sleep();
826 		} else
827 			count_vm_event(THP_COLLAPSE_ALLOC);
828 	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
829 
830 	return hpage;
831 }
832 
833 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
834 {
835 	if (!*hpage)
836 		*hpage = khugepaged_alloc_hugepage(wait);
837 
838 	if (unlikely(!*hpage))
839 		return false;
840 
841 	return true;
842 }
843 
844 static struct page *
845 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
846 {
847 	VM_BUG_ON(!*hpage);
848 
849 	return  *hpage;
850 }
851 #endif
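
/*
 * Rough summary of the two allocation strategies above: with CONFIG_NUMA,
 * khugepaged_prealloc_page() drops any leftover hugepage and
 * khugepaged_alloc_page() allocates a fresh one on the preferred node for
 * each collapse; without CONFIG_NUMA, a single hugepage is preallocated by
 * khugepaged_prealloc_page() and reused by khugepaged_alloc_page() until a
 * collapse consumes it.
 */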
852 
853 /*
854  * If the mmap_sem was temporarily dropped, revalidate the vma
855  * after re-taking the mmap_sem.
856  * Return 0 on success, otherwise return a non-zero
857  * scan result code.
858  */
859 
860 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
861 		struct vm_area_struct **vmap)
862 {
863 	struct vm_area_struct *vma;
864 	unsigned long hstart, hend;
865 
866 	if (unlikely(khugepaged_test_exit(mm)))
867 		return SCAN_ANY_PROCESS;
868 
869 	*vmap = vma = find_vma(mm, address);
870 	if (!vma)
871 		return SCAN_VMA_NULL;
872 
873 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
874 	hend = vma->vm_end & HPAGE_PMD_MASK;
875 	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
876 		return SCAN_ADDRESS_RANGE;
877 	if (!hugepage_vma_check(vma, vma->vm_flags))
878 		return SCAN_VMA_CHECK;
879 	return 0;
880 }
881 
882 /*
883  * Bring missing pages in from swap, to complete THP collapse.
884  * Only done if khugepaged_scan_pmd believes it is worthwhile.
885  *
886  * Called and returns without pte mapped or spinlocks held,
887  * but with mmap_sem held to protect against vma changes.
888  */
889 
890 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
891 					struct vm_area_struct *vma,
892 					unsigned long address, pmd_t *pmd,
893 					int referenced)
894 {
895 	int swapped_in = 0;
896 	vm_fault_t ret = 0;
897 	struct vm_fault vmf = {
898 		.vma = vma,
899 		.address = address,
900 		.flags = FAULT_FLAG_ALLOW_RETRY,
901 		.pmd = pmd,
902 		.pgoff = linear_page_index(vma, address),
903 	};
904 
905 	/* we only decide to swap in if there are enough young ptes */
906 	if (referenced < HPAGE_PMD_NR/2) {
907 		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
908 		return false;
909 	}
910 	vmf.pte = pte_offset_map(pmd, address);
911 	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
912 			vmf.pte++, vmf.address += PAGE_SIZE) {
913 		vmf.orig_pte = *vmf.pte;
914 		if (!is_swap_pte(vmf.orig_pte))
915 			continue;
916 		swapped_in++;
917 		ret = do_swap_page(&vmf);
918 
919 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
920 		if (ret & VM_FAULT_RETRY) {
921 			down_read(&mm->mmap_sem);
922 			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
923 				/* vma is no longer available, don't continue to swapin */
924 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
925 				return false;
926 			}
927 			/* check if the pmd is still valid */
928 			if (mm_find_pmd(mm, address) != pmd) {
929 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
930 				return false;
931 			}
932 		}
933 		if (ret & VM_FAULT_ERROR) {
934 			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
935 			return false;
936 		}
937 		/* pte is unmapped now, we need to map it */
938 		vmf.pte = pte_offset_map(pmd, vmf.address);
939 	}
940 	vmf.pte--;
941 	pte_unmap(vmf.pte);
942 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
943 	return true;
944 }
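
/*
 * Note on the referenced threshold above, assuming HPAGE_PMD_NR == 512
 * (2MB huge pages over 4KB base pages): swapin is only attempted when at
 * least 256 of the 512 ptes in the range were found referenced (young) by
 * the scan; below that, the collapse attempt is abandoned rather than
 * paying for swap I/O.
 */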
945 
946 static void collapse_huge_page(struct mm_struct *mm,
947 				   unsigned long address,
948 				   struct page **hpage,
949 				   int node, int referenced)
950 {
951 	pmd_t *pmd, _pmd;
952 	pte_t *pte;
953 	pgtable_t pgtable;
954 	struct page *new_page;
955 	spinlock_t *pmd_ptl, *pte_ptl;
956 	int isolated = 0, result = 0;
957 	struct mem_cgroup *memcg;
958 	struct vm_area_struct *vma;
959 	struct mmu_notifier_range range;
960 	gfp_t gfp;
961 
962 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
963 
964 	/* Only allocate from the target node */
965 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
966 
967 	/*
968 	 * Before allocating the hugepage, release the mmap_sem read lock.
969 	 * The allocation can take potentially a long time if it involves
970 	 * sync compaction, and we do not need to hold the mmap_sem during
971 	 * that. We will recheck the vma after taking it again in write mode.
972 	 */
973 	up_read(&mm->mmap_sem);
974 	new_page = khugepaged_alloc_page(hpage, gfp, node);
975 	if (!new_page) {
976 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
977 		goto out_nolock;
978 	}
979 
980 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
981 		result = SCAN_CGROUP_CHARGE_FAIL;
982 		goto out_nolock;
983 	}
984 
985 	down_read(&mm->mmap_sem);
986 	result = hugepage_vma_revalidate(mm, address, &vma);
987 	if (result) {
988 		mem_cgroup_cancel_charge(new_page, memcg, true);
989 		up_read(&mm->mmap_sem);
990 		goto out_nolock;
991 	}
992 
993 	pmd = mm_find_pmd(mm, address);
994 	if (!pmd) {
995 		result = SCAN_PMD_NULL;
996 		mem_cgroup_cancel_charge(new_page, memcg, true);
997 		up_read(&mm->mmap_sem);
998 		goto out_nolock;
999 	}
1000 
1001 	/*
1002 	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1003 	 * If it fails, we release the mmap_sem and jump to out_nolock;
1004 	 * continuing to collapse would cause inconsistency.
1005 	 */
1006 	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1007 		mem_cgroup_cancel_charge(new_page, memcg, true);
1008 		up_read(&mm->mmap_sem);
1009 		goto out_nolock;
1010 	}
1011 
1012 	up_read(&mm->mmap_sem);
1013 	/*
1014 	 * Prevent all access to the pagetables, with the exception of
1015 	 * gup_fast (handled later by the ptep_clear_flush) and the VM
1016 	 * (handled by the anon_vma lock + PG_lock).
1017 	 */
1018 	down_write(&mm->mmap_sem);
1019 	result = SCAN_ANY_PROCESS;
1020 	if (!mmget_still_valid(mm))
1021 		goto out;
1022 	result = hugepage_vma_revalidate(mm, address, &vma);
1023 	if (result)
1024 		goto out;
1025 	/* check if the pmd is still valid */
1026 	if (mm_find_pmd(mm, address) != pmd)
1027 		goto out;
1028 
1029 	anon_vma_lock_write(vma->anon_vma);
1030 
1031 	pte = pte_offset_map(pmd, address);
1032 	pte_ptl = pte_lockptr(mm, pmd);
1033 
1034 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1035 				address, address + HPAGE_PMD_SIZE);
1036 	mmu_notifier_invalidate_range_start(&range);
1037 	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1038 	/*
1039 	 * After this, gup_fast can't run anymore. This also removes
1040 	 * any huge TLB entry from the CPU so we won't allow
1041 	 * huge and small TLB entries for the same virtual address
1042 	 * to avoid the risk of CPU bugs in that area.
1043 	 */
1044 	_pmd = pmdp_collapse_flush(vma, address, pmd);
1045 	spin_unlock(pmd_ptl);
1046 	mmu_notifier_invalidate_range_end(&range);
1047 
1048 	spin_lock(pte_ptl);
1049 	isolated = __collapse_huge_page_isolate(vma, address, pte);
1050 	spin_unlock(pte_ptl);
1051 
1052 	if (unlikely(!isolated)) {
1053 		pte_unmap(pte);
1054 		spin_lock(pmd_ptl);
1055 		BUG_ON(!pmd_none(*pmd));
1056 		/*
1057 		 * We can only use set_pmd_at when establishing
1058 		 * hugepmds and never for establishing regular pmds that
1059 		 * point to regular pagetables. Use pmd_populate for that.
1060 		 */
1061 		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1062 		spin_unlock(pmd_ptl);
1063 		anon_vma_unlock_write(vma->anon_vma);
1064 		result = SCAN_FAIL;
1065 		goto out;
1066 	}
1067 
1068 	/*
1069 	 * All pages are isolated and locked so anon_vma rmap
1070 	 * can't run anymore.
1071 	 */
1072 	anon_vma_unlock_write(vma->anon_vma);
1073 
1074 	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1075 	pte_unmap(pte);
1076 	__SetPageUptodate(new_page);
1077 	pgtable = pmd_pgtable(_pmd);
1078 
1079 	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1080 	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1081 
1082 	/*
1083 	 * spin_lock() below is not the equivalent of smp_wmb(), so
1084 	 * this smp_wmb() is needed to prevent the copy_huge_page writes
1085 	 * from becoming visible after the set_pmd_at() write.
1086 	 */
1087 	smp_wmb();
1088 
1089 	spin_lock(pmd_ptl);
1090 	BUG_ON(!pmd_none(*pmd));
1091 	page_add_new_anon_rmap(new_page, vma, address, true);
1092 	mem_cgroup_commit_charge(new_page, memcg, false, true);
1093 	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1094 	lru_cache_add_active_or_unevictable(new_page, vma);
1095 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
1096 	set_pmd_at(mm, address, pmd, _pmd);
1097 	update_mmu_cache_pmd(vma, address, pmd);
1098 	spin_unlock(pmd_ptl);
1099 
1100 	*hpage = NULL;
1101 
1102 	khugepaged_pages_collapsed++;
1103 	result = SCAN_SUCCEED;
1104 out_up_write:
1105 	up_write(&mm->mmap_sem);
1106 out_nolock:
1107 	trace_mm_collapse_huge_page(mm, isolated, result);
1108 	return;
1109 out:
1110 	mem_cgroup_cancel_charge(new_page, memcg, true);
1111 	goto out_up_write;
1112 }
1113 
1114 static int khugepaged_scan_pmd(struct mm_struct *mm,
1115 			       struct vm_area_struct *vma,
1116 			       unsigned long address,
1117 			       struct page **hpage)
1118 {
1119 	pmd_t *pmd;
1120 	pte_t *pte, *_pte;
1121 	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1122 	struct page *page = NULL;
1123 	unsigned long _address;
1124 	spinlock_t *ptl;
1125 	int node = NUMA_NO_NODE, unmapped = 0;
1126 	bool writable = false;
1127 
1128 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1129 
1130 	pmd = mm_find_pmd(mm, address);
1131 	if (!pmd) {
1132 		result = SCAN_PMD_NULL;
1133 		goto out;
1134 	}
1135 
1136 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1137 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1138 	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1139 	     _pte++, _address += PAGE_SIZE) {
1140 		pte_t pteval = *_pte;
1141 		if (is_swap_pte(pteval)) {
1142 			if (++unmapped <= khugepaged_max_ptes_swap) {
1143 				continue;
1144 			} else {
1145 				result = SCAN_EXCEED_SWAP_PTE;
1146 				goto out_unmap;
1147 			}
1148 		}
1149 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1150 			if (!userfaultfd_armed(vma) &&
1151 			    ++none_or_zero <= khugepaged_max_ptes_none) {
1152 				continue;
1153 			} else {
1154 				result = SCAN_EXCEED_NONE_PTE;
1155 				goto out_unmap;
1156 			}
1157 		}
1158 		if (!pte_present(pteval)) {
1159 			result = SCAN_PTE_NON_PRESENT;
1160 			goto out_unmap;
1161 		}
1162 		if (pte_write(pteval))
1163 			writable = true;
1164 
1165 		page = vm_normal_page(vma, _address, pteval);
1166 		if (unlikely(!page)) {
1167 			result = SCAN_PAGE_NULL;
1168 			goto out_unmap;
1169 		}
1170 
1171 		/* TODO: teach khugepaged to collapse THP mapped with pte */
1172 		if (PageCompound(page)) {
1173 			result = SCAN_PAGE_COMPOUND;
1174 			goto out_unmap;
1175 		}
1176 
1177 		/*
1178 		 * Record which node the original page is from and save this
1179 		 * information to khugepaged_node_load[].
1180 		 * Khugepaged will allocate the hugepage from the node that has
1181 		 * the max hit record.
1182 		 */
1183 		node = page_to_nid(page);
1184 		if (khugepaged_scan_abort(node)) {
1185 			result = SCAN_SCAN_ABORT;
1186 			goto out_unmap;
1187 		}
1188 		khugepaged_node_load[node]++;
1189 		if (!PageLRU(page)) {
1190 			result = SCAN_PAGE_LRU;
1191 			goto out_unmap;
1192 		}
1193 		if (PageLocked(page)) {
1194 			result = SCAN_PAGE_LOCK;
1195 			goto out_unmap;
1196 		}
1197 		if (!PageAnon(page)) {
1198 			result = SCAN_PAGE_ANON;
1199 			goto out_unmap;
1200 		}
1201 
1202 		/*
1203 		 * We cannot use mapcount: we can't collapse if there's a gup pin.
1204 		 * The page must only be referenced by the scanned process
1205 		 * and the swap cache.
1206 		 */
1207 		if (page_count(page) != 1 + PageSwapCache(page)) {
1208 			result = SCAN_PAGE_COUNT;
1209 			goto out_unmap;
1210 		}
1211 		if (pte_young(pteval) ||
1212 		    page_is_young(page) || PageReferenced(page) ||
1213 		    mmu_notifier_test_young(vma->vm_mm, address))
1214 			referenced++;
1215 	}
1216 	if (writable) {
1217 		if (referenced) {
1218 			result = SCAN_SUCCEED;
1219 			ret = 1;
1220 		} else {
1221 			result = SCAN_LACK_REFERENCED_PAGE;
1222 		}
1223 	} else {
1224 		result = SCAN_PAGE_RO;
1225 	}
1226 out_unmap:
1227 	pte_unmap_unlock(pte, ptl);
1228 	if (ret) {
1229 		node = khugepaged_find_target_node();
1230 		/* collapse_huge_page will return with the mmap_sem released */
1231 		collapse_huge_page(mm, address, hpage, node, referenced);
1232 	}
1233 out:
1234 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1235 				     none_or_zero, result, unmapped);
1236 	return ret;
1237 }
1238 
1239 static void collect_mm_slot(struct mm_slot *mm_slot)
1240 {
1241 	struct mm_struct *mm = mm_slot->mm;
1242 
1243 	lockdep_assert_held(&khugepaged_mm_lock);
1244 
1245 	if (khugepaged_test_exit(mm)) {
1246 		/* free mm_slot */
1247 		hash_del(&mm_slot->hash);
1248 		list_del(&mm_slot->mm_node);
1249 
1250 		/*
1251 		 * Not strictly needed because the mm exited already.
1252 		 *
1253 		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1254 		 */
1255 
1256 		/* khugepaged_mm_lock actually not necessary for the below */
1257 		free_mm_slot(mm_slot);
1258 		mmdrop(mm);
1259 	}
1260 }
1261 
1262 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1263 /*
1264  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1265  * khugepaged should try to collapse the page table.
1266  */
1267 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1268 					 unsigned long addr)
1269 {
1270 	struct mm_slot *mm_slot;
1271 
1272 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1273 
1274 	spin_lock(&khugepaged_mm_lock);
1275 	mm_slot = get_mm_slot(mm);
1276 	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1277 		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1278 	spin_unlock(&khugepaged_mm_lock);
1279 	return 0;
1280 }
1281 
1282 /**
1283  * Try to collapse a pte-mapped THP for mm at address haddr.
1284  *
1285  * This function checks whether all the PTEs in the PMD are pointing to the
1286  * right THP. If so, retract the page table so the THP can be refaulted in
1287  * as pmd-mapped.
1288  */
1289 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1290 {
1291 	unsigned long haddr = addr & HPAGE_PMD_MASK;
1292 	struct vm_area_struct *vma = find_vma(mm, haddr);
1293 	struct page *hpage = NULL;
1294 	pte_t *start_pte, *pte;
1295 	pmd_t *pmd, _pmd;
1296 	spinlock_t *ptl;
1297 	int count = 0;
1298 	int i;
1299 
1300 	if (!vma || !vma->vm_file ||
1301 	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1302 		return;
1303 
1304 	/*
1305 	 * This vma's vm_flags may not have VM_HUGEPAGE if the page was not
1306 	 * collapsed by this mm. But we can still collapse if the page is
1307 	 * a valid THP. Add extra VM_HUGEPAGE so that hugepage_vma_check()
1308 	 * will not fail the vma for the missing VM_HUGEPAGE.
1309 	 */
1310 	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1311 		return;
1312 
1313 	pmd = mm_find_pmd(mm, haddr);
1314 	if (!pmd)
1315 		return;
1316 
1317 	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1318 
1319 	/* step 1: check that all mapped PTEs point to the right huge page */
1320 	for (i = 0, addr = haddr, pte = start_pte;
1321 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1322 		struct page *page;
1323 
1324 		/* empty pte, skip */
1325 		if (pte_none(*pte))
1326 			continue;
1327 
1328 		/* page swapped out, abort */
1329 		if (!pte_present(*pte))
1330 			goto abort;
1331 
1332 		page = vm_normal_page(vma, addr, *pte);
1333 
1334 		if (!page || !PageCompound(page))
1335 			goto abort;
1336 
1337 		if (!hpage) {
1338 			hpage = compound_head(page);
1339 			/*
1340 			 * The mapping of the THP should not change.
1341 			 *
1342 			 * Note that uprobe, debugger, or MAP_PRIVATE may
1343 			 * change the page table, but the new page will
1344 			 * not pass PageCompound() check.
1345 			 */
1346 			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
1347 				goto abort;
1348 		}
1349 
1350 		/*
1351 		 * Confirm the page maps to the correct subpage.
1352 		 *
1353 		 * Note that uprobe, debugger, or MAP_PRIVATE may change
1354 		 * the page table, but the new page will not pass
1355 		 * PageCompound() check.
1356 		 */
1357 		if (WARN_ON(hpage + i != page))
1358 			goto abort;
1359 		count++;
1360 	}
1361 
1362 	/* step 2: adjust rmap */
1363 	for (i = 0, addr = haddr, pte = start_pte;
1364 	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1365 		struct page *page;
1366 
1367 		if (pte_none(*pte))
1368 			continue;
1369 		page = vm_normal_page(vma, addr, *pte);
1370 		page_remove_rmap(page, false);
1371 	}
1372 
1373 	pte_unmap_unlock(start_pte, ptl);
1374 
1375 	/* step 3: set proper refcount and mm_counters. */
1376 	if (hpage) {
1377 		page_ref_sub(hpage, count);
1378 		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1379 	}
1380 
1381 	/* step 4: collapse pmd */
1382 	ptl = pmd_lock(vma->vm_mm, pmd);
1383 	_pmd = pmdp_collapse_flush(vma, addr, pmd);
1384 	spin_unlock(ptl);
1385 	mm_dec_nr_ptes(mm);
1386 	pte_free(mm, pmd_pgtable(_pmd));
1387 	return;
1388 
1389 abort:
1390 	pte_unmap_unlock(start_pte, ptl);
1391 }
1392 
1393 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1394 {
1395 	struct mm_struct *mm = mm_slot->mm;
1396 	int i;
1397 
1398 	if (likely(mm_slot->nr_pte_mapped_thp == 0))
1399 		return 0;
1400 
1401 	if (!down_write_trylock(&mm->mmap_sem))
1402 		return -EBUSY;
1403 
1404 	if (unlikely(khugepaged_test_exit(mm)))
1405 		goto out;
1406 
1407 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1408 		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1409 
1410 out:
1411 	mm_slot->nr_pte_mapped_thp = 0;
1412 	up_write(&mm->mmap_sem);
1413 	return 0;
1414 }
1415 
1416 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1417 {
1418 	struct vm_area_struct *vma;
1419 	unsigned long addr;
1420 	pmd_t *pmd, _pmd;
1421 
1422 	i_mmap_lock_write(mapping);
1423 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1424 		/*
1425 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1426 		 * got written to. These VMAs are likely not worth the cost of
1427 		 * down_write(mmap_sem), as the PMD-mapping is likely to be
1428 		 * split later.
1429 		 *
1430 		 * Note that the vma->anon_vma check is racy: it can be set up
1431 		 * after the check but before we take mmap_sem by the fault path.
1432 		 * But the page lock would prevent establishing any new ptes of
1433 		 * the page, so we are safe.
1434 		 *
1435 		 * An alternative would be to drop the check, but check that the
1436 		 * page table is clear before calling pmdp_collapse_flush() under
1437 		 * ptl. That has a higher chance to recover a THP for the VMA,
1438 		 * but has a higher cost too.
1439 		 */
1440 		if (vma->anon_vma)
1441 			continue;
1442 		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1443 		if (addr & ~HPAGE_PMD_MASK)
1444 			continue;
1445 		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1446 			continue;
1447 		pmd = mm_find_pmd(vma->vm_mm, addr);
1448 		if (!pmd)
1449 			continue;
1450 		/*
1451 		 * We need the exclusive mmap_sem to retract the page table.
1452 		 *
1453 		 * We use trylock due to lock inversion: we need to acquire
1454 		 * mmap_sem while holding the page lock. The fault path does it
1455 		 * in the reverse order. Trylock is a way to avoid the deadlock.
1456 		 */
1457 		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1458 			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1459 			/* assume page table is clear */
1460 			_pmd = pmdp_collapse_flush(vma, addr, pmd);
1461 			spin_unlock(ptl);
1462 			up_write(&vma->vm_mm->mmap_sem);
1463 			mm_dec_nr_ptes(vma->vm_mm);
1464 			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1465 		} else {
1466 			/* Try again later */
1467 			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
1468 		}
1469 	}
1470 	i_mmap_unlock_write(mapping);
1471 }
1472 
1473 /**
1474  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1475  *
1476  * Basic scheme is simple, details are more complex:
1477  *  - allocate and lock a new huge page;
1478  *  - scan page cache replacing old pages with the new one
1479  *    + swap/gup in pages if necessary;
1480  *    + fill in gaps;
1481  *    + keep old pages around in case rollback is required;
1482  *  - if replacing succeeds:
1483  *    + copy data over;
1484  *    + free old pages;
1485  *    + unlock huge page;
1486  *  - if replacing fails:
1487  *    + put all pages back and unfreeze them;
1488  *    + restore gaps in the page cache;
1489  *    + unlock and free huge page;
1490  */
1491 static void collapse_file(struct mm_struct *mm,
1492 		struct file *file, pgoff_t start,
1493 		struct page **hpage, int node)
1494 {
1495 	struct address_space *mapping = file->f_mapping;
1496 	gfp_t gfp;
1497 	struct page *new_page;
1498 	struct mem_cgroup *memcg;
1499 	pgoff_t index, end = start + HPAGE_PMD_NR;
1500 	LIST_HEAD(pagelist);
1501 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1502 	int nr_none = 0, result = SCAN_SUCCEED;
1503 	bool is_shmem = shmem_file(file);
1504 
1505 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1506 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1507 
1508 	/* Only allocate from the target node */
1509 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1510 
1511 	new_page = khugepaged_alloc_page(hpage, gfp, node);
1512 	if (!new_page) {
1513 		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1514 		goto out;
1515 	}
1516 
1517 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1518 		result = SCAN_CGROUP_CHARGE_FAIL;
1519 		goto out;
1520 	}
1521 
1522 	/* This will be less messy when we use multi-index entries */
1523 	do {
1524 		xas_lock_irq(&xas);
1525 		xas_create_range(&xas);
1526 		if (!xas_error(&xas))
1527 			break;
1528 		xas_unlock_irq(&xas);
1529 		if (!xas_nomem(&xas, GFP_KERNEL)) {
1530 			mem_cgroup_cancel_charge(new_page, memcg, true);
1531 			result = SCAN_FAIL;
1532 			goto out;
1533 		}
1534 	} while (1);
1535 
1536 	__SetPageLocked(new_page);
1537 	if (is_shmem)
1538 		__SetPageSwapBacked(new_page);
1539 	new_page->index = start;
1540 	new_page->mapping = mapping;
1541 
1542 	/*
1543 	 * At this point the new_page is locked and not up-to-date.
1544 	 * It's safe to insert it into the page cache, because nobody would
1545 	 * be able to map it or use it in another way until we unlock it.
1546 	 */
1547 
1548 	xas_set(&xas, start);
1549 	for (index = start; index < end; index++) {
1550 		struct page *page = xas_next(&xas);
1551 
1552 		VM_BUG_ON(index != xas.xa_index);
1553 		if (is_shmem) {
1554 			if (!page) {
1555 				/*
1556 				 * Stop if extent has been truncated or
1557 				 * hole-punched, and is now completely
1558 				 * empty.
1559 				 */
1560 				if (index == start) {
1561 					if (!xas_next_entry(&xas, end - 1)) {
1562 						result = SCAN_TRUNCATED;
1563 						goto xa_locked;
1564 					}
1565 					xas_set(&xas, index);
1566 				}
1567 				if (!shmem_charge(mapping->host, 1)) {
1568 					result = SCAN_FAIL;
1569 					goto xa_locked;
1570 				}
1571 				xas_store(&xas, new_page);
1572 				nr_none++;
1573 				continue;
1574 			}
1575 
1576 			if (xa_is_value(page) || !PageUptodate(page)) {
1577 				xas_unlock_irq(&xas);
1578 				/* swap in or instantiate fallocated page */
1579 				if (shmem_getpage(mapping->host, index, &page,
1580 						  SGP_NOHUGE)) {
1581 					result = SCAN_FAIL;
1582 					goto xa_unlocked;
1583 				}
1584 			} else if (trylock_page(page)) {
1585 				get_page(page);
1586 				xas_unlock_irq(&xas);
1587 			} else {
1588 				result = SCAN_PAGE_LOCK;
1589 				goto xa_locked;
1590 			}
1591 		} else {	/* !is_shmem */
1592 			if (!page || xa_is_value(page)) {
1593 				xas_unlock_irq(&xas);
1594 				page_cache_sync_readahead(mapping, &file->f_ra,
1595 							  file, index,
1596 							  PAGE_SIZE);
1597 				/* drain pagevecs to help isolate_lru_page() */
1598 				lru_add_drain();
1599 				page = find_lock_page(mapping, index);
1600 				if (unlikely(page == NULL)) {
1601 					result = SCAN_FAIL;
1602 					goto xa_unlocked;
1603 				}
1604 			} else if (!PageUptodate(page)) {
1605 				xas_unlock_irq(&xas);
1606 				wait_on_page_locked(page);
1607 				if (!trylock_page(page)) {
1608 					result = SCAN_PAGE_LOCK;
1609 					goto xa_unlocked;
1610 				}
1611 				get_page(page);
1612 			} else if (PageDirty(page)) {
1613 				result = SCAN_FAIL;
1614 				goto xa_locked;
1615 			} else if (trylock_page(page)) {
1616 				get_page(page);
1617 				xas_unlock_irq(&xas);
1618 			} else {
1619 				result = SCAN_PAGE_LOCK;
1620 				goto xa_locked;
1621 			}
1622 		}
1623 
1624 		/*
1625 		 * The page must be locked, so we can drop the i_pages lock
1626 		 * without racing with truncate.
1627 		 */
1628 		VM_BUG_ON_PAGE(!PageLocked(page), page);
1629 		VM_BUG_ON_PAGE(!PageUptodate(page), page);
1630 
1631 		/*
1632 		 * If file was truncated then extended, or hole-punched, before
1633 		 * we locked the first page, then a THP might be there already.
1634 		 */
1635 		if (PageTransCompound(page)) {
1636 			result = SCAN_PAGE_COMPOUND;
1637 			goto out_unlock;
1638 		}
1639 
1640 		if (page_mapping(page) != mapping) {
1641 			result = SCAN_TRUNCATED;
1642 			goto out_unlock;
1643 		}
1644 
1645 		if (isolate_lru_page(page)) {
1646 			result = SCAN_DEL_PAGE_LRU;
1647 			goto out_unlock;
1648 		}
1649 
1650 		if (page_has_private(page) &&
1651 		    !try_to_release_page(page, GFP_KERNEL)) {
1652 			result = SCAN_PAGE_HAS_PRIVATE;
1653 			goto out_unlock;
1654 		}
1655 
1656 		if (page_mapped(page))
1657 			unmap_mapping_pages(mapping, index, 1, false);
1658 
1659 		xas_lock_irq(&xas);
1660 		xas_set(&xas, index);
1661 
1662 		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1663 		VM_BUG_ON_PAGE(page_mapped(page), page);
1664 
1665 		/*
1666 		 * The page is expected to have page_count() == 3:
1667 		 *  - we hold a pin on it;
1668 		 *  - one reference from page cache;
1669 		 *  - one from isolate_lru_page;
1670 		 */
1671 		if (!page_ref_freeze(page, 3)) {
1672 			result = SCAN_PAGE_COUNT;
1673 			xas_unlock_irq(&xas);
1674 			putback_lru_page(page);
1675 			goto out_unlock;
1676 		}
1677 
1678 		/*
1679 		 * Add the page to the list to be able to undo the collapse if
1680 		 * something goes wrong.
1681 		 */
1682 		list_add_tail(&page->lru, &pagelist);
1683 
1684 		/* Finally, replace with the new page. */
1685 		xas_store(&xas, new_page);
1686 		continue;
1687 out_unlock:
1688 		unlock_page(page);
1689 		put_page(page);
1690 		goto xa_unlocked;
1691 	}
1692 
1693 	if (is_shmem)
1694 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
1695 	else {
1696 		__inc_node_page_state(new_page, NR_FILE_THPS);
1697 		filemap_nr_thps_inc(mapping);
1698 	}
1699 
1700 	if (nr_none) {
1701 		struct zone *zone = page_zone(new_page);
1702 
1703 		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1704 		if (is_shmem)
1705 			__mod_node_page_state(zone->zone_pgdat,
1706 					      NR_SHMEM, nr_none);
1707 	}
1708 
1709 xa_locked:
1710 	xas_unlock_irq(&xas);
1711 xa_unlocked:
1712 
1713 	if (result == SCAN_SUCCEED) {
1714 		struct page *page, *tmp;
1715 
1716 		/*
1717 		 * Replacing the old pages with the new one has succeeded; now we
1718 		 * need to copy the content and free the old pages.
1719 		 */
1720 		index = start;
1721 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1722 			while (index < page->index) {
1723 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
1724 				index++;
1725 			}
1726 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1727 					page);
1728 			list_del(&page->lru);
1729 			page->mapping = NULL;
1730 			page_ref_unfreeze(page, 1);
1731 			ClearPageActive(page);
1732 			ClearPageUnevictable(page);
1733 			unlock_page(page);
1734 			put_page(page);
1735 			index++;
1736 		}
1737 		while (index < end) {
1738 			clear_highpage(new_page + (index % HPAGE_PMD_NR));
1739 			index++;
1740 		}
1741 
1742 		SetPageUptodate(new_page);
1743 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
1744 		mem_cgroup_commit_charge(new_page, memcg, false, true);
1745 
1746 		if (is_shmem) {
1747 			set_page_dirty(new_page);
1748 			lru_cache_add_anon(new_page);
1749 		} else {
1750 			lru_cache_add_file(new_page);
1751 		}
1752 		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1753 
1754 		/*
1755 		 * Remove pte page tables, so we can re-fault the page as huge.
1756 		 */
1757 		retract_page_tables(mapping, start);
1758 		*hpage = NULL;
1759 
1760 		khugepaged_pages_collapsed++;
1761 	} else {
1762 		struct page *page;
1763 
1764 		/* Something went wrong: roll back page cache changes */
1765 		xas_lock_irq(&xas);
1766 		mapping->nrpages -= nr_none;
1767 
1768 		if (is_shmem)
1769 			shmem_uncharge(mapping->host, nr_none);
1770 
1771 		xas_set(&xas, start);
1772 		xas_for_each(&xas, page, end - 1) {
1773 			page = list_first_entry_or_null(&pagelist,
1774 					struct page, lru);
1775 			if (!page || xas.xa_index < page->index) {
1776 				if (!nr_none)
1777 					break;
1778 				nr_none--;
1779 				/* Put holes back where they were */
1780 				xas_store(&xas, NULL);
1781 				continue;
1782 			}
1783 
1784 			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1785 
1786 			/* Unfreeze the page. */
1787 			list_del(&page->lru);
1788 			page_ref_unfreeze(page, 2);
1789 			xas_store(&xas, page);
1790 			xas_pause(&xas);
1791 			xas_unlock_irq(&xas);
1792 			unlock_page(page);
1793 			putback_lru_page(page);
1794 			xas_lock_irq(&xas);
1795 		}
1796 		VM_BUG_ON(nr_none);
1797 		xas_unlock_irq(&xas);
1798 
1799 		mem_cgroup_cancel_charge(new_page, memcg, true);
1800 		new_page->mapping = NULL;
1801 	}
1802 
1803 	unlock_page(new_page);
1804 out:
1805 	VM_BUG_ON(!list_empty(&pagelist));
1806 	/* TODO: tracepoints */
1807 }
1808 
1809 static void khugepaged_scan_file(struct mm_struct *mm,
1810 		struct file *file, pgoff_t start, struct page **hpage)
1811 {
1812 	struct page *page = NULL;
1813 	struct address_space *mapping = file->f_mapping;
1814 	XA_STATE(xas, &mapping->i_pages, start);
1815 	int present, swap;
1816 	int node = NUMA_NO_NODE;
1817 	int result = SCAN_SUCCEED;
1818 
1819 	present = 0;
1820 	swap = 0;
1821 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1822 	rcu_read_lock();
1823 	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1824 		if (xas_retry(&xas, page))
1825 			continue;
1826 
1827 		if (xa_is_value(page)) {
1828 			if (++swap > khugepaged_max_ptes_swap) {
1829 				result = SCAN_EXCEED_SWAP_PTE;
1830 				break;
1831 			}
1832 			continue;
1833 		}
1834 
1835 		if (PageTransCompound(page)) {
1836 			result = SCAN_PAGE_COMPOUND;
1837 			break;
1838 		}
1839 
1840 		node = page_to_nid(page);
1841 		if (khugepaged_scan_abort(node)) {
1842 			result = SCAN_SCAN_ABORT;
1843 			break;
1844 		}
1845 		khugepaged_node_load[node]++;
1846 
1847 		if (!PageLRU(page)) {
1848 			result = SCAN_PAGE_LRU;
1849 			break;
1850 		}
1851 
1852 		if (page_count(page) !=
1853 		    1 + page_mapcount(page) + page_has_private(page)) {
1854 			result = SCAN_PAGE_COUNT;
1855 			break;
1856 		}
1857 
1858 		/*
1859 		 * We probably should check if the page is referenced here, but
1860 		 * nobody would transfer pte_young() to PageReferenced() for us.
1861 		 * And an rmap walk here is just too costly...
1862 		 */
1863 
1864 		present++;
1865 
1866 		if (need_resched()) {
1867 			xas_pause(&xas);
1868 			cond_resched_rcu();
1869 		}
1870 	}
1871 	rcu_read_unlock();
1872 
1873 	if (result == SCAN_SUCCEED) {
1874 		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1875 			result = SCAN_EXCEED_NONE_PTE;
1876 		} else {
1877 			node = khugepaged_find_target_node();
1878 			collapse_file(mm, file, start, hpage, node);
1879 		}
1880 	}
1881 
1882 	/* TODO: tracepoints */
1883 }
1884 #else
1885 static void khugepaged_scan_file(struct mm_struct *mm,
1886 		struct file *file, pgoff_t start, struct page **hpage)
1887 {
1888 	BUILD_BUG();
1889 }
1890 
1891 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1892 {
1893 	return 0;
1894 }
1895 #endif
1896 
1897 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1898 					    struct page **hpage)
1899 	__releases(&khugepaged_mm_lock)
1900 	__acquires(&khugepaged_mm_lock)
1901 {
1902 	struct mm_slot *mm_slot;
1903 	struct mm_struct *mm;
1904 	struct vm_area_struct *vma;
1905 	int progress = 0;
1906 
1907 	VM_BUG_ON(!pages);
1908 	lockdep_assert_held(&khugepaged_mm_lock);
1909 
1910 	if (khugepaged_scan.mm_slot)
1911 		mm_slot = khugepaged_scan.mm_slot;
1912 	else {
1913 		mm_slot = list_entry(khugepaged_scan.mm_head.next,
1914 				     struct mm_slot, mm_node);
1915 		khugepaged_scan.address = 0;
1916 		khugepaged_scan.mm_slot = mm_slot;
1917 	}
1918 	spin_unlock(&khugepaged_mm_lock);
1919 	khugepaged_collapse_pte_mapped_thps(mm_slot);
1920 
1921 	mm = mm_slot->mm;
1922 	/*
1923 	 * Don't wait for the mmap semaphore (to avoid long wait times).  Just move to
1924 	 * the next mm on the list.
1925 	 */
1926 	vma = NULL;
1927 	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1928 		goto breakouterloop_mmap_sem;
1929 	if (likely(!khugepaged_test_exit(mm)))
1930 		vma = find_vma(mm, khugepaged_scan.address);
1931 
1932 	progress++;
1933 	for (; vma; vma = vma->vm_next) {
1934 		unsigned long hstart, hend;
1935 
1936 		cond_resched();
1937 		if (unlikely(khugepaged_test_exit(mm))) {
1938 			progress++;
1939 			break;
1940 		}
1941 		if (!hugepage_vma_check(vma, vma->vm_flags)) {
1942 skip:
1943 			progress++;
1944 			continue;
1945 		}
1946 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1947 		hend = vma->vm_end & HPAGE_PMD_MASK;
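		/*
		 * hstart rounds vm_start up and hend rounds vm_end down to a
		 * PMD boundary, so only fully covered ranges are scanned.
		 * E.g. with 2MiB huge pages, vm_start 0x201000 gives hstart
		 * 0x400000 and vm_end 0x7ff000 gives hend 0x600000.
		 */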
1948 		if (hstart >= hend)
1949 			goto skip;
1950 		if (khugepaged_scan.address > hend)
1951 			goto skip;
1952 		if (khugepaged_scan.address < hstart)
1953 			khugepaged_scan.address = hstart;
1954 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1955 
1956 		while (khugepaged_scan.address < hend) {
1957 			int ret;
1958 			cond_resched();
1959 			if (unlikely(khugepaged_test_exit(mm)))
1960 				goto breakouterloop;
1961 
1962 			VM_BUG_ON(khugepaged_scan.address < hstart ||
1963 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
1964 				  hend);
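			/*
			 * For a file-backed VMA, take a reference on the file
			 * and drop mmap_sem before scanning the page cache;
			 * ret = 1 signals below that mmap_sem was released.
			 */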
1965 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
1966 				struct file *file;
1967 				pgoff_t pgoff = linear_page_index(vma,
1968 						khugepaged_scan.address);
1969 
1970 				if (shmem_file(vma->vm_file)
1971 				    && !shmem_huge_enabled(vma))
1972 					goto skip;
1973 				file = get_file(vma->vm_file);
1974 				up_read(&mm->mmap_sem);
1975 				ret = 1;
1976 				khugepaged_scan_file(mm, file, pgoff, hpage);
1977 				fput(file);
1978 			} else {
1979 				ret = khugepaged_scan_pmd(mm, vma,
1980 						khugepaged_scan.address,
1981 						hpage);
1982 			}
1983 			/* move to next address */
1984 			khugepaged_scan.address += HPAGE_PMD_SIZE;
1985 			progress += HPAGE_PMD_NR;
1986 			if (ret)
1987 				/* we released mmap_sem so break loop */
1988 				goto breakouterloop_mmap_sem;
1989 			if (progress >= pages)
1990 				goto breakouterloop;
1991 		}
1992 	}
1993 breakouterloop:
1994 	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1995 breakouterloop_mmap_sem:
1996 
1997 	spin_lock(&khugepaged_mm_lock);
1998 	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1999 	/*
2000 	 * Release the current mm_slot if this mm is about to die, or
2001 	 * if we scanned all vmas of this mm.
2002 	 */
2003 	if (khugepaged_test_exit(mm) || !vma) {
2004 		/*
2005 		 * Make sure that if mm_users is reaching zero while
2006 		 * khugepaged runs here, khugepaged_exit will find
2007 		 * mm_slot not pointing to the exiting mm.
2008 		 */
2009 		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2010 			khugepaged_scan.mm_slot = list_entry(
2011 				mm_slot->mm_node.next,
2012 				struct mm_slot, mm_node);
2013 			khugepaged_scan.address = 0;
2014 		} else {
2015 			khugepaged_scan.mm_slot = NULL;
2016 			khugepaged_full_scans++;
2017 		}
2018 
2019 		collect_mm_slot(mm_slot);
2020 	}
2021 
2022 	return progress;
2023 }
2024 
2025 static int khugepaged_has_work(void)
2026 {
2027 	return !list_empty(&khugepaged_scan.mm_head) &&
2028 		khugepaged_enabled();
2029 }
2030 
2031 static int khugepaged_wait_event(void)
2032 {
2033 	return !list_empty(&khugepaged_scan.mm_head) ||
2034 		kthread_should_stop();
2035 }
2036 
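/*
 * One scan pass: keep calling khugepaged_scan_mm_slot() until roughly
 * khugepaged_pages_to_scan ptes have been scanned, the mm list has been
 * walked from its head twice, khugepaged_prealloc_page() gives up, or
 * the thread is asked to stop or freeze.
 */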
2037 static void khugepaged_do_scan(void)
2038 {
2039 	struct page *hpage = NULL;
2040 	unsigned int progress = 0, pass_through_head = 0;
2041 	unsigned int pages = khugepaged_pages_to_scan;
2042 	bool wait = true;
2043 
2044 	barrier(); /* ensure khugepaged_pages_to_scan is read once into the local 'pages' */
2045 
2046 	while (progress < pages) {
2047 		if (!khugepaged_prealloc_page(&hpage, &wait))
2048 			break;
2049 
2050 		cond_resched();
2051 
2052 		if (unlikely(kthread_should_stop() || try_to_freeze()))
2053 			break;
2054 
2055 		spin_lock(&khugepaged_mm_lock);
2056 		if (!khugepaged_scan.mm_slot)
2057 			pass_through_head++;
2058 		if (khugepaged_has_work() &&
2059 		    pass_through_head < 2)
2060 			progress += khugepaged_scan_mm_slot(pages - progress,
2061 							    &hpage);
2062 		else
2063 			progress = pages;
2064 		spin_unlock(&khugepaged_mm_lock);
2065 	}
2066 
2067 	if (!IS_ERR_OR_NULL(hpage))
2068 		put_page(hpage);
2069 }
2070 
2071 static bool khugepaged_should_wakeup(void)
2072 {
2073 	return kthread_should_stop() ||
2074 	       time_after_eq(jiffies, khugepaged_sleep_expire);
2075 }
2076 
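/*
 * Sleep between scan passes: with work pending, nap for
 * khugepaged_scan_sleep_millisecs (waking early on kthread_stop());
 * with nothing to do, block until an mm is registered for scanning or
 * the thread is stopped.
 */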
2077 static void khugepaged_wait_work(void)
2078 {
2079 	if (khugepaged_has_work()) {
2080 		const unsigned long scan_sleep_jiffies =
2081 			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2082 
2083 		if (!scan_sleep_jiffies)
2084 			return;
2085 
2086 		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2087 		wait_event_freezable_timeout(khugepaged_wait,
2088 					     khugepaged_should_wakeup(),
2089 					     scan_sleep_jiffies);
2090 		return;
2091 	}
2092 
2093 	if (khugepaged_enabled())
2094 		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2095 }
2096 
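/*
 * Main loop of the khugepaged kernel thread: alternate scan passes and
 * sleeps until kthread_stop(), then release any mm_slot still held by
 * the scan cursor.
 */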
2097 static int khugepaged(void *none)
2098 {
2099 	struct mm_slot *mm_slot;
2100 
2101 	set_freezable();
2102 	set_user_nice(current, MAX_NICE);
2103 
2104 	while (!kthread_should_stop()) {
2105 		khugepaged_do_scan();
2106 		khugepaged_wait_work();
2107 	}
2108 
2109 	spin_lock(&khugepaged_mm_lock);
2110 	mm_slot = khugepaged_scan.mm_slot;
2111 	khugepaged_scan.mm_slot = NULL;
2112 	if (mm_slot)
2113 		collect_mm_slot(mm_slot);
2114 	spin_unlock(&khugepaged_mm_lock);
2115 	return 0;
2116 }
2117 
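/*
 * Raise min_free_kbytes so every zone that huge page allocations can
 * come from keeps enough free memory for anti-fragmentation to work.
 * For example, assuming 4KiB pages, 2MiB pageblocks (pageblock_nr_pages
 * == 512) and two such zones, this asks for 512 * 2 * 2 +
 * 512 * 2 * 3 * 3 = 11264 pages, i.e. 44 MiB, subject to the
 * 5%-of-lowmem cap below.
 */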
2118 static void set_recommended_min_free_kbytes(void)
2119 {
2120 	struct zone *zone;
2121 	int nr_zones = 0;
2122 	unsigned long recommended_min;
2123 
2124 	for_each_populated_zone(zone) {
2125 		/*
2126 		 * We don't need to worry about fragmentation of
2127 		 * ZONE_MOVABLE since it only has movable pages.
2128 		 */
2129 		if (zone_idx(zone) > gfp_zone(GFP_USER))
2130 			continue;
2131 
2132 		nr_zones++;
2133 	}
2134 
2135 	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2136 	recommended_min = pageblock_nr_pages * nr_zones * 2;
2137 
2138 	/*
2139 	 * Make sure that on average at least two pageblocks are almost free
2140 	 * of another type, one for a migratetype to fall back to and a
2141 	 * second to avoid subsequent fallbacks of other types.  There are 3
2142 	 * MIGRATE_TYPES we care about.
2143 	 */
2144 	recommended_min += pageblock_nr_pages * nr_zones *
2145 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2146 
2147 	/* never allow more than 5% of the lowmem to be reserved */
2148 	recommended_min = min(recommended_min,
2149 			      (unsigned long) nr_free_buffer_pages() / 20);
2150 	recommended_min <<= (PAGE_SHIFT-10);
2151 
2152 	if (recommended_min > min_free_kbytes) {
2153 		if (user_min_free_kbytes >= 0)
2154 			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2155 				min_free_kbytes, recommended_min);
2156 
2157 		min_free_kbytes = recommended_min;
2158 	}
2159 	setup_per_zone_wmarks();
2160 }
2161 
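/*
 * Start or stop the khugepaged thread to match the current THP setting:
 * spawn it (and raise min_free_kbytes) when collapsing is enabled, stop
 * it when disabled.  Serialized by khugepaged_mutex.
 */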
2162 int start_stop_khugepaged(void)
2163 {
2164 	static struct task_struct *khugepaged_thread __read_mostly;
2165 	static DEFINE_MUTEX(khugepaged_mutex);
2166 	int err = 0;
2167 
2168 	mutex_lock(&khugepaged_mutex);
2169 	if (khugepaged_enabled()) {
2170 		if (!khugepaged_thread)
2171 			khugepaged_thread = kthread_run(khugepaged, NULL,
2172 							"khugepaged");
2173 		if (IS_ERR(khugepaged_thread)) {
2174 			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2175 			err = PTR_ERR(khugepaged_thread);
2176 			khugepaged_thread = NULL;
2177 			goto fail;
2178 		}
2179 
2180 		if (!list_empty(&khugepaged_scan.mm_head))
2181 			wake_up_interruptible(&khugepaged_wait);
2182 
2183 		set_recommended_min_free_kbytes();
2184 	} else if (khugepaged_thread) {
2185 		kthread_stop(khugepaged_thread);
2186 		khugepaged_thread = NULL;
2187 	}
2188 fail:
2189 	mutex_unlock(&khugepaged_mutex);
2190 	return err;
2191 }
2192