/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

static inline void totalram_pages_set(long val)
{
	atomic_long_set(&_totalram_pages, val);
}

extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 * It is defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif
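/*
 * Example (illustrative sketch, not part of this header): code doing
 * arithmetic or range checks on user pointers strips the tag bits first,
 * so a tag cannot corrupt the comparison. The helper name below is
 * hypothetical.
 *
 *	static bool example_uaddr_in_range(unsigned long addr,
 *					   unsigned long start,
 *					   unsigned long end)
 *	{
 *		addr = untagged_addr(addr); // no-op unless the arch tags
 *		return addr >= start && addr < end;
 *	}
 */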
#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * Used to prevent common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement its own version of
 * mm_zero_struct_page it should wrap the defines below in an #ifndef and
 * define its own version of this macro in <asm/pgtable.h>.
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 80
 * or reduces below 56. The idea is that the compiler optimizes out the
 * switch() statement and leaves only move/store instructions. Also the
 * compiler can combine write statements if they are both assignments and
 * can be reordered; this can result in several of the writes here being
 * dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	/* Check that struct page is either 56, 64, 72, or 80 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;	/* fallthrough */
	case 72:
		_pp[8] = 0;	/* fallthrough */
	case 64:
		_pp[7] = 0;	/* fallthrough */
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
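/*
 * Example (illustrative sketch): an architecture with a cheap zeroing
 * primitive could provide its own mm_zero_struct_page() from
 * <asm/pgtable.h>, as the comment above describes. The names
 * arch_zero_struct_page and zero_small_region are hypothetical:
 *
 *	#define mm_zero_struct_page(pp)	arch_zero_struct_page(pp)
 *	static inline void arch_zero_struct_page(struct page *page)
 *	{
 *		zero_small_region(page, sizeof(struct page));
 *	}
 */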
/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this limit via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned
 * short, so the number of sections must be smaller than 65535 when a
 * coredump is taken. Because the kernel adds some informative sections to
 * the program image when generating a coredump, we need some margin. The
 * number of extra sections is currently 1-3 and depends on the arch; we use
 * 5 as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools can be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif
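/*
 * Example (illustrative sketch): a protection-key value occupies the
 * VM_PKEY_BIT* positions, so packing a key into vm_flags amounts to a
 * shift and mask. The helper name is hypothetical; arch mmap code does
 * something along these lines.
 *
 *	#ifdef CONFIG_ARCH_HAS_PKEYS
 *	static inline unsigned long example_pkey_to_vm_flags(int pkey)
 *	{
 *		return ((unsigned long)pkey << VM_PKEY_SHIFT) &
 *		       (VM_PKEY_BIT0 | VM_PKEY_BIT1 |
 *			VM_PKEY_BIT2 | VM_PKEY_BIT3 | VM_PKEY_BIT4);
 *	}
 *	#endif
 */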
#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }
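/*
 * Example (illustrative): FAULT_FLAG_TRACE is a flag/name table intended
 * for the tracing code, where it would typically be consumed by a trace
 * event's printk template along the lines of:
 *
 *	TP_printk("... flags=%s",
 *		  __print_flags(vmf_flags, "|", FAULT_FLAG_TRACE))
 */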
/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name. Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return. Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr. This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
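/*
 * Example (illustrative sketch): a minimal driver often implements only
 * ->fault, returning a page it owns for the faulting offset. The names
 * my_fault, my_lookup_page and my_vm_ops are hypothetical.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);		// reference handed to the core VM
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */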
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping (page->mapping)
 * - private data (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}
/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
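/*
 * Example (illustrative): the kvmalloc* helpers try kmalloc() first and
 * fall back to vmalloc(), so they suit allocations that may be large but
 * need not be physically contiguous. kvfree() handles either backing
 * store:
 *
 *	struct foo *arr = kvmalloc_array(nr, sizeof(*arr), GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	kvfree(arr);
 */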
static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function. Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
	return 1UL << compound_order(page);
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

void free_compound_page(struct page *page);
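/*
 * Worked example (illustrative): for the head page of an order-2
 * compound page, compound_order() returns 2, so compound_nr() is
 * 1UL << 2 = 4 pages, page_size() is 4 * PAGE_SIZE and page_shift() is
 * PAGE_SHIFT + 2. Any page that is not a head page reports order 0
 * here, since only the head page carries the compound metadata.
 */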
#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case, we always want
 * pte_mkwrite. But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
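/*
 * Example (illustrative): fault handlers typically construct the new PTE
 * like this, so the pte only becomes writable when the VMA permits it:
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	if (write_fault)
 *		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */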
/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 * page_count() == 0 means the page is free. page->lru is then used for
 * freelist management in the buddy allocator.
 * page_count() > 0 means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
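/*
 * Worked example (illustrative, assuming a 64-bit kernel with
 * SECTIONS_WIDTH = 0, NODES_WIDTH = 10 and ZONES_WIDTH = 3):
 * SECTIONS_PGOFF = 64, NODES_PGOFF = 64 - 10 = 54 and
 * ZONES_PGOFF = 54 - 3 = 51, i.e. the node ID occupies bits 54..63 of
 * page->flags and the zone bits 51..53, leaving the low bits for the
 * page flags themselves.
 */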
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		__put_devmap_managed_page(page);
		return true;
	default:
		break;
	}
	return false;
}

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}
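/*
 * Example (illustrative): a typical speculative-reference pattern pairs
 * try_get_page() with put_page() (defined below):
 *
 *	if (!try_get_page(page))
 *		return -EAGAIN;		// page was already being freed
 *	...use the page...
 *	put_page(page);
 */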
static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch the refcount transition
	 * from 2 to 1: when the refcount reaches one, the page is free and
	 * we need to inform the device driver through a callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}

/**
 * put_user_page() - release a gup-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via get_user_pages*() must be released via
 * either put_user_page(), or one of the put_user_pages*() routines
 * below. This is so that eventually, pages that are pinned via
 * get_user_pages*() can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special
 * handling.
 *
 * put_user_page() and put_page() are not interchangeable, despite this early
 * implementation that makes them look the same. put_user_page() calls must
 * be perfectly matched up with get_user_page() calls.
 */
static inline void put_user_page(struct page *page)
{
	put_page(page);
}

void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
			       bool make_dirty);

void put_user_pages(struct page **pages, unsigned long npages);
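/*
 * Example (illustrative): a typical pin/use/release sequence using the
 * get_user_pages*() API declared later in this header:
 *
 *	ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
 *	if (ret <= 0)
 *		return ret ? ret : -EFAULT;
 *	...DMA into the pages...
 *	put_user_pages_dirty_lock(pages, ret, true);
 */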
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_KASAN_SW_TAGS
static inline u8 page_kasan_tag(const struct page *page)
{
	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}

static inline void page_kasan_tag_reset(struct page *page)
{
	page_kasan_tag_set(page, 0xff);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page. Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met, implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}
/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);
/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * pfns_vector_pages() or pfns_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
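/*
 * Example (illustrative): typical frame_vector usage, pinning a user
 * range and then accessing it as struct page pointers:
 *
 *	struct frame_vector *vec = frame_vector_create(nr_frames);
 *	int ret;
 *
 *	if (!vec)
 *		return -ENOMEM;
 *	ret = get_vaddr_frames(start, nr_frames, FOLL_WRITE, vec);
 *	if (ret > 0) {
 *		struct page **pages = frame_vector_pages(vec);
 *		...use pages[0..ret-1], checking for ERR_PTR()...
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */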
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page *page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * Does not attempt to fault pages in; may return fewer pages than
 * requested (a short count).
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process (per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may transiently go
	 * negative, but that is never a value users should see.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}
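/*
 * Illustrative sketch (not a definition from this header): reporting a
 * task's resident set in kilobytes from the helpers above; the surrounding
 * code is assumed:
 *
 *	unsigned long rss_kb = get_mm_rss(mm) << (PAGE_SHIFT - 10);
 *	unsigned long peak_kb = get_mm_hiwater_rss(mm) << (PAGE_SHIFT - 10);
 *
 * A path that instantiates a page charges the matching counter, picking
 * it via mm_counter():
 *
 *	inc_mm_counter(mm, mm_counter(page));
 */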
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

/*
 * The following ifdef is needed to get the 5level-fixup.h header to work.
 * Remove it when 5level-fixup.h has been removed.
 */
#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
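/*
 * Illustrative sketch (not a definition from this header): the allocation
 * helpers above are usually chained to materialise the page-table path
 * for an address, as the fault path does:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *	if (!pmd)
 *		return VM_FAULT_OOM;
 *
 * Each step returns the entry for the address at that level, allocating
 * the next-level table only if the entry was empty.
 */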
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore
	 * page->ptl) with 0. Make sure nobody took it into use in the
	 * meantime.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares
	 * storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?				\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? \
		NULL : pte_offset_kernel(pmd, address))
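/*
 * Illustrative sketch (not a definition from this header) of the
 * canonical locked-PTE access pattern built from the macros above:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return -ENOMEM;
 *	// *pte may now be inspected/modified under ptl
 *	pte_unmap_unlock(pte, ptl);
 */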
#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to. The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}
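/*
 * Illustrative sketch (not a definition from this header): huge-pmd paths
 * take the pmd-level lock much like the PTE pattern above:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *	// the pmd entry is now stable and may be examined/updated
 *	spin_unlock(ptl);
 */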
extern void __init pagecache_init(void);
extern void free_area_init(unsigned long *zones_size);
extern void __init free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it is within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, const char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it is within
 * the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}
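/*
 * Illustrative sketch (not a definition from this header): code that
 * temporarily takes a page out of buddy management pairs the two helpers
 * above so the managed-page accounting stays balanced:
 *
 *	mark_page_reserved(page);	// page now ignored by the allocator
 *	// ... use the page for a special purpose ...
 *	free_reserved_page(page);	// give it back to the buddy system
 */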
static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture-independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the maximum PFN of each zone. As a
 * basic usage example, an architecture is expected to do something like:
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range. Similarly,
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
		enum memmap_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
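/*
 * Illustrative sketch (not a definition from this header): walking all
 * VMAs that map a given file range, as rmap does when unmapping pagecache;
 * the offset/len variables are assumptions:
 *
 *	struct vm_area_struct *vma;
 *	pgoff_t first = offset >> PAGE_SHIFT;
 *	pgoff_t last = (offset + len - 1) >> PAGE_SHIFT;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		// vma maps at least one page in [first, last]
 *	}
 *
 * Callers must hold the mapping's i_mmap lock around the walk.
 */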
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last) \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
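/*
 * Illustrative sketch (not a definition from this header): an arch
 * get_unmapped_area() implementation typically fills the descriptor and
 * delegates to vm_unmapped_area(); the limits below are assumptions
 * modelled on a top-down layout:
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = len;
 *	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 *	info.high_limit = mm->mmap_base;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */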
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval
 * start_addr..end_addr-1, NULL if none. Assume start_addr < end_addr.
 */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
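/*
 * Illustrative sketch (not a definition from this header): find_vma()
 * returns the first VMA ending above the address, so callers must still
 * check that the address is not in the gap below it:
 *
 *	struct vm_area_struct *vma = find_vma(mm, addr);
 *
 *	if (!vma || addr < vma->vm_start)
 *		return -EFAULT;		// addr is not mapped
 *	// addr lies inside vma
 *
 * The mmap semaphore must be held across the lookup and any use of the
 * returned VMA.
 */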
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
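/*
 * Illustrative sketch (not a definition from this header): a character
 * driver's mmap() method mapping device memory with remap_pfn_range();
 * "dev_phys" is an assumed device-specific physical base address:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev_phys >> PAGE_SHIFT, size,
 *				       vma->vm_page_prot);
 *	}
 */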
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */

/*
 * NOTE on FOLL_LONGTERM:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * period _often_ under userspace control. This is in contrast to
 * iov_iter_get_pages(), where usages are transient.
 *
 * FIXME: For pages which are part of a filesystem, mappings are subject to the
 * lifetime enforced by the filesystem and we need guarantees that longterm
 * users like RDMA and V4L2 only establish mappings which coordinate usage with
 * the filesystem. Ideas for this coordination include revoking the longterm
 * pin, delaying writeback, bounce buffer page writeback, etc. Because FS DAX
 * was added after this problem with filesystems was found, FS DAX VMAs are
 * specifically failed. Filesystem pages are still subject to bugs and use of
 * FOLL_LONGTERM should be avoided on those pages.
 *
 * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
 * Currently only get_user_pages() and get_user_pages_fast() support this flag
 * and calls to get_user_pages_[un]locked are specifically not allowed. This
 * is due to an incompatibility with the FS DAX check and
 * FAULT_FLAG_ALLOW_RETRY.
 *
 * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
 * that region. And so CMA attempts to migrate the page before pinning when
 * FOLL_LONGTERM is specified.
 */
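/*
 * Illustrative sketch (not a definition from this header): a long-lived
 * pin, e.g. for an RDMA memory region, combines FOLL_LONGTERM with the
 * GUP variants that support it:
 *
 *	pinned = get_user_pages_fast(start, nr_pages,
 *				     FOLL_WRITE | FOLL_LONGTERM, pages);
 *
 * As noted above, FOLL_LONGTERM must not be passed to
 * get_user_pages_locked() or get_user_pages_unlocked().
 */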
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
				   unsigned long address, unsigned long size,
				   pte_fn_t fn, void *data);
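/*
 * Illustrative sketch (not a definition from this header): walking every
 * PTE in a range with apply_to_page_range(); the callback name and its
 * use of the data pointer are assumptions:
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)data)++;
 *		return 0;	// non-zero would abort the walk
 *	}
 *
 *	unsigned long nr = 0;
 *	err = apply_to_page_range(mm, start, size, count_present, &nr);
 *
 * apply_to_page_range() allocates missing page tables as it goes, while
 * apply_to_existing_page_range() only visits ranges that already have them.
 */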
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
#endif

#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_alloc);
#else
DECLARE_STATIC_KEY_FALSE(init_on_alloc);
#endif
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_unlikely(&init_on_alloc) &&
	    !page_poisoning_enabled())
		return true;
	return flags & __GFP_ZERO;
}

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_free);
#else
DECLARE_STATIC_KEY_FALSE(init_on_free);
#endif
static inline bool want_init_on_free(void)
{
	return static_branch_unlikely(&init_on_free) &&
	       !page_poisoning_enabled();
}

#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
#else
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
#endif

static inline bool debug_pagealloc_enabled(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif	/* CONFIG_HIBERNATION */
#else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif	/* CONFIG_HIBERNATION */
#endif	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */