/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);

#ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void *high_memory;
extern int page_cluster;
extern const int page_cluster_max;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/processor.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * Used to prevent the common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement its own version of
 * mm_zero_struct_page, it should wrap the define below in an #ifndef and
 * define its own version of this macro in <asm/pgtable.h>.
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
 * or reduces below 56. The idea is that the compiler optimizes out the
 * switch() statement and leaves only move/store instructions. The compiler
 * can also combine write statements if they are both assignments and can be
 * reordered; this can result in several of the writes here being dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	/* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 96);

	switch (sizeof(struct page)) {
	case 96:
		_pp[11] = 0;
		fallthrough;
	case 88:
		_pp[10] = 0;
		fallthrough;
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
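/*
 * Illustrative sketch (editor's note, not part of the kernel source): with
 * a 64-byte struct page on a 64-bit build, the switch above is expected to
 * compile down to eight plain stores, roughly:
 *
 *	_pp[7] = 0; _pp[6] = 0; _pp[5] = 0; _pp[4] = 0;
 *	_pp[3] = 0; _pp[2] = 0; _pp[1] = 0; _pp[0] = 0;
 *
 * i.e. the same effect as memset(page, 0, 64), but without the call.
 */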
/*
 * Default maximum number of active map areas; this limits the number of VMAs
 * per mm_struct. Users can override this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per VMA. In ELF, the number of sections is stored as an unsigned
 * short, so the number of sections at coredump time cannot exceed 65535.
 * Because the kernel adds some informative sections to the program image
 * when generating a coredump, we need some margin. The number of extra
 * sections is currently 1-3 and depends on the architecture; we use 5 as a
 * safe margin here. (With USHRT_MAX == 65535, the default below works out
 * to 65530.)
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools may be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
#else
#define nth_page(page,n) ((page) + (n))
#define folio_page_idx(folio, p)	((p) - &(folio)->page)
#endif

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* to align the pointer to the (prev) page boundary */
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
static inline struct folio *lru_to_folio(struct list_head *head)
{
	return list_entry((head)->prev, struct folio, lru);
}

void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea is to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing these, also update include/trace/events/mmflags.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and likewise for the r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080
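/*
 * Worked example (editor's note): the "VM_MAYREAD >> 4 == VM_READ"
 * relationship that mprotect() hardcodes holds for each of the bits:
 *
 *	VM_MAYREAD  (0x10) >> 4 == VM_READ  (0x01)
 *	VM_MAYWRITE (0x20) >> 4 == VM_WRITE (0x02)
 *	VM_MAYEXEC  (0x40) >> 4 == VM_EXEC  (0x04)
 */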
#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#else /* CONFIG_MMU */
#define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
#define VM_UFFD_MISSING	0
#endif /* CONFIG_MMU */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
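/*
 * Worked example (editor's note, assuming the usual arch encoding): a
 * protection key value occupies the VM_PKEY_BIT* flags starting at
 * VM_PKEY_SHIFT, least significant bit first, so pkey 5 (binary 0101)
 * would be represented by VM_PKEY_BIT0 | VM_PKEY_BIT2.
 */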
#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT	37
# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR		VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)


/*
 * Special vmas that are non-mergeable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask prevents a VMA from being scanned by khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT	(FAULT_FLAG_ALLOW_RETRY | \
				 FAULT_FLAG_KILLABLE | \
				 FAULT_FLAG_INTERRUPTIBLE)
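/*
 * Illustrative sketch (editor's example, not taken from a particular
 * architecture): a page fault handler typically starts from
 * FAULT_FLAG_DEFAULT and ORs in the fault-specific bits before calling
 * handle_mm_fault(). "is_write_fault" is a placeholder for the
 * arch-specific check.
 *
 *	unsigned int flags = FAULT_FLAG_DEFAULT;
 *
 *	if (user_mode(regs))
 *		flags |= FAULT_FLAG_USER;
 *	if (is_write_fault)
 *		flags |= FAULT_FLAG_WRITE;
 *	fault = handle_mm_fault(vma, address, flags, regs);
 */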
/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 * @flags: Fault flags.
 *
 * This is mostly used in places where we want to avoid holding the
 * mmap_lock for too long while waiting for another condition to change;
 * in that case we can be polite and release the mmap_lock on the first
 * attempt, to avoid starving other processes that also want the
 * mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }

/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * The MM layer fills in gfp_mask for page allocations, but the fault handler
 * might alter it if its implementation requires a different allocation
 * context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	union {
		pte_t orig_pte;		/* Value of PTE at the time of fault */
		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
					 * used by PMD fault only.
					 */
	};

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};
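/*
 * Illustrative sketch (editor's example; "my_fault" and "my_lookup_page"
 * are hypothetical names): a minimal ->fault handler looks up the backing
 * page from vmf->pgoff, takes a reference, and hands the page back via
 * vmf->page:
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 */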
/* page entry size for vm_ops->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fall back" to the task or system
	 * default policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
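/*
 * Illustrative sketch (editor's example; all "my_*" names are
 * placeholders): drivers usually define a static instance and install it
 * from their ->mmap() method:
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.open	= my_open,
 *		.close	= my_close,
 *		.fault	= my_fault,
 *	};
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_ops = &my_vm_ops;
 *		return 0;
 *	}
 */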
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	ACCESS_PRIVATE(vma, __vm_flags) = flags;
}

/* Use when VMA is part of the VMA tree and modifications need coordination */
static inline void vm_flags_reset(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	vm_flags_init(vma, flags);
}

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
				       vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	mmap_assert_write_locked(vma->vm_mm);
	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
}

/*
 * Use only if VMA is not part of the VMA tree or has no other users and
 * therefore needs no locking.
 */
static inline void __vm_flags_mod(struct vm_area_struct *vma,
				  vm_flags_t set, vm_flags_t clear)
{
	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
}

/*
 * Use only when the order of set/clear operations is unimportant, otherwise
 * use vm_flags_{set|clear} explicitly.
 */
static inline void vm_flags_mod(struct vm_area_struct *vma,
				vm_flags_t set, vm_flags_t clear)
{
	mmap_assert_write_locked(vma->vm_mm);
	__vm_flags_mod(vma, set, clear);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}
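/*
 * Illustrative sketch (editor's example) for the vm_flags_*() helpers
 * above: for a VMA that is already part of the VMA tree, flag updates
 * must happen under the mmap write lock, e.g.:
 *
 *	mmap_write_lock(vma->vm_mm);
 *	vm_flags_set(vma, VM_LOCKED);
 *	mmap_write_unlock(vma->vm_mm);
 *
 * vm_flags_init() is reserved for VMAs not yet visible to other users.
 */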
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vmi->mas.index = vma->vm_start;
	vmi->mas.last = vma->vm_end - 1;
	mas_store(&vmi->mas, vma);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
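/*
 * Illustrative sketch (editor's example): walking every VMA in an mm with
 * the iterator above. VMA_ITERATOR() is declared in mm_types.h:
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("%lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */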
#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inline because it is used only by slow
 * paths in userfaultfd.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
bool vma_is_anon_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

/*
 * compound_order() can be called without holding a reference, which means
 * that niceties like page_folio() don't work.  These callers should be
 * prepared to handle wild return values.  For example, PG_head may be
 * set before _folio_order is initialised, or this may be a tail page.
 * See compaction.c for some good examples.
 */
static inline unsigned int compound_order(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (!test_bit(PG_head, &folio->flags))
		return 0;
	return folio->_folio_order;
}

/**
 * folio_order - The allocation order of a folio.
 * @folio: The folio.
 *
 * A folio is composed of 2^order pages.  See get_order() for the definition
 * of order.
 *
 * Return: The order of the folio.
 */
static inline unsigned int folio_order(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;
	return folio->_folio_order;
}

#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts as a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables; each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

static inline int folio_put_testzero(struct folio *folio)
{
	return put_page_testzero(&folio->page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline bool get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

static inline struct folio *folio_get_nontail_page(struct page *page)
{
	if (unlikely(!get_page_unless_zero(page)))
		return NULL;
	return (struct folio *)page;
}
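/*
 * Illustrative sketch (editor's example): the usual speculative-reference
 * pattern built on get_page_unless_zero():
 *
 *	if (get_page_unless_zero(page)) {
 *		... the page cannot be freed while we hold the ref ...
 *		put_page(page);
 *	}
 *
 * If it returns false, the page was already on its way to being freed
 * and must not be touched.
 */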
extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

/*
 * How many times the entire folio is mapped as a single unit (eg by a
 * PMD or PUD entry).  This is probably not what you want, except for
 * debugging purposes - it does not include PTE-mapped sub-pages; look
 * at folio_mapcount() or page_mapcount() or total_mapcount() instead.
 */
static inline int folio_entire_mapcount(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	return atomic_read(&folio->_entire_mapcount) + 1;
}

/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test()
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

/**
 * page_mapcount() - Number of times this precise page is mapped.
 * @page: The page.
 *
 * The number of times this page is mapped.  If this page is part of
 * a large folio, it includes the number of times this page is mapped
 * as part of that folio.
 *
 * The result is undefined for pages which cannot be mapped into userspace
 * (for example, slab pages or special page types; see page_has_type()).
 * Such pages use this field in struct page differently.
 */
static inline int page_mapcount(struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (unlikely(PageCompound(page)))
		mapcount += folio_entire_mapcount(page_folio(page));

	return mapcount;
}

int folio_total_mapcount(struct folio *folio);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
static inline int folio_mapcount(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;
	return folio_total_mapcount(folio);
}

static inline int total_mapcount(struct page *page)
{
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;
	return folio_total_mapcount(page_folio(page));
}

static inline bool folio_large_is_mapped(struct folio *folio)
{
	/*
	 * Reading _entire_mapcount below could be omitted if hugetlb
	 * participated in incrementing nr_pages_mapped when compound mapped.
	 */
	return atomic_read(&folio->_nr_pages_mapped) > 0 ||
	       atomic_read(&folio->_entire_mapcount) >= 0;
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
static inline bool folio_mapped(struct folio *folio)
{
	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) >= 0;
	return folio_large_is_mapped(folio);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any sub-page is mapped, even if
 * this particular sub-page is not itself mapped by any PTE or PMD.
 */
static inline bool page_mapped(struct page *page)
{
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	return folio_large_is_mapped(page_folio(page));
}
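/*
 * Worked example (editor's note, following the mapcount scheme described
 * above): a PMD-mapped THP mapped by exactly one process has
 * folio_entire_mapcount() == 1, folio_mapcount() == 1, and
 * page_mapcount() == 1 for every subpage. If one subpage is additionally
 * PTE-mapped once, that subpage's page_mapcount() becomes 2 and
 * folio_mapcount() becomes 2, while the other subpages stay at 1.
 */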
static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

static inline struct folio *virt_to_folio(const void *x)
{
	struct page *page = virt_to_page(x);

	return page_folio(page);
}

void __folio_put(struct folio *folio);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);

unsigned long nr_free_buffer_pages(void);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	struct folio *folio = (struct folio *)page;

	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	VM_BUG_ON_PAGE(!PageHead(page), page);
	folio->_folio_dtor = compound_dtor;
}

static inline void folio_set_compound_dtor(struct folio *folio,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
	folio->_folio_dtor = compound_dtor;
}

void destroy_large_folio(struct folio *folio);

static inline void set_compound_order(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio->_folio_order = order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return compound_order(page);
}

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite, but get_user_pages() can cause write faults for mappings
 * that do not have writing enabled, when it is used by access_process_vm().
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
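/*
 * Illustrative sketch (editor's example): the typical use of
 * maybe_mkwrite() when a fault handler constructs a PTE, as in the
 * write-fault paths of mm/memory.c:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *
 * The PTE only becomes writable if the VMA itself permits writes.
 */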
/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0 means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc.  In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented.  (The responsibility
 * for freeing the kmalloc memory is the caller's, of course.)
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully.  The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space.  Usually, this is the address of a circular list of
 * the page's disk buffers.  PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping.  In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages.  These may become associated with the swapcache, and in
 * that case PG_swapcache is set, and page->private is an offset into the
 * swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page.  Setting PG_private should also increment the
 * refcount.  Each user mapping also holds a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */
#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);

bool __put_devmap_managed_page_refs(struct page *page, int refs);
static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	return __put_devmap_managed_page_refs(page, refs);
}
#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
{
	return false;
}
#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */

static inline bool put_devmap_managed_page(struct page *page)
{
	return put_devmap_managed_page_refs(page, 1);
}

/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
	((unsigned int) folio_ref_count(folio) + 127u <= 127u)

/**
 * folio_get - Increment the reference count on a folio.
 * @folio: The folio.
 *
 * Context: May be called in any context, as long as you know that
 * you have a refcount on the folio.  If you do not already have one,
 * folio_try_get() may be the right interface for you to use.
 */
static inline void folio_get(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
	folio_ref_inc(folio);
}

static inline void get_page(struct page *page)
{
	folio_get(page_folio(page));
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put(struct folio *folio)
{
	if (folio_put_testzero(folio))
		__folio_put(folio);
}
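/*
 * Illustrative sketch (editor's example): a caller that already holds a
 * reference can take and drop an extra one around a window where the
 * folio must not disappear:
 *
 *	folio_get(folio);
 *	... operate on the folio ...
 *	folio_put(folio);
 *
 * After folio_put(), the folio must not be touched again unless another
 * reference is known to be held.
 */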
/**
 * folio_put_refs - Reduce the reference count on a folio.
 * @folio: The folio.
 * @refs: The amount to subtract from the folio's reference count.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put_refs() unless you can be sure that these weren't
 * the last references.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put_refs(struct folio *folio, int refs)
{
	if (folio_ref_sub_and_test(folio, refs))
		__folio_put(folio);
}

/*
 * union release_pages_arg - an array of pages or folios
 *
 * release_pages() releases a simple array of multiple pages, and
 * accepts various different forms of said page array: either
 * a regular old boring array of pages, an array of folios, or
 * an array of encoded page pointers.
 *
 * The transparent union syntax for this kind of "any of these
 * argument types" is all kinds of ugly, so look away.
 */
typedef union {
	struct page **pages;
	struct folio **folios;
	struct encoded_page **encoded_pages;
} release_pages_arg __attribute__ ((__transparent_union__));

void release_pages(release_pages_arg, int nr);

/**
 * folios_put - Decrement the reference count on an array of folios.
 * @folios: The folios.
 * @nr: How many folios there are.
 *
 * Like folio_put(), but for an array of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which
 * need to be taken if the folios are freed.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folios_put(struct folio **folios, unsigned int nr)
{
	release_pages(folios, nr);
}

static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/*
	 * For some devmap managed pages we need to catch the refcount
	 * transition from 2 to 1:
	 */
	if (put_devmap_managed_page(&folio->page))
		return;
	folio_put(folio);
}

/*
 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
 * the page's refcount so that two separate items are tracked: the original
 * page reference count, and also a new count of how many pin_user_pages()
 * calls were made against the page ("gup-pinned" is another term for the
 * latter).
 *
 * With this scheme, pin_user_pages() becomes special: such pages are marked
 * as distinct from normal pages. As such, the unpin_user_page() call (and
 * its variants) must be used in order to release gup-pinned pages.
 *
 * Choice of value:
 *
 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
 * counts with respect to pin_user_pages() and unpin_user_page() becomes
 * simpler, due to the fact that adding an even power of two to the page
 * refcount has the effect of using only the upper N bits, for the code that
 * counts up using the bias value. This means that the lower bits are left for
 * the exclusive use of the original code that increments and decrements by one
 * (or at least, by much smaller values than the bias value).
 *
 * Of course, once the lower bits overflow into the upper bits (and this is
 * OK, because subtraction recovers the original values), then visual
 * inspection no longer suffices to directly view the separate counts.
 * However, for normal applications that don't have huge page reference
 * counts, this won't be an issue.
 *
 * Locking: the lockless algorithm described in folio_try_get_rcu()
 * provides safe operation for get_user_pages(), page_mkclean() and
 * other calls that race to set up page table entries.
 */
#define GUP_PIN_COUNTING_BIAS (1U << 10)

void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
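/*
 * Illustrative sketch (editor's example; see
 * Documentation/core-api/pin_user_pages.rst): pages pinned via the
 * pin_user_pages() family must be released with unpin_user_page() or one
 * of the unpin_user_pages*() variants, never with plain put_page():
 *
 *	if (pin_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1) {
 *		... DMA to/from the page ...
 *		unpin_user_page(page);
 *	}
 */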
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

#ifndef CONFIG_MMU
static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
	/*
	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
	 * underlying memory if ptrace is active, so this is only possible if
	 * ptrace does not apply. Note that there is no mprotect() to upgrade
	 * write permissions later.
	 */
	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
#endif

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone, since we could be using the section number id if we do not have
 * the node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline int folio_nid(const struct folio *folio)
{
	return page_to_nid(&folio->page);
}

#ifdef CONFIG_NUMA_BALANCING
/* page access time bits need to hold at least 4 seconds */
#define PAGE_ACCESS_TIME_MIN_BITS	12
#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
#define PAGE_ACCESS_TIME_BUCKETS				\
	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
#else
#define PAGE_ACCESS_TIME_BUCKETS	0
#endif

#define PAGE_ACCESS_TIME_MASK				\
	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)

static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}
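/*
 * Worked example (editor's note): cpu_pid_to_cpupid() packs the CPU into
 * the high bits and the PID into the low LAST__PID_SHIFT bits, so the
 * accessors above round-trip the truncated values:
 *
 *	int cpupid = cpu_pid_to_cpupid(cpu, pid);
 *
 *	cpupid_to_cpu(cpupid) == (cpu & LAST__CPU_MASK);
 *	cpupid_to_pid(cpupid) == (pid & LAST__PID_MASK);
 */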
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

static inline int xchg_page_access_time(struct page *page, int time)
{
	int last_time;

	last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
	return last_time << PAGE_ACCESS_TIME_BUCKETS;
}
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int xchg_page_access_time(struct page *page, int time)
{
	return 0;
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/*
 * KASAN per-page tags are stored xor'ed with 0xff. This way the default
 * page->flags value of 0x00 maps to the native kernel tag 0xff, which
 * avoids having to explicitly set the tag of every page.
 */
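/*
 * Worked example (editor's note): with the 0xff XOR scheme above, a page
 * whose flags field still holds the default 0x00 reads back as the native
 * tag 0xff, and page_kasan_tag_set(page, 0xab) stores 0xab ^ 0xff = 0x54
 * in page->flags, which page_kasan_tag() XORs back to 0xab.
 */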
static inline u8 page_kasan_tag(const struct page *page)
{
	u8 tag = 0xff;

	if (kasan_enabled()) {
		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
		tag ^= 0xff;
	}

	return tag;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	unsigned long old_flags, flags;

	if (!kasan_enabled())
		return;

	tag ^= 0xff;
	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}

static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, 0xff);
}

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

static inline struct zone *folio_zone(const struct folio *folio)
{
	return page_zone(&folio->page);
}

static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
	return page_pgdat(&folio->page);
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

/**
 * folio_pfn - Return the Page Frame Number of a folio.
 * @folio: The folio.
 *
 * A folio may contain multiple pages.  The pages have consecutive
 * Page Frame Numbers.
 *
 * Return: The Page Frame Number of the first page in the folio.
 */
static inline unsigned long folio_pfn(struct folio *folio)
{
	return page_to_pfn(&folio->page);
}

static inline struct folio *pfn_folio(unsigned long pfn)
{
	return page_folio(pfn_to_page(pfn));
}

/**
 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
 * @folio: The folio.
 *
 * This function checks if a folio has been pinned via a call to
 * a function in the pin_user_pages() family.
 *
 * For small folios, the return value is partially fuzzy: false is not fuzzy,
 * because it means "definitely not pinned for DMA", but true means "probably
 * pinned for DMA, but possibly a false positive due to having at least
 * GUP_PIN_COUNTING_BIAS worth of normal folio references".
 *
 * False positives are OK, because: a) it's unlikely for a folio to
 * get that many refcounts, and b) all the callers of this routine are
 * expected to be able to deal gracefully with a false positive.
 *
 * For large folios, the result will be exactly correct.  That's because
 * we have more tracking data available: the _pincount field is used
 * instead of the GUP_PIN_COUNTING_BIAS scheme.
 *
 * For more information, please see Documentation/core-api/pin_user_pages.rst.
 *
 * Return: True, if it is likely that the page has been "dma-pinned".
 * False, if the page is definitely not dma-pinned.
 */
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
	if (folio_test_large(folio))
		return atomic_read(&folio->_pincount) > 0;

	/*
	 * folio_ref_count() is signed.  If that refcount overflows, then
	 * folio_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the sign bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)folio_ref_count(folio)) >=
		GUP_PIN_COUNTING_BIAS;
}

static inline bool page_maybe_dma_pinned(struct page *page)
{
	return folio_maybe_dma_pinned(page_folio(page));
}

/*
 * This should most likely only be called during fork() to see whether we
 * should break COW immediately for an anon page on the src mm.
 *
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
 */
static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
					  struct page *page)
{
	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));

	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
		return false;

	return page_maybe_dma_pinned(page);
}

/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
#ifdef CONFIG_MIGRATION
static inline bool is_longterm_pinnable_page(struct page *page)
{
#ifdef CONFIG_CMA
	int mt = get_pageblock_migratetype(page);

	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
		return false;
#endif
	/* The zero page may always be pinned */
	if (is_zero_pfn(page_to_pfn(page)))
		return true;

	/* Coherent device memory must always allow eviction. */
	if (is_device_coherent_page(page))
		return false;

	/* Otherwise, non-movable zone pages can be pinned. */
	return !is_zone_movable_page(page);
}
#else
static inline bool is_longterm_pinnable_page(struct page *page)
{
	return true;
}
#endif

static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
	return is_longterm_pinnable_page(&folio->page);
}

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
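
/*
 * Illustrative sketch (not part of the API): a fork()-style copy path
 * would consult page_needs_cow_for_dma() above to decide between sharing
 * a page read-only and copying it eagerly; copy_this_page() is a
 * hypothetical helper standing in for mm/memory.c internals:
 *
 *	if (page_needs_cow_for_dma(src_vma, page))
 *		return copy_this_page(dst_vma, page);	// copy now, don't share
 *	// otherwise map the page read-only in both mms and COW lazily
 */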

/**
 * folio_nr_pages - The number of pages in the folio.
 * @folio: The folio.
 *
 * Return: A positive power of two.
 */
static inline long folio_nr_pages(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 1;
#ifdef CONFIG_64BIT
	return folio->_folio_nr_pages;
#else
	return 1L << folio->_folio_order;
#endif
}

/*
 * compound_nr() returns the number of pages in this potentially compound
 * page.  compound_nr() can be called on a tail page, and is defined to
 * return 1 in that case.
 */
static inline unsigned long compound_nr(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (!test_bit(PG_head, &folio->flags))
		return 1;
#ifdef CONFIG_64BIT
	return folio->_folio_nr_pages;
#else
	return 1L << folio->_folio_order;
#endif
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	return folio_nr_pages((struct folio *)page);
}

/**
 * folio_next - Move to the next physical folio.
 * @folio: The folio we're currently operating on.
 *
 * If you have physically contiguous memory which may span more than
 * one folio (eg a &struct bio_vec), use this function to move from one
 * folio to the next.  Do not use it if the memory is only virtually
 * contiguous as the folios are almost certainly not adjacent to each
 * other.  This is the folio equivalent to writing ``page++``.
 *
 * Context: We assume that the folios are refcounted and/or locked at a
 * higher level and do not adjust the reference counts.
 * Return: The next struct folio.
 */
static inline struct folio *folio_next(struct folio *folio)
{
	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
}

/**
 * folio_shift - The size of the memory described by this folio.
 * @folio: The folio.
 *
 * A folio represents a number of bytes which is a power-of-two in size.
 * This function tells you which power-of-two the folio is.  See also
 * folio_size() and folio_order().
 *
 * Context: The caller should have a reference on the folio to prevent
 * it from being split.  It is not necessary for the folio to be locked.
 * Return: The base-2 logarithm of the size of this folio.
 */
static inline unsigned int folio_shift(struct folio *folio)
{
	return PAGE_SHIFT + folio_order(folio);
}

/**
 * folio_size - The number of bytes in a folio.
 * @folio: The folio.
 *
 * Context: The caller should have a reference on the folio to prevent
 * it from being split.  It is not necessary for the folio to be locked.
 * Return: The number of bytes in this folio.
 */
static inline size_t folio_size(struct folio *folio)
{
	return PAGE_SIZE << folio_order(folio);
}
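
/*
 * Illustrative sketch (not part of the API): walking a physically
 * contiguous buffer one folio at a time with folio_next() and
 * folio_size(); process_folio() is a hypothetical per-folio callback,
 * and the buffer is assumed to start at a folio boundary:
 *
 *	struct folio *folio = page_folio(first_page);
 *	size_t remaining = total_len;
 *
 *	while (remaining) {
 *		process_folio(folio);
 *		remaining -= min(remaining, folio_size(folio));
 *		folio = folio_next(folio);
 *	}
 */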

/**
 * folio_estimated_sharers - Estimate the number of sharers of a folio.
 * @folio: The folio.
 *
 * folio_estimated_sharers() aims to serve as a function to efficiently
 * estimate the number of processes sharing a folio.  This is done by
 * looking at the precise mapcount of the first subpage in the folio, and
 * assuming the other subpages are the same.  This may not be true for large
 * folios.  If you want exact mapcounts for exact calculations, look at
 * page_mapcount() or folio_total_mapcount().
 *
 * Return: The estimated number of processes sharing a folio.
 */
static inline int folio_estimated_sharers(struct folio *folio)
{
	return page_mapcount(folio_page(folio, 0));
}

#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
static inline int arch_make_page_accessible(struct page *page)
{
	return 0;
}
#endif

#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
	int ret;
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++) {
		ret = arch_make_page_accessible(folio_page(folio, i));
		if (ret)
			break;
	}

	return ret;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

static inline void *folio_address(const struct folio *folio)
{
	return page_address(&folio->page);
}

extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}
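
/*
 * Illustrative sketch (not part of the API): for a folio that is
 * directly mapped (i.e. not in highmem), folio_address() above yields
 * its kernel virtual address, so the whole folio can be cleared with:
 *
 *	memset(folio_address(folio), 0, folio_size(folio));
 */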

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(const struct page *page)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return (uintptr_t)page->lru.next & BIT(1);
}

/*
 * Return true only if the folio has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return (uintptr_t)folio->lru.next & BIT(1);
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->lru.next = (void *)BIT(1);
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->lru.next = NULL;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
{
	__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
}

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct folio *single_folio;	/* Locked folio to be unmapped */
	bool even_cows;			/* Zap COWed private pages too? */
	zap_flags_t zap_flags;		/* Extra flags for zapping */
};

/*
 * Whether to drop the pte markers, for example, the uffd-wp information for
 * file-backed memory.  This should only be specified when we will completely
 * drop the page in the mm, either by truncation or unmapping of the vma.  By
 * default, the flag is not set.
 */
#define  ZAP_FLAG_DROP_MARKER		((__force zap_flags_t) BIT(0))
/* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
#define  ZAP_FLAG_UNMAP			((__force zap_flags_t) BIT(1))
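
/*
 * Illustrative sketch (not part of the API): a truncate-style caller
 * zapping one locked folio from a VMA while dropping pte markers, using
 * zap_page_range_single() declared below; the range values are
 * placeholders:
 *
 *	struct zap_details details = {
 *		.single_folio = folio,
 *		.even_cows = true,
 *		.zap_flags = ZAP_FLAG_DROP_MARKER,
 *	};
 *
 *	zap_page_range_single(vma, start, size, &details);
 */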

#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
void sched_mm_cid_fork(struct task_struct *t);
void sched_mm_cid_exit_signals(struct task_struct *t);
static inline int task_mm_cid(struct task_struct *t)
{
	return t->mm_cid;
}
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
static inline void sched_mm_cid_fork(struct task_struct *t) { }
static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
static inline int task_mm_cid(struct task_struct *t)
{
	/*
	 * Use the processor id as a fall-back when the mm cid feature is
	 * disabled.  This provides functional per-cpu data structure accesses
	 * in user-space, although it won't provide the memory usage benefits.
	 */
	return raw_smp_processor_id();
}
#endif

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);

struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
			   unsigned long size, struct zap_details *details);
static inline void zap_vma_pages(struct vm_area_struct *vma)
{
	zap_page_range_single(vma, vma->vm_start,
			      vma->vm_end - vma->vm_start, NULL);
}
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
		struct vm_area_struct *start_vma, unsigned long start,
		unsigned long end, bool mm_wr_locked);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_pte(struct mm_struct *mm, unsigned long address,
	       pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int generic_error_remove_page(struct address_space *mapping, struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
				  unsigned long address, unsigned int flags,
				  struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
					 unsigned long address, unsigned int flags,
					 struct pt_regs *regs)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void
unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
			      void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

struct kvec;
struct page *get_dump_page(unsigned long addr);

bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
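
/*
 * Illustrative sketch (not part of the API): a driver pinning user pages
 * for long-lived DMA with pin_user_pages_fast() above, and the mandatory
 * unpin when the I/O completes; NR_PAGES is a placeholder:
 *
 *	struct page *pages[NR_PAGES];
 *	int n;
 *
 *	n = pin_user_pages_fast(uaddr, NR_PAGES,
 *				FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (n <= 0)
 *		return n ? n : -EFAULT;
 *	... set up and run the DMA ...
 *	unpin_user_pages(pages, n);
 */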

/*
 * Flags used by change_protection().  For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters.  However,
 * for now all callers only use one of the flags at a time.
 */
/*
 * Whether we should manually check if we can map individual PTEs writable,
 * because something (e.g., COW, uffd-wp) blocks that from happening for all
 * PTEs automatically in a writable mapping.
 */
#define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define  MM_CP_PROT_NUMA                   (1UL << 1)
/* Whether this change is for write protecting */
#define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
#define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
#define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
					    MM_CP_UFFD_WP_RESOLVE)

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping.  For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
			      struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
	  unsigned long start, unsigned long end, unsigned long newflags);

/*
 * Doesn't attempt to fault pages in and may return a short count.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);

static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}

/*
 * Per-process (per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	return percpu_counter_read_positive(&mm->rss_stat[member]);
}

void mm_trace_rss_stat(struct mm_struct *mm, int member);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	percpu_counter_add(&mm->rss_stat[member], value);

	mm_trace_rss_stat(mm, member);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	percpu_counter_inc(&mm->rss_stat[member]);

	mm_trace_rss_stat(mm, member);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	percpu_counter_dec(&mm->rss_stat[member]);

	mm_trace_rss_stat(mm, member);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
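
/*
 * Illustrative sketch (not part of the API): get_locked_pte() above
 * returns a mapped PTE with its lock held; it pairs with
 * pte_unmap_unlock(), defined further down in this header:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (pte) {
 *		... inspect or modify *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */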

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
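
/*
 * Illustrative sketch (not part of the API): allocating the intermediate
 * levels down to a PMD for one address, in the style of callers in
 * mm/memory.c:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud;
 *	pmd_t *pmd;
 *
 *	if (!p4d)
 *		return -ENOMEM;
 *	pud = pud_alloc(mm, p4d, addr);
 *	if (!pud)
 *		return -ENOMEM;
 *	pmd = pmd_alloc(mm, pud, addr);
 *	if (!pmd)
 *		return -ENOMEM;
 */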

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0.  Make sure nobody took it into use in between.
	 *
	 * That can happen if an arch tries to use slab for page table
	 * allocations: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?				\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? \
		NULL : pte_offset_kernel(pmd, address))
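
/*
 * Illustrative sketch (not part of the API): the canonical pattern for
 * examining one PTE under its (possibly split) lock, using the helpers
 * above:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		... the PTE cannot change under us here ...
 *	pte_unmap_unlock(pte, ptl);
 */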

#if USE_SPLIT_PMD_PTLOCKS

static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_pgtable_page(pmd));
}

static inline bool pmd_ptlock_init(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pmd_ptlock_free(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pmd_ptlock_init(struct page *page) { return true; }
static inline void pmd_ptlock_free(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
	if (!pmd_ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
	pmd_ptlock_free(page);
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system.  The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, const char *s);

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(void);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
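
/*
 * Illustrative sketch (not part of the API): serialising a huge-PMD
 * check with pmd_lock() above, roughly as THP code does:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	if (pmd_trans_huge(*pmd))
 *		... operate on the huge mapping ...
 *	spin_unlock(ptl);
 */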

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	adjust_managed_page_count(page, 1);
}
#define free_highmem_page(page) free_reserved_page(page)

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel image (initmem)");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init(), passing in the PFN each zone ends at.  In basic usage,
 * an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 						 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
 * free_area_init(max_zone_pfns);
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);

#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
		unsigned long, unsigned long, enum meminit_context,
		struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);

extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
{
	__show_mem(flags, nodemask, MAX_NR_ZONES - 1);
}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
		      unsigned long start, unsigned long end, pgoff_t pgoff,
		      struct vm_area_struct *next);
extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
		      unsigned long start, unsigned long end, pgoff_t pgoff);
extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
	unsigned long end, unsigned long vm_flags, struct anon_vma *,
	struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx,
	struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
		       unsigned long addr, int new_below);
extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
		     unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
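
/*
 * Illustrative sketch (not part of the API): rmap-style traversal of
 * every VMA mapping a given file range, using the iterator above (the
 * caller is expected to hold the relevant i_mmap lock):
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		... vma maps at least part of [first, last] ...
 *	}
 */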

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
			 unsigned long start, size_t len, struct list_head *uf,
			 bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
			 unsigned long start, unsigned long end,
			 struct list_head *uf, bool downgrade);
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);
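
/*
 * Illustrative sketch (not part of the API): an in-kernel anonymous
 * mapping created with vm_mmap() above and torn down with vm_munmap():
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 *	...
 *	vm_munmap(addr, PAGE_SIZE);
 */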

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval [start_addr, end_addr),
 * NULL if none.  Assume start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
			unsigned long start_addr, unsigned long end_addr);

/**
 * vma_lookup() - Find a VMA at a specific address
 * @mm: The process address space.
 * @addr: The user address.
 *
 * Return: The vm_area_struct at the given address, %NULL otherwise.
 */
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	return mtree_load(&mm->mm_mt, addr);
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
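
/*
 * Illustrative sketch (not part of the API): looking up the VMA covering
 * one user address with vma_lookup() above; the mmap lock must be held
 * across both the lookup and any use of the result:
 *
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		... addr lies within [vma->vm_start, vma->vm_end) ...
 *	mmap_read_unlock(mm);
 */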

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = vma_lookup(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

void vma_set_file(struct vm_area_struct *vma, struct file *file);

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);
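
/*
 * Illustrative sketch (not part of the API): a driver ->fault() handler
 * built on vmf_insert_page() above; my_fault() and the page lookup are
 * hypothetical:
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */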

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

/*
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
static inline bool gup_can_follow_protnone(unsigned int flags)
{
	/*
	 * FOLL_FORCE has to be able to make progress even if the VMA is
	 * inaccessible.  Further, FOLL_FORCE access usually does not represent
	 * application behaviour and we should avoid triggering NUMA hinting
	 * faults.
	 */
	return flags & FOLL_FORCE;
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
				   unsigned long address, unsigned long size,
				   pte_fn_t fn, void *data);

extern void __init init_mem_debugging_and_hardening(void);
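
/*
 * Illustrative sketch (not part of the API): a pte_fn_t callback applied
 * to every PTE of a kernel range with apply_to_page_range() above;
 * mark_pte_cb() is hypothetical:
 *
 *	static int mark_pte_cb(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		... inspect or rewrite *pte for this addr ...
 *		return 0;	// a non-zero return aborts the walk
 *	}
 *
 *	apply_to_page_range(&init_mm, start, size, mark_pte_cb, NULL);
 */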

#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{
	return _page_poisoning_enabled_early;
}
/*
 * For use in fast paths after init_mem_debugging_and_hardening() has run,
 * or when a false negative result is not harmful when called too early.
 */
static inline bool page_poisoning_enabled_static(void)
{
	return static_branch_unlikely(&_page_poisoning_enabled);
}
static inline void kernel_poison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, numpages);
}
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_unpoison_pages(page, numpages);
}
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc))
		return true;
	return flags & __GFP_ZERO;
}

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				   &init_on_free);
}

extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after init_debug_pagealloc() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * To support DEBUG_PAGEALLOC, the architecture must ensure that
 * __kernel_map_pages() never fails.
 */
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);
}
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
#endif

void drop_slab(void);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
void pmd_init(void *addr);
void pud_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
			    struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
			      struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
		     unsigned long addr, unsigned long next);
int vmemmap_check_pmd(pmd_t *pmd, int node,
		      unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
	MF_UNPOISON = 1 << 4,
	MF_SW_SIMULATED = 1 << 5,
	MF_NO_RETRY = 1 << 6,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		      unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
}

static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					       bool *migratable_cleared)
{
	return 0;
}

static inline void num_poisoned_pages_inc(unsigned long pfn)
{
}

static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
#else
static inline void memblk_nr_poison_inc(unsigned long pfn)
{
}

static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
{
}
#endif

#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
{
	return -ENXIO;
}
#endif

#ifndef arch_is_platform_page
static inline bool arch_is_platform_page(u64 paddr)
{
	return false;
}
#endif
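/*
 * Illustrative sketch (not part of the mainline header): a machine-check
 * handler running in a context that cannot sleep can queue the failing
 * pfn and let deferred work invoke memory_failure().  MF_ACTION_REQUIRED
 * marks a synchronous fault that must be handled before the task can
 * continue.  example_report_hwpoison() is a hypothetical helper.
 *
 *	static void example_report_hwpoison(u64 paddr)
 *	{
 *		memory_failure_queue(PHYS_PFN(paddr), MF_ACTION_REQUIRED);
 *	}
 */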
/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_UNKNOWN,
};

/*
 * Sysfs entries for memory failure handling statistics.
 */
extern const struct attribute_group memory_failure_attr_group;

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				     const void __user *usr_src,
				     unsigned int pages_per_huge_page,
				     bool allow_pagefault);
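/*
 * Illustrative sketch (not part of the mainline header): a fault path
 * clearing a freshly allocated huge page.  Passing the faulting address
 * as @addr_hint lets clear_huge_page() clear the subpage around the
 * fault last, so it is still cache-hot when userspace first touches it.
 * 'page', 'address' and 'nr_subpages' stand in for the caller's state.
 *
 *	clear_huge_page(page, address, nr_subpages);
 */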
/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file &&
				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif

/**
 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do the proper check/handling
 * on the vma flags.  Return 0 if the check passes, or <0 on error.
 */
static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
	if (seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * the "future write" seal is active.
		 */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
			return -EPERM;

		/*
		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
		 * MAP_SHARED and read-only, take care to not allow mprotect to
		 * revert protections on such mappings.  Do this only for
		 * shared mappings.  For private mappings, there is no need to
		 * mask VM_MAYWRITE, as we still want them to be COW-writable.
		 */
		if (vma->vm_flags & VM_SHARED)
			vm_flags_clear(vma, VM_MAYWRITE);
	}

	return 0;
}
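/*
 * Illustrative sketch (not part of the mainline header): a shmem/memfd-style
 * ->mmap() handler applies the seal check before completing the mapping.
 * example_get_seals() is a hypothetical accessor for the file's seal bits.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		int ret = seal_check_future_write(example_get_seals(file), vma);
 *
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */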
#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in,
			  struct anon_vma_name *anon_name);
#else
static inline int
madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
		      unsigned long len_in, struct anon_vma_name *anon_name)
{
	return 0;
}
#endif

#endif /* _LINUX_MM_H */