/linux/mm/
mmap.c
      82: unsigned long vm_flags = vma->vm_flags;   in vma_set_page_prot() [local]
      87: vm_flags &= ~VM_SHARED;   in vma_set_page_prot()
     789: vm_flags);   in arch_get_unmapped_area()
     855: vm_flags);   in arch_get_unmapped_area_topdown()
    1476: if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {   in mmap_region()
    1493: vm_flags = vma->vm_flags;   in mmap_region()
    1499: vm_flags = vma->vm_flags;   in mmap_region()
    1646: vm_flags_t vm_flags;   in SYSCALL_DEFINE5() [local]
    1688: vm_flags = vma->vm_flags;   in SYSCALL_DEFINE5()
    1714: if (vma->vm_flags != vm_flags)   in SYSCALL_DEFINE5()
    [all …]
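The vma_set_page_prot() hits at lines 82 and 87 show the write-notify trick: for a shared mapping that needs write notifications, the page protection is computed as if VM_SHARED were clear, so PTEs start out write-protected and the first write traps. A minimal user-space sketch of that masking, using illustrative flag values rather than the kernel's definitions:

    #include <stdio.h>

    /* Illustrative values; the real flags live in include/linux/mm.h. */
    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_SHARED 0x8UL

    /* Stand-in for vm_get_page_prot(): here the "protection" is just the
     * flag word itself, which is enough to show the masking. */
    static unsigned long prot_for(unsigned long vm_flags, int wants_writenotify)
    {
        if (wants_writenotify)
            vm_flags &= ~VM_SHARED;  /* mirrors line 87 */
        return vm_flags;
    }

    int main(void)
    {
        /* A shared rw mapping computed as private: writes fault first. */
        printf("%#lx\n", prot_for(VM_READ | VM_WRITE | VM_SHARED, 1));
        return 0;
    }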
nommu.c
     844: unsigned long vm_flags;   in determine_vm_flags() [local]
     870: vm_flags |= VM_MAYOVERLAY;   in determine_vm_flags()
     873: vm_flags |= VM_SHARED | VM_MAYSHARE |   in determine_vm_flags()
     877: return vm_flags;   in determine_vm_flags()
     957: region->vm_flags = vma->vm_flags;   in do_mmap_private()
    1008: vm_flags_t vm_flags,   in do_mmap() [argument]
    1048: region->vm_flags = vm_flags;   in do_mmap()
    1051: vm_flags_init(vma, vm_flags);   in do_mmap()
    1607: if (!(vma->vm_flags & VM_USERMAP))   in remap_vmalloc_range()
    1734: if (vma->vm_flags & VM_SHARED) {   in nommu_shrink_inode_mappings()
    [all …]
mremap.c
     666: unsigned long vm_flags = vma->vm_flags;   in move_vma() [local]
     703: MADV_UNMERGEABLE, &vm_flags);   in move_vma()
     707: if (vm_flags & VM_ACCOUNT) {   in move_vma()
     717: if (vm_flags & VM_ACCOUNT)   in move_vma()
     772: if (unlikely(vma->vm_flags & VM_PFNMAP))   in move_vma()
     799: if (vm_flags & VM_LOCKED) {   in move_vma()
     867: if (!may_expand_vm(mm, vma->vm_flags,   in vma_to_resize()
     946: if (vma->vm_flags & VM_MAYSHARE)   in mremap_to()
    1125: if (vma->vm_flags & VM_ACCOUNT) {   in SYSCALL_DEFINE5()
    1150: if (vma->vm_flags & VM_LOCKED) {   in SYSCALL_DEFINE5()
    [all …]
userfaultfd.c
      50: else if (!(vma->vm_flags & VM_SHARED) &&   in find_vma_and_prepare_anon()
     683: if (!(dst_vma->vm_flags & VM_SHARED)) {   in mfill_atomic_pte()
     755: dst_vma->vm_flags & VM_SHARED))   in mfill_atomic()
    1366: if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||   in validate_move_areas()
    1371: if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))   in validate_move_areas()
    1378: if (!(src_vma->vm_flags & VM_WRITE))   in validate_move_areas()
    1628: if (src_vma->vm_flags & VM_SHARED)   in move_pages()
    1633: if (dst_vma->vm_flags & VM_SHARED)   in move_pages()
    1827: unsigned long vm_flags,   in userfaultfd_register_range() [argument]
    1852: (vma->vm_flags & vm_flags) == vm_flags)   in userfaultfd_register_range()
    [all …]
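The validate_move_areas() hits (lines 1366, 1371, 1378) show the compatibility test for moving pages between two VMAs: the masked access bits and mlock state of source and destination must agree, and the source must be writable. A standalone sketch of that check, with illustrative flag values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative values; real definitions are in include/linux/mm.h. */
    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_LOCKED 0x2000UL
    #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

    /* Pages may only move between areas whose access bits and mlock
     * state match, and only out of a writable source. */
    static bool move_ok(unsigned long src_flags, unsigned long dst_flags)
    {
        if ((src_flags & VM_ACCESS_FLAGS) != (dst_flags & VM_ACCESS_FLAGS))
            return false;
        if ((src_flags & VM_LOCKED) != (dst_flags & VM_LOCKED))
            return false;
        return (src_flags & VM_WRITE) != 0;
    }

    int main(void)
    {
        printf("%d\n", move_ok(VM_READ | VM_WRITE, VM_READ | VM_WRITE)); /* 1 */
        printf("%d\n", move_ok(VM_READ, VM_READ));                       /* 0 */
        return 0;
    }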
execmem.c
      20: unsigned long vm_flags = VM_FLUSH_RESET_PERMS;   in __execmem_alloc() [local]
      29: vm_flags |= VM_DEFER_KMEMLEAK;   in __execmem_alloc()
      32: pgprot, vm_flags, NUMA_NO_NODE,   in __execmem_alloc()
      38: pgprot, vm_flags, NUMA_NO_NODE,   in __execmem_alloc()
mseal.c
      42: if (vma->vm_file || vma->vm_flags & VM_SHARED)   in is_ro_anon()
      49: if (!(vma->vm_flags & VM_WRITE) ||   in is_ro_anon()
      76: vm_flags_t oldflags = vma->vm_flags;   in mseal_fixup()
     147: newflags = vma->vm_flags | VM_SEALED;   in apply_mm_seal()
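mseal.c first classifies a VMA as read-only anonymous (no backing file, not shared, not writable) and then seals by OR-ing a flag in (line 147). A sketch of that shape; the struct, the placeholder VM_SEALED bit, and the omission of the truncated second half of the line-49 condition are all simplifications, not the kernel's definitions:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative values; VM_SEALED's bit here is made up. */
    #define VM_WRITE  0x2UL
    #define VM_SHARED 0x8UL
    #define VM_SEALED (1UL << 31)

    struct vma_sketch {
        void *vm_file;            /* non-NULL means file-backed */
        unsigned long vm_flags;
    };

    /* Anonymous, private, and not writable. The listing truncates the
     * second half of the condition at line 49, so it is left out here. */
    static bool is_ro_anon(const struct vma_sketch *vma)
    {
        if (vma->vm_file || (vma->vm_flags & VM_SHARED))
            return false;
        return !(vma->vm_flags & VM_WRITE);
    }

    /* Sealing itself is one bit: newflags = vm_flags | VM_SEALED. */
    static unsigned long seal(unsigned long vm_flags)
    {
        return vm_flags | VM_SEALED;
    }

    int main(void)
    {
        struct vma_sketch v = { NULL, 0 };  /* anonymous PROT_NONE mapping */
        return !(is_ro_anon(&v) && (seal(v.vm_flags) & VM_SEALED));
    }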
mlock.c
     334: if (!(vma->vm_flags & VM_LOCKED))   in allow_mlock_munlock()
     371: if (vma->vm_flags & VM_LOCKED)   in mlock_pte_range()
     396: if (vma->vm_flags & VM_LOCKED)   in mlock_pte_range()
     473: vm_flags_t oldflags = vma->vm_flags;   in mlock_fixup()
     545: newflags = vma->vm_flags & ~VM_LOCKED_MASK;   in apply_vma_lock_flags()
     586: if (vma->vm_flags & VM_LOCKED) {   in count_mm_mlocked_page_nr()
     666: vm_flags_t vm_flags = VM_LOCKED;   in SYSCALL_DEFINE3() [local]
     672: vm_flags |= VM_LOCKONFAULT;   in SYSCALL_DEFINE3()
     674: return do_mlock(start, len, vm_flags);   in SYSCALL_DEFINE3()
     730: newflags = vma->vm_flags & ~VM_LOCKED_MASK;   in apply_mlockall_flags()
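Lines 666-674 show mlock2() translating its syscall flag into VMA bits (VM_LOCKED, plus VM_LOCKONFAULT for MLOCK_ONFAULT), while lines 545 and 730 clear VM_LOCKED_MASK before applying new bits, so lock state is replaced rather than accumulated. A sketch with illustrative values:

    #include <stdio.h>

    /* Illustrative values; see include/linux/mm.h and the uapi mman
     * headers for the real definitions. */
    #define VM_LOCKED      0x2000UL
    #define VM_LOCKONFAULT 0x80000UL
    #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
    #define MLOCK_ONFAULT  0x01

    /* Mirrors the mlock2() entry point: syscall flag to VMA flag bits. */
    static unsigned long mlock2_vm_flags(int syscall_flags)
    {
        unsigned long vm_flags = VM_LOCKED;

        if (syscall_flags & MLOCK_ONFAULT)
            vm_flags |= VM_LOCKONFAULT;
        return vm_flags;
    }

    /* Mirrors apply_vma_lock_flags(): replace, don't accumulate. */
    static unsigned long apply_lock_flags(unsigned long old, unsigned long to_add)
    {
        return (old & ~VM_LOCKED_MASK) | to_add;
    }

    int main(void)
    {
        unsigned long f = mlock2_vm_flags(MLOCK_ONFAULT);

        printf("%#lx -> %#lx\n", f, apply_lock_flags(0x7UL | VM_LOCKED, f));
        return 0;
    }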
/linux/include/trace/events/
fs_dax.h
      18: __field(unsigned long, vm_flags)
      31: __entry->vm_flags = vmf->vma->vm_flags;
      43: __entry->vm_flags & VM_SHARED ? "shared" : "private",
      70: __field(unsigned long, vm_flags)
      79: __entry->vm_flags = vmf->vma->vm_flags;
     111: __field(unsigned long, vm_flags)
     122: __entry->vm_flags = vmf->vma->vm_flags;
     158: __field(unsigned long, vm_flags)
     168: __entry->vm_flags = vmf->vma->vm_flags;
     202: __field(unsigned long, vm_flags)
     [all …]
/linux/arch/powerpc/include/asm/book3s/64/
hash-pkey.h
       8: static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)   in hash__vmflag_to_pte_pkey_bits() [argument]
      10: return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |   in hash__vmflag_to_pte_pkey_bits()
      11: ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) |   in hash__vmflag_to_pte_pkey_bits()
      12: ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |   in hash__vmflag_to_pte_pkey_bits()
      13: ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) |   in hash__vmflag_to_pte_pkey_bits()
      14: ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL));   in hash__vmflag_to_pte_pkey_bits()
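hash__vmflag_to_pte_pkey_bits() is a pure bit-by-bit translation from the VM_PKEY_BIT* flags to the hash PTE's pkey bits. When both sides are tabulated, the same chain of ternaries can be written as a table walk; the bit positions below are placeholders, not powerpc's real values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder positions for the sketch; the real VM_PKEY_BIT* and
     * H_PTE_PKEY_BIT* values are arch-specific. */
    static const struct { uint64_t vm_bit, pte_bit; } pkey_map[] = {
        { 1ULL << 32, 1ULL << 4 },  /* VM_PKEY_BIT0 -> H_PTE_PKEY_BIT0 */
        { 1ULL << 33, 1ULL << 3 },
        { 1ULL << 34, 1ULL << 2 },
        { 1ULL << 35, 1ULL << 1 },
        { 1ULL << 36, 1ULL << 0 },
    };

    /* Same translation, written as a table walk instead of ternaries. */
    static uint64_t vmflag_to_pte_pkey_bits(uint64_t vm_flags)
    {
        uint64_t out = 0;

        for (size_t i = 0; i < sizeof(pkey_map) / sizeof(pkey_map[0]); i++)
            if (vm_flags & pkey_map[i].vm_bit)
                out |= pkey_map[i].pte_bit;
        return out;
    }

    int main(void)
    {
        printf("%#llx\n",
               (unsigned long long)vmflag_to_pte_pkey_bits(1ULL << 33));
        return 0;
    }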
pkeys.h
       8: static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)   in vmflag_to_pte_pkey_bits() [argument]
      15: return hash__vmflag_to_pte_pkey_bits(vm_flags);   in vmflag_to_pte_pkey_bits()
/linux/arch/arm64/mm/
mmap.c
      84: pgprot_t vm_get_page_prot(unsigned long vm_flags)   in vm_get_page_prot() [argument]
      86: pteval_t prot = pgprot_val(protection_map[vm_flags &   in vm_get_page_prot()
      89: if (vm_flags & VM_ARM64_BTI)   in vm_get_page_prot()
     102: if (vm_flags & VM_MTE)   in vm_get_page_prot()
     107: if (vm_flags & VM_PKEY_BIT0)   in vm_get_page_prot()
     109: if (vm_flags & VM_PKEY_BIT1)   in vm_get_page_prot()
     111: if (vm_flags & VM_PKEY_BIT2)   in vm_get_page_prot()
fault.c
     527: unsigned long vm_flags;   in do_page_fault() [local]
     555: vm_flags = VM_EXEC;   in do_page_fault()
     559: vm_flags = VM_WRITE;   in do_page_fault()
     563: vm_flags = VM_READ;   in do_page_fault()
     565: vm_flags |= VM_WRITE;   in do_page_fault()
     568: vm_flags |= VM_EXEC;   in do_page_fault()
     590: if (!(vma->vm_flags & vm_flags)) {   in do_page_fault()
     635: if (!(vma->vm_flags & vm_flags)) {   in do_page_fault()
     983: if (vma->vm_flags & VM_MTE)   in vma_alloc_zeroed_movable_folio()
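The do_page_fault() hits show the usual two-step check: first build the mask of rights this fault requires (an exec fault needs VM_EXEC, a write fault VM_WRITE, otherwise VM_READ), then reject the fault if the VMA grants none of them (lines 590 and 635). A condensed sketch with illustrative flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_READ  0x1UL  /* illustrative values */
    #define VM_WRITE 0x2UL
    #define VM_EXEC  0x4UL

    enum fault_kind { FAULT_READ, FAULT_WRITE, FAULT_EXEC };

    /* Mirrors lines 555-563: derive the rights this fault requires. */
    static unsigned long required_flags(enum fault_kind kind)
    {
        switch (kind) {
        case FAULT_EXEC:  return VM_EXEC;
        case FAULT_WRITE: return VM_WRITE;
        default:          return VM_READ;
        }
    }

    /* Mirrors lines 590/635: the VMA must grant a required bit. */
    static bool access_ok(unsigned long vma_flags, enum fault_kind kind)
    {
        return (vma_flags & required_flags(kind)) != 0;
    }

    int main(void)
    {
        /* Write fault into an r-x area: 0, i.e. the SIGSEGV path. */
        printf("%d\n", access_ok(VM_READ | VM_EXEC, FAULT_WRITE));
        return 0;
    }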
/linux/arch/sparc/include/asm/
mman.h
      57: #define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)   [argument]
      61: static inline bool arch_validate_flags(unsigned long vm_flags)   in arch_validate_flags() [argument]
      67: if (vm_flags & VM_SPARC_ADI) {   in arch_validate_flags()
      72: if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))   in arch_validate_flags()
      83: if (vm_flags & VM_MERGEABLE)   in arch_validate_flags()
/linux/include/linux/
userfaultfd_k.h
     168: return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);   in uffd_disable_huge_pmd_share()
     185: return vma->vm_flags & VM_UFFD_MISSING;   in userfaultfd_missing()
     190: return vma->vm_flags & VM_UFFD_WP;   in userfaultfd_wp()
     195: return vma->vm_flags & VM_UFFD_MINOR;   in userfaultfd_minor()
     212: return vma->vm_flags & __VM_UFFD_FLAGS;   in userfaultfd_armed()
     216: unsigned long vm_flags,   in vma_can_userfault() [argument]
     219: vm_flags &= __VM_UFFD_FLAGS;   in vma_can_userfault()
     221: if (vm_flags & VM_DROPPABLE)   in vma_can_userfault()
     224: if ((vm_flags & VM_UFFD_MINOR) &&   in vma_can_userfault()
     232: if (wp_async && (vm_flags == VM_UFFD_WP))   in vma_can_userfault()
     [all …]
mman.h
     192: static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)   in map_deny_write_exec() [argument]
     197: if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))   in map_deny_write_exec()
     200: if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))   in map_deny_write_exec()
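map_deny_write_exec() encodes the MDWE ("memory-deny-write-execute") policy in two tests: a mapping may not be writable and executable at the same time, and a mapping that was not executable may not become executable. A sketch of both checks, with illustrative flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_WRITE 0x2UL  /* illustrative values */
    #define VM_EXEC  0x4UL

    /* Mirrors the checks at lines 197 and 200: deny W+X, and deny
     * adding X to a mapping that did not already have it. */
    static bool deny_write_exec(unsigned long old_flags, unsigned long new_flags)
    {
        if ((new_flags & VM_EXEC) && (new_flags & VM_WRITE))
            return true;   /* writable and executable at once */
        if (!(old_flags & VM_EXEC) && (new_flags & VM_EXEC))
            return true;   /* non-exec mapping gaining execute */
        return false;
    }

    int main(void)
    {
        printf("%d\n", deny_write_exec(VM_WRITE, VM_WRITE | VM_EXEC)); /* 1 */
        printf("%d\n", deny_write_exec(VM_EXEC, VM_EXEC));             /* 0 */
        return 0;
    }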
huge_mm.h
      96: #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \   [argument]
     270: unsigned long vm_flags,
     291: unsigned long vm_flags,   in thp_vma_allowable_orders() [argument]
     299: if (vm_flags & VM_HUGEPAGE)   in thp_vma_allowable_orders()
     302: ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))   in thp_vma_allowable_orders()
     326: unsigned long vm_flags)   in vma_thp_disabled() [argument]
     333: return (vm_flags & VM_NOHUGEPAGE) ||   in vma_thp_disabled()
     347: vm_flags_t vm_flags);
     506: unsigned long vm_flags,   in thp_vma_allowable_orders() [argument]
     520: unsigned long flags, vm_flags_t vm_flags)   in thp_get_unmapped_area_vmflags() [argument]
     [all …]
/linux/arch/x86/mm/
pgprot.c
      35: pgprot_t vm_get_page_prot(unsigned long vm_flags)   in vm_get_page_prot() [argument]
      37: unsigned long val = pgprot_val(protection_map[vm_flags &   in vm_get_page_prot()
      48: if (vm_flags & VM_PKEY_BIT0)   in vm_get_page_prot()
      50: if (vm_flags & VM_PKEY_BIT1)   in vm_get_page_prot()
      52: if (vm_flags & VM_PKEY_BIT2)   in vm_get_page_prot()
      54: if (vm_flags & VM_PKEY_BIT3)   in vm_get_page_prot()
/linux/arch/arm64/include/asm/
mman.h
      63: static inline bool arch_validate_flags(unsigned long vm_flags)   in arch_validate_flags() [argument]
      69: return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);   in arch_validate_flags()
      71: #define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)   [argument]
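Line 69 is a logical implication spelled in flags: "!(A) || (B)" is "A implies B", so VM_MTE is only accepted where VM_MTE_ALLOWED is also set. A sketch with placeholder bit positions (the real arm64 values are high VMA flag bits):

    #include <assert.h>
    #include <stdbool.h>

    /* Placeholder bits for the sketch, not arm64's definitions. */
    #define VM_MTE         (1UL << 20)
    #define VM_MTE_ALLOWED (1UL << 21)

    /* A tagged (MTE) mapping is only valid on a VMA whose backing
     * allows tagging. */
    static bool arch_validate_flags(unsigned long vm_flags)
    {
        return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
    }

    int main(void)
    {
        assert(arch_validate_flags(0));
        assert(arch_validate_flags(VM_MTE | VM_MTE_ALLOWED));
        assert(!arch_validate_flags(VM_MTE)); /* MTE without permission */
        return 0;
    }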
/linux/arch/x86/kernel/
sys_x86_64.c
     115: static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)   in stack_guard_placement() [argument]
     117: if (vm_flags & VM_SHADOW_STACK)   in stack_guard_placement()
     125: unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)   in arch_get_unmapped_area() [argument]
     152: info.start_gap = stack_guard_placement(vm_flags);   in arch_get_unmapped_area()
     163: unsigned long flags, vm_flags_t vm_flags)   in arch_get_unmapped_area_topdown()
     202: info.start_gap = stack_guard_placement(vm_flags);   in arch_get_unmapped_area_topdown()
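stack_guard_placement() returns an extra start gap for the unmapped-area search when the mapping is a shadow stack, and zero otherwise; the search code then feeds it into info.start_gap (lines 152 and 202). A sketch; the bit position and the page-sized gap are assumptions, not the kernel's exact values:

    #include <stdio.h>

    #define VM_SHADOW_STACK (1UL << 24) /* placeholder bit for the sketch */
    #define GUARD_GAP       4096UL      /* assumed page-sized guard gap */

    /* Shadow-stack mappings get a guard gap in front of them during the
     * free-address-space search; ordinary mappings get none. */
    static unsigned long stack_guard_placement(unsigned long vm_flags)
    {
        if (vm_flags & VM_SHADOW_STACK)
            return GUARD_GAP;
        return 0;
    }

    int main(void)
    {
        printf("%lu %lu\n", stack_guard_placement(0),
               stack_guard_placement(VM_SHADOW_STACK));
        return 0;
    }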
/linux/tools/testing/selftests/bpf/progs/
bpf_iter_task_vmas.c
      43: perm_str[0] = (vma->vm_flags & VM_READ) ? 'r' : '-';   in proc_maps()
      44: perm_str[1] = (vma->vm_flags & VM_WRITE) ? 'w' : '-';   in proc_maps()
      45: perm_str[2] = (vma->vm_flags & VM_EXEC) ? 'x' : '-';   in proc_maps()
      46: perm_str[3] = (vma->vm_flags & VM_MAYSHARE) ? 's' : 'p';   in proc_maps()
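This selftest rebuilds the permission column of /proc/<pid>/maps from vm_flags. The same decode works verbatim in user space; the flag values below match the kernel's well-known low bits but should still be treated as illustrative:

    #include <stdio.h>

    #define VM_READ     0x1UL  /* illustrative; see include/linux/mm.h */
    #define VM_WRITE    0x2UL
    #define VM_EXEC     0x4UL
    #define VM_MAYSHARE 0x80UL

    /* Rebuilds the "rwxp"/"rw-s" style column of /proc/<pid>/maps. */
    static void perm_string(unsigned long vm_flags, char perm_str[5])
    {
        perm_str[0] = (vm_flags & VM_READ)     ? 'r' : '-';
        perm_str[1] = (vm_flags & VM_WRITE)    ? 'w' : '-';
        perm_str[2] = (vm_flags & VM_EXEC)     ? 'x' : '-';
        perm_str[3] = (vm_flags & VM_MAYSHARE) ? 's' : 'p';
        perm_str[4] = '\0';
    }

    int main(void)
    {
        char buf[5];

        perm_string(VM_READ | VM_WRITE, buf);
        printf("%s\n", buf);  /* prints "rw-p" */
        return 0;
    }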
/linux/arch/nios2/mm/
cacheflush.c
      90: if (!(vma->vm_flags & VM_MAYSHARE))   in flush_aliases()
     138: if (vma == NULL || (vma->vm_flags & VM_EXEC))   in flush_cache_range()
     159: if (vma->vm_flags & VM_EXEC)   in flush_cache_page()
     236: if (vma->vm_flags & VM_EXEC)   in update_mmu_cache_range()
     268: if (vma->vm_flags & VM_EXEC)   in copy_from_user_page()
     279: if (vma->vm_flags & VM_EXEC)   in copy_to_user_page()
/linux/arch/hexagon/mm/
vm_fault.c
      70: if (!(vma->vm_flags & VM_EXEC))   in do_page_fault()
      74: if (!(vma->vm_flags & VM_READ))   in do_page_fault()
      78: if (!(vma->vm_flags & VM_WRITE))   in do_page_fault()
/linux/drivers/sbus/char/
flash.c
      44: if ((vma->vm_flags & VM_READ) &&   in flash_mmap()
      45: (vma->vm_flags & VM_WRITE)) {   in flash_mmap()
      49: if (vma->vm_flags & VM_READ) {   in flash_mmap()
      52: } else if (vma->vm_flags & VM_WRITE) {   in flash_mmap()
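flash_mmap() appears to dispatch on the mapping's direction: a read mapping and a write mapping target different flash regions, and (reading lines 44-45) a mapping that is both readable and writable is handled as its own, rejected case. A hypothetical standalone sketch of that dispatch:

    #include <stdio.h>

    #define VM_READ  0x1UL  /* illustrative values */
    #define VM_WRITE 0x2UL

    enum flash_target { FLASH_NONE, FLASH_READ_REGION, FLASH_WRITE_REGION };

    /* A mapping may target the read region or the write region, but,
     * going by the listing, not both at once. */
    static enum flash_target pick_region(unsigned long vm_flags)
    {
        if ((vm_flags & VM_READ) && (vm_flags & VM_WRITE))
            return FLASH_NONE;  /* rw mappings are rejected */
        if (vm_flags & VM_READ)
            return FLASH_READ_REGION;
        if (vm_flags & VM_WRITE)
            return FLASH_WRITE_REGION;
        return FLASH_NONE;
    }

    int main(void)
    {
        printf("%d\n", pick_region(VM_READ));
        return 0;
    }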
/linux/tools/testing/vma/
vma_internal.h
     212: const vm_flags_t vm_flags;   [member]
     376: static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)   in vm_get_page_prot() [argument]
     378: return __pgprot(vm_flags);   in vm_get_page_prot()
     381: static inline bool is_shared_maywrite(vm_flags_t vm_flags)   in is_shared_maywrite() [argument]
     383: return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==   in is_shared_maywrite()
     389: return is_shared_maywrite(vma->vm_flags);   in vma_is_shared_maywrite()
     875: unsigned long vm_flags)   in khugepaged_enter_vma() [argument]
     878: (void)vm_flags;   in khugepaged_enter_vma()
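is_shared_maywrite() (lines 381-383) uses the mask-and-compare idiom: "(flags & M) == M" requires every bit of M to be set, where a bare "(flags & M)" would accept any one of them. A sketch with illustrative flag values:

    #include <assert.h>
    #include <stdbool.h>

    #define VM_SHARED   0x8UL   /* illustrative values */
    #define VM_MAYWRITE 0x20UL

    /* True only when the mapping is shared AND may become writable. */
    static bool is_shared_maywrite(unsigned long vm_flags)
    {
        return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
               (VM_SHARED | VM_MAYWRITE);
    }

    int main(void)
    {
        assert(is_shared_maywrite(VM_SHARED | VM_MAYWRITE));
        assert(!is_shared_maywrite(VM_SHARED));   /* one bit is not enough */
        assert(!is_shared_maywrite(VM_MAYWRITE));
        return 0;
    }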
/linux/arch/arm/mm/
fault.c
     271: unsigned long vm_flags = VM_ACCESS_FLAGS;   in do_page_fault() [local]
     293: vm_flags = VM_WRITE;   in do_page_fault()
     297: vm_flags = VM_EXEC;   in do_page_fault()
     321: if (!(vma->vm_flags & vm_flags)) {   in do_page_fault()
     360: if (!(vma->vm_flags & vm_flags)) {   in do_page_fault()