/linux/mm/
util.c
     504  unsigned long locked_vm, limit;  in __account_locked_vm() local
     509  locked_vm = mm->locked_vm;  in __account_locked_vm()
     513  if (locked_vm + pages > limit)  in __account_locked_vm()
     517  mm->locked_vm = locked_vm + pages;  in __account_locked_vm()
     519  WARN_ON_ONCE(pages > locked_vm);  in __account_locked_vm()
     520  mm->locked_vm = locked_vm - pages;  in __account_locked_vm()
     525  locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),  in __account_locked_vm()
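The util.c hits above trace the per-mm accounting helper: __account_locked_vm() reads mm->locked_vm, checks the prospective total against the RLIMIT_MEMLOCK limit, and only then commits the update, warning on underflow when decrementing. A minimal sketch of that pattern, assuming the caller holds mmap_lock for writing; the helper name and the omitted CAP_IPC_LOCK bypass are simplifications, not the verbatim kernel code:

    #include <linux/mm.h>
    #include <linux/sched/signal.h>

    /* Sketch only: simplified from the __account_locked_vm() hits above. */
    static int account_locked_vm_sketch(struct mm_struct *mm,
                                        unsigned long pages, bool inc)
    {
            unsigned long locked_vm, limit;
            int ret = 0;

            locked_vm = mm->locked_vm;      /* stable: mmap_lock held */
            if (inc) {
                    limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                    if (locked_vm + pages > limit)
                            ret = -ENOMEM;  /* would exceed the rlimit */
                    else
                            mm->locked_vm = locked_vm + pages;
            } else {
                    WARN_ON_ONCE(pages > locked_vm);        /* underflow */
                    mm->locked_vm = locked_vm - pages;
            }
            return ret;
    }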
mmap.c
    1171  locked_pages += mm->locked_vm;  in mlock_future_ok()
    2098  mm->locked_vm += grow;  in expand_upwards()
    2190  mm->locked_vm += grow;  in expand_downwards()
    2568  unsigned long locked_vm = 0;  in do_vmi_align_munmap() local
    2616  locked_vm += vma_pages(next);  in do_vmi_align_munmap()
    2668  mm->locked_vm -= locked_vm;  in do_vmi_align_munmap()
    2983  mm->locked_vm += (len >> PAGE_SHIFT);  in mmap_region()
    3255  mm->locked_vm += (len >> PAGE_SHIFT);  in do_brk_flags()
debug.c
     206  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,  in dump_mm()
mlock.c
     506  mm->locked_vm += nr_pages;  in mlock_fixup()
     644  locked += current->mm->locked_vm;  in do_mlock()
mremap.c
     793  mm->locked_vm += new_len >> PAGE_SHIFT;  in move_vma()
    1164  mm->locked_vm += pages;  in SYSCALL_DEFINE5()
/linux/net/xdp/
xdp_umem.c
      35  atomic_long_sub(umem->npgs, &umem->user->locked_vm);  in xdp_umem_unaccount_pages()
     139  old_npgs = atomic_long_read(&umem->user->locked_vm);  in xdp_umem_account_pages()
     146  } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,  in xdp_umem_account_pages()
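These xdp_umem.c hits show the other accounting scheme: user_struct::locked_vm is updated lock-free with a cmpxchg retry loop rather than under mmap_lock. A minimal sketch of that loop, assuming the caller derives the page limit from RLIMIT_MEMLOCK; the helper name is hypothetical and the real function also unwinds on later failure:

    #include <linux/atomic.h>
    #include <linux/sched/user.h>

    /* Sketch only: the retry loop from the xdp_umem_account_pages() hits. */
    static int account_user_pages_sketch(struct user_struct *user,
                                         unsigned long npgs,
                                         unsigned long limit)
    {
            long old_npgs, new_npgs;

            do {
                    old_npgs = atomic_long_read(&user->locked_vm);
                    new_npgs = old_npgs + npgs;
                    if (new_npgs > limit)   /* would exceed the rlimit */
                            return -ENOBUFS;
                    /* retry if another thread changed the counter meanwhile */
            } while (atomic_long_cmpxchg(&user->locked_vm, old_npgs,
                                         new_npgs) != old_npgs);
            return 0;
    }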
/linux/include/linux/sched/
user.h
      29  atomic_long_t locked_vm;  member
/linux/arch/s390/kvm/
pci.c
     199  atomic_long_sub(nr_pages, &user->locked_vm);  in unaccount_mem()
     212  cur_pages = atomic_long_read(&user->locked_vm);  in account_mem()
     216  } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,  in account_mem()
/linux/io_uring/
rsrc.h
     133  atomic_long_sub(nr_pages, &user->locked_vm);  in __io_unaccount_mem()
rsrc.c
      53  cur_pages = atomic_long_read(&user->locked_vm);  in __io_account_mem()
      58  } while (!atomic_long_try_cmpxchg(&user->locked_vm,  in __io_account_mem()
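io_uring's __io_account_mem() hits show the try_cmpxchg flavour of the same loop: atomic_long_try_cmpxchg() writes the current counter value back into cur_pages on failure, so the explicit re-read inside the loop disappears. A sketch under that assumption, with a hypothetical page_limit parameter:

    #include <linux/atomic.h>
    #include <linux/sched/user.h>

    /* Sketch only: try_cmpxchg variant from the rsrc.c hits. */
    static int io_account_mem_sketch(struct user_struct *user,
                                     unsigned long nr_pages,
                                     unsigned long page_limit)
    {
            unsigned long cur_pages, new_pages;

            cur_pages = atomic_long_read(&user->locked_vm);
            do {
                    new_pages = cur_pages + nr_pages;
                    if (new_pages > page_limit)
                            return -ENOMEM;
                    /* on failure, cur_pages is refreshed automatically */
            } while (!atomic_long_try_cmpxchg(&user->locked_vm,
                                              &cur_pages, new_pages));
            return 0;
    }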
/linux/drivers/iommu/iommufd/
pages.c
     813  cur_pages = atomic_long_read(&pages->source_user->locked_vm);  in incr_user_locked_vm()
     817  } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages,  in incr_user_locked_vm()
     824  if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))  in decr_user_locked_vm()
     826  atomic_long_sub(npages, &pages->source_user->locked_vm);  in decr_user_locked_vm()
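The decrement side in iommufd's decr_user_locked_vm() is simpler: warn if the subtraction would underflow the per-user counter, otherwise subtract. A sketch of that shape (helper name hypothetical):

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/sched/user.h>

    /* Sketch only: underflow-checked decrement from the pages.c hits. */
    static void decr_user_locked_vm_sketch(struct user_struct *user,
                                           unsigned long npages)
    {
            /* a counter below npages means accounting got out of sync */
            if (WARN_ON(atomic_long_read(&user->locked_vm) < npages))
                    return;
            atomic_long_sub(npages, &user->locked_vm);
    }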
/linux/include/linux/
mm_types.h
     879  unsigned long locked_vm;  /* Pages that have PG_mlocked set */  member
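These two declarations explain the split running through all of the hits: user_struct::locked_vm is an atomic_long_t, so drivers that pin pages on a user's behalf can charge it with the lock-free loops above, while mm_struct::locked_vm is a plain unsigned long that the mm code updates under mmap_lock. Side by side, with every other field elided (illustration only, not the full structs):

    struct user_struct {                    /* include/linux/sched/user.h */
            atomic_long_t locked_vm;        /* per-user; lock-free updates */
    };

    struct mm_struct {                      /* include/linux/mm_types.h */
            unsigned long locked_vm;        /* per-mm pages with PG_mlocked
                                               set; updated under mmap_lock */
    };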
/linux/drivers/vfio/
vfio_iommu_type1.c
     101  size_t locked_vm;  member
     439  dma->locked_vm += npage;  in vfio_lock_acct()
     663  mm->locked_vm + lock_acct + 1 > limit) {  in vfio_pin_pages_remote()
    1517  long npage = dma->locked_vm;  in vfio_change_dma_owner()
/linux/net/core/
skbuff.c
    1641  old_pg = atomic_long_read(&user->locked_vm);  in mm_account_pinned_pages()
    1646  } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));  in mm_account_pinned_pages()
    1662  atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);  in mm_unaccount_pinned_pages()
/linux/Documentation/driver-api/
vfio.rst
     636  mm::locked_vm counter to make sure we do not exceed the rlimit.
/linux/Documentation/mm/
unevictable-lru.rst
     363  VMAs against the task's "locked_vm".
/linux/kernel/
fork.c
    1269  mm->locked_vm = 0;  in mm_init()
/linux/fs/proc/
task_mmu.c
      64  SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);  in task_mem()
/linux/kernel/events/
core.c
    6379  atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);  in perf_mmap_close()
    6454  &mmap_user->locked_vm);
    6600  user_locked = atomic_long_read(&user->locked_vm);  in perf_mmap()
    6604  * user->locked_vm > user_lock_limit  in perf_mmap()
    6612  * charge locked_vm until it hits user_lock_limit;  in perf_mmap()
    6662  atomic_long_add(user_extra, &user->locked_vm);  in perf_mmap()
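The perf_mmap() hits sketch a split charge: the per-user counter absorbs ring-buffer pages only up to user_lock_limit, and whatever does not fit must be charged elsewhere (perf keeps a separate pinned-pages counter for that). A rough sketch of the split, returning the remainder for the caller to charge; names and structure are simplified assumptions reconstructed from the comment fragments above, not the verbatim perf code:

    #include <linux/atomic.h>
    #include <linux/sched/user.h>

    /* Sketch only: split charging suggested by the perf_mmap() hits. */
    static unsigned long perf_split_charge_sketch(struct user_struct *user,
                                                  unsigned long user_extra,
                                                  unsigned long user_lock_limit)
    {
            unsigned long user_locked, rest = 0;

            user_locked = atomic_long_read(&user->locked_vm);
            /* the limit may have shrunk below what is already charged */
            if (user_locked > user_lock_limit)
                    user_locked = user_lock_limit;
            user_locked += user_extra;

            if (user_locked > user_lock_limit) {
                    /* charge locked_vm until it hits user_lock_limit;
                     * the rest goes to the caller's pinned counter */
                    rest = user_locked - user_lock_limit;
                    user_extra -= rest;
            }
            atomic_long_add(user_extra, &user->locked_vm);
            return rest;
    }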