
Searched refs:vma (Results 1 – 25 of 1044) sorted by relevance

/linux/drivers/gpu/drm/i915/
i915_vma.h
131 return vma->node.size - 2 * vma->guard; in __i915_vma_size()
154 return vma->node.start + vma->guard; in __i915_vma_offset()
191 return vma; in i915_vma_get()
197 return vma; in i915_vma_tryget()
221 cmp = vma->gtt_view.type; in i915_vma_compare()
276 #define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv) argument
328 atomic_inc(&vma->flags); in __i915_vma_pin()
335 atomic_dec(&vma->flags); in __i915_vma_unpin()
341 __i915_vma_unpin(vma); in i915_vma_unpin()
417 if (vma->fence) in i915_vma_unpin_fence()
[all …]
i915_vma.c
82 vma->node.start, vma->node.size, reason); in vma_print_allocator()
89 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
266 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
452 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes, in i915_vma_resource_init_from_vma()
455 vma->ops, vma->private, __i915_vma_offset(vma), in i915_vma_resource_init_from_vma()
456 __i915_vma_size(vma), vma->size, vma->guard); in i915_vma_resource_init_from_vma()
484 GEM_BUG_ON(vma->size > i915_vma_size(vma)); in i915_vma_bind()
573 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index, in i915_vma_bind()
1362 vma->page_sizes = vma->obj->mm.page_sizes; in i915_vma_get_pages()
1400 if (vma->pages != vma->obj->mm.pages) { in __vma_put_pages()
[all …]
i915_gem_evict.c
88 if (dying_vma(vma)) in ungrab_vma()
98 struct i915_vma *vma, in mark_free() argument
105 if (!grab_vma(vma, ww)) in mark_free()
224 active = vma; in i915_gem_evict_something()
238 ungrab_vma(vma); in i915_gem_evict_something()
287 ungrab_vma(vma); in i915_gem_evict_something()
297 ungrab_vma(vma); in i915_gem_evict_something()
307 ungrab_vma(vma); in i915_gem_evict_something()
380 vma = container_of(node, typeof(*vma), node); in i915_gem_evict_for_node()
424 __i915_vma_pin(vma); in i915_gem_evict_for_node()
[all …]
/linux/mm/
mmap.c
140 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
141 vma->vm_ops->close(vma); in remove_vma()
437 vp->vma = vma; in init_multi_vma_prep()
470 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
1130 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
2306 if (vma && vma->vm_start <= addr) in expand_stack()
2526 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in vma_merge_new_vma()
2540 return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta, in vma_merge_extend()
2913 vma->vm_start, vma->vm_end, in mmap_region()
3005 vma->vm_ops->close(vma); in mmap_region()
[all …]
mremap.c
587 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); in move_page_tables()
608 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
679 if (vma->vm_ops && vma->vm_ops->may_split) { in move_vma()
681 err = vma->vm_ops->may_split(vma, old_addr); in move_vma()
719 } else if (vma->vm_ops && vma->vm_ops->mremap) { in move_vma()
776 if (new_vma != vma && vma->vm_start == old_addr && in move_vma()
821 if (!vma) in vma_to_resize()
846 return vma; in vma_to_resize()
864 return vma; in vma_to_resize()
1050 if (!vma) { in SYSCALL_DEFINE5()
[all …]
nommu.c
102 if (vma) in kobjsize()
103 return vma->vm_end - vma->vm_start; in kobjsize()
153 if (vma) in __vmalloc_user_flags()
568 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); in delete_vma_from_mm()
587 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
588 vma->vm_ops->close(vma); in delete_vma()
883 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
916 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
1560 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1599 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
[all …]
mseal.c
51 if (vma->vm_file || vma->vm_flags & VM_SHARED) in is_ro_anon()
103 if (unlikely(is_ro_anon(vma) && !can_modify_vma(vma))) in can_modify_mm_madv()
120 vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags); in mseal_fixup()
121 if (IS_ERR(vma)) { in mseal_fixup()
122 ret = PTR_ERR(vma); in mseal_fixup()
126 set_vma_sealed(vma); in mseal_fixup()
128 *prev = vma; in mseal_fixup()
152 if (vma->vm_end >= end) in check_mm_seal()
155 nstart = vma->vm_end; in check_mm_seal()
178 prev = vma; in apply_mm_seal()
[all …]
mprotect.c
591 *pprev = vma; in mprotect_fixup()
637 vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags); in mprotect_fixup()
638 if (IS_ERR(vma)) { in mprotect_fixup()
643 *pprev = vma; in mprotect_fixup()
727 if (!vma) in do_mprotect_pkey()
741 end = vma->vm_end; in do_mprotect_pkey()
759 prev = vma; in do_mprotect_pkey()
763 tmp = vma->vm_start; in do_mprotect_pkey()
810 tmp = vma->vm_end; in do_mprotect_pkey()
814 if (vma->vm_ops && vma->vm_ops->mprotect) { in do_mprotect_pkey()
[all …]
madvise.c
147 *prev = vma; in madvise_update_vma()
156 *prev = vma; in madvise_update_vma()
161 if (!vma->vm_file || vma_is_anon_shmem(vma)) { in madvise_update_vma()
269 *prev = vma; in madvise_willneed()
344 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range() local
649 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range() local
899 if (!vma) in madvise_dontneed_free()
1066 if (vma->vm_file || vma->vm_flags & VM_SHARED) in madvise_vma_behavior()
1244 if (vma && start > vma->vm_start) in madvise_walk_vmas()
1251 if (!vma) in madvise_walk_vmas()
[all …]
memory.c
535 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
2715 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
2974 struct vm_area_struct *vma = vmf->vma; in __wp_page_copy_user() local
3123 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page() local
3177 struct vm_area_struct *vma = vmf->vma; in wp_page_reuse() local
3209 struct vm_area_struct *vma = vmf->vma; in vmf_can_call_fault() local
3234 struct vm_area_struct *vma = vmf->vma; in vmf_anon_prepare() local
5434 .vma = vma, in __handle_mm_fault()
6214 if (vma && vma->vm_file) { in print_vma_addr()
6220 vma->vm_end - vma->vm_start); in print_vma_addr()
[all …]
rmap.c
154 avc->vma = vma; in anon_vma_chain_link()
345 vma->anon_vma = NULL; in anon_vma_fork()
356 if (vma->anon_vma) in anon_vma_fork()
392 unlink_anon_vmas(vma); in anon_vma_fork()
423 if (vma->anon_vma) { in unlink_anon_vmas()
1005 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one() local
1127 .vma = vma, in pfn_mkclean_range()
1399 address + (nr << PAGE_SHIFT) > vma->vm_end, vma); in folio_add_new_anon_rmap()
2359 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2595 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
[all …]
pagewalk.c
159 if (walk->vma) in walk_pmd_range()
213 if (walk->vma) in walk_pud_range()
312 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range() local
355 struct vm_area_struct *vma = walk->vma; in walk_page_test() local
382 struct vm_area_struct *vma = walk->vma; in __walk_page_range() local
505 walk.vma = vma; in walk_page_range()
507 vma = find_vma(mm, vma->vm_end); in walk_page_range()
598 .vma = vma, in walk_page_range_vma()
604 if (start < vma->vm_start || end > vma->vm_end) in walk_page_range_vma()
618 .vma = vma, in walk_page_vma()
[all …]
/linux/include/linux/
userfaultfd_k.h
185 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
190 return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
195 return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
201 return userfaultfd_wp(vma) && pte_uffd_wp(pte); in userfaultfd_pte_wp()
212 return vma->vm_flags & __VM_UFFD_FLAGS; in userfaultfd_armed()
222 (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) in vma_can_userfault()
243 return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || in vma_can_userfault()
244 vma_is_shmem(vma); in vma_can_userfault()
383 if (!userfaultfd_wp(vma)) in userfaultfd_wp_use_markers()
387 if (!vma_is_anonymous(vma)) in userfaultfd_wp_use_markers()
[all …]
mm.h
824 memset(vma, 0, sizeof(*vma)); in vma_init()
909 return vma->vm_start < vma->vm_mm->brk && in vma_is_initial_heap()
910 vma->vm_end > vma->vm_mm->start_brk; in vma_is_initial_heap()
925 vma->vm_end >= vma->vm_mm->start_stack; in vma_is_initial_stack()
2426 zap_page_range_single(vma, vma->vm_start, in zap_vma_pages()
2427 vma->vm_end - vma->vm_start, NULL); in zap_vma_pages()
3297 vma; vma = vma_interval_tree_iter_next(vma, start, last))
3348 vma_policy(vma), vma->vm_userfaultfd_ctx, in vma_modify_flags()
3581 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) in find_exact_vma()
3590 return (vma && vma->vm_start <= start && end <= vma->vm_end); in range_in_vma()
[all …]
huge_mm.h
17 struct vm_area_struct *vma);
170 if (!vma_is_anonymous(vma)) { in thp_vma_suitable_order()
171 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in thp_vma_suitable_order()
178 if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) in thp_vma_suitable_order()
215 if (!vma->vm_file) in file_thp_enabled()
218 inode = vma->vm_file->f_inode; in file_thp_enabled()
354 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
357 return __pmd_trans_huge_lock(pmd, vma); in pmd_trans_huge_lock()
362 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
502 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
[all …]
/linux/drivers/gpu/drm/i915/display/
intel_fb_pin.c
30 struct i915_vma *vma; in intel_pin_fb_obj_dpt() local
75 if (IS_ERR(vma)) { in intel_pin_fb_obj_dpt()
95 vma->display_alignment = max(vma->display_alignment, alignment); in intel_pin_fb_obj_dpt()
99 i915_vma_get(vma); in intel_pin_fb_obj_dpt()
103 return vma; in intel_pin_fb_obj_dpt()
208 if (vma->fence) in intel_pin_and_fence_fb_obj()
228 return vma; in intel_pin_and_fence_fb_obj()
254 if (IS_ERR(vma)) in intel_plane_pin_fb()
300 if (vma) in intel_plane_unpin_fb()
306 if (vma) in intel_plane_unpin_fb()
[all …]
/linux/drivers/gpu/drm/nouveau/
nouveau_vmm.c
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
44 vma->mem = mem; in nouveau_vma_map()
55 return vma; in nouveau_vma_find()
65 if (vma && --vma->refs <= 0) { in nouveau_vma_del()
86 vma->refs++; in nouveau_vma_new()
90 if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL))) in nouveau_vma_new()
92 vma->vmm = vmm; in nouveau_vma_new()
93 vma->refs = 1; in nouveau_vma_new()
[all …]
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
48 if (!vma->mapped) in msm_gem_vma_purge()
53 vma->mapped = false; in msm_gem_vma_purge()
67 if (vma->mapped) in msm_gem_vma_map()
70 vma->mapped = true; in msm_gem_vma_map()
101 if (vma->iova) in msm_gem_vma_close()
105 vma->iova = 0; in msm_gem_vma_close()
114 vma = kzalloc(sizeof(*vma), GFP_KERNEL); in msm_gem_vma_new()
115 if (!vma) in msm_gem_vma_new()
120 return vma; in msm_gem_vma_new()
145 vma->iova = vma->node.start; in msm_gem_vma_init()
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
922 if (addr >= vma->addr + vma->size) in nvkm_vmm_node_search()
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) in nvkm_vmm_node_split()
1006 vma->addr, (u64)vma->size, in nvkm_vma_dump()
1221 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) { in nvkm_vmm_pfn_split_merge()
1249 if (!vma->mapped || vma->memory) in nvkm_vmm_pfn_unmap()
1263 } while ((vma = node(vma, next)) && (start = vma->addr) < limit); in nvkm_vmm_pfn_unmap()
1321 if (!vma->mapref || vma->memory) { in nvkm_vmm_pfn_map()
1377 vma = node(vma, next); in nvkm_vmm_pfn_map()
1618 if (vma->mapref || !vma->sparse) { in nvkm_vmm_put_locked()
1659 if (vma->sparse && !vma->mapref) { in nvkm_vmm_put_locked()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
481 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
514 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
523 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
560 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
593 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node), in fill_hole()
602 __func__, p->name, vma->node.start, vma->node.size, in fill_hole()
1446 vma->resource->bi.pages = vma->pages; in track_vma_bind()
1449 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in track_vma_bind()
1583 vma->node.start, vma->node.size, in igt_gtt_reserve()
1629 vma->node.start, vma->node.size, in igt_gtt_reserve()
[all …]
i915_vma.c
73 return vma; in checked_vma_instance()
93 if (i915_vma_compare(vma, vma->vm, in checked_vma_instance()
94 i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) { in checked_vma_instance()
104 return vma; in checked_vma_instance()
656 if (vma->node.size < vma->size) { in igt_vma_rotate_remap()
658 vma->size, vma->node.size); in igt_vma_rotate_remap()
756 if (vma->node.size < vma->size) { in assert_pin()
758 name, vma->size, vma->node.size); in assert_pin()
769 if (vma->pages == vma->obj->mm.pages) { in assert_pin()
781 if (vma->pages != vma->obj->mm.pages) { in assert_pin()
[all …]
/linux/drivers/pci/
mmap.c
25 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
32 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
36 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
38 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
41 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
47 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
49 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_resource_range()
50 vma->vm_end - vma->vm_start, in pci_mmap_resource_range()
51 vma->vm_page_prot); in pci_mmap_resource_range()
67 nr = vma_pages(vma); in pci_mmap_fits()
[all …]
/linux/drivers/gpu/drm/xe/
xe_vm.h
109 return vma->gpuva.va.addr; in xe_vma_start()
114 return vma->gpuva.va.range; in xe_vma_size()
119 return xe_vma_start(vma) + xe_vma_size(vma); in xe_vma_end()
124 return vma->gpuva.gem.offset; in xe_vma_bo_offset()
129 return !vma->gpuva.gem.obj ? NULL : in xe_vma_bo()
140 return vma->gpuva.flags & XE_VMA_READ_ONLY; in xe_vma_read_only()
145 return vma->gpuva.gem.offset; in xe_vma_userptr()
155 return !xe_vma_bo(vma); in xe_vma_has_no_bo()
160 return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma); in xe_vma_is_userptr()
171 xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma)); in to_userptr_vma()
[all …]
/linux/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
13 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_page()
16 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__flush_hugetlb_page()
22 struct hstate *hstate = hstate_file(vma->vm_file); in radix__local_flush_hugetlb_page()
25 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__local_flush_hugetlb_page()
32 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_tlb_range()
39 radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
41 radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
49 struct mm_struct *mm = vma->vm_mm; in radix__huge_ptep_modify_prot_commit()
50 unsigned long psize = huge_page_size(hstate_vma(vma)); in radix__huge_ptep_modify_prot_commit()
60 radix__flush_hugetlb_page(vma, addr); in radix__huge_ptep_modify_prot_commit()
[all …]
/linux/fs/proc/
task_mmu.c
306 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
307 name = vma->vm_ops->name(vma); in show_map_vma()
505 struct vm_area_struct *vma = walk->vma; in smaps_pte_hole() local
531 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() local
578 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() local
619 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() local
732 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range() local
1171 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range() local
1231 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk() local
1470 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range() local
[all …]
