/openbsd/sys/dev/pci/drm/ttm/
ttm_bo.c
     296  dma_resv_unlock(bo->base.resv);  in ttm_bo_cleanup_refs()
     315  dma_resv_unlock(bo->base.resv);  in ttm_bo_delayed_delete()
     378  dma_resv_unlock(bo->base.resv);  in ttm_bo_release()
     528  if (bo->base.resv == ctx->resv) {  in ttm_bo_evict_swapout_allowable()
     980  if (resv)  in ttm_bo_init_reserved()
     981  bo->base.resv = resv;  in ttm_bo_init_reserved()
    1000  if (!resv)  in ttm_bo_init_reserved()
    1003  dma_resv_assert_held(resv);  in ttm_bo_init_reserved()
    1012  if (!resv)  in ttm_bo_init_reserved()
    1067  sg, resv, destroy);  in ttm_bo_init_validate()
    [all …]
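The ttm_bo_init_reserved() hits trace TTM's handling of an optional caller-supplied reservation object: when one is passed in, the BO adopts it and the caller must already hold its lock; otherwise the BO's private lock is taken, so the object comes back reserved either way. A minimal sketch of that shape, with the hypothetical my_bo_init() standing in for the real initializer:

    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_bo.h>

    /* Hypothetical helper mirroring the optional-resv pattern above. */
    static int my_bo_init(struct ttm_buffer_object *bo, struct dma_resv *resv)
    {
        if (resv) {
            bo->base.resv = resv;          /* adopt the shared resv */
            dma_resv_assert_held(resv);    /* caller must already hold it */
        } else {
            /* fresh, unpublished object: the trylock cannot contend */
            WARN_ON(!dma_resv_trylock(bo->base.resv));
        }
        /* ... placement and validation would happen here ... */
        return 0;                          /* BO is returned reserved */
    }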
ttm_bo_vm.c
      65  (void)dma_resv_wait_timeout(bo->base.resv,  in ttm_bo_vm_fault_idle()
      68  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault_idle()
     127  if (unlikely(!dma_resv_trylock(bo->base.resv))) {  in ttm_bo_vm_reserve()
     139  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
     156  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
     346  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault()
     378  (void)dma_resv_wait_timeout(bo->base.resv,  in ttm_bo_vm_fault_idle()
     381  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault_idle()
     453  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
     471  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
    [all …]
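ttm_bo_vm_reserve() shows the fault-path rule: a page-fault handler already holds mmap-side locks, so it must not sleep on the reservation ww-mutex; it trylocks instead and asks the core to retry the fault on contention. A sketch of that idiom (my_fault_reserve() is hypothetical; the real function can also drop mmap_lock and wait when the fault flags allow a retry):

    #include <linux/dma-resv.h>
    #include <linux/mm.h>
    #include <drm/ttm/ttm_bo.h>

    /* Hypothetical fault-path helper: never block on the resv lock here. */
    static vm_fault_t my_fault_reserve(struct ttm_buffer_object *bo)
    {
        if (unlikely(!dma_resv_trylock(bo->base.resv)))
            return VM_FAULT_NOPAGE;  /* contended: let the fault be retried */
        return 0;                    /* locked; caller must unlock */
    }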
ttm_execbuf_util.c
      38  dma_resv_unlock(bo->base.resv);  in ttm_eu_backoff_reservation_reverse()
      54  dma_resv_unlock(bo->base.resv);  in ttm_eu_backoff_reservation()
     102  ret = dma_resv_reserve_fences(bo->base.resv,  in ttm_eu_reserve_buffers()
     119  ret = dma_resv_reserve_fences(bo->base.resv,  in ttm_eu_reserve_buffers()
     153  dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?  in ttm_eu_fence_buffer_objects()
     156  dma_resv_unlock(bo->base.resv);  in ttm_eu_fence_buffer_objects()
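ttm_eu_reserve_buffers() pre-reserves fence slots while every BO is still locked, and ttm_eu_fence_buffer_objects() then attaches the job's fence and drops the locks; dma_resv_add_fence() has no failure path precisely because the slots were reserved up front. A sketch of the fencing half over a validate list (my_fence_objects() is hypothetical):

    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_execbuf_util.h>

    /* Hypothetical: attach one fence to every reserved BO, then unlock.
     * Assumes dma_resv_reserve_fences() already succeeded for each BO. */
    static void my_fence_objects(struct list_head *list, struct dma_fence *fence)
    {
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
            struct ttm_buffer_object *bo = entry->bo;

            dma_resv_add_fence(bo->base.resv, fence,
                               entry->num_shared ? DMA_RESV_USAGE_READ :
                               DMA_RESV_USAGE_WRITE);
            dma_resv_unlock(bo->base.resv);
        }
    }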
ttm_bo_util.c
     252  fbo->base.base.resv = &fbo->base.base._resv;  in ttm_buffer_object_transfer()
     488  dma_resv_assert_held(bo->base.resv);  in ttm_bo_vmap()
     564  dma_resv_assert_held(bo->base.resv);  in ttm_bo_vunmap()
     586  ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,  in ttm_bo_wait_free_node()
     686  dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);  in ttm_bo_move_accel_cleanup()
     745  if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {  in ttm_bo_pipeline_gutting()
     779  ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);  in ttm_bo_pipeline_gutting()
     782  dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,  in ttm_bo_pipeline_gutting()
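ttm_bo_wait_free_node() and ttm_bo_pipeline_gutting() both key off DMA_RESV_USAGE_BOOKKEEP, the widest usage class: waiting on it (or testing it signaled) means the BO is idle for every past use, which is the bar for freeing its backing store. A small sketch of that wait (my_wait_idle() is hypothetical, and the 30-second timeout is an arbitrary choice):

    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_bo.h>

    /* Hypothetical: block until the BO is idle for all prior uses. */
    static int my_wait_idle(struct ttm_buffer_object *bo)
    {
        long ret;

        ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                    true, 30 * HZ);
        if (ret == 0)
            return -EBUSY;           /* timed out, still busy */
        return ret < 0 ? ret : 0;    /* e.g. -ERESTARTSYS on a signal */
    }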
/openbsd/sys/dev/pci/drm/amd/amdgpu/
amdgpu_dma_buf.c
     330  struct dma_resv *resv = dma_buf->resv;  in amdgpu_dma_buf_create_obj()  local
     337  dma_resv_lock(resv, NULL);  in amdgpu_dma_buf_create_obj()
     349  ttm_bo_type_sg, resv, &gobj, 0);  in amdgpu_dma_buf_create_obj()
     357  dma_resv_unlock(resv);  in amdgpu_dma_buf_create_obj()
     361  dma_resv_unlock(resv);  in amdgpu_dma_buf_create_obj()
     377  struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);  in amdgpu_dma_buf_move_notify()
     396  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_dma_buf_move_notify()  local
     403  r = dma_resv_lock(resv, ticket);  in amdgpu_dma_buf_move_notify()
     412  if (!dma_resv_trylock(resv))  in amdgpu_dma_buf_move_notify()
     417  r = dma_resv_reserve_fences(resv, 2);  in amdgpu_dma_buf_move_notify()
    [all …]
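amdgpu_dma_buf_move_notify() demonstrates locking a second reservation object from inside someone else's locking transaction: dma_resv_locking_ctx() recovers the ww ticket of the context that already holds obj->resv, so the VM resv can be locked under that same ticket; without a ticket it falls back to a trylock and skips the optional work on contention. A sketch of that split (my_move_notify() and the invalidation step are hypothetical):

    #include <linux/dma-resv.h>
    #include <drm/drm_gem.h>

    /* Hypothetical: lock vm_resv under the caller's ww ticket if there
     * is one, otherwise only opportunistically. */
    static void my_move_notify(struct drm_gem_object *obj,
                               struct dma_resv *vm_resv)
    {
        struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);

        if (ticket) {
            if (dma_resv_lock(vm_resv, ticket))
                return;          /* e.g. -EDEADLK: caller backs off */
        } else if (!dma_resv_trylock(vm_resv)) {
            return;              /* best effort only */
        }
        /* ... invalidate mappings under both locks ... */
        dma_resv_unlock(vm_resv);
    }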
amdgpu_vm.c
     380  if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)  in amdgpu_vm_bo_base_init()
    1081  if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&  in amdgpu_vm_bo_get_memory()
    1086  if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)  in amdgpu_vm_bo_get_memory()
    1145  resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_bo_update()
    1149  resv = bo->tbo.base.resv;  in amdgpu_vm_bo_update()
    1225  if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {  in amdgpu_vm_bo_update()
    1362  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_prt_fini()  local
    1392  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_clear_freed()  local
    1466  resv = bo_va->base.bo->tbo.base.resv;  in amdgpu_vm_handle_moved()
    1955  if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)  in amdgpu_vm_bo_del()
    [all …]
amdgpu_vm_cpu.c
      49  struct dma_resv *resv,  in amdgpu_vm_cpu_prepare()  argument
      52  if (!resv)  in amdgpu_vm_cpu_prepare()
      55  return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);  in amdgpu_vm_cpu_prepare()
      80  r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,  in amdgpu_vm_cpu_update()
amdgpu_object.c
     270  bp.resv = NULL;  in amdgpu_bo_create_reserved()
     554  .resv = bp->resv  in amdgpu_bo_create()
     627  bp->resv, bp->destroy);  in amdgpu_bo_create()
     650  if (!bp->resv)  in amdgpu_bo_create()
     663  if (!bp->resv)  in amdgpu_bo_create()
     664  dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_bo_create()
    1386  if (bo->base.resv == &bo->base._resv)  in amdgpu_bo_release_notify()
    1403  dma_resv_unlock(bo->base.resv);  in amdgpu_bo_release_notify()
    1469  struct dma_resv *resv = bo->tbo.base.resv;  in amdgpu_bo_fence()  local
    1472  r = dma_resv_reserve_fences(resv, 1);  in amdgpu_bo_fence()
    [all …]
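The amdgpu_bo_fence() hits above show the canonical two-step for publishing a fence: grow the fence array with dma_resv_reserve_fences() while the lock is held, then attach the fence with dma_resv_add_fence(), which may not fail and therefore has no return value. A sketch under the assumption that the caller already holds the resv (my_bo_fence() is hypothetical):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    /* Hypothetical: reserve a slot, then publish the fence. */
    static int my_bo_fence(struct dma_resv *resv, struct dma_fence *fence,
                           bool shared)
    {
        int r;

        dma_resv_assert_held(resv);
        r = dma_resv_reserve_fences(resv, 1);
        if (r)
            return r;            /* typically -ENOMEM */
        dma_resv_add_fence(resv, fence,
                           shared ? DMA_RESV_USAGE_READ :
                           DMA_RESV_USAGE_WRITE);
        return 0;
    }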
amdgpu_vm_sdma.c
      87  struct dma_resv *resv,  in amdgpu_vm_sdma_prepare()  argument
      97  if (!resv)  in amdgpu_vm_sdma_prepare()
     101  r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);  in amdgpu_vm_sdma_prepare()
     143  dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,  in amdgpu_vm_sdma_commit()
     244  dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);  in amdgpu_vm_sdma_update()
amdgpu_vm_pt.c
     509  struct dma_resv *resv;  in amdgpu_vm_pt_create()  local
     542  bp.resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_pt_create()
     554  if (!bp.resv)  in amdgpu_vm_pt_create()
     555  WARN_ON(dma_resv_lock(bo->tbo.base.resv,  in amdgpu_vm_pt_create()
     557  resv = bp.resv;  in amdgpu_vm_pt_create()
     563  bp.resv = bo->tbo.base.resv;  in amdgpu_vm_pt_create()
     569  if (!resv)  in amdgpu_vm_pt_create()
     570  dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_vm_pt_create()
amdgpu_gem.c
      78  dma_resv_unlock(bo->base.resv);  in amdgpu_gem_fault()
     128  dma_resv_unlock(bo->base.resv);  in amdgpu_gem_fault()
     181  struct dma_resv *resv,  in amdgpu_gem_object_create()  argument
     195  bp.resv = resv;  in amdgpu_gem_object_create()
     268  abo->tbo.base.resv != vm->root.bo->tbo.base.resv
     404  struct dma_resv *resv = NULL;  local
     451  resv = vm->root.bo->tbo.base.resv;
     458  flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
     655  ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
amdgpu_ttm.h
     153  struct dma_resv *resv,
     160  struct dma_resv *resv,
     164  struct dma_resv *resv,
/openbsd/sys/dev/pci/drm/
drm_exec.c
      60  dma_resv_unlock(obj->resv);  in drm_exec_unlock_all()
     175  ret = dma_resv_lock_slow_interruptible(obj->resv,  in drm_exec_lock_contended()
     180  dma_resv_lock_slow(obj->resv, &exec->ticket);  in drm_exec_lock_contended()
     191  dma_resv_unlock(obj->resv);  in drm_exec_lock_contended()
     224  ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket);  in drm_exec_lock_obj()
     226  ret = dma_resv_lock(obj->resv, &exec->ticket);  in drm_exec_lock_obj()
     248  dma_resv_unlock(obj->resv);  in drm_exec_lock_obj()
     268  dma_resv_unlock(obj->resv);  in drm_exec_unlock_obj()
     300  ret = dma_resv_reserve_fences(obj->resv, num_fences);  in drm_exec_prepare_obj()
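drm_exec wraps the ww-mutex dance: each lock is taken with the shared ticket, and when one returns -EDEADLK everything is dropped and the contended lock is re-acquired with the _slow variant, whose ticket ordering guarantees eventual forward progress. A two-object sketch of the same dance without the drm_exec machinery (my_lock_two() is hypothetical; the caller is assumed to have done ww_acquire_init() on the ticket, and a real loop would handle repeated -EDEADLK):

    #include <linux/dma-resv.h>
    #include <drm/drm_gem.h>

    /* Hypothetical: lock a then b, resolving one -EDEADLK by hand. */
    static int my_lock_two(struct drm_gem_object *a, struct drm_gem_object *b,
                           struct ww_acquire_ctx *ticket)
    {
        int ret;

        ret = dma_resv_lock_interruptible(a->resv, ticket);
        if (ret)
            return ret;
        ret = dma_resv_lock_interruptible(b->resv, ticket);
        if (ret == -EDEADLK) {
            dma_resv_unlock(a->resv);
            ret = dma_resv_lock_slow_interruptible(b->resv, ticket);
            if (ret)
                return ret;
            ret = dma_resv_lock_interruptible(a->resv, ticket);
            if (ret)
                dma_resv_unlock(b->resv);
        } else if (ret) {
            dma_resv_unlock(a->resv);
        }
        return ret;
    }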
drm_gem.c
     347  if (!obj->resv)  in drm_gem_private_object_init()
     348  obj->resv = &obj->_resv;  in drm_gem_private_object_init()
    1490  dma_resv_assert_held(obj->resv);  in drm_gem_vmap()
    1507  dma_resv_assert_held(obj->resv);  in drm_gem_vunmap()
    1524  dma_resv_lock(obj->resv, NULL);  in drm_gem_vmap_unlocked()
    1526  dma_resv_unlock(obj->resv);  in drm_gem_vmap_unlocked()
    1534  dma_resv_lock(obj->resv, NULL);  in drm_gem_vunmap_unlocked()
    1536  dma_resv_unlock(obj->resv);  in drm_gem_vunmap_unlocked()
    1612  dma_resv_unlock(objs[i]->resv);  in drm_gem_unlock_reservations()
    1778  dma_resv_unlock(obj->resv);  in drm_gem_lru_scan()
    [all …]
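drm_gem_vmap()/drm_gem_vunmap() only assert that the caller holds obj->resv; the *_unlocked variants in the hits above are thin wrappers that take and drop the lock around them. A sketch of that wrapper shape (my_vmap_unlocked() is hypothetical):

    #include <linux/dma-resv.h>
    #include <linux/iosys-map.h>
    #include <drm/drm_gem.h>

    /* Hypothetical: unlocked convenience wrapper over drm_gem_vmap(). */
    static int my_vmap_unlocked(struct drm_gem_object *obj,
                                struct iosys_map *map)
    {
        int ret;

        dma_resv_lock(obj->resv, NULL);  /* no ww ticket: single lock */
        ret = drm_gem_vmap(obj, map);    /* asserts the lock is held */
        dma_resv_unlock(obj->resv);
        return ret;
    }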
/openbsd/sys/dev/pci/drm/include/drm/ttm/
ttm_bo.h
     186  struct dma_resv *resv;  member
     252  success = dma_resv_trylock(bo->base.resv);  in ttm_bo_reserve()
     257  ret = dma_resv_lock_interruptible(bo->base.resv, ticket);  in ttm_bo_reserve()
     259  ret = dma_resv_lock(bo->base.resv, ticket);  in ttm_bo_reserve()
     280  int ret = dma_resv_lock_slow_interruptible(bo->base.resv,  in ttm_bo_reserve_slowpath()
     286  dma_resv_lock_slow(bo->base.resv, ticket);  in ttm_bo_reserve_slowpath()
     331  dma_resv_unlock(bo->base.resv);  in ttm_bo_unreserve()
     365  struct sg_table *sg, struct dma_resv *resv,
     370  struct sg_table *sg, struct dma_resv *resv,
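The ttm_bo_reserve() hits spell out its decision tree: no_wait maps to a trylock that fails with -EBUSY, otherwise the interruptible flag selects which sleeping ww-lock variant is used. A condensed sketch (my_bo_reserve() is hypothetical and omits ttm_bo_reserve()'s extra bookkeeping):

    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_bo.h>

    /* Hypothetical: the core of the reserve decision tree. */
    static int my_bo_reserve(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait, struct ww_acquire_ctx *ticket)
    {
        if (no_wait)
            return dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
        if (interruptible)
            return dma_resv_lock_interruptible(bo->base.resv, ticket);
        return dma_resv_lock(bo->base.resv, ticket);
    }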
/openbsd/sys/dev/pci/drm/i915/gem/
i915_gem_wait.c
      36  i915_gem_object_boost(struct dma_resv *resv, unsigned int flags)  in i915_gem_object_boost()  argument
      57  dma_resv_iter_begin(&cursor, resv,  in i915_gem_object_boost()
      67  i915_gem_object_wait_reservation(struct dma_resv *resv,  in i915_gem_object_wait_reservation()  argument
      75  i915_gem_object_boost(resv, flags);  in i915_gem_object_wait_reservation()
      77  dma_resv_iter_begin(&cursor, resv,  in i915_gem_object_wait_reservation()
     155  dma_resv_iter_begin(&cursor, obj->base.resv,  in i915_gem_object_wait_priority()
     177  timeout = i915_gem_object_wait_reservation(obj->base.resv,  in i915_gem_object_wait()
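The i915 wait paths walk fences through a dma_resv_iter cursor: dma_resv_iter_begin()/dma_resv_iter_end() bracket an unlocked walk, and the cursor transparently restarts if the fence list changes underneath it. A sketch of a wait built on that walk (my_wait_all() is hypothetical; iterating at DMA_RESV_USAGE_READ also returns kernel and write fences, since usage classes are ordered by strength):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    /* Hypothetical: wait for every fence visible at READ usage. */
    static long my_wait_all(struct dma_resv *resv, bool intr, long timeout)
    {
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
            timeout = dma_fence_wait_timeout(fence, intr, timeout);
            if (timeout <= 0)
                break;           /* timed out or interrupted */
        }
        dma_resv_iter_end(&cursor);
        return timeout;
    }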
i915_gem_clflush.c
     113  dma_resv_reserve_fences(obj->base.resv, 1) == 0)  in i915_gem_clflush_object()
     117  obj->base.resv, true,  in i915_gem_clflush_object()
     120  dma_resv_add_fence(obj->base.resv, &clflush->base.dma,  in i915_gem_clflush_object()
i915_gem_ttm_move.c
     645  ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);  in i915_ttm_move()
     729  ret = dma_resv_reserve_fences(src_bo->base.resv, 1);  in i915_gem_obj_copy_ttm()
     733  ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);  in i915_gem_obj_copy_ttm()
     737  ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx);  in i915_gem_obj_copy_ttm()
     741  ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx);  in i915_gem_obj_copy_ttm()
     755  dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);  in i915_gem_obj_copy_ttm()
     756  dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);  in i915_gem_obj_copy_ttm()
/openbsd/sys/dev/pci/drm/radeon/
radeon_prime.c
      50  struct dma_resv *resv = attach->dmabuf->resv;  in radeon_gem_prime_import_sg_table()  local
      55  dma_resv_lock(resv, NULL);  in radeon_gem_prime_import_sg_table()
      57  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);  in radeon_gem_prime_import_sg_table()
      58  dma_resv_unlock(resv);  in radeon_gem_prime_import_sg_table()
radeon_benchmark.c
      38  struct dma_resv *resv)  in radeon_benchmark_do_move()  argument
      51  resv);  in radeon_benchmark_do_move()
      56  resv);  in radeon_benchmark_do_move()
     125  dobj->tbo.base.resv);  in radeon_benchmark_move()
     136  dobj->tbo.base.resv);  in radeon_benchmark_move()
radeon_object.c
     133  struct dma_resv *resv,  in radeon_bo_create()  argument
     206  &bo->placement, page_align, !kernel, sg, resv,  in radeon_bo_create()
     224  r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,  in radeon_bo_kmap()
     569  dma_resv_assert_held(bo->tbo.base.resv);  in radeon_bo_get_surface_reg()
     694  dma_resv_assert_held(bo->tbo.base.resv);  in radeon_bo_get_tiling_flags()
     706  dma_resv_assert_held(bo->tbo.base.resv);  in radeon_bo_check_tiling()
     808  struct dma_resv *resv = bo->tbo.base.resv;  in radeon_bo_fence()  local
     811  r = dma_resv_reserve_fences(resv, 1);  in radeon_bo_fence()
     818  dma_resv_add_fence(resv, &fence->base, shared ?  in radeon_bo_fence()
rv770_dma.c
      45  struct dma_resv *resv)  in rv770_copy_dma()  argument
      66  radeon_sync_resv(rdev, &sync, resv, false);  in rv770_copy_dma()
/openbsd/sys/dev/pci/drm/include/linux/
dma-buf.h
      35  struct dma_resv *resv;  member
      61  struct dma_resv *resv;  member
/openbsd/sys/dev/pci/drm/i915/
i915_deps.c
     222  int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,  in i915_deps_add_resv()  argument
     228  dma_resv_assert_held(resv);  in i915_deps_add_resv()
     229  dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {  in i915_deps_add_resv()
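i915_deps_add_resv() uses the locked iterator: dma_resv_for_each_fence() requires the resv to be held (hence the assert), and dma_resv_usage_rw(true) selects the usage class a new write must wait for, i.e. readers as well as writers. A sketch of that collection loop (my_collect_deps() is hypothetical and merely skips already-signaled fences):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    /* Hypothetical: walk every fence a new write depends on. */
    static void my_collect_deps(struct dma_resv *resv)
    {
        struct dma_resv_iter iter;
        struct dma_fence *fence;

        dma_resv_assert_held(resv);
        dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
            if (dma_fence_is_signaled(fence))
                continue;        /* already done, no dependency */
            /* record the fence as a scheduling dependency here */
        }
    }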
i915_gem_ww.c
      51  ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);  in i915_gem_ww_ctx_backoff()
      53  dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);  in i915_gem_ww_ctx_backoff()