xref: /openbsd/sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c (revision f46a341e)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

#ifdef __linux__
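/*
 * CPU page-fault handler for mmap()ed GEM objects: reserve the BO,
 * give the driver a chance to migrate it for CPU access, then let the
 * common TTM fault code fill in the page tables.
 */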
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
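/*
 * UVM analogue of the fault handler above: the same reserve/notify/
 * fault sequence, driven by OpenBSD's pager fault interface, with the
 * VM_FAULT_* result translated to an errno-style return value.
 */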
int
amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo);
	if (ret)
		goto out;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
					       TTM_BO_VM_NUM_PREFAULT, 1);

		drm_dev_exit(idx);
	} else {
		STUB();
#ifdef notyet
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
#endif
	}
#ifdef __linux__
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;
#endif

unlock:
	dma_resv_unlock(bo->base.resv);
out:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = 0;
		break;
	case VM_FAULT_RETRY:
		ret = ERESTART;
		break;
	default:
		ret = EACCES;
		break;
	}
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

void
amdgpu_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
amdgpu_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops amdgpu_gem_vm_ops = {
	.pgo_fault = amdgpu_gem_fault,
	.pgo_reference = amdgpu_gem_vm_reference,
	.pgo_detach = amdgpu_gem_vm_detach
};
#endif /* !__linux__ */

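/*
 * Final unref of a GEM object: drop the HMM registration, if any, and
 * release the underlying amdgpu_bo.
 */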
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_hmm_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

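/**
 * amdgpu_gem_object_create - allocate an amdgpu_bo and wrap it in a GEM object
 *
 * @adev: amdgpu_device pointer
 * @size: allocation size in bytes
 * @alignment: byte alignment of the buffer
 * @initial_domain: preferred initial placement (AMDGPU_GEM_DOMAIN_*)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL to allocate a private one
 * @obj: on success, the newly created GEM object
 * @xcp_id_plus1: XCP partition id of the allocation, biased by one
 *
 * Returns 0 on success or a negative error code.
 */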
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.xcp_id_plus1 = xcp_id_plus1;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}

int	drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

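/*
 * Drop every GEM handle still held by user space, e.g. on device
 * removal: walk all open DRM files and put each object reference
 * still recorded in their handle tables.
 */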
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

#ifdef __linux__
	list_for_each_entry(file, &ddev->filelist, lhead) {
#else
	SPLAY_FOREACH(file, drm_file_tree, &ddev->files) {
#endif
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the
 * create and open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef notyet
	struct mm_struct *mm;
#endif
	int r;

#ifdef notyet
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;
#endif

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);
	return 0;
}

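/*
 * Called when the last handle to the object in this DRM file goes
 * away: drop the VM's bo_va reference and, once the count hits zero,
 * remove the mapping and fence the page-table cleanup against the BO.
 */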
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct dma_fence *fence = NULL;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (r)
		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
	drm_exec_fini(&exec);
}

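/*
 * mmap callback for GEM objects. Userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS must never be mapped by the CPU.
 */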
#ifdef __linux__
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & VM_ACCESS_FLAGS))
		vm_flags_clear(vma, VM_MAYWRITE);

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
#ifdef notyet
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;
#endif

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
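/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: drm device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: drm file the new handle is created in
 *
 * Validates the requested domains and flags, allocates the BO (on
 * failure retrying without AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED and
 * with a GTT fallback for VRAM-only requests) and returns a handle in
 * args->out.handle. User space typically reaches this through libdrm,
 * e.g. drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, ...).
 */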
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject DOORBELLs until userspace code to use it is available */
	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
		return -EINVAL;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
				size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

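/*
 * amdgpu_gem_userptr_ioctl - wrap anonymous user memory in a GEM object.
 * Not wired up on OpenBSD yet: the ioctl returns -ENOSYS and the Linux
 * implementation is kept below under "notyet" for reference.
 */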
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_hmm_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
#endif
}

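/*
 * Translate a GEM handle into the fake offset user space passes to
 * mmap(2) on the DRM device. Userptr and NO_CPU_ACCESS BOs are
 * rejected.
 */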
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

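/* Thin ioctl wrapper around amdgpu_mode_dumb_mmap(). */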
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

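/*
 * Wait for all reservation-object fences of a BO to signal, bounded by
 * the absolute timeout supplied by user space. args->out.status is 0
 * when the BO is idle and 1 when the wait timed out.
 */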
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

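/*
 * Get or set the tiling flags and opaque metadata blob attached to a
 * BO, as selected by args->op.
 */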
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the AMDGPU_PTE_* hardware flags corresponding to the
 * AMDGPU_VM_PAGE_* UAPI flags for this ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

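/**
 * amdgpu_gem_va_ioctl - manage GPU virtual address mappings of a BO
 *
 * @dev: drm device
 * @data: struct drm_amdgpu_gem_va arguments
 * @filp: drm file owning the VM
 *
 * Validates the requested VA range, flags and operation, then maps,
 * unmaps, clears or replaces mappings in the caller's VM and, unless
 * AMDGPU_VM_DELAY_UPDATE is set, commits the page-table update right
 * away.
 */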
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in reserved area 0x%llx\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error:
	drm_exec_fini(&exec);
	drm_gem_object_put(gobj);
	return r;
}

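/*
 * Query a BO's creation parameters or restrict its preferred and
 * allowed placement domains after the fact, as selected by args->op.
 */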
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

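/*
 * Pad a scanout pitch to the alignment the display hardware expects
 * for the given bytes per pixel and return the pitch in bytes.
 */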
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

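/*
 * Allocate a CPU-accessible, contiguous, linear buffer suitable for
 * scanout (the KMS "dumb buffer" interface) and return a handle to it.
 */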
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task which called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}