/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

#ifdef __linux__
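/*
 * amdgpu_gem_fault - wait for the BO to become CPU accessible and fault
 * in its pages
 *
 * Reserves the BO, gives the driver a chance to migrate the buffer for
 * CPU access via amdgpu_bo_fault_reserve_notify() and then lets TTM fill
 * in the page tables; falls back to a dummy page if the device has been
 * unplugged.
 */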
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);

		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct amdgpu_gem_vm_ops = {
	.fault = amdgpu_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
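/*
 * UVM counterpart of the Linux fault handler above: the same
 * reserve/notify/fault sequence, with the VM_FAULT_* results translated
 * into the VM_PAGER_* codes UVM expects before the maps are unlocked.
 */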
int
amdgpu_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo);
	if (ret) {
		switch (ret) {
		case VM_FAULT_NOPAGE:
			ret = VM_PAGER_OK;
			break;
		case VM_FAULT_RETRY:
			ret = VM_PAGER_REFAULT;
			break;
		default:
			ret = VM_PAGER_BAD;
			break;
		}
		uvmfault_unlockall(ufi, NULL, uobj);
		return ret;
	}

	if (drm_dev_enter(ddev, &idx)) {
		ret = amdgpu_bo_fault_reserve_notify(bo);
		if (ret) {
			drm_dev_exit(idx);
			goto unlock;
		}

		ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
					       TTM_BO_VM_NUM_PREFAULT, 1);

		drm_dev_exit(idx);
	} else {
		STUB();
#ifdef notyet
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
#endif
	}
#ifdef __linux__
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;
#endif

unlock:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}
	dma_resv_unlock(bo->base.resv);
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

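/*
 * UVM reference/detach hooks; GEM objects are TTM buffer objects
 * underneath, so these simply forward to the TTM refcount.
 */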
void
amdgpu_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
amdgpu_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops amdgpu_gem_vm_ops = {
	.pgo_fault = amdgpu_gem_fault,
	.pgo_reference = amdgpu_gem_vm_reference,
	.pgo_detach = amdgpu_gem_vm_detach
};
#endif /* !__linux__ */

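/*
 * Final GEM free callback: drop any HMM registration and release the
 * underlying amdgpu_bo reference.
 */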
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_hmm_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

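/**
 * amdgpu_gem_object_create - create an amdgpu_bo backed GEM object
 *
 * @adev: amdgpu device object
 * @size: buffer size in bytes
 * @alignment: byte alignment of the buffer
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: optional reservation object to share, e.g. with a VM root BO
 * @obj: resulting GEM object
 * @xcp_id_plus1: XCP partition id plus one, passed through to the BO
 *
 * Allocates a user BO and exposes its embedded GEM object through @obj.
 */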
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	bp.xcp_id_plus1 = xcp_id_plus1;

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;

	bo = &ubo->bo;
	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}

int	drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);

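/*
 * Forcibly release all GEM handles still held by user space clients;
 * used during device teardown. Walks every open DRM file and drops each
 * object reference left in its handle table, warning that clients and
 * allocations are still active.
 */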
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

#ifdef __linux__
	list_for_each_entry(file, &ddev->filelist, lhead) {
#else
	SPLAY_FOREACH(file, drm_file_tree, &ddev->files) {
#endif
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the create
 * and the open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef notyet
	struct mm_struct *mm;
#endif
	int r;

#ifdef notyet
	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;
#endif

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va)
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	else
		++bo_va->ref_count;
	amdgpu_bo_unreserve(abo);
	return 0;
}

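/*
 * Called when a GEM handle is closed: drop the bo_va reference held for
 * this VM and, once the last reference is gone, remove the mapping and
 * fence the page table clearing against the BO.
 */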
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct dma_fence *fence = NULL;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (r)
		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
	drm_exec_fini(&exec);
}

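/*
 * mmap callback. CPU mappings of userptr BOs and of BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected before handing off to the
 * TTM GEM helper.
 */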
#ifdef __linux__
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & VM_ACCESS_FLAGS))
		vm_flags_clear(vma, VM_MAYWRITE);

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
	 * becoming writable and makes is_cow_mapping(vm_flags) false.
	 */
#ifdef notyet
	if (is_cow_mapping(vma->vm_flags) &&
	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		vma->vm_flags &= ~VM_MAYWRITE;
#endif

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = amdgpu_gem_object_mmap,
	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
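/**
 * amdgpu_gem_create_ioctl - create a GEM object from user parameters
 *
 * @dev: drm device
 * @data: union drm_amdgpu_gem_create in/out arguments
 * @filp: drm file the new handle is created for
 *
 * Validates the requested domains and flags, creates the BO (retrying
 * without CPU access, or with GTT as a fallback, when a VRAM allocation
 * fails) and returns the new handle in args->out.handle.
 */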
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject DOORBELLs until userspace code to use them is available */
	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
		return -EINVAL;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED |
		      AMDGPU_GEM_CREATE_DISCARDABLE))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* a GDS BO created from user space must be
			 * passed in via the BO list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.bo, false);
		if (r)
			return r;

		resv = vm->root.bo->tbo.base.resv;
	}

	initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
	if (r && r != -ERESTARTSYS) {
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			goto retry;
		}

		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
			goto retry;
		}
		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
				size, initial_domain, args->in.alignment, r);
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.bo);
		}
		amdgpu_bo_unreserve(vm->root.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

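/*
 * Wrap an anonymous range of user memory in a GEM object. Not supported
 * on OpenBSD yet, hence the unconditional -ENOSYS in front of the Linux
 * implementation.
 */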
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct drm_gem_object *gobj;
	struct hmm_range *range;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	r = amdgpu_hmm_register(bo, args->addr);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
						 &range);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);

release_object:
	drm_gem_object_put(gobj);

	return r;
#endif
}

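/*
 * Look up a handle and report the fake offset that user space must pass
 * to mmap() to map the BO; userptr and NO_CPU_ACCESS BOs are rejected.
 */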
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

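/*
 * Ioctl wrapper around amdgpu_mode_dumb_mmap(); the mmap offset is
 * returned in args->out.addr_ptr.
 */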
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

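/*
 * Wait for all reservation object fences of a BO to signal, with the
 * timeout converted by amdgpu_gem_timeout(). args->out.status is 0 when
 * the BO is idle and 1 when the wait timed out.
 */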
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}

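/*
 * Get or set the tiling flags and the opaque metadata blob attached to
 * a BO.
 */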
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the hardware PTE flags corresponding to the given GEM UAPI
 * flags on this ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

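/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the GPU virtual address space
 *
 * @dev: drm device
 * @data: struct drm_amdgpu_gem_va in/out arguments
 * @filp: drm file whose VM is operated on
 *
 * Validates the requested VA range and flags, then performs the MAP,
 * UNMAP, CLEAR or REPLACE operation on the bo_va and updates the page
 * tables unless a delayed update was requested.
 */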
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
		AMDGPU_VM_PAGE_NOALLOC;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct drm_exec exec;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in reserved area 0x%llx\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
		      DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(&exec) {
		if (gobj) {
			r = drm_exec_lock_obj(&exec, gobj);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error:
	drm_exec_fini(&exec);
	drm_gem_object_put(gobj);
	return r;
}

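/*
 * Miscellaneous per-BO operations: query the creation parameters of a BO
 * or restrict its preferred/allowed placement domains.
 */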
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj)
		return -ENOENT;

	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->tbo.base.import_attach &&
		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}

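/*
 * Align a scanout width to the hardware pitch requirement and return the
 * pitch in bytes; both @adev and @tiled are currently unused. With
 * cpp == 4 the width is rounded up to a multiple of 64 pixels, so e.g. a
 * 1000 pixel wide 32bpp dumb buffer gets a pitch of 1024 * 4 = 4096 bytes.
 */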
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
				  int width,
				  int cpp,
				  bool tiled)
{
	int aligned = width;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

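/*
 * Create a dumb display buffer: compute pitch and size from the
 * requested geometry, pick a scanout capable placement domain and return
 * a GEM handle for the new BO.
 */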
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
					     DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		struct pid *pid;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		pid = rcu_dereference(file->pid);
		task = pid_task(pid, PIDTYPE_TGID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);

#endif

void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
			    &amdgpu_debugfs_gem_info_fops);
#endif
}