/*	$NetBSD: amdgpu_gem.c,v 1.9 2021/12/19 12:02:39 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gem.c,v 1.9 2021/12/19 12:02:39 riastradh Exp $");

#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"

#include <linux/nbsd-namespace.h>

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
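	/*
	 * If the initial allocation fails (other than with a signal),
	 * progressively relax the request: first drop the CPU-access
	 * requirement, then allow the BO to fall back from VRAM to GTT.
	 */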
retry:
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->tbo.base;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * the open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
#ifdef __NetBSD__
	struct vmspace *mm;
#else
	struct mm_struct *mm;
#endif
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
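	/*
	 * A userptr BO is bound to the address space it was created in;
	 * refuse to open it from any other process.
	 */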
#ifdef __NetBSD__
	if (mm && mm != curproc->p_vmspace)
#else
	if (mm && mm != current->mm)
#endif
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

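	/*
	 * Tearing down the VA mapping needs both the BO itself and the VM's
	 * page directory reserved, so gather them on one validation list.
	 */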
	tv.bo = &bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we failed to reserve the BO (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* a GDS BO created from user space must be passed
			 * in via the BO list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

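	/*
	 * A per-VM BO shares the reservation object of the VM's root page
	 * directory, so the root PD must be reserved while it is created.
	 */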
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, ttm_bo_type_device, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

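	/*
	 * With AMDGPU_GEM_USERPTR_VALIDATE the user pages are populated and
	 * the BO is bound to GTT right away, so problems with the mapping
	 * show up at creation time instead of at first use.
	 */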
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

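	/*
	 * The caller passes an absolute deadline in nanoseconds on the
	 * monotonic clock; convert what is left of it into jiffies.
	 */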
	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
					timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the hardware PTE flags that correspond to the GEM UAPI flags on
 * this ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

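	/*
	 * If the ASIC provides a map_mtype callback, let the GMC code
	 * translate the UAPI MTYPE bits into hardware PTE bits as well.
	 */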
	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(pci_dev_dev(dev->pdev),
			"va_address 0x%"PRIX64" is in reserved area 0x%"PRIX64"\n",
			args->va_address, (uint64_t)AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

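	/*
	 * The hardware sign-extends virtual addresses beyond the supported
	 * VA width, which leaves a hole in the middle of the address space.
	 * Reject mappings inside the hole and mask off the sign-extension
	 * bits from the address.
	 */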
	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(pci_dev_dev(dev->pdev),
			"va_address 0x%"PRIX64" is in VA hole 0x%"PRIX64"-0x%"PRIX64"\n",
			args->va_address, (uint64_t)AMDGPU_GMC_HOLE_START,
			(uint64_t)AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(pci_dev_dev(dev->pdev), "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(pci_dev_dev(dev->pdev), "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
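		/*
		 * Changing the preferred placement is not allowed while the
		 * BO is mapped into a VM of a GPU in the same XGMI hive.
		 */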
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * clearing is only possible while the buffer-move engine is
	 * enabled; requesting it otherwise would make creation fail.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

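	/*
	 * Dumb buffers are linear scanout buffers: compute a pitch that
	 * satisfies the ASIC's alignment rules and round the total size up
	 * to whole pages.
	 */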
	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
		seq_printf((m), " " #flag);		\
	}

static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}