xref: /openbsd/sys/dev/pci/drm/radeon/radeon_gem.c (revision a7c1a79f)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

#ifdef __linux__
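/*
 * Page fault handler for mappings of TTM-backed GEM objects.  The fault is
 * served under the read side of pm.mclk_lock so memory reclocking is held
 * off while pages are inserted into the mapping.
 */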
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
#else /* !__linux__ */
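/*
 * OpenBSD counterpart of the fault handler above, driven by UVM.  The flow
 * is identical, but TTM's VM_FAULT_* results must be translated into the
 * VM_PAGER_* codes expected by the uvm_fault() machinery before returning.
 */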
int
radeon_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(ufi, vaddr,
				       TTM_BO_VM_NUM_PREFAULT, 1);
#ifdef notyet
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;
#endif

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	switch (ret) {
	case VM_FAULT_NOPAGE:
		ret = VM_PAGER_OK;
		break;
	case VM_FAULT_RETRY:
		ret = VM_PAGER_REFAULT;
		break;
	default:
		ret = VM_PAGER_BAD;
		break;
	}
	up_read(&rdev->pm.mclk_lock);
	uvmfault_unlockall(ufi, NULL, uobj);
	return ret;
}

void
radeon_gem_vm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_get(bo);
}

void
radeon_gem_vm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

	ttm_bo_put(bo);
}

static const struct uvm_pagerops radeon_gem_vm_ops = {
	.pgo_fault = radeon_gem_fault,
	.pgo_reference = radeon_gem_vm_reference,
	.pgo_detach = radeon_gem_vm_detach
};
#endif /* !__linux__ */

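/* Final teardown of a GEM object: drop the MMU notifier registration
 * (if any) and the buffer object reference. */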
static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

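/*
 * Allocate a GEM-wrapped radeon BO.  Requests are capped at the unpinned
 * GTT size, since evicting VRAM to the system pool goes through the GTT;
 * a failed VRAM-only allocation is retried with GTT added as a fallback
 * domain before giving up.
 */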
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
#ifdef __linux__
	robj->pid = task_pid_nr(current);
#else
	robj->pid = curproc->p_p->ps_pid;
#endif

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

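/*
 * Validate a BO into the domain userspace asked for.  CPU access just
 * waits (up to 30 seconds) for the object to go idle, and dma-buf shared
 * BOs are refused VRAM placement outright.
 */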
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to go idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

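/* Drop this file's reference on the bo_va; the last reference removes
 * the mapping from the VM. */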
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

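/*
 * Common error filter for the ioctls below: -EDEADLK indicates a GPU
 * lockup, so try a reset and, if it succeeds, ask the caller to retry
 * with -EAGAIN.
 */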
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

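/*
 * mmap is routed through the drm_gem_ttm helpers on both platforms;
 * userptr BOs may never be mapped through the GEM offset.
 */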
#ifdef __linux__
static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}
#else
static int
radeon_gem_object_mmap(struct drm_gem_object *obj,
    vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, accessprot, off, size);
}
#endif

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

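/*
 * GEM_CREATE: allocate a BO of the page-rounded size and return a handle;
 * the allocation reference is dropped once the handle holds its own.
 */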
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

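/*
 * Userptr BOs are not wired up on OpenBSD: the ioctl fails with -ENOSYS
 * and the Linux implementation below is kept under "#ifdef notyet".
 */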
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	return -ENOSYS;
#ifdef notyet
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
#endif
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

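/*
 * Translate a GEM handle into the fake mmap offset userspace passes to
 * mmap(); refused for userptr BOs.
 */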
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

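/*
 * Non-blocking busy query: test the read fences on the reservation object
 * and report the BO's current placement domain along with the result.
 */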
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

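/*
 * Blocking variant of the busy query: wait up to 30 seconds for the read
 * fences, then flush the HDP cache over MMIO if the BO sits in VRAM.
 */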
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

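/* Set or query the surface tiling flags and pitch recorded on a BO. */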
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start && bo_va->bo)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

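/*
 * Map or unmap a BO in the file's per-process virtual address space.
 * Reserved VA space, invalid flags and non-zero vm_ids are all rejected
 * before the handle is looked up.
 */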
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value, so that
	 * moving forward we can use those fields without breaking existent
	 * userspace
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

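/* Get or set a BO's initial placement domain; userptr BOs are excluded. */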
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

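/*
 * Round a scanout width up so the pitch meets the ASIC's alignment
 * requirement (stricter on AVIVO+ and for tiled surfaces); the return
 * value is the aligned pitch in bytes.
 */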
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
	int aligned = width;
	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int pitch_mask = 0;

	switch (cpp) {
	case 1:
		pitch_mask = align_large ? 255 : 127;
		break;
	case 2:
		pitch_mask = align_large ? 127 : 31;
		break;
	case 3:
	case 4:
		pitch_mask = align_large ? 63 : 15;
		break;
	}

	aligned += pitch_mask;
	aligned &= ~pitch_mask;
	return aligned * cpp;
}

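/*
 * Dumb (unaccelerated scanout) buffer creation: derive an aligned pitch
 * and page-aligned size, then allocate a VRAM BO and return its handle.
 */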
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

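/* debugfs: list every GEM BO with its size, placement and owning pid. */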
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}
1001