xref: /dragonfly/sys/dev/drm/radeon/radeon_gem.c (revision 8af44722)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

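/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being freed
 *
 * Called when the GEM object's last reference goes away: unregister
 * the MMU notifier (if any) and drop the buffer object reference.
 */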
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

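/**
 * radeon_gem_object_create - allocate a GEM object backed by a radeon_bo
 *
 * @rdev: radeon_device pointer
 * @size: allocation size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: radeon_bo creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to store the new GEM object on success
 *
 * Sizes above the unpinned GTT size are rejected with -ENOMEM, since
 * the GTT is used to back VRAM-to-system migrations.  A failed VRAM
 * allocation is retried with GTT added as a fallback domain.
 */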
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
				int alignment, int initial_domain,
				u32 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = curproc ? curproc->p_pid : 0;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

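/**
 * radeon_gem_set_domain - validate a GEM object into a placement domain
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains.  For the
 * CPU domain this only waits (up to 30s) for the object to go idle so
 * the CPU can safely access it; other domains are currently a no-op
 * (see the FIXME below).
 */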
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

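/**
 * radeon_gem_init - initialize GEM object tracking
 *
 * @rdev: radeon_device pointer
 *
 * Set up the list used to track all GEM buffer objects.
 */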
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

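/**
 * radeon_gem_fini - tear down GEM object tracking
 *
 * @rdev: radeon_device pointer
 *
 * Force-delete any buffer objects that are still around, e.g. because
 * userspace leaked them.
 */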
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which is used by both the create
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

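/**
 * radeon_gem_object_close - handle a GEM handle being closed
 *
 * @obj: GEM object the handle referenced
 * @file_priv: DRM file private of the owning client
 *
 * Counterpart to radeon_gem_object_open(): drop the bo_va reference
 * taken there and remove the VM mapping once it reaches zero.
 */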
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

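/**
 * radeon_gem_handle_lockup - react to a GPU lockup error code
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by a previous operation
 *
 * -EDEADLK signals a GPU lockup: attempt a GPU reset and, if that
 * succeeds, return -EAGAIN so the caller can restart the ioctl.
 */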
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
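/**
 * radeon_gem_info_ioctl - report VRAM and GTT sizes to userspace
 *
 * Fills in the real VRAM size, the VRAM still available for new
 * allocations (TTM VRAM pool size minus pinned buffers) and the
 * usable GTT size (total GTT minus pinned buffers).
 */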
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

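/**
 * radeon_gem_create_ioctl - allocate a GEM buffer object for userspace
 *
 * Rounds the requested size up to a whole number of pages, creates
 * the buffer object in the requested domain and returns a handle to
 * it.  A GPU lockup detected during allocation triggers a reset and
 * -EAGAIN so userspace can retry.
 */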
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

#if 0
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
#endif

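/**
 * radeon_gem_set_domain_ioctl - validate a BO into the requested domain
 *
 * Currently only the CPU domain needs work: the BO is waited on until
 * idle so the CPU can safely access it.  Other domain transitions are
 * a no-op here (see radeon_gem_set_domain()).
 */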
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	/* don't touch the BO after dropping the handle reference */
	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

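/**
 * radeon_mode_dumb_mmap - look up the mmap offset of a BO
 *
 * Translates a GEM handle into the fake offset userspace must pass
 * to mmap() in order to map the buffer object.
 */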
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
#if 0
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
#endif
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, (uint64_t *)&args->addr_ptr);
}

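/**
 * radeon_gem_busy_ioctl - poll whether a BO is still busy
 *
 * Non-blocking check of the BO's reservation object: returns -EBUSY
 * if any fence is still unsignaled, and reports the BO's current
 * placement domain.
 */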
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

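/**
 * radeon_gem_wait_idle_ioctl - wait (up to 30s) for a BO to go idle
 *
 * Blocks on all fences of the BO's reservation object and, when the
 * BO lives in VRAM, flushes the HDP cache through MMIO so that CPU
 * accesses see up-to-date data.
 */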
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

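/**
 * radeon_gem_set_tiling_ioctl - set tiling flags and pitch of a BO
 */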
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

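/**
 * radeon_gem_get_tiling_ioctl - query tiling flags and pitch of a BO
 */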
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

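/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file GPU VM
 *
 * Validates the operation, offset and flags, then maps the BO at the
 * requested virtual address (or unmaps it) and updates the VM page
 * tables via radeon_gem_va_update_vm().
 */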
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way we can start using these fields later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

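/**
 * radeon_gem_op_ioctl - get or set the initial placement domain of a BO
 */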
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
#if 0
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;
#endif

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

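/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * Computes an aligned pitch and a page-aligned size from the
 * requested width/height/bpp, then allocates the BO in VRAM and
 * returns a handle to it.
 */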
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}
829