/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_gem.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

/*
 * Radeon GEM objects are created through radeon_gem_object_create();
 * the generic init hook must never be reached.
 */
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	panic("radeon_gem_object_init() must not be called");

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram size and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		DRM_ERROR("%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTART) {
			/* fall back to GTT if the VRAM allocation failed */
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	spin_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	spin_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		DRM_ERROR("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			DRM_ERROR("Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

/*
 * On a lockup (-EDEADLK) attempt a GPU reset; if it succeeds, return
 * -EAGAIN so that userspace retries the ioctl.
 */
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
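
/*
 * Illustrative sketch (not part of the driver): every GEM ioctl below
 * funnels its return value through radeon_gem_handle_lockup(), so a
 * typical caller looks like
 *
 *	r = radeon_bo_wait(robj, NULL, false);
 *	...
 *	r = radeon_gem_handle_lockup(rdev, r);
 *	return r;	\* -EAGAIN tells userspace to retry the ioctl *\
 */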

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_SHARED);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	handle = 0;
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	return 0;
}
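
/*
 * A minimal userspace sketch (illustrative, via libdrm) of driving this
 * ioctl; the handle it returns is what the other GEM ioctls consume:
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				&req, sizeof(req)) == 0)
 *		\* req.handle now names the BO *\
 */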

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	lockmgr(&rdev->exclusive_lock, LK_SHARED);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	/* don't touch robj after dropping its reference; use rdev directly */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
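
/*
 * Illustrative userspace sketch: the fake offset returned in
 * args->addr_ptr is handed to mmap(2) on the DRM fd to map the BO:
 *
 *	struct drm_radeon_gem_mmap mreq = { .handle = handle, .size = size };
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &mreq, sizeof(mreq));
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.addr_ptr);
 */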

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
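
/*
 * Illustrative sketch of a non-blocking busy query from userspace; a
 * -EBUSY result indicates the BO is still referenced by the GPU, and
 * args.domain reports its current placement:
 *
 *	struct drm_radeon_gem_busy breq = { .handle = handle };
 *	int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_BUSY,
 *				    &breq, sizeof(breq));
 */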

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call the hw-specific wait function, if any */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value. Thus,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if (args->flags & invalid_flags) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(dev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* the bo was reserved above; drop it before bailing out */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
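
/*
 * Illustrative userspace sketch of mapping a BO into the per-fd VM;
 * the flags must include RADEON_VM_PAGE_SNOOPED, per the checks above:
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.offset = virtual_address,
 *		.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 */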

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
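
/*
 * Illustrative sketch of the generic dumb-buffer path that lands here
 * (typically used for unaccelerated scanout buffers):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	\* creq.handle, creq.pitch and creq.size are filled in above *\
 */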

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}