/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_gem.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	panic("radeon_gem_object_init() must not be called");

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_bo_unref(&robj);
	}
}

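/*
 * Allocate a radeon BO and wrap it in a GEM object.  The alignment is
 * clamped up to at least one page, the size is capped at the smaller of
 * visible VRAM and GTT, and a VRAM allocation that fails for any reason
 * other than a signal is retried with GTT added to the domain.
 */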
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		DRM_ERROR("%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	spin_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	spin_unlock(&rdev->gem.mutex);

	return 0;
}

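/*
 * Work out the effective domain (the write domain takes precedence over
 * the read domains) and, for CPU access, wait for the BO to go idle.
 */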
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		DRM_ERROR("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			DRM_ERROR("Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

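/*
 * -EDEADLK signals a GPU lockup: attempt a reset and, if it succeeds,
 * return -EAGAIN so the caller retries the ioctl.
 */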
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

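/*
 * GEM_CREATE ioctl: round the requested size up to page granularity,
 * allocate the BO and hand a handle to it back to userspace; the handle
 * holds the only reference.
 */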
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_SHARED);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	handle = 0;
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now, if someone requests the CPU domain,
	 * just make sure the buffer is finished with */
	lockmgr(&rdev->exclusive_lock, LK_SHARED);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

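/*
 * Translate a GEM handle into the mmap offset userspace passes to
 * mmap() in order to map the BO.
 */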
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

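/*
 * GEM_BUSY ioctl: do a non-blocking wait on the BO and report its
 * current placement (VRAM, GTT or CPU) back to userspace.
 */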
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call the hw specific wait-idle hook if there is one */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

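/*
 * GEM_VA ioctl: validate the requested virtual address operation
 * (vm_id must be 0, offset outside the reserved area, snooped-only
 * flags, map/unmap only), then set or clear the BO's address in the
 * per-file VM.
 */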
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(dev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* drop the reservation taken above before bailing out */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

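/*
 * Dumb buffer allocation: derive pitch and size from the requested
 * geometry, allocate the BO in VRAM and hand back a GEM handle.
 */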
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = roundup2(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
589