/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

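/**
 * radeon_gem_object_create - create a GEM object backed by a radeon BO
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement domain for the backing BO
 * @flags: RADEON_GEM_* creation flags
 * @kernel: true for kernel-owned BOs
 * @obj: where the newly created GEM object is stored
 *
 * Requests larger than the unpinned GTT are rejected with -ENOMEM, and a
 * failed VRAM allocation is retried with GTT added as a fallback domain.
 */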
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

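/*
 * Validate the requested domain for a GEM object: when CPU access is
 * asked for, wait (up to 30 seconds) for the BO to become idle; BOs
 * shared via dma-buf are refused migration into VRAM.
 */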
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to become idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

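/*
 * Counterpart of radeon_gem_object_open: drop the per-file VM reference
 * and remove the bo_va once its last reference is gone.
 */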
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

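/*
 * Translate a GPU lockup (-EDEADLK) into a reset attempt; when the reset
 * succeeds, return -EAGAIN so that userspace retries the ioctl.
 */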
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
};

/*
 * GEM ioctls.
 */
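/*
 * Report the total VRAM size, plus the visible VRAM and GTT sizes with
 * the currently pinned amounts subtracted, so userspace can estimate
 * what it may allocate.
 */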
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

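/*
 * Allocate a new BO in the requested domain and return a GEM handle for
 * it; the requested size is rounded up to whole pages.
 *
 * As a rough sketch (not taken from this file), userspace would reach
 * this through libdrm along these lines, assuming a hypothetical fd
 * opened on a radeon device:
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	int ret = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				      &req, sizeof(req));
 *	On success, req.handle contains the new GEM handle.
 */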
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

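/*
 * Turn an anonymous userspace address range into a GEM object. The
 * address and size must be page aligned; writable mappings additionally
 * require the ANONONLY and REGISTER flags so the pages can be tracked
 * with an MMU notifier.
 */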
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

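/*
 * Look up the handle and validate the BO into the requested read/write
 * domain via radeon_gem_set_domain() above.
 */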
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

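/*
 * Return the fake mmap offset of a BO so userspace can mmap() it;
 * userptr BOs are rejected with -EPERM since their pages already belong
 * to the process.
 */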
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

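/*
 * Non-blocking busy check: returns -EBUSY while fences are still
 * pending on the BO, and also reports its current placement domain.
 */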
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

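/*
 * Block (for up to 30 seconds) until the BO is idle, then flush the HDP
 * cache via MMIO for VRAM BOs where the ASIC requires it.
 */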
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

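/*
 * Map or unmap a BO in the per-file virtual address space. Offsets in
 * the reserved area and the internal VALID/SYSTEM page flags are
 * rejected up front; a successful update is flushed immediately through
 * radeon_gem_va_update_vm().
 *
 * A minimal userspace sketch (not taken from this file, assuming libdrm
 * and a gpu_addr outside the reserved area):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
 *		.offset = gpu_addr,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 */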
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r) {
		radeon_gem_va_update_vm(rdev, bo_va);
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

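/*
 * Get or set the initial placement domain of a BO; userptr BOs are
 * rejected. Setting masks the value down to the VRAM/GTT/CPU domain
 * bits.
 */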
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

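/*
 * Create a dumb buffer suitable for scanout: the pitch is aligned to
 * the hardware requirements and the BO is allocated in VRAM.
 */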
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}