/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

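/**
 * radeon_gem_object_free - free a GEM object's backing radeon_bo
 *
 * @gobj: GEM object to free
 *
 * Unregisters any userptr MMU notifier and drops the buffer object
 * reference. Called when the last reference to the GEM object goes away.
 */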
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

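/**
 * radeon_gem_object_create - allocate a radeon_bo and wrap it in a GEM object
 *
 * @rdev: radeon_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM, GTT or CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: where to return the new GEM object
 *
 * If a VRAM allocation fails, placement transparently falls back to GTT.
 * Returns 0 on success or a negative error code.
 */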
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = curproc ? curproc->p_pid : 0;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

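/**
 * radeon_gem_set_domain - handle a set_domain request on a BO
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * The write domain takes precedence over the read domains. For the CPU
 * domain this only waits (up to 30s) for the BO to become idle; no actual
 * migration is performed. Returns 0 on success or a negative error code.
 */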
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

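/**
 * radeon_gem_init - initialize GEM state for a device
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the list of GEM objects tracked by the device.
 */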
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

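/**
 * radeon_gem_fini - tear down GEM state for a device
 *
 * @rdev: radeon_device pointer
 *
 * Force-deletes any buffer objects still on the device's GEM list.
 */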
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

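/**
 * radeon_gem_object_close - drop a per-file VM mapping of a GEM object
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file the handle belonged to
 *
 * Counterpart to radeon_gem_object_open(): drops the bo_va reference and
 * removes the mapping once the last handle in this VM goes away.
 */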
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

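/**
 * radeon_gem_handle_lockup - translate a GPU lockup into a reset + retry
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the failed operation
 *
 * On -EDEADLK (GPU lockup detected) the GPU is reset; if the reset
 * succeeds, -EAGAIN is returned so that userspace retries the ioctl.
 */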
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
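/**
 * radeon_gem_info_ioctl - report VRAM/GTT sizes to userspace
 *
 * Returns the total VRAM size, the CPU-visible VRAM size and the GTT
 * size, with the pinned portions subtracted from the latter two.
 */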
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

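/**
 * radeon_gem_create_ioctl - allocate a new BO on behalf of userspace
 *
 * Rounds the requested size up to a page multiple, creates the BO and
 * returns a handle for it. An illustrative userspace sketch via libdrm
 * (assumes a valid DRM fd; error handling omitted):
 *
 *	struct drm_radeon_gem_create args = {
 *		.size = 4096,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *			    &args, sizeof(args));
 *	args.handle now names the new BO
 */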
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

#if 0
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
#endif

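/**
 * radeon_gem_set_domain_ioctl - handle the SET_DOMAIN ioctl
 *
 * Looks up the handle and defers to radeon_gem_set_domain(), which for
 * now only waits for the BO to go idle when the CPU domain is requested.
 */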
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

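/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle of the BO
 * @offset_p: where to return the mmap offset
 *
 * The returned offset is then passed to mmap(2) on the DRM fd to map
 * the buffer into the caller's address space.
 */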
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
#if 0
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
#endif
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, (uint64_t *)&args->addr_ptr);
}

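/**
 * radeon_gem_busy_ioctl - non-blocking query of a BO's busy state
 *
 * Returns -EBUSY if any fence on the BO's reservation object is still
 * unsignaled, 0 if the BO is idle, and reports the current placement
 * domain in args->domain either way.
 */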
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

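/**
 * radeon_gem_wait_idle_ioctl - block until a BO is idle
 *
 * Waits up to 30 seconds for all fences on the BO, then flushes the HDP
 * cache via MMIO if the BO currently lives in VRAM. Returns -EBUSY on
 * timeout and -EINTR if the wait was interrupted by a signal.
 */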
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

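/**
 * radeon_gem_set_tiling_ioctl - record tiling flags and pitch for a BO
 *
 * Stores the surface tiling parameters on the buffer object.
 */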
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

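/**
 * radeon_gem_get_tiling_ioctl - report a BO's tiling flags and pitch
 *
 * Reserves the BO briefly to read back the parameters set by
 * radeon_gem_set_tiling_ioctl().
 */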
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

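/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 *
 * Supports the RADEON_VA_MAP and RADEON_VA_UNMAP operations when the VM
 * manager is enabled. Validates the requested offset and flags, then
 * updates the page tables immediately via radeon_gem_va_update_vm().
 */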
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way, moving forward, we can use those fields without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

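/**
 * radeon_gem_op_ioctl - get or set per-BO driver state
 *
 * Currently supports querying and overriding a BO's initial placement
 * domain (masked to VRAM, GTT and CPU).
 */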
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
#if 0
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;
#endif

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

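/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * Computes an aligned pitch and page-aligned size for the requested
 * width/height/bpp, allocates the BO preferring VRAM and returns a
 * handle for it.
 */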
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}