/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_gem.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = curproc ? curproc->p_pid : 0;
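
	/* keep the new bo on the device-wide list so the debugfs
	 * radeon_gem_info dump at the end of this file can find it */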
	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
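
/*
 * Report total VRAM/GTT sizes to userspace; the visible VRAM and gart
 * figures have the currently pinned allocations subtracted.  A hypothetical
 * libdrm-style caller might look like (sketch, not part of this file):
 *
 *	struct drm_radeon_gem_info info;
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO, &info, sizeof(info));
 */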
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_SHARED);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	handle = 0;
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	return 0;
}

#if 0
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;
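
	/* the MMU notifier (registered below) lets the driver invalidate the
	 * GPU mapping when the backing user pages are unmapped or moved */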
	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}
#endif

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	lockmgr(&rdev->exclusive_lock, LK_SHARED);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, (uint64_t *)&args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
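
/*
 * Block (for up to 30 seconds) until all rendering touching the bo has
 * completed; for VRAM placements the HDP cache is then flushed so CPU
 * reads see up-to-date contents.
 */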
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	/* note: without this assignment cur_placement stayed 0 and the VRAM
	 * check below could never match; upstream Linux reads the placement
	 * here (with ACCESS_ONCE()) */
	cur_placement = robj->tbo.mem.mem_type;
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
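
/* read back the tiling parameters recorded on the bo; pairs with the
 * set_tiling ioctl above */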
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
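
/*
 * Map or unmap a bo in the per-file virtual address space.  A hypothetical
 * userspace call might look like (illustrative sketch, not from this file):
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = bo_handle,
 *		.operation = RADEON_VA_MAP,
 *		.vm_id = 0,
 *		.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
 *		.offset = gpu_va,
 *	};
 *	drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 */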
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
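
/*
 * Dumb buffers provide simple, unaccelerated scanout memory for generic
 * modesetting clients, hence the fixed VRAM domain below.
 */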
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}