/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_gem.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_gem.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
#ifdef DUMBBELL_WIP
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
		radeon_bo_unref(&robj);
	}
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = curproc ? curproc->p_pid : 0;

	spin_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	spin_unlock(&rdev->gem.mutex);

	return 0;
}
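
/*
 * Editor's note -- a minimal usage sketch, not part of the original file.
 * An in-kernel caller would typically pair radeon_gem_object_create() with
 * drm_gem_handle_create() and then drop its own reference, as the create
 * ioctl below does:
 *
 *	struct drm_gem_object *gobj;
 *	uint32_t handle;
 *	int r;
 *
 *	r = radeon_gem_object_create(rdev, size, 0, RADEON_GEM_DOMAIN_VRAM,
 *				     0, false, &gobj);
 *	if (r == 0) {
 *		r = drm_gem_handle_create(filp, gobj, &handle);
 *		drm_gem_object_unreference_unlocked(gobj);
 *	}
 *
 * Note that on a VRAM allocation failure the retry path above widens the
 * domain to VRAM|GTT instead of failing, so the object may not end up in
 * the domain the caller asked for.
 */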

static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
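
/*
 * Editor's note -- sketch of the expected userspace behaviour, not part of
 * the original file. radeon_gem_handle_lockup() turns a -EDEADLK reported
 * during a lockup into a GPU reset followed by -EAGAIN, so callers of the
 * ioctls below are expected to simply retry, e.g. with libdrm:
 *
 *	do {
 *		r = drmCommandWriteRead(fd, DRM_RADEON_GEM_BUSY,
 *					&args, sizeof(args));
 *	} while (r == -EAGAIN);
 */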

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_SHARED);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		if (r == -ERESTARTSYS)
			r = -EINTR;
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	handle = 0;
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	lockmgr(&rdev->exclusive_lock, LK_SHARED);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	/* use the cached rdev; the BO reference was just dropped */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
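
/*
 * Editor's note -- usage sketch, not part of the original file. The offset
 * produced by radeon_mode_dumb_mmap() is a fake offset into the DRM file,
 * not a physical address; userspace passes it straight to mmap(2) on the
 * DRM file descriptor:
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, args.addr_ptr);
 */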
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, false);
	/* Flush HDP cache via MMIO if necessary */
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	if (r == -ERESTARTSYS)
		r = -EINTR;
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
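
/*
 * Editor's summary of the VA ioctl below, not part of the original file.
 * On Cayman and newer parts each drm_file owns a radeon_vm; this ioctl
 * binds (RADEON_VA_MAP) or unbinds (RADEON_VA_UNMAP) a BO in that address
 * space by validating vm_id, offset and flags, reserving the BO, looking up
 * its bo_va, and finally calling radeon_vm_bo_set_addr() with either the
 * requested offset and flags or with (0, 0) to unmap.
 */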
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't end up with broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* unreserve before dropping the reference, the bo was
		 * reserved above */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
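
/*
 * Editor's worked example for the pitch/size computation below, not part of
 * the original file. For a 1920x1080 dumb buffer with bpp = 32, the bytes
 * per pixel term is (32 + 1) / 8 = 4, so assuming radeon_align_pitch()
 * imposes no extra alignment:
 *
 *	pitch = 1920 * 4             = 7680 bytes
 *	size  = 7680 * 1080          = 8294400 bytes
 *	size  = ALIGN(8294400, 4096) = 8294400 (already page aligned)
 */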
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}