/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver's .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * we remove the attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object from
 * the dma-buf private. PRIME will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
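
/*
 * For context, the flink-replacement flow described above is driven from
 * userspace through the PRIME ioctls, typically via libdrm's
 * drmPrimeHandleToFD()/drmPrimeFDToHandle(). A minimal, illustrative
 * userspace sketch (assuming two already-open DRM fds and an existing GEM
 * handle on the exporting device; error handling trimmed):
 *
 *	#include <unistd.h>
 *	#include <xf86drm.h>
 *
 *	int share_buffer(int export_fd, uint32_t export_handle,
 *			 int import_fd, uint32_t *import_handle)
 *	{
 *		int prime_fd, ret;
 *
 *		// GEM handle -> dma-buf fd on the exporting device
 *		ret = drmPrimeHandleToFD(export_fd, export_handle,
 *					 DRM_CLOEXEC, &prime_fd);
 *		if (ret)
 *			return ret;
 *
 *		// dma-buf fd -> GEM handle on the importing device
 *		ret = drmPrimeFDToHandle(import_fd, prime_fd, import_handle);
 *		close(prime_fd);
 *		return ret;
 *	}
 */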

struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;

        struct rb_node dmabuf_rb;
        struct rb_node handle_rb;
};

struct drm_prime_attachment {
        struct sg_table *sgt;
        enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;
        struct rb_node **p, *rb;

        member = kmalloc(sizeof(*member), M_DRM, GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;

        rb = NULL;
        p = &prime_fpriv->dmabufs.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (dma_buf > pos->dma_buf)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->dmabuf_rb, rb, p);
        rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

        rb = NULL;
        p = &prime_fpriv->handles.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (handle > pos->handle)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->handle_rb, rb, p);
        rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->handles.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (member->handle == handle)
                        return member->dma_buf;
                else if (member->handle < handle)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }

        return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
        if (!prime_attach)
                return -ENOMEM;

        prime_attach->dir = DMA_NONE;
        attach->priv = prime_attach;

        if (!dev->driver->gem_prime_pin)
                return 0;

        return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
                               struct dma_buf_attachment *attach)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        struct sg_table *sgt;

        if (dev->driver->gem_prime_unpin)
                dev->driver->gem_prime_unpin(obj);

        if (!prime_attach)
                return;

        sgt = prime_attach->sgt;
        if (sgt) {
                if (prime_attach->dir != DMA_NONE)
                        dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                     prime_attach->dir);
                sg_free_table(sgt);
        }

        kfree(sgt);
        kfree(prime_attach);
        attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

                        dma_buf_put(dma_buf);
                        kfree(member);
                        return;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        struct drm_prime_attachment *prime_attach = attach->priv;
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE || !prime_attach))
                return ERR_PTR(-EINVAL);

        /* return the cached mapping when possible */
        if (prime_attach->dir == dir)
                return prime_attach->sgt;

        /*
         * two mappings with different directions for the same attachment are
         * not allowed
         */
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);

        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!IS_ERR(sgt)) {
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        sgt = ERR_PTR(-ENOMEM);
                } else {
                        prime_attach->sgt = sgt;
                        prime_attach->dir = dir;
                }
        }

        return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
        /* nothing to be done here */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * exp_info->priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dma_buf;

        dma_buf = dma_buf_export(exp_info);
        if (IS_ERR(dma_buf))
                return dma_buf;

        drm_dev_ref(dev);
        drm_gem_object_reference(exp_info->priv);

        return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
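
/*
 * Drivers that need their own &dma_buf_ops (e.g. to override .mmap) but
 * still want the common reference handling can pair drm_gem_dmabuf_export()
 * with drm_gem_dmabuf_release() below. A rough, illustrative sketch, where
 * "foo_dmabuf_ops" and "foo_gem_prime_export" are hypothetical driver
 * symbols:
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.release = drm_gem_dmabuf_release,   // drops the refs taken at export
 *		// ... driver-specific attach/map/mmap callbacks ...
 *	};
 *
 *	struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
 *					     struct drm_gem_object *obj,
 *					     int flags)
 *	{
 *		struct dma_buf_export_info exp_info = {
 *			.ops = &foo_dmabuf_ops,
 *			.size = obj->size,
 *			.flags = flags,
 *			.priv = obj,
 *		};
 *
 *		return drm_gem_dmabuf_export(dev, &exp_info);
 *	}
 */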

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        /* drop the reference the export fd holds */
        drm_gem_object_unreference_unlocked(obj);

        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                         unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                 unsigned long page_num)
{
        return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .kmap = drm_gem_dmabuf_kmap,
        .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
        .kunmap = drm_gem_dmabuf_kunmap,
        .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
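
/*
 * Putting the pieces together, a driver using these helpers typically only
 * wires up the generic PRIME entry points and the low-level callbacks listed
 * above in its &drm_driver. A rough, illustrative sketch, where the foo_*
 * callbacks are hypothetical driver functions:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME | ...,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_pin		= foo_gem_prime_pin,		// optional
 *		.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= foo_gem_prime_vmap,
 *		.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *		.gem_prime_mmap		= foo_gem_prime_mmap,		// optional
 *		...
 *	};
 */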

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                     struct drm_gem_object *obj,
                                     int flags)
{
        struct dma_buf_export_info exp_info = {
#if 0
                .exp_name = KBUILD_MODNAME, /* white lie for debug */
                .owner = dev->driver->fops->owner,
#endif
                .ops = &drm_gem_prime_dmabuf_ops,
                .size = obj->size,
                .flags = flags,
                .priv = obj,
        };

        if (dev->driver->gem_prime_res_obj)
                exp_info.resv = dev->driver->gem_prime_res_obj(obj);

        return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }

out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't fail to remove this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;

out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_unreference_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct drm_gem_object *obj;
        int ret;

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem increases
                         * the refcount on the gem itself instead of the f_count
                         * of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);

        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
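
/*
 * The import path above ultimately hands the mapped scatter/gather table to
 * the driver's @gem_prime_import_sg_table callback. A rough, illustrative
 * sketch of such a callback, where "foo_gem_object" and "foo_gem_create" are
 * hypothetical driver types/helpers that keep the sg_table around for later
 * use and for drm_prime_gem_destroy():
 *
 *	struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		struct foo_gem_object *bo;
 *
 *		bo = foo_gem_create(dev, attach->dmabuf->size);
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *
 *		bo->sgt = sgt;	// imported pages, still mapped by the helper
 *		return &bo->base;
 *	}
 */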

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                                          dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dma_buf, *handle);
        mutex_unlock(&file_priv->prime.lock);
        if (ret)
                goto fail;

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
        dma_buf_put(dma_buf);
        return ret;

out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        mutex_unlock(&file_priv->prime.lock);
        dma_buf_put(dma_buf);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages;
 * the driver is responsible for mapping the pages into the
 * importer's address space for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), M_DRM, GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                     dma_addr_t *addrs, int max_pages)
{
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
        u32 len;
        int pg_index;
        dma_addr_t addr;

        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
                page = sg_page(sg);
                addr = sg_dma_address(sg);

                while (len > 0) {
                        if (WARN_ON(pg_index >= max_pages))
                                return -1;
                        pages[pg_index] = page;
                        if (addrs)
                                addrs[pg_index] = addr;

                        page++;
                        addr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                        pg_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        lockinit(&prime_fpriv->lock, "drmpfpl", 0, LK_CANRECURSE);
        prime_fpriv->dmabufs = LINUX_RB_ROOT;
        prime_fpriv->handles = LINUX_RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}