1 /************************************************************************** 2 * 3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drmP.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/reservation.h>

struct ttm_bo_device;

struct drm_mm_node;

/**
 * struct ttm_place
 *
 * @fpfn: first valid page frame number to put the object
 * @lpfn: last valid page frame number to put the object
 * @flags: memory domain and caching flags for the object
 *
 * Structure indicating a possible place to put an object.
 */
struct ttm_place {
	unsigned	fpfn;
	unsigned	lpfn;
	uint32_t	flags;
};

/**
 * struct ttm_placement
 *
 * @num_placement: number of preferred placements
 * @placement: preferred placements
 * @num_busy_placement: number of preferred placements when need to evict buffer
 * @busy_placement: preferred placements when need to evict buffer
 *
 * Structure indicating the placement you request for an object.
 */
struct ttm_placement {
	unsigned		num_placement;
	const struct ttm_place	*placement;
	unsigned		num_busy_placement;
	const struct ttm_place	*busy_placement;
};

/**
 * struct ttm_bus_placement
 *
 * @addr: mapped virtual address
 * @base: bus base address
 * @is_iomem: is this io memory ?
 * @size: size in byte
 * @offset: offset from the base address
 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
 * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
struct ttm_bus_placement {
	void		*addr;
	unsigned long	base;
	unsigned long	size;
	unsigned long	offset;
	bool		is_iomem;
	bool		io_reserved_vm;
	uint64_t	io_reserved_count;
};


/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @start: Start of the region, as assigned by the memory manager.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @mem_type: The memory type this region was allocated from.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	void *mm_node;
	unsigned long start;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
	struct ttm_bus_placement bus;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupy a slot in the
 * device address space, that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
 * driver.
 */

enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object.
 * This member is
 * used to avoid destruction while the buffer object is still on a list.
 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
 * keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @mem: structure describing current placement.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writes: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @priv_flags: Flags describing buffer object internal state.
 * @vma_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 * @wu_mutex: Wait unreserved mutex.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, and the API visibility of this object, make it
 * possible to derive driver-specific types.
 */

struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;

	/**
	 * Members protected by the bo::resv::reserved lock.
	 */

	struct ttm_mem_reg mem;
	struct vm_object *persistent_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	struct list_head io_reserve_lru;

	/**
	 * Members protected by a bo reservation.
	 */

	unsigned long priv_flags;

	RB_ENTRY(ttm_buffer_object) vm_rb;	/* DragonFly */
	struct drm_vma_offset_node vma_node;

	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	uint64_t offset; /* GPU address space is independent of CPU word size */
	uint32_t cur_placement;

	struct sg_table *sg;

	struct reservation_object *resv;
	struct reservation_object ttm_resv;
	struct lock wu_mutex;
};

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The buffer object this mapping belongs to.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		/* entries with TTM_BO_MAP_IOMEM_MASK set map io memory */
		ttm_bo_map_iomap	= 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap		= 2,
		ttm_bo_map_kmap		= 3,
		ttm_bo_map_premapped	= 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo,
		       bool interruptible, bool no_wait);

/**
 * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
 *
 * @placement: The proposed placement to check the bo against.
 * @mem: The struct ttm_mem_reg indicating the region where the bo resides
 * @new_flags: Describes compatible placement found
 *
 * Returns true if the placement is compatible
 */
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags);

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   bool interruptible,
			   bool no_wait_gpu);

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);


/**
 * ttm_bo_list_ref_sub
 *
 * @bo: The buffer object.
 * @count: The number of references with which to decrease @bo::list_kref;
 * @never_free: The refcount should not reach zero with this operation.
 *
 * Release @count lru list references to this buffer object.
 */
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
				bool never_free);

/**
 * ttm_bo_add_to_lru
 *
 * @bo: The buffer object.
 *
 * Add this bo to the relevant mem type lru and, if it's backed by
 * system pages (ttms) to the swap list.
 * This function must be called with struct ttm_bo_global::lru_lock held, and
 * is typically called immediately prior to unreserving a bo.
 */
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_del_from_lru
 *
 * @bo: The buffer object.
 *
 * Remove this bo from all lru lists used to lookup and reserve an object.
 * This function must be called with struct ttm_bo_global::lru_lock held,
 * and is usually called just immediately after the bo has been reserved to
 * avoid recursive reservation from lru lists.
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_bo_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

/**
 * ttm_bo_lock_delayed_workqueue
 *
 * Prevent the delayed workqueue from running.
 * Returns
 * True if the workqueue was queued at the time
 */
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);

/**
 * ttm_bo_unlock_delayed_workqueue
 *
 * Allows the delayed workqueue to run.
 */
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
					    int resched);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * command submission that affects the buffer will return -EBUSY
 * until ttm_bo_synccpu_write_release is called.
 *
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo : The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_bo_acc_size
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo_size: size of the buffer object in byte.
 * @struct_size: size of the structure holding buffer object data
 *
 * Returns size to account for a buffer object
 */
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size);

/**
 * ttm_bo_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for the buffer object.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @sg: Scatter-gather table, or NULL (see @ttm_bo_type_sg).
 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * If a failure occurs, the function will call the @destroy function, or
 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
 * illegal and will likely cause memory corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
504 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. 505 */ 506 507 extern int ttm_bo_init(struct ttm_bo_device *bdev, 508 struct ttm_buffer_object *bo, 509 unsigned long size, 510 enum ttm_bo_type type, 511 struct ttm_placement *placement, 512 uint32_t page_alignment, 513 bool interrubtible, 514 struct vm_object *persistent_swap_storage, 515 size_t acc_size, 516 struct sg_table *sg, 517 struct reservation_object *resv, 518 void (*destroy) (struct ttm_buffer_object *)); 519 520 /** 521 * ttm_bo_create 522 * 523 * @bdev: Pointer to a ttm_bo_device struct. 524 * @size: Requested size of buffer object. 525 * @type: Requested type of buffer object. 526 * @placement: Initial placement. 527 * @page_alignment: Data alignment in pages. 528 * @interruptible: If needing to sleep while waiting for GPU resources, 529 * sleep interruptible. 530 * @persistent_swap_storage: Usually the swap storage is deleted for buffers 531 * pinned in physical memory. If this behaviour is not desired, this member 532 * holds a pointer to a persistent shmem object. Typically, this would 533 * point to the shmem object backing a GEM object if TTM is used to back a 534 * GEM user interface. 535 * @p_bo: On successful completion *p_bo points to the created object. 536 * 537 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init 538 * on that object. The destroy function is set to kfree(). 539 * Returns 540 * -ENOMEM: Out of memory. 541 * -EINVAL: Invalid placement flags. 542 * -ERESTARTSYS: Interrupted by signal while waiting for resources. 543 */ 544 545 extern int ttm_bo_create(struct ttm_bo_device *bdev, 546 unsigned long size, 547 enum ttm_bo_type type, 548 struct ttm_placement *placement, 549 uint32_t page_alignment, 550 bool interruptible, 551 struct vm_object *persistent_swap_storage, 552 struct ttm_buffer_object **p_bo); 553 554 /** 555 * ttm_bo_init_mm 556 * 557 * @bdev: Pointer to a ttm_bo_device struct. 558 * @mem_type: The memory type. 
 * @p_size: size of the managed area, in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			  unsigned long p_size);
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area, that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	/* The iomem flag is encoded into the kmap type enum values. */
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
			  struct ttm_buffer_object *bo);

/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method.
 * if the device address space is to be backed by the bo manager.
 */

extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		       struct ttm_bo_device *bdev);

/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * Null on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for read, 0 for write.
 * NOTE(review): the @write description above looks inverted relative to the
 * @wbuf/@rbuf semantics — confirm against the ttm_bo_io implementation.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to
 * be called from the fops::read and fops::write method.
 * Returns:
 * See man (2) write, man(2) read. In particular,
 * the function may return -ERESTARTSYS if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 const char __user *wbuf, char __user *rbuf,
			 size_t count, loff_t *f_pos, bool write);

/* Swap out all buffer objects on the device's swap LRU. */
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
/* Block until @bo is unreserved (see bo::wu_mutex). */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif