1 /************************************************************************** 2 * 3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drmP.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/reservation.h>

struct ttm_bo_device;

struct drm_mm_node;

struct ttm_placement;

struct ttm_place;

/**
 * struct ttm_bus_placement
 *
 * @addr: mapped virtual address
 * @base: bus base address
 * @size: size in bytes
 * @offset: offset from the base address
 * @is_iomem: is this io memory ?
 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
 * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
struct ttm_bus_placement {
	void		*addr;
	unsigned long	base;
	unsigned long	size;
	unsigned long	offset;
	bool		is_iomem;
	bool		io_reserved_vm;
	uint64_t	io_reserved_count;
};


/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @start: Start of the allocated region.
 *   NOTE(review): unit (pages vs. bytes) not visible in this header —
 *   confirm against the memory manager implementation.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @mem_type: Index of the memory type this region was allocated from.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	void *mm_node;
	unsigned long start;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
	struct ttm_bus_placement bus;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space, that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
 * driver.
 */

enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @glob: Pointer to a struct ttm_bo_global.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
 * keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @mem: structure describing current placement.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writers: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @moving: Fence set when BO is moving
 * @vma_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 * @wu_mutex: Wait unreserved mutex.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * Through the @destroy member and the API visibility of this object it is
 * possible to derive driver-specific types.
 */

struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;

	/**
	 * Members protected by the bo::resv::reserved lock.
	 */

	struct ttm_mem_reg mem;
	struct vm_object *persistent_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	struct list_head io_reserve_lru;

	/**
	 * Members protected by a bo reservation.
	 */

	struct dma_fence *moving;

	RB_ENTRY(ttm_buffer_object) vm_rb;	/* DragonFly */
	struct drm_vma_offset_node vma_node;

	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	uint64_t offset; /* GPU address space is independent of CPU word size */
	uint32_t cur_placement;

	struct sg_table *sg;

	struct reservation_object *resv;
	struct reservation_object ttm_resv;
	struct lock wu_mutex;
};

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

/* High bit of bo_kmap_type flags a mapping that points at io memory. */
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap = 2,
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo,
		       bool interruptible, bool no_wait);

/**
 * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
 *
 * @placement: The proposed placement to check @mem against.
 * @mem: The struct ttm_mem_reg indicating the region where the bo resides
 * @new_flags: Describes compatible placement found
 *
 * Returns true if the placement is compatible
 */
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags);

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   bool interruptible,
			   bool no_wait_gpu);

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);


/**
 * ttm_bo_list_ref_sub
 *
 * @bo: The buffer object.
 * @count: The number of references with which to decrease @bo::list_kref;
 * @never_free: The refcount should not reach zero with this operation.
 *
 * Release @count lru list references to this buffer object.
 */
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
				bool never_free);

/**
 * ttm_bo_add_to_lru
 *
 * @bo: The buffer object.
 *
 * Add this bo to the relevant mem type lru and, if it's backed by
 * system pages (ttms) to the swap list.
 * This function must be called with struct ttm_bo_global::lru_lock held, and
 * is typically called immediately prior to unreserving a bo.
 */
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_del_from_lru
 *
 * @bo: The buffer object.
 *
 * Remove this bo from all lru lists used to lookup and reserve an object.
 * This function must be called with struct ttm_bo_global::lru_lock held,
 * and is usually called just immediately after the bo has been reserved to
 * avoid recursive reservation from lru lists.
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_bo_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

/**
 * ttm_bo_lock_delayed_workqueue
 *
 * Prevent the delayed workqueue from running.
 * Returns
 * True if the workqueue was queued at the time
 */
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);

/**
 * ttm_bo_unlock_delayed_workqueue
 *
 * Allows the delayed workqueue to run.
 */
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
					    int resched);

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * command submission that affects the buffer will return -EBUSY
 * until ttm_bo_synccpu_write_release is called.
 *
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo : The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_bo_acc_size
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo_size: size of the buffer object in bytes.
 * @struct_size: size of the structure holding buffer object data
 *
 * Returns size to account for a buffer object
 */
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size);

/**
 * ttm_bo_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @sg: Scatter/gather table backing the object; may be NULL.
 *   NOTE(review): presumably only used for ttm_bo_type_sg objects — confirm
 *   against the implementation.
 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * If a failure occurs, the function will call the @destroy function, or
 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
 * illegal and will likely cause memory corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
487 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. 488 */ 489 490 extern int ttm_bo_init(struct ttm_bo_device *bdev, 491 struct ttm_buffer_object *bo, 492 unsigned long size, 493 enum ttm_bo_type type, 494 struct ttm_placement *placement, 495 uint32_t page_alignment, 496 bool interrubtible, 497 struct vm_object *persistent_swap_storage, 498 size_t acc_size, 499 struct sg_table *sg, 500 struct reservation_object *resv, 501 void (*destroy) (struct ttm_buffer_object *)); 502 503 /** 504 * ttm_bo_create 505 * 506 * @bdev: Pointer to a ttm_bo_device struct. 507 * @size: Requested size of buffer object. 508 * @type: Requested type of buffer object. 509 * @placement: Initial placement. 510 * @page_alignment: Data alignment in pages. 511 * @interruptible: If needing to sleep while waiting for GPU resources, 512 * sleep interruptible. 513 * @persistent_swap_storage: Usually the swap storage is deleted for buffers 514 * pinned in physical memory. If this behaviour is not desired, this member 515 * holds a pointer to a persistent shmem object. Typically, this would 516 * point to the shmem object backing a GEM object if TTM is used to back a 517 * GEM user interface. 518 * @p_bo: On successful completion *p_bo points to the created object. 519 * 520 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init 521 * on that object. The destroy function is set to kfree(). 522 * Returns 523 * -ENOMEM: Out of memory. 524 * -EINVAL: Invalid placement flags. 525 * -ERESTARTSYS: Interrupted by signal while waiting for resources. 526 */ 527 528 extern int ttm_bo_create(struct ttm_bo_device *bdev, 529 unsigned long size, 530 enum ttm_bo_type type, 531 struct ttm_placement *placement, 532 uint32_t page_alignment, 533 bool interruptible, 534 struct vm_object *persistent_swap_storage, 535 struct ttm_buffer_object **p_bo); 536 537 /** 538 * ttm_bo_init_mm 539 * 540 * @bdev: Pointer to a ttm_bo_device struct. 541 * @mem_type: The memory type. 
 * @p_size: size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			  unsigned long p_size);
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
591 * This is normally part of a VT switch or an 592 * out-of-memory-space-due-to-fragmentation handler. 593 * The caller must make sure that there are no other processes 594 * currently validating buffers, and can do that by taking the 595 * struct ttm_bo_device::ttm_lock in write mode. 596 * 597 * Returns: 598 * -EINVAL: Invalid or uninitialized memory type. 599 * -ERESTARTSYS: The call was interrupted by a signal while waiting to 600 * evict a buffer. 601 */ 602 603 extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); 604 605 /** 606 * ttm_kmap_obj_virtual 607 * 608 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. 609 * @is_iomem: Pointer to an integer that on return indicates 1 if the 610 * virtual map is io memory, 0 if normal memory. 611 * 612 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap. 613 * If *is_iomem is 1 on return, the virtual address points to an io memory area, 614 * that should strictly be accessed by the iowriteXX() and similar functions. 615 */ 616 617 static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, 618 bool *is_iomem) 619 { 620 *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); 621 return map->virtual; 622 } 623 624 /** 625 * ttm_bo_kmap 626 * 627 * @bo: The buffer object. 628 * @start_page: The first page to map. 629 * @num_pages: Number of pages to map. 630 * @map: pointer to a struct ttm_bo_kmap_obj representing the map. 631 * 632 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the 633 * data in the buffer object. The ttm_kmap_obj_virtual function can then be 634 * used to obtain a virtual address to the data. 635 * 636 * Returns 637 * -ENOMEM: Out of memory. 638 * -EINVAL: Invalid range. 639 */ 640 641 extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, 642 unsigned long num_pages, struct ttm_bo_kmap_obj *map); 643 644 /** 645 * ttm_bo_kunmap 646 * 647 * @map: Object describing the map to unmap. 
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
			  struct ttm_buffer_object *bo);

/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method
 * if the device address space is to be backed by the bo manager.
 */

extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		       struct ttm_bo_device *bdev);

/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * Null on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to
 * be called from the fops::read and fops::write method.
 * Returns:
 * See man (2) write, man(2) read. In particular,
 * the function may return -ERESTARTSYS if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 const char __user *wbuf, char __user *rbuf,
			 size_t count, loff_t *f_pos, bool write);

/*
 * Swap out buffer objects on @bdev.
 * NOTE(review): semantics inferred from the name only — confirm against the
 * implementation in ttm_bo.c.
 */
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

/*
 * Wait for @bo to become unreserved.
 * NOTE(review): return-value semantics are not visible in this header —
 * confirm against the implementation in ttm_bo.c.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);

#endif