/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called from ttm_tt_destroy, so the
	 * callback must not itself call ttm_tt_destroy, or it would recurse
	 * indefinitely.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};

#define TTM_PAGE_FLAG_WRITE		(1 << 3)
#define TTM_PAGE_FLAG_SWAPPED		(1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP	(1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC	(1 << 6)
#define TTM_PAGE_FLAG_DMA32		(1 << 7)
#define TTM_PAGE_FLAG_SG		(1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};
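/*
 * A driver provides these hooks through a static ops table referenced from
 * each ttm_tt it creates. A hedged sketch for a hypothetical "foo" driver;
 * struct foo_ttm_tt and the foo_gart_* helpers are illustrative assumptions,
 * not part of this API:
 *
 *	struct foo_ttm_tt {
 *		struct ttm_dma_tt dma_ttm;	// base object, must be first
 *		u64 offset;			// aperture offset once bound
 *	};
 *
 *	static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		struct foo_ttm_tt *ftt =
 *			container_of(ttm, struct foo_ttm_tt, dma_ttm.ttm);
 *
 *		// One aperture entry per system page.
 *		ftt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 *		return foo_gart_bind(ftt->offset, ttm->num_pages, ttm->pages);
 *	}
 *
 *	static int foo_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *ftt =
 *			container_of(ttm, struct foo_ttm_tt, dma_ttm.ttm);
 *
 *		return foo_gart_unbind(ftt->offset, ttm->num_pages);
 *	}
 *
 *	static void foo_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		struct foo_ttm_tt *ftt =
 *			container_of(ttm, struct foo_ttm_tt, dma_ttm.ttm);
 *
 *		ttm_dma_tt_fini(&ftt->dma_ttm);	// never ttm_tt_destroy() here
 *		kfree(ftt);
 *	}
 *
 *	static struct ttm_backend_func foo_backend_func = {
 *		.bind = &foo_ttm_bind,
 *		.unbind = &foo_ttm_unbind,
 *		.destroy = &foo_ttm_destroy,
 *	};
 */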
/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter-gather table for pages shared via dma-buf.
 * @glob: Pointer to the bo global data.
 * @swap_storage: Pointer to the vm_object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;	/* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @cpu_address: The CPU address of the pages.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	void **cpu_address;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-NULL value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the allocated range, and the function should return zero.
	 * If the memory region can't accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the memory
	 * to be freed.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
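/*
 * The stock range manager, ttm_bo_manager_func (declared near the end of
 * this header), implements these hooks on top of a drm_mm. Below is a
 * hedged sketch of a get_node() built the same way; struct foo_manager is
 * an illustrative assumption, and the exact drm_mm insertion helper and
 * its signature vary between kernel trees:
 *
 *	struct foo_manager {
 *		struct drm_mm mm;
 *		spinlock_t lock;
 *	};
 *
 *	static int foo_get_node(struct ttm_mem_type_manager *man,
 *				struct ttm_buffer_object *bo,
 *				const struct ttm_place *place,
 *				struct ttm_mem_reg *mem)
 *	{
 *		struct foo_manager *fman = man->priv;	// set up by init()
 *		struct drm_mm_node *node;
 *		int ret;
 *
 *		node = kzalloc(sizeof(*node), GFP_KERNEL);
 *		if (!node)
 *			return -ENOMEM;
 *
 *		spin_lock(&fman->lock);
 *		ret = drm_mm_insert_node(&fman->mm, node, mem->num_pages,
 *					 mem->page_alignment,
 *					 DRM_MM_SEARCH_BEST);
 *		spin_unlock(&fman->lock);
 *
 *		if (ret) {
 *			// Out of space is not an error: leave mm_node NULL
 *			// and return 0 so TTM can try another placement.
 *			kfree(node);
 *			mem->mm_node = NULL;
 *			return ret == -ENOSPC ? 0 : ret;
 *		}
 *
 *		mem->mm_node = node;
 *		mem->start = node->start;
 *		return 0;
 *	}
 */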
/**
 * struct ttm_mem_type_manager
 *
 * @bdev: Pointer to the parent struct ttm_bo_device.
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_FLAG_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	uint64_t gpu_offset;	/* GPU address space is independent of CPU word size */
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct lock io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};
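/*
 * The driver's init_mem_type() hook (see struct ttm_bo_driver below) fills
 * in one of these per memory type. A hedged sketch modeled on common
 * drivers; the VRAM choices are illustrative, and ttm_bo_manager_func is
 * the stock drm_mm-based range manager declared near the end of this
 * header:
 *
 *	static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 *				     struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_SYSTEM:
 *			// System memory: no range manager, fully cacheable.
 *			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_MASK_CACHING;
 *			man->default_caching = TTM_PL_FLAG_CACHED;
 *			break;
 *		case TTM_PL_VRAM:
 *			// On-card memory, managed as a page range.
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *				     TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_FLAG_UNCACHED |
 *						 TTM_PL_FLAG_WC;
 *			man->default_caching = TTM_PL_FLAG_WC;
 *			man->gpu_offset = 0;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */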
/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: Pointer to a struct ttm_bo_device.
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * rather than wait for the GPU
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/*
	 * Hook to notify the driver about a buffer move, so it
	 * can do tiling things.
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/*
	 * Notify the driver that we are taking a fault on this BO
	 * and have reserved it.
	 */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback for when io memory is mapped (for bo_move_memcpy,
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

	/**
	 * Optional driver callback for when a BO is removed from the LRU.
	 * Called with the LRU lock held immediately before the removal.
	 */
	void (*lru_removal)(struct ttm_buffer_object *bo);

	/**
	 * Return the list_head after which a BO should be inserted in the LRU.
	 */
	struct list_head *(*lru_tail)(struct ttm_buffer_object *bo);
	struct list_head *(*swap_lru_tail)(struct ttm_buffer_object *bo);
};
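/*
 * Drivers normally define a single static instance of this vtable and hand
 * it to ttm_bo_device_init() (declared below). A hedged sketch; every foo_*
 * symbol is an assumption standing in for driver code:
 *
 *	static struct ttm_bo_driver foo_bo_driver = {
 *		.ttm_tt_create = &foo_ttm_tt_create,
 *		.ttm_tt_populate = &foo_ttm_tt_populate,
 *		.ttm_tt_unpopulate = &foo_ttm_tt_unpopulate,
 *		.invalidate_caches = &foo_invalidate_caches,
 *		.init_mem_type = &foo_init_mem_type,
 *		.evict_flags = &foo_evict_flags,
 *		.move = NULL,	// fall back to the memcpy move
 *		.verify_access = &foo_verify_access,
 *		.io_mem_reserve = &foo_ttm_io_mem_reserve,
 *		.io_mem_free = &foo_ttm_io_mem_free,
 *		.lru_tail = &ttm_bo_default_lru_tail,
 *		.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 *	};
 */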
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct lock device_list_mutex;
	struct lock lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};
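/*
 * A driver takes a reference on the bo global once at load time, after
 * referencing the memory global. A hedged sketch using the drm_global
 * interface from drm_global.h; struct foo_device and its members are
 * illustrative, while ttm_bo_global_init/_release are declared further
 * down in this header:
 *
 *	static int foo_ttm_global_init(struct foo_device *fdev)
 *	{
 *		struct drm_global_reference *global_ref;
 *
 *		fdev->bo_global_ref.mem_glob = fdev->mem_global_ref.object;
 *		global_ref = &fdev->bo_global_ref.ref;
 *		global_ref->global_type = DRM_GLOBAL_TTM_BO;
 *		global_ref->size = sizeof(struct ttm_bo_global);
 *		global_ref->init = &ttm_bo_global_init;
 *		global_ref->release = &ttm_bo_global_release;
 *		return drm_global_item_ref(global_ref);
 *		// balanced by drm_global_item_unref() at unload
 *	}
 */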
#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING	0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_ACTIVE	1
#define TTM_BO_PRIV_FLAG_MAX	2

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @glob: Pointer to the struct ttm_bo_global for this device.
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager.
 * @ddestroy: List of buffer objects awaiting delayed destruction,
 * protected by the global LRU lock.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global LRU lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
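/*
 * For example, to switch only the caching bits of a placement word to
 * write-combined while leaving everything else intact (flag values from
 * ttm_placement.h):
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	// placement is now TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 */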
/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: Pointer to a struct ttm_bo_device.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Initialize a caller-allocated struct ttm_tt to back data with system
 * memory pages. No pages are actually allocated.
 * Returns:
 * 0 on success, a negative error code (typically -ENOMEM) on failure.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		       unsigned long size, uint32_t page_flags,
		       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt whose backing pages will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function changes the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it
 * involves global TLB and cache flushes and potential page splitting /
 * combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct vm_object *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
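/*
 * Taken together, a ttm_tt moves through tt_unpopulated -> tt_unbound ->
 * tt_bound and back. A hedged sketch of the usual call sequence, with
 * error handling elided; @bo_mem stands for an aperture placement obtained
 * from ttm_bo_mem_space():
 *
 *	struct ttm_tt *ttm;
 *
 *	ttm = bdev->driver->ttm_tt_create(bdev, size, 0,
 *					  glob->dummy_read_page);
 *	ttm_tt_bind(ttm, bo_mem);	// populates first, then func->bind()
 *	...				// pages are visible in the aperture
 *	ttm_tt_unbind(ttm);		// func->unbind()
 *	ttm_tt_destroy(ttm);		// unbind + unpopulate + func->destroy()
 */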
/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptibly when waiting for space.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      struct address_space *mapping,
			      uint64_t file_page_offset, bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);

struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo);
struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo);
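/*
 * A hedged sketch of bringing up the BO subsystem once the globals are
 * referenced (see the ttm_bo_global example earlier); fdev, foo_bo_driver
 * and FOO_FILE_PAGE_OFFSET are illustrative driver-side names:
 *
 *	ret = ttm_bo_device_init(&fdev->bdev,
 *				 fdev->bo_global_ref.ref.object,
 *				 &foo_bo_driver,
 *				 fdev->ddev->anon_inode->i_mapping,
 *				 FOO_FILE_PAGE_OFFSET,
 *				 fdev->need_dma32);
 *	if (ret)
 *		return ret;
 *	// Then enable memory types via the init_mem_type() hook, e.g.
 *	// with ttm_bo_init_mm() from ttm_bo_api.h.
 */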
/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if @ticket != NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible, bool no_wait,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}
/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either deliberately or as a result of a buffer being
 * evicted to make room for a buffer already reserved. (Buffers are reserved
 * before they are evicted.) The following algorithm prevents such deadlocks
 * from occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique validation sequence
 * number, embedded in @ticket, and call this function with a non-NULL
 * @ticket. If the buffer object is already reserved at that point, the
 * caller's validation sequence is checked against that of the process
 * currently holding the reservation. If the caller's sequence is younger
 * (greater), the function returns -EDEADLK; otherwise it sleeps waiting for
 * the buffer to become unreserved and then retries the reservation.
 * On receiving -EDEADLK, the caller should release all of its buffer
 * reservations, wait for @bo to become unreserved, and then rerun the
 * validation with the same validation sequence. This procedure guarantees
 * that the process with the oldest validation sequence will eventually
 * succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if @ticket != NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!atomic_read(&bo->kref.refcount));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
		ttm_bo_add_to_lru(bo);
		lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	}
	__ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket)
{
	ttm_bo_unreserve(bo);
}
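/*
 * A hedged sketch of reserving two buffers for submission under one ticket.
 * Multi-buffer reservation is normally wrapped by ttm_eu_reserve_buffers()
 * in ttm_execbuf_util.h; the raw calls look roughly like this:
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	// on -EDEADLK: unreserve everything already held, take the
 *	// contended buffer with ttm_bo_reserve_slowpath(), and retry
 *	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *
 *	ww_acquire_done(&ticket);	// all reservations taken
 *	...				// validate and submit
 *	ttm_bo_unreserve_ticket(bo_a, &ticket);
 *	ttm_bo_unreserve_ticket(bo_b, &ticket);
 *	ww_acquire_fini(&ticket);
 */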
/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and attach @fence to both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     struct fence *fence,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags, TTM_PL_FLAG_XX, of the intended mapping.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

/* required for DragonFly VM, see ttm/ttm_bo_vm.c */
struct ttm_bo_device_buffer_objects;
int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
			     struct ttm_buffer_object *b);
RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
	     ttm_bo_cmp_rb_tree_items);

#endif /* _TTM_BO_DRIVER_H_ */