/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"

#define TTM_MAX_BO_PRIORITY	4U

#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3) /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the space to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @printer: Pointer to the struct drm_printer used for the printout;
	 * it identifies the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer);
};
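
/*
 * Example (illustrative sketch, not part of the API): a minimal
 * get_node() built on drm_mm, in the spirit of the generic range
 * manager behind ttm_bo_manager_func declared at the end of this
 * header. The mydrv_* names and the manager layout are hypothetical;
 * the spinlock is the implementation's own choice, as required by the
 * get_node() documentation above.
 *
 *	struct mydrv_manager {			// hypothetical man->priv
 *		struct drm_mm mm;		// initialized in init()
 *		spinlock_t lock;
 *	};
 *
 *	static int mydrv_get_node(struct ttm_mem_type_manager *man,
 *				  struct ttm_buffer_object *bo,
 *				  const struct ttm_place *place,
 *				  struct ttm_mem_reg *mem)
 *	{
 *		struct mydrv_manager *m = man->priv;
 *		struct drm_mm_node *node;
 *		int ret;
 *
 *		node = kzalloc(sizeof(*node), GFP_KERNEL);
 *		if (!node)
 *			return -ENOMEM;
 *
 *		spin_lock(&m->lock);
 *		ret = drm_mm_insert_node(&m->mm, node, mem->num_pages);
 *		spin_unlock(&m->lock);
 *
 *		if (ret) {
 *			// Out of space is not an error: mm_node = NULL,
 *			// return 0, as documented above.
 *			kfree(node);
 *			mem->mm_node = NULL;
 *			return 0;
 *		}
 *
 *		mem->mm_node = node;
 *		mem->start = node->start;
 *		return 0;
 *	}
 */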
/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: lock for move fence
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct lock io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;
	spinlock_t move_lock;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru[TTM_MAX_BO_PRIORITY];

	/*
	 * Protected by @move_lock.
	 */
	struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify the driver about a move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *new_mem);

	/*
	 * Notify the driver that we are taking a fault on this BO
	 * and have reserved it.
	 */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback invoked when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to the BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);
};
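
/*
 * Example (illustrative sketch, not part of the API): wiring up a
 * minimal ttm_bo_driver. All mydrv_* callbacks are hypothetical driver
 * functions; ttm_bo_eviction_valuable() is the generic TTM helper
 * (declared in ttm_bo_api.h) that drivers without a special eviction
 * policy typically plug in here. Members a driver does not implement
 * may stay NULL, subject to the per-member rules documented above
 * (e.g. a NULL verify_access refuses all map/read/write access).
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create = mydrv_ttm_tt_create,
 *		.ttm_tt_populate = mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
 *		.init_mem_type = mydrv_init_mem_type,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags = mydrv_evict_flags,
 *		.move = mydrv_bo_move,
 *		.verify_access = mydrv_verify_access,
 *		.io_mem_reserve = mydrv_io_mem_reserve,
 *		.io_mem_free = mydrv_io_mem_free,
 *	};
 */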
/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct lock device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * @ddestroy: List of buffer objects pending delayed destruction,
 * protected by the global lru_lock.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @no_retry: Don't retry allocation if it fails
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global lru_lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;

	bool no_retry;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
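
/*
 * Example: replacing only the caching bits of a placement while leaving
 * every other flag untouched (flag values from ttm_placement.h):
 *
 *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *	// placement is now TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
 */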
/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @ctx: Operation context: whether to sleep interruptibly and whether
 * to return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is set).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);

void ttm_bo_global_release(struct drm_global_reference *ref);
int ttm_bo_global_init(struct drm_global_reference *ref);

int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 * @need_dma32: Set if the device requires pages addressable with 32-bit DMA.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset, bool need_dma32);
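
/*
 * Example (illustrative sketch): typical bring-up in a driver's load
 * path. The mydrv_* names are hypothetical, and DRM_FILE_PAGE_OFFSET
 * stands in for a driver-chosen constant marking where mappable buffer
 * data starts in the device address space.
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev,
 *				 mydrv->bo_global_ref.ref.object,
 *				 &mydrv_bo_driver,
 *				 drm_dev->anon_inode->i_mapping,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 mydrv->need_dma32);
 *	if (ret)
 *		goto out_no_bdev;	// hypothetical unwind label
 */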
/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a non-NULL @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a non-NULL @ticket is used.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible, bool no_wait,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit validation
 * sequence number, and call this function with a non-NULL @ticket whose
 * stamp is the unique sequence number. If upon call of this function, the
 * buffer object is already reserved, the validation sequence is checked
 * against the validation sequence of the process currently reserving the
 * buffer, and if the current validation sequence is greater than that of
 * the process holding the reservation, the function returns -EDEADLK.
 * Otherwise it sleeps waiting for the buffer to become unreserved, after
 * which it retries reserving.
 * The caller should, when receiving an -EDEADLK error, release all its
 * buffer reservations, wait for @bo to become unreserved, and then rerun
 * the validation with the same validation sequence. This procedure will
 * always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a non-NULL @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a non-NULL @ticket is used.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!kref_read(&bo->kref));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
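
/*
 * Example (illustrative sketch): the -EDEADLK backoff dance when
 * reserving two buffer objects under one ticket, using
 * ttm_bo_reserve_slowpath() defined below. Error handling other than
 * the deadlock case is elided; reservation_ww_class comes from
 * <linux/reservation.h>.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		// back off, then take the contended lock on the slowpath
 *		ttm_bo_unreserve(bo_a);
 *		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
 *		// retry bo_a here before proceeding
 *	}
 *	ww_acquire_done(&ticket);
 *	// ... validate and submit ...
 *	ttm_bo_unreserve(bo_a);
 *	ttm_bo_unreserve(bo_b);
 *	ww_acquire_fini(&ticket);
 */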
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!kref_read(&bo->kref));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		lockmgr(&bo->bdev->glob->lru_lock, LK_EXCLUSIVE);
		ttm_bo_add_to_lru(bo);
		lockmgr(&bo->bdev->glob->lru_lock, LK_RELEASE);
	}
	__ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context: whether to sleep interruptibly and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context: whether to sleep interruptibly and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem);
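
/*
 * Example (illustrative sketch): a driver move() callback that tries an
 * accelerated copy first and falls back to the generic helper above.
 * mydrv_copy_blit() is hypothetical; real drivers also handle the
 * system<->VRAM cases that need an intermediate ttm_bo_move_ttm() step.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		int ret;
 *
 *		ret = mydrv_copy_blit(bo, ctx, new_mem);
 *		if (ret)
 *			ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
 *		return ret;
 *	}
 */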
/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting a BO of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags, as TTM_PL_FLAG_XX values.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

/* required for DragonFly VM, see ttm/ttm_bo_vm.c */
struct ttm_bo_device_buffer_objects;
int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
			     struct ttm_buffer_object *b);
RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
	     ttm_bo_cmp_rb_tree_items);

#endif