/*	$NetBSD: ttm_bo_driver.h,v 1.5 2021/12/18 23:45:46 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"

#define TTM_MAX_BO_PRIORITY	4U

#define TTM_MEMTYPE_FLAG_FIXED		(1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE	(1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA		(1 << 3) /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details for the requested space.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * still return 0, so that the caller can fall back to another
	 * placement.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the space to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @printer: Pointer to a struct drm_printer to use for the output.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer);
};
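
/*
 * Example: a minimal range manager backed by a struct drm_mm, loosely
 * modeled on the generic ttm_bo_manager_func declared at the end of this
 * header.  This is an illustrative sketch only: the my_* names are
 * hypothetical, TOPDOWN handling is omitted, and put_node would simply
 * mirror get_node with drm_mm_remove_node() plus kfree().
 *
 *	struct my_range_manager {
 *		struct drm_mm mm;
 *		spinlock_t lock;
 *	};
 *
 *	static int my_init(struct ttm_mem_type_manager *man,
 *			   unsigned long p_size)
 *	{
 *		struct my_range_manager *rman;
 *
 *		rman = kzalloc(sizeof(*rman), GFP_KERNEL);
 *		if (!rman)
 *			return -ENOMEM;
 *		drm_mm_init(&rman->mm, 0, p_size);
 *		spin_lock_init(&rman->lock);
 *		man->priv = rman;
 *		return 0;
 *	}
 *
 *	static int my_get_node(struct ttm_mem_type_manager *man,
 *			       struct ttm_buffer_object *bo,
 *			       const struct ttm_place *place,
 *			       struct ttm_mem_reg *mem)
 *	{
 *		struct my_range_manager *rman = man->priv;
 *		struct drm_mm_node *node;
 *		int ret;
 *
 *		node = kzalloc(sizeof(*node), GFP_KERNEL);
 *		if (!node)
 *			return -ENOMEM;
 *		spin_lock(&rman->lock);
 *		ret = drm_mm_insert_node_in_range(&rman->mm, node,
 *				mem->num_pages, mem->page_alignment, 0,
 *				place->fpfn,
 *				place->lpfn ? place->lpfn : man->size,
 *				DRM_MM_INSERT_BEST);
 *		spin_unlock(&rman->lock);
 *		if (ret) {
 *			kfree(node);
 *			if (ret == -ENOSPC) {
 *				mem->mm_node = NULL;
 *				ret = 0;
 *			}
 *			return ret;
 *		}
 *		mem->mm_node = node;
 *		mem->start = node->start;
 *		return 0;
 *	}
 */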

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: lock for move fence
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;
	spinlock_t move_lock;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru[TTM_MAX_BO_PRIORITY];

	/*
	 * Protected by @move_lock.
	 */
	struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backing object.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_swapout
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Deactivate all backing pages, but don't free them.
	 */
	void (*ttm_tt_swapout)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this move is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify driver about a driver move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

#ifdef __NetBSD__
	const struct uvm_pagerops *ttm_uvm_ops;
#endif

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to the BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * notify driver that a BO was deleted from LRU.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
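
/*
 * Example: a minimal, hypothetical driver setup.  The my_* callbacks are
 * assumed to be defined by the driver; ttm_bo_eviction_valuable() is the
 * stock helper from ttm_bo_api.h.  Leaving .move NULL falls back to
 * memcpy moves.  A sketch, not a complete driver.
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.ttm_tt_create = my_ttm_tt_create,
 *		.ttm_tt_populate = my_ttm_tt_populate,
 *		.ttm_tt_unpopulate = my_ttm_tt_unpopulate,
 *		.init_mem_type = my_init_mem_type,
 *		.eviction_valuable = ttm_bo_eviction_valuable,
 *		.evict_flags = my_evict_flags,
 *		.move = NULL,
 *		.verify_access = my_verify_access,
 *		.io_mem_reserve = my_io_mem_reserve,
 *		.io_mem_free = my_io_mem_free,
 *	};
 */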

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 * @bo_count: Number of buffer objects currently allocated.
 */

extern struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

#ifndef __NetBSD__
	struct kobject kobj;
#endif
	struct page *dummy_read_page;
	spinlock_t lru_lock;

	/**
	 * Protected by ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
} ttm_bo_glob;

#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager (pointer)
 * @ddestroy: List of buffer objects with a pending delayed delete,
 * protected by the global lru_lock.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @no_retry: Don't retry allocation if it fails
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/*
	 * Protected by the global lru_lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

#ifdef __NetBSD__
	bus_space_tag_t memt;
	bus_dma_tag_t dmat;
#else
	struct address_space *dev_mapping;
#endif

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;

	bool no_retry;
};

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
	struct ttm_buffer_object *first;
	struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 * @swap: first/last lru entry for BOs on the swap list
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
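
/*
 * For example, to replace only the caching bits of a placement flag word
 * while leaving the memory-domain bits untouched (a sketch; the flag
 * values come from ttm_placement.h):
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *	ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *
 * afterwards flags == (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC).
 */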

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @ctx: Operation context, controlling whether sleeps are interruptible
 * and whether to wait for a busy GPU.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no waiting was allowed).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     struct ttm_operation_ctx *ctx);
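
/*
 * Example: a driver move path might use ttm_bo_mem_space() to find
 * temporary space in the TT domain while staging a VRAM eviction.
 * A sketch only; the placement values are illustrative.
 *
 *	struct ttm_place place = {
 *		.fpfn = 0, .lpfn = 0,
 *		.flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1, .placement = &place,
 *		.num_busy_placement = 1, .busy_placement = &place,
 *	};
 *	struct ttm_mem_reg tmp_mem = *new_mem;
 *	int ret;
 *
 *	tmp_mem.mm_node = NULL;
 *	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 *	if (ret)
 *		return ret;
 *	... move the data into tmp_mem, then release it again with
 *	... ttm_bo_mem_put(bo, &tmp_mem) when done.
 */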

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);

int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo (on NetBSD, the bus
 * space tag @memt and bus dma tag @dmat take its place).
 * @vma_manager: A pointer to a vma manager.
 * @need_dma32: Allocate pages usable by devices limited to 32-bit DMA
 * addressing.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_driver *driver,
#ifdef __NetBSD__
		       bus_space_tag_t memt,
		       bus_dma_tag_t dmat,
#else
		       struct address_space *mapping,
#endif
		       struct drm_vma_offset_manager *vma_manager,
		       bool need_dma32);
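
/*
 * Typical bring-up order (a sketch, assuming ttm_bo_init_mm() from
 * ttm_bo_api.h and the hypothetical my_bo_driver from the example above;
 * error unwinding omitted):
 *
 *	ret = ttm_bo_device_init(&priv->bdev, &my_bo_driver,
 *				 dev->anon_inode->i_mapping,
 *				 dev->vma_offset_manager, false);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(&priv->bdev, TTM_PL_VRAM,
 *			     vram_size >> PAGE_SHIFT);
 *
 * ttm_bo_init_mm() invokes the driver's init_mem_type callback and the
 * range manager's init function for the given memory type.
 */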

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible, bool no_wait,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;
		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either deliberately or as a result of a buffer being
 * evicted to make room for a buffer already reserved. (Buffers are reserved
 * before they are evicted). The following algorithm prevents such deadlocks
 * from occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with a non-NULL @ticket and @ticket->stamp == the
 * unique sequence number. If upon call of this function, the buffer object
 * is already reserved, the validation sequence is checked against the
 * validation sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EDEADLK error
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 * See the locking example following ttm_bo_unreserve() below.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if a @ticket is used).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	WARN_ON(!kref_read(&bo->kref));

	return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!kref_read(&bo->kref));

	if (interruptible)
		ret = dma_resv_lock_slow_interruptible(bo->base.resv,
						       ticket);
	else
		dma_resv_lock_slow(bo->base.resv, ticket);

	if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
	dma_resv_unlock(bo->base.resv);
}
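
/*
 * Example: reserving two buffer objects with deadlock back-off, following
 * the algorithm described above ttm_bo_reserve().  A sketch only; real
 * callers usually loop over an arbitrary list of buffers (the helpers in
 * ttm_execbuf_util.h do exactly that) and retry after the slowpath.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *	ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *	if (ret)
 *		goto err;
 *	ret = ttm_bo_reserve(bo_b, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		ttm_bo_unreserve(bo_a);
 *		ret = ttm_bo_reserve_slowpath(bo_b, true, &ticket);
 *		if (ret)
 *			goto err;
 *		ret = ttm_bo_reserve(bo_a, true, false, &ticket);
 *		if (ret) {
 *			ttm_bo_unreserve(bo_b);
 *			goto err;
 *		}
 *	} else if (ret) {
 *		ttm_bo_unreserve(bo_a);
 *		goto err;
 *	}
 *	ww_acquire_done(&ticket);
 *
 *	... validate and submit ...
 *
 *	ttm_bo_unreserve(bo_a);
 *	ttm_bo_unreserve(bo_b);
 *	err:
 *	ww_acquire_fini(&ticket);
 */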

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context, controlling interruptible sleeps and whether
 * to wait for a busy GPU.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context, controlling interruptible sleeps and whether
 * to wait for a busy GPU.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      struct ttm_mem_reg *new_mem);
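
/*
 * Example: a hypothetical driver move callback that schedules a DMA-engine
 * copy and lets TTM clean up, falling back to CPU copies when the engine
 * is unavailable.  my_dma_engine_usable() and my_copy_job() are assumed
 * driver helpers, the latter returning a fence that signals on copy
 * completion.  A sketch only.
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      struct ttm_operation_ctx *ctx,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		if (!my_dma_engine_usable(bo->bdev))
 *			return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *
 *		fence = my_copy_job(bo, &bo->mem, new_mem);
 *		if (IS_ERR(fence))
 *			return PTR_ERR(fence);
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */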

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting a BO of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @caching_flags: Caching state, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
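
/*
 * For instance, when mapping a BO into the kernel the page protection
 * would be derived from the current placement flags (a sketch):
 *
 *	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
 *
 * which yields a write-combining pgprot_t when bo->mem.placement carries
 * TTM_PL_FLAG_WC, and returns @tmp unchanged for a cached mapping.
 */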

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#endif