/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"

#define TTM_MAX_BO_PRIORITY	4U

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This will be called from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from the callback, or you will get an
	 * infinite loop.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
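
/*
 * A minimal backend implementation might look like the sketch below.
 * The mydrv_* names and the GART programming details are hypothetical;
 * only the callback signatures and the destroy() pattern (free the
 * driver container, never call ttm_tt_destroy() again) follow from the
 * interface above.
 *
 *	static int mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		return mydrv_gart_bind(ttm->bdev, bo_mem->start,
 *				       ttm->num_pages, ttm->pages);
 *	}
 *
 *	static int mydrv_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		return mydrv_gart_unbind(ttm->bdev, ttm->num_pages);
 *	}
 *
 *	static void mydrv_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		struct mydrv_ttm_tt *gtt =
 *			container_of(ttm, struct mydrv_ttm_tt, ttm);
 *
 *		ttm_tt_fini(ttm);
 *		kfree(gtt);
 *	}
 *
 *	static struct ttm_backend_func mydrv_backend_func = {
 *		.bind = mydrv_ttm_bind,
 *		.unbind = mydrv_ttm_unbind,
 *		.destroy = mydrv_ttm_destroy,
 *	};
 */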

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Pointer to an sg_table for SG objects imported via dma-buf.
 * @glob: Pointer to the struct ttm_bo_global.
 * @swap_storage: Pointer to the VM object used for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct vm_object *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages
 * @pages_list: used by some page allocation backend
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};
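
/*
 * Drivers normally embed struct ttm_tt (or struct ttm_dma_tt when DMA
 * addresses are needed) in a driver-private structure and recover it
 * with container_of() in the backend callbacks. Hypothetical sketch:
 *
 *	struct mydrv_dma_tt {
 *		struct ttm_dma_tt dma;
 *		uint64_t offset;
 *	};
 *
 * and in a callback:
 *
 *	struct mydrv_dma_tt *gtt =
 *		container_of(ttm, struct mydrv_dma_tt, dma.ttm);
 */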

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @place: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by @place.
	 * If successful, @mem::mm_node should be set to a non-null value,
	 * @mem::start should be set to a value identifying the beginning
	 * of the range allocated, and the function should return zero.
	 * If the memory region could not accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function is never called from within atomic context, so the
	 * implementation may use either a mutex or a spinlock to protect
	 * any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the allocation
	 * to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @printer: Pointer to a struct drm_printer to use for the output.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man,
		      struct drm_printer *printer);
};
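
/*
 * Drivers that only need a linear range allocator can point
 * ttm_mem_type_manager::func at the generic ttm_bo_manager_func declared
 * near the end of this file. A hand-rolled get_node() just has to honour
 * the contract documented above; hypothetical sketch:
 *
 *	static int mydrv_get_node(struct ttm_mem_type_manager *man,
 *				  struct ttm_buffer_object *bo,
 *				  const struct ttm_place *place,
 *				  struct ttm_mem_reg *mem)
 *	{
 *		struct mydrv_node *node;
 *		int ret = mydrv_alloc_range(man->priv, mem->num_pages,
 *					    place, &node);
 *
 *		if (ret)
 *			return ret;	(a real system error)
 *		if (!node)
 *			return 0;	(full: mem->mm_node stays NULL)
 *		mem->mm_node = node;
 *		mem->start = node->start;
 *		return 0;
 *	}
 */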

/**
 * struct ttm_mem_type_manager
 *
 * @bdev: Pointer to the struct ttm_bo_device this manager belongs to.
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: Lock protecting the move fence.
 * @lru: The lru lists for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct lock io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;
	spinlock_t move_lock;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru[TTM_MAX_BO_PRIORITY];

	/*
	 * Protected by @move_lock.
	 */
	struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt backing store.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: Pointer to a struct ttm_bo_device.
	 * @size: Size of the data that needs backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);

	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this move is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * rather than sleep waiting for the GPU
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    bool interruptible, bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify the driver about a move so it
	 * can do tiling things and book-keeping.
	 *
	 * @evict: whether this move is evicting the buffer from the graphics
	 * address space
	 */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *new_mem);

	/*
	 * Notify the driver that we are taking a fault on this BO
	 * and have reserved it.
	 */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback for when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to the BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);
};
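
/*
 * A driver typically defines one static instance of struct ttm_bo_driver
 * and hands it to ttm_bo_device_init(). Sketch with hypothetical mydrv_*
 * callbacks; optional hooks such as verify_access or move may be left
 * NULL where the documentation above allows it:
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create = mydrv_ttm_tt_create,
 *		.ttm_tt_populate = mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
 *		.init_mem_type = mydrv_init_mem_type,
 *		.evict_flags = mydrv_evict_flags,
 *		.move = mydrv_bo_move,
 *		.verify_access = mydrv_verify_access,
 *		.io_mem_reserve = mydrv_io_mem_reserve,
 *		.io_mem_free = mydrv_io_mem_free,
 *	};
 */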

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

	struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct lock device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};


#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @glob: Pointer to the struct ttm_bo_global, whose lru_lock spinlock
 * protects the buffer and device lru lists and the ddestroy list.
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global::lru_lock.
	 */
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
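
/*
 * For example, to switch only the caching bits of a placement word while
 * leaving the memory-type bits untouched (flag names from ttm_placement.h,
 * included above):
 *
 *	ttm_flag_masked(&mem->placement, TTM_PL_FLAG_CACHED,
 *			TTM_PL_MASK_CACHING);
 */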

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: Pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * 0 on success, negative error code (e.g. -ENOMEM) on failure.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
			unsigned long size, uint32_t page_flags,
			struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free the memory of the ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt whose backing pages will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel
 * mappings of the pages backing @ttm. If changing from cached to uncached
 * or write-combined, all CPU caches will first be flushed to make sure
 * the data of the pages hit RAM. This function may be very costly as it
 * involves global TLB and cache flushes and potential page splitting /
 * combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct vm_object *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible while waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				bool interruptible,
				bool no_wait_gpu);
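
/*
 * Illustrative sketch of how a driver move handler obtains a temporary
 * placement (the names and the placement choice are hypothetical; the
 * call pattern follows the contract above):
 *
 *	struct ttm_mem_reg tmp_mem = *new_mem;
 *	int ret;
 *
 *	tmp_mem.mm_node = NULL;
 *	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 *			       interruptible, no_wait_gpu);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ttm_bo_mem_put(bo, &tmp_mem);	(release the space when done)
 */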

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      struct address_space *mapping,
			      uint64_t file_page_offset, bool need_dma32);
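
/*
 * Typical bring-up sequence (sketch): take a reference on the bo global
 * via drm_global_item_ref() and then initialize the device. The mydrv_*
 * names are hypothetical:
 *
 *	struct drm_global_reference *global_ref = &mydrv->bo_global_ref.ref;
 *
 *	global_ref->global_type = DRM_GLOBAL_TTM_BO;
 *	global_ref->size = sizeof(struct ttm_bo_global);
 *	global_ref->init = &ttm_bo_global_init;
 *	global_ref->release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(global_ref);
 *	if (ret == 0)
 *		ret = ttm_bo_device_init(&mydrv->bdev,
 *					 global_ref->object,
 *					 &mydrv_bo_driver, mapping,
 *					 DRM_FILE_PAGE_OFFSET,
 *					 mydrv->need_dma32);
 */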

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @ticket != NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible, bool no_wait,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique validation sequence
 * number, and call this function with a non-NULL @ticket whose stamp holds
 * that sequence number. If upon call of this function, the buffer object
 * is already reserved, the validation sequence is checked against the
 * validation sequence of the process currently reserving the buffer, and
 * if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EDEADLK. Otherwise it
 * sleeps waiting for the buffer to become unreserved, after which it
 * retries reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved,
 * and then rerun the validation with the same validation sequence. This
 * procedure will always guarantee that the process with the lowest
 * validation sequence will eventually succeed, preventing both deadlocks
 * and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @ticket != NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!kref_read(&bo->kref));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
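
/*
 * Simple single-buffer usage (sketch): a NULL @ticket is fine when only
 * one buffer is locked at a time.
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... operate on the reserved bo ...
 *	ttm_bo_unreserve(bo);
 */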

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!kref_read(&bo->kref));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}
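
/*
 * Together with ttm_bo_reserve() this implements the usual ww_mutex
 * backoff dance for multi-buffer (execbuf-style) reservation. Sketch:
 *
 *	struct ww_acquire_ctx ticket;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	...
 *	ret = ttm_bo_reserve(bo, true, false, &ticket);
 *	if (ret == -EDEADLK) {
 *		(back off: unreserve everything reserved so far,
 *		 then sleep on the contended buffer)
 *		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *		(and restart the loop with @bo already reserved)
 *	}
 */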

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		lockmgr(&bo->glob->lru_lock, LK_EXCLUSIVE);
		ttm_bo_add_to_lru(bo);
		lockmgr(&bo->glob->lru_lock, LK_RELEASE);
	}
	__ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket)
{
	ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool interruptible, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool interruptible, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     struct dma_fence *fence, bool evict,
				     struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem);

/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
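
/*
 * Typical use when setting up a kernel mapping of a buffer (sketch):
 *
 *	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 */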

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data that needs backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

/* required for DragonFly VM, see ttm/ttm_bo_vm.c */
struct ttm_bo_device_buffer_objects;
int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
	struct ttm_buffer_object *b);
RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
	ttm_bo_cmp_rb_tree_items);

#endif /* _TTM_BO_DRIVER_H_ */