1 /**************************************************************************
2  *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 /* $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_driver.h 247835 2013-03-05 09:49:34Z kib $ */
31 #ifndef _TTM_BO_DRIVER_H_
32 #define _TTM_BO_DRIVER_H_
33 
34 #include <drm/ttm/ttm_bo_api.h>
35 #include <drm/ttm/ttm_memory.h>
36 #include <drm/ttm/ttm_module.h>
37 #include <drm/drm_mm.h>
38 #include <drm/drm_global.h>
39 #include <sys/tree.h>
40 #include <linux/workqueue.h>
41 #include <linux/reservation.h>
42 
43 /* XXX nasty hack, but does the job */
44 #undef RB_ROOT
45 #define	RB_ROOT(head)	(head)->rbh_root
46 
47 struct ttm_backend_func {
48 	/**
49 	 * struct ttm_backend_func member bind
50 	 *
51 	 * @ttm: Pointer to a struct ttm_tt.
52 	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
53 	 * memory type and location for binding.
54 	 *
55 	 * Bind the backend pages into the aperture in the location
56 	 * indicated by @bo_mem. This function should be able to handle
57 	 * differences between aperture and system page sizes.
58 	 */
59 	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
60 
61 	/**
62 	 * struct ttm_backend_func member unbind
63 	 *
64 	 * @ttm: Pointer to a struct ttm_tt.
65 	 *
66 	 * Unbind previously bound backend pages. This function should be
67 	 * able to handle differences between aperture and system page sizes.
68 	 */
69 	int (*unbind) (struct ttm_tt *ttm);
70 
71 	/**
72 	 * struct ttm_backend_func member destroy
73 	 *
74 	 * @ttm: Pointer to a struct ttm_tt.
75 	 *
	 * Destroy the backend. This will be called back from ttm_tt_destroy,
	 * so don't call ttm_tt_destroy from the callback or an infinite
	 * loop will result.
78 	 */
79 	void (*destroy) (struct ttm_tt *ttm);
80 };
81 
82 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
83 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
84 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
85 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
86 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
87 #define TTM_PAGE_FLAG_SG              (1 << 8)
88 
89 enum ttm_caching_state {
90 	tt_uncached,
91 	tt_wc,
92 	tt_cached
93 };
94 
95 /**
96  * struct ttm_tt
97  *
98  * @bdev: Pointer to a struct ttm_bo_device.
99  * @func: Pointer to a struct ttm_backend_func that describes
100  * the backend methods.
101  * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
102  * pointer.
103  * @pages: Array of pages backing the data.
104  * @num_pages: Number of pages in the page array.
105  * @bdev: Pointer to the current struct ttm_bo_device.
106  * @be: Pointer to the ttm backend.
107  * @swap_storage: Pointer to shmem struct file for swap storage.
108  * @caching_state: The current caching state of the pages.
109  * @state: The current binding state of the pages.
110  *
111  * This is a structure holding the pages, caching- and aperture binding
112  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
113  * memory.
114  */
115 
116 struct ttm_tt {
117 	struct ttm_bo_device *bdev;
118 	struct ttm_backend_func *func;
119 	struct vm_page *dummy_read_page;
120 	struct vm_page **pages;
121 	uint32_t page_flags;
122 	unsigned long num_pages;
123 	struct sg_table *sg; /* for SG objects via dma-buf */
124 	struct ttm_bo_global *glob;
125 	struct vm_object *swap_storage;
126 	enum ttm_caching_state caching_state;
127 	enum {
128 		tt_bound,
129 		tt_unbound,
130 		tt_unpopulated,
131 	} state;
132 };
133 
134 /**
135  * struct ttm_dma_tt
136  *
137  * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
140  *
 * This structure extends struct ttm_tt with the DMA (bus) addresses of
 * the backing pages, for backends that need them to set up DMA mappings.
144  */
145 struct ttm_dma_tt {
146 	struct ttm_tt ttm;
147 	dma_addr_t *dma_address;
148 	struct list_head pages_list;
149 };
150 
151 #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
152 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
153 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
154 
155 struct ttm_mem_type_manager;
156 
157 struct ttm_mem_type_manager_func {
158 	/**
159 	 * struct ttm_mem_type_manager member init
160 	 *
161 	 * @man: Pointer to a memory type manager.
162 	 * @p_size: Implementation dependent, but typically the size of the
163 	 * range to be managed in pages.
164 	 *
165 	 * Called to initialize a private range manager. The function is
166 	 * expected to initialize the man::priv member.
167 	 * Returns 0 on success, negative error code on failure.
168 	 */
169 	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
170 
171 	/**
172 	 * struct ttm_mem_type_manager member takedown
173 	 *
174 	 * @man: Pointer to a memory type manager.
175 	 *
176 	 * Called to undo the setup done in init. All allocated resources
177 	 * should be freed.
178 	 */
179 	int  (*takedown)(struct ttm_mem_type_manager *man);
180 
181 	/**
182 	 * struct ttm_mem_type_manager member get_node
183 	 *
184 	 * @man: Pointer to a memory type manager.
185 	 * @bo: Pointer to the buffer object we're allocating space for.
186 	 * @placement: Placement details.
187 	 * @flags: Additional placement flags.
188 	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
189 	 *
190 	 * This function should allocate space in the memory type managed
191 	 * by @man. Placement details if
192 	 * applicable are given by @placement. If successful,
193 	 * @mem::mm_node should be set to a non-null value, and
194 	 * @mem::start should be set to a value identifying the beginning
195 	 * of the range allocated, and the function should return zero.
	 * If the memory region can't accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
198 	 * If a system error occurred, preventing the request to be fulfilled,
199 	 * the function should return a negative error code.
200 	 *
201 	 * Note that @mem::mm_node will only be dereferenced by
202 	 * struct ttm_mem_type_manager functions and optionally by the driver,
203 	 * which has knowledge of the underlying type.
204 	 *
205 	 * This function may not be called from within atomic context, so
206 	 * an implementation can and must use either a mutex or a spinlock to
207 	 * protect any data structures managing the space.
208 	 */
209 	int  (*get_node)(struct ttm_mem_type_manager *man,
210 			 struct ttm_buffer_object *bo,
211 			 struct ttm_placement *placement,
212 			 uint32_t flags,
213 			 struct ttm_mem_reg *mem);
214 
215 	/**
216 	 * struct ttm_mem_type_manager member put_node
217 	 *
218 	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the memory to be freed.
220 	 *
221 	 * This function frees memory type resources previously allocated
222 	 * and that are identified by @mem::mm_node and @mem::start. May not
223 	 * be called from within atomic context.
224 	 */
225 	void (*put_node)(struct ttm_mem_type_manager *man,
226 			 struct ttm_mem_reg *mem);
227 
228 	/**
229 	 * struct ttm_mem_type_manager member debug
230 	 *
231 	 * @man: Pointer to a memory type manager.
232 	 * @prefix: Prefix to be used in printout to identify the caller.
233 	 *
234 	 * This function is called to print out the state of the memory
235 	 * type manager to aid debugging of out-of-memory conditions.
236 	 * It may not be called from within atomic context.
237 	 */
238 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
239 };
240 
241 /**
242  * struct ttm_mem_type_manager
243  *
244  * @has_type: The memory type has been initialized.
245  * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_FLAG_XX flags identifying the traits of the memory
247  * managed by this memory type.
248  * @gpu_offset: If used, the GPU offset of the first managed page of
249  * fixed memory or the first managed location in an aperture.
250  * @size: Size of the managed region.
251  * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
252  * as defined in ttm_placement_common.h
253  * @default_caching: The default caching policy used for a buffer object
254  * placed in this memory type if the user doesn't provide one.
 * @func: Structure pointer implementing the range manager. See above.
256  * @priv: Driver private closure for @func.
257  * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
258  * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
259  * reserved by the TTM vm system.
260  * @io_reserve_lru: Optional lru list for unreserving io mem regions.
261  * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
262  * static information. bdev::driver::io_mem_free is never used.
263  * @lru: The lru list for this memory type.
264  *
265  * This structure is used to identify and manage memory types for a device.
266  * It's set up by the ttm_bo_driver::init_mem_type method.
267  */
268 
269 
270 
271 struct ttm_mem_type_manager {
272 	struct ttm_bo_device *bdev;
273 
274 	/*
275 	 * No protection. Constant from start.
276 	 */
277 
278 	bool has_type;
279 	bool use_type;
280 	uint32_t flags;
281 	unsigned long gpu_offset;
282 	uint64_t size;
283 	uint32_t available_caching;
284 	uint32_t default_caching;
285 	const struct ttm_mem_type_manager_func *func;
286 	void *priv;
287 	struct lock io_reserve_mutex;
288 	bool use_io_reserve_lru;
289 	bool io_reserve_fastpath;
290 
291 	/*
292 	 * Protected by @io_reserve_mutex:
293 	 */
294 
295 	struct list_head io_reserve_lru;
296 
297 	/*
298 	 * Protected by the global->lru_lock.
299 	 */
300 
301 	struct list_head lru;
302 };
303 
304 /**
305  * struct ttm_bo_driver
306  *
 * @ttm_tt_create: Callback to create a struct ttm_tt backing buffer
 * data with system memory pages.
308  * @invalidate_caches: Callback to invalidate read caches when a buffer object
309  * has been evicted.
310  * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
311  * structure.
312  * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
313  * @move: Callback for a driver to hook in accelerated functions to
314  * move a buffer.
315  * If set to NULL, a potentially slow memcpy() move is used.
316  * @sync_obj_signaled: See ttm_fence_api.h
317  * @sync_obj_wait: See ttm_fence_api.h
318  * @sync_obj_flush: See ttm_fence_api.h
319  * @sync_obj_unref: See ttm_fence_api.h
320  * @sync_obj_ref: See ttm_fence_api.h
321  */
322 
323 struct ttm_bo_driver {
324 	/**
325 	 * ttm_tt_create
326 	 *
	 * @bdev: Pointer to a struct ttm_bo_device.
	 * @size: Size of the data that needs backing.
329 	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
330 	 * @dummy_read_page: See struct ttm_bo_device.
331 	 *
332 	 * Create a struct ttm_tt to back data with system memory pages.
333 	 * No pages are actually allocated.
334 	 * Returns:
335 	 * NULL: Out of memory.
336 	 */
337 	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
338 					unsigned long size,
339 					uint32_t page_flags,
340 					struct vm_page *dummy_read_page);
341 
342 	/**
343 	 * ttm_tt_populate
344 	 *
345 	 * @ttm: The struct ttm_tt to contain the backing pages.
346 	 *
	 * Allocate all backing pages.
348 	 * Returns:
349 	 * -ENOMEM: Out of memory.
350 	 */
351 	int (*ttm_tt_populate)(struct ttm_tt *ttm);
352 
353 	/**
354 	 * ttm_tt_unpopulate
355 	 *
356 	 * @ttm: The struct ttm_tt to contain the backing pages.
357 	 *
	 * Free all backing pages.
359 	 */
360 	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
361 
362 	/**
363 	 * struct ttm_bo_driver member invalidate_caches
364 	 *
365 	 * @bdev: the buffer object device.
366 	 * @flags: new placement of the rebound buffer object.
367 	 *
	 * A previously evicted buffer has been rebound in a
369 	 * potentially new location. Tell the driver that it might
370 	 * consider invalidating read (texture) caches on the next command
371 	 * submission as a consequence.
372 	 */
373 
374 	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
375 	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
376 			      struct ttm_mem_type_manager *man);
377 	/**
378 	 * struct ttm_bo_driver member evict_flags:
379 	 *
380 	 * @bo: the buffer object to be evicted
381 	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in @placement so that when the move is
	 * finished, they'll end up in bo->mem.placement.
385 	 */
386 
	void (*evict_flags) (struct ttm_buffer_object *bo,
388 				struct ttm_placement *placement);
389 	/**
390 	 * struct ttm_bo_driver member move:
391 	 *
392 	 * @bo: the buffer to move
393 	 * @evict: whether this motion is evicting the buffer from
394 	 * the graphics address space
395 	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if waiting for the GPU would require sleeping
398 	 * @new_mem: the new memory region receiving the buffer
399 	 *
400 	 * Move a buffer between two memory regions.
401 	 */
402 	int (*move) (struct ttm_buffer_object *bo,
403 		     bool evict, bool interruptible,
404 		     bool no_wait_gpu,
405 		     struct ttm_mem_reg *new_mem);
406 
407 	/**
408 	 * struct ttm_bo_driver_member verify_access
409 	 *
410 	 * @bo: Pointer to a buffer object.
411 	 * @filp: Pointer to a struct file trying to access the object.
412 	 * FreeBSD: use devfs_get_cdevpriv etc.
413 	 *
414 	 * Called from the map / write / read methods to verify that the
415 	 * caller is permitted to access the buffer object.
416 	 * This member may be set to NULL, which will refuse this kind of
417 	 * access for all buffer objects.
418 	 * This function should return 0 if access is granted, -EPERM otherwise.
419 	 */
420 	int (*verify_access) (struct ttm_buffer_object *bo);
421 
422 	/**
423 	 * In case a driver writer dislikes the TTM fence objects,
424 	 * the driver writer can replace those with sync objects of
425 	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
427 	 * fences directly. The bo driver needs the following functionality:
428 	 * See the corresponding functions in the fence object API
429 	 * documentation.
430 	 */
431 
432 	bool (*sync_obj_signaled) (void *sync_obj);
433 	int (*sync_obj_wait) (void *sync_obj,
434 			      bool lazy, bool interruptible);
435 	int (*sync_obj_flush) (void *sync_obj);
436 	void (*sync_obj_unref) (void **sync_obj);
437 	void *(*sync_obj_ref) (void *sync_obj);
438 
439 	/* hook to notify driver about a driver move so it
440 	 * can do tiling things */
441 	void (*move_notify)(struct ttm_buffer_object *bo,
442 			    struct ttm_mem_reg *new_mem);
443 	/* notify the driver we are taking a fault on this BO
444 	 * and have reserved it */
445 	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
446 
447 	/**
448 	 * notify the driver that we're about to swap out this bo
449 	 */
450 	void (*swap_notify) (struct ttm_buffer_object *bo);
451 
452 	/**
	 * Driver callback invoked when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve and io_mem_free
	 * calls are balanced.
457 	 */
458 	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
459 	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
460 };
461 
462 /**
463  * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
464  */
465 
466 struct ttm_bo_global_ref {
467 	struct drm_global_reference ref;
468 	struct ttm_mem_global *mem_glob;
469 };
470 
471 /**
472  * struct ttm_bo_global - Buffer object driver global data.
473  *
474  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
475  * @dummy_read_page: Pointer to a dummy page used for mapping requests
476  * of unpopulated pages.
477  * @shrink: A shrink callback object used for buffer object swap.
478  * @device_list_mutex: Mutex protecting the device list.
479  * This mutex is held while traversing the device list for pm options.
480  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
481  * @device_list: List of buffer object devices.
482  * @swap_lru: Lru list of buffer objects used for swapping.
483  */
484 
485 struct ttm_bo_global {
486 	u_int kobj_ref;
487 
488 	/**
489 	 * Constant after init.
490 	 */
491 
492 	struct ttm_mem_global *mem_glob;
493 	struct vm_page *dummy_read_page;
494 	struct ttm_mem_shrink shrink;
495 	struct lock device_list_mutex;
496 	struct lock lru_lock;
497 
498 	/**
499 	 * Protected by device_list_mutex.
500 	 */
501 	struct list_head device_list;
502 
503 	/**
504 	 * Protected by the lru_lock.
505 	 */
506 	struct list_head swap_lru;
507 
508 	/**
509 	 * Internal protection.
510 	 */
511 	atomic_t bo_count;
512 };
513 
514 
515 #define TTM_NUM_MEM_TYPES 8
516 
517 #define TTM_BO_PRIV_FLAG_MOVING	0	/* Buffer object is moving and needs
518 					   idling before CPU mapping */
519 #define TTM_BO_PRIV_FLAG_MAX	1
520 #define TTM_BO_PRIV_FLAG_ACTIVE	2	/* Used for release sequencing */
521 /**
522  * struct ttm_bo_device - Buffer object driver device-specific data.
523  *
524  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
525  * @man: An array of mem_type_managers.
526  * @fence_lock: Protects the synchronizing members on *all* bos belonging
527  * to this device.
528  * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
530  * ddestroy lists.
531  * @val_seq: Current validation sequence.
532  * @dev_mapping: A pointer to the struct address_space representing the
533  * device address space.
534  * @wq: Work queue structure for the delayed delete workqueue.
535  *
536  */
537 
538 struct ttm_bo_device {
539 
540 	/*
541 	 * Constant after bo device init / atomic.
542 	 */
543 	struct list_head device_list;
544 	struct ttm_bo_global *glob;
545 	struct ttm_bo_driver *driver;
546 	struct lock vm_lock;
547 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
548 	struct lock fence_lock;
549 	/*
550 	 * Protected by the vm lock.
551 	 */
552 
553 	RB_HEAD(ttm_bo_device_buffer_objects, ttm_buffer_object) addr_space_rb;
554 	struct drm_mm addr_space_mm;
555 
556 	/*
	 * Protected by the global::lru_lock.
558 	 */
559 	struct list_head ddestroy;
560 	uint32_t val_seq;
561 
562 	/*
	 * Protected by load / firstopen / lastclose / unload sync.
564 	 */
565 
566 	struct address_space *dev_mapping;
567 
568 	/*
569 	 * Internal protection.
570 	 */
571 
572 	struct delayed_work wq;
573 
574 	bool need_dma32;
575 };
576 
577 /**
578  * ttm_flag_masked
579  *
580  * @old: Pointer to the result and original value.
581  * @new: New value of bits.
582  * @mask: Mask of bits to change.
583  *
584  * Convenience function to change a number of bits identified by a mask.
585  */
586 
587 static inline uint32_t
588 ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
589 {
590 	*old ^= (*old ^ new) & mask;
591 	return *old;
592 }
593 
594 /**
595  * ttm_tt_init
596  *
597  * @ttm: The struct ttm_tt.
 * @bdev: Pointer to a struct ttm_bo_device.
 * @size: Size of the data that needs backing.
600  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
601  * @dummy_read_page: See struct ttm_bo_device.
602  *
 * Initialize a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
607  */
608 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
609 			unsigned long size, uint32_t page_flags,
610 			struct vm_page *dummy_read_page);
611 extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
612 			   unsigned long size, uint32_t page_flags,
613 			   struct vm_page *dummy_read_page);
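
/*
 * Sketch of a driver's ttm_tt_create callback built on ttm_tt_init.
 * struct mydrv_ttm_tt and mydrv_backend_func are hypothetical, and the
 * kzalloc/kfree calls assume the usual Linux-compat allocators:
 *
 *	struct mydrv_ttm_tt {
 *		struct ttm_tt ttm;
 *		uint64_t gart_offset;
 *	};
 *
 *	static struct ttm_tt *
 *	mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 *			    uint32_t page_flags,
 *			    struct vm_page *dummy_read_page)
 *	{
 *		struct mydrv_ttm_tt *tt;
 *
 *		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &mydrv_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page) != 0) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */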
614 
615 /**
616  * ttm_tt_fini
617  *
618  * @ttm: the ttm_tt structure.
619  *
620  * Free memory of ttm_tt structure
621  */
622 extern void ttm_tt_fini(struct ttm_tt *ttm);
623 extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
624 
625 /**
 * ttm_tt_bind:
627  *
628  * @ttm: The struct ttm_tt containing backing pages.
629  * @bo_mem: The struct ttm_mem_reg identifying the binding location.
630  *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
632  */
633 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
634 
635 /**
 * ttm_tt_destroy:
637  *
638  * @ttm: The struct ttm_tt.
639  *
640  * Unbind, unpopulate and destroy common struct ttm_tt.
641  */
642 extern void ttm_tt_destroy(struct ttm_tt *ttm);
643 
644 /**
 * ttm_tt_unbind:
646  *
647  * @ttm: The struct ttm_tt.
648  *
649  * Unbind a struct ttm_tt.
650  */
651 extern void ttm_tt_unbind(struct ttm_tt *ttm);
652 
653 /**
654  * ttm_tt_swapin:
655  *
656  * @ttm: The struct ttm_tt.
657  *
 * Swap in a previously swapped-out ttm_tt.
659  */
660 extern int ttm_tt_swapin(struct ttm_tt *ttm);
661 
662 /**
663  * ttm_tt_set_placement_caching:
664  *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
666  * @placement: Flag indicating the desired caching policy.
667  *
668  * This function will change caching policy of any default kernel mappings of
669  * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure
 * the data of the pages hit RAM. This function may be very costly as
 * it involves global TLB
673  * and cache flushes and potential page splitting / combining.
674  */
675 extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
676 extern int ttm_tt_swapout(struct ttm_tt *ttm,
677 			  struct vm_object *persistent_swap_storage);
678 
679 /*
680  * ttm_bo.c
681  */
682 
683 /**
684  * ttm_mem_reg_is_pci
685  *
686  * @bdev: Pointer to a struct ttm_bo_device.
687  * @mem: A valid struct ttm_mem_reg.
688  *
689  * Returns true if the memory described by @mem is PCI memory,
690  * false otherwise.
691  */
692 extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
693 				   struct ttm_mem_reg *mem);
694 
695 /**
696  * ttm_bo_mem_space
697  *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptibly when waiting.
 * @no_wait_gpu: Return immediately if the GPU is busy.
704  *
705  * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle buffer objects.
707  * This function may sleep while waiting for space to become available.
708  * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu == true).
710  * -ENOMEM: Could not allocate memory for the buffer object, either due to
711  * fragmentation or concurrent allocators.
712  * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
713  */
714 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
715 				struct ttm_placement *placement,
716 				struct ttm_mem_reg *mem,
717 				bool interruptible,
718 				bool no_wait_gpu);
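
/*
 * Hedged sketch: drivers typically call this from their move callback to
 * grab a temporary placement, e.g. staging a VRAM<->SYSTEM copy through
 * the TT domain. Error handling is elided:
 *
 *	struct ttm_mem_reg tmp_mem;
 *	struct ttm_placement placement;
 *	uint32_t placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 *	int ret;
 *
 *	tmp_mem = *new_mem;
 *	tmp_mem.mm_node = NULL;
 *	placement.fpfn = 0;
 *	placement.lpfn = 0;
 *	placement.num_placement = 1;
 *	placement.placement = &placements;
 *	placement.num_busy_placement = 1;
 *	placement.busy_placement = &placements;
 *	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 *			       interruptible, no_wait_gpu);
 */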
719 
720 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
721 			   struct ttm_mem_reg *mem);
722 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
723 				  struct ttm_mem_reg *mem);
724 
725 extern void ttm_bo_global_release(struct drm_global_reference *ref);
726 extern int ttm_bo_global_init(struct drm_global_reference *ref);
727 
728 extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
729 
730 /**
731  * ttm_bo_device_init
732  *
733  * @bdev: A pointer to a struct ttm_bo_device to initialize.
734  * @glob: A pointer to an initialized struct ttm_bo_global.
735  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
736  * @file_page_offset: Offset into the device address space that is available
737  * for buffer data. This ensures compatibility with other users of the
738  * address space.
739  *
740  * Initializes a struct ttm_bo_device:
741  * Returns:
742  * !0: Failure.
743  */
744 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
745 			      struct ttm_bo_global *glob,
746 			      struct ttm_bo_driver *driver,
747 			      uint64_t file_page_offset, bool need_dma32);
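
/*
 * Typical bring-up order, sketched under the assumption that the driver
 * defines DRM_FILE_PAGE_OFFSET (the customary offset keeping buffer mmap
 * space clear of other device mappings) and has already acquired its
 * global references; mydrv is a hypothetical driver structure:
 *
 *	ret = ttm_bo_device_init(&mydrv->bdev,
 *				 mydrv->bo_global_ref.ref.object,
 *				 &mydrv_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET,
 *				 mydrv->need_dma32);
 *	if (ret != 0)
 *		return ret;
 */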
748 
749 /**
750  * ttm_bo_unmap_virtual
751  *
752  * @bo: tear down the virtual mappings for this BO
753  */
754 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
755 
756 /**
 * ttm_bo_unmap_virtual_locked
758  *
759  * @bo: tear down the virtual mappings for this BO
760  *
761  * The caller must take ttm_mem_io_lock before calling this function.
762  */
763 extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
764 
765 extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
766 extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
767 extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
768 			   bool interruptible);
769 extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
770 
771 
772 /**
773  * ttm_bo_reserve:
774  *
775  * @bo: A pointer to a struct ttm_buffer_object.
776  * @interruptible: Sleep interruptible if waiting.
777  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket carries an older validation sequence.
 * @ticket: The ww_acquire_ctx to use for deadlock-avoiding reservation.
780  *
781  * Locks a buffer object for validation. (Or prevents other processes from
782  * locking it for validation) and removes it from lru lists, while taking
783  * a number of measures to prevent deadlocks.
784  *
785  * Deadlocks may occur when two processes try to reserve multiple buffers in
786  * different order, either by will or as a result of a buffer being evicted
787  * to make room for a buffer already reserved. (Buffers are reserved before
788  * they are evicted). The following algorithm prevents such deadlocks from
789  * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_ticket == 1 and @ticket pointing to a
 * ww_acquire_ctx initialized with that sequence number. If upon call of
 * this function, the buffer object is already
795  * reserved, the validation sequence is checked against the validation
796  * sequence of the process currently reserving the buffer,
797  * and if the current validation sequence is greater than that of the process
798  * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
799  * waiting for the buffer to become unreserved, after which it retries
800  * reserving.
801  * The caller should, when receiving an -EAGAIN error
802  * release all its buffer reservations, wait for @bo to become unreserved, and
803  * then rerun the validation with the same validation sequence. This procedure
804  * will always guarantee that the process with the lowest validation sequence
805  * will eventually succeed, preventing both deadlocks and starvation.
806  *
807  * Returns:
808  * -EAGAIN: The reservation may cause a deadlock.
809  * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket == 1).
811  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
812  * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
816  */
817 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
818 			  bool interruptible,
819 			  bool no_wait, bool use_ticket,
820 			  struct ww_acquire_ctx *ticket);
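
/*
 * Hedged sketch of the ticketed reserve/backoff pattern described above,
 * shown for a single buffer object (multi-buffer loops would back off and
 * retry all reservations on -EDEADLK). It assumes the ww_mutex helpers
 * and reservation_ww_class provided via the <linux/reservation.h>
 * include above:
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo, true, false, true, &ticket);
 *	if (ret == -EDEADLK)
 *		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *	ww_acquire_done(&ticket);
 *	if (ret == 0) {
 *		(validate and submit work here)
 *		ttm_bo_unreserve_ticket(bo, &ticket);
 *	}
 *	ww_acquire_fini(&ticket);
 */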
821 
822 /**
823  * ttm_bo_reserve_slowpath_nolru:
824  * @bo: A pointer to a struct ttm_buffer_object.
825  * @interruptible: Sleep interruptible if waiting.
 * @ticket: The ww_acquire_ctx to use for the reservation.
827  *
828  * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
829  * from all our other reservations. Because there are no other reservations
830  * held by us, this function cannot deadlock any more.
831  *
832  * Will not remove reserved buffers from the lru lists.
833  * Otherwise identical to ttm_bo_reserve_slowpath.
834  */
835 extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
836 					 bool interruptible,
837 					 struct ww_acquire_ctx *ticket);
838 
839 
840 /**
841  * ttm_bo_reserve_slowpath:
842  * @bo: A pointer to a struct ttm_buffer_object.
843  * @interruptible: Sleep interruptible if waiting.
 * @ticket: The ww_acquire_ctx to use for the reservation.
845  *
846  * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
847  * from all our other reservations. Because there are no other reservations
848  * held by us, this function cannot deadlock any more.
849  */
850 extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
851 				   bool interruptible,
852 				   struct ww_acquire_ctx *ticket);
853 
854 /**
855  * ttm_bo_reserve_nolru:
856  *
857  * @bo: A pointer to a struct ttm_buffer_object.
858  * @interruptible: Sleep interruptible if waiting.
859  * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket carries an older validation sequence.
 * @ticket: The ww_acquire_ctx to use for the reservation.
862  *
863  * Will not remove reserved buffers from the lru lists.
864  * Otherwise identical to ttm_bo_reserve.
865  *
866  * Returns:
867  * -EAGAIN: The reservation may cause a deadlock.
868  * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket == 1).
870  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
871  * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
875  */
876 extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
877 				 bool interruptible,
878 				 bool no_wait, bool use_ticket,
879 				 struct ww_acquire_ctx *ticket);
880 
881 /**
882  * ttm_bo_unreserve
883  *
884  * @bo: A pointer to a struct ttm_buffer_object.
885  *
886  * Unreserve a previous reservation of @bo.
887  */
888 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
889 
890 /**
891  * ttm_bo_unreserve_ticket
892  * @bo: A pointer to a struct ttm_buffer_object.
893  * @ticket: ww_acquire_ctx used for reserving
894  *
895  * Unreserve a previous reservation of @bo made with @ticket.
896  */
897 extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
898 				    struct ww_acquire_ctx *ticket);
899 
900 /**
 * ttm_bo_unreserve_ticket_locked
902  * @bo: A pointer to a struct ttm_buffer_object.
903  * @ticket: ww_acquire_ctx used for reserving, or NULL
904  *
905  * Unreserve a previous reservation of @bo made with @ticket.
906  * Needs to be called with struct ttm_bo_global::lru_lock held.
907  */
908 extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
909 					   struct ww_acquire_ctx *ticket);
910 
911 /*
912  * ttm_bo_util.c
913  */
914 
915 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
916 		       struct ttm_mem_reg *mem);
917 void ttm_mem_io_free(struct ttm_bo_device *bdev,
918 		     struct ttm_mem_reg *mem);
919 /**
920  * ttm_bo_move_ttm
921  *
922  * @bo: A pointer to a struct ttm_buffer_object.
923  * @evict: 1: This is an eviction. Don't try to pipeline.
924  * @no_wait_gpu: Return immediately if the GPU is busy.
925  * @new_mem: struct ttm_mem_reg indicating where to move.
926  *
927  * Optimized move function for a buffer object with both old and
928  * new placement backed by a TTM. The function will, if successful,
929  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
930  * and update the (@bo)->mem placement flags. If unsuccessful, the old
931  * data remains untouched, and it's up to the caller to free the
932  * memory space indicated by @new_mem.
933  * Returns:
934  * !0: Failure.
935  */
936 
937 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
938 			   bool evict, bool no_wait_gpu,
939 			   struct ttm_mem_reg *new_mem);
940 
941 /**
942  * ttm_bo_move_memcpy
943  *
944  * @bo: A pointer to a struct ttm_buffer_object.
945  * @evict: 1: This is an eviction. Don't try to pipeline.
946  * @no_wait_gpu: Return immediately if the GPU is busy.
947  * @new_mem: struct ttm_mem_reg indicating where to move.
948  *
949  * Fallback move function for a mappable buffer object in mappable memory.
950  * The function will, if successful,
951  * free any old aperture space, and set (@new_mem)->mm_node to NULL,
952  * and update the (@bo)->mem placement flags. If unsuccessful, the old
953  * data remains untouched, and it's up to the caller to free the
954  * memory space indicated by @new_mem.
955  * Returns:
956  * !0: Failure.
957  */
958 
959 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
960 			      bool evict, bool no_wait_gpu,
961 			      struct ttm_mem_reg *new_mem);
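
/*
 * Drivers commonly use this as the last resort in their move callback
 * when no hardware copy path applies; a hedged sketch (the mydrv_* names
 * are hypothetical):
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		if (mydrv_hw_copy_possible(bo, new_mem))
 *			return mydrv_hw_copy(bo, evict, no_wait_gpu, new_mem);
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */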
962 
963 /**
964  * ttm_bo_free_old_node
965  *
966  * @bo: A pointer to a struct ttm_buffer_object.
967  *
968  * Utility function to free an old placement after a successful move.
969  */
970 extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
971 
972 /**
 * ttm_bo_move_accel_cleanup
974  *
975  * @bo: A pointer to a struct ttm_buffer_object.
976  * @sync_obj: A sync object that signals when moving is complete.
977  * @evict: This is an evict move. Don't return until the buffer is idle.
978  * @no_wait_gpu: Return immediately if the GPU is busy.
979  * @new_mem: struct ttm_mem_reg indicating where to move.
980  *
981  * Accelerated move function to be called when an accelerated move
982  * has been scheduled. The function will create a new temporary buffer object
983  * representing the old placement, and put the sync object on both buffer
984  * objects. After that the newly created buffer object is unref'd to be
985  * destroyed when the move is complete. This will help pipeline
986  * buffer moves.
987  */
988 
989 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
990 				     void *sync_obj,
991 				     bool evict, bool no_wait_gpu,
992 				     struct ttm_mem_reg *new_mem);
993 /**
994  * ttm_io_prot
995  *
996  * @c_state: Caching state.
997  * @tmp: Page protection flag for a normal, cached mapping.
998  *
999  * Utility function that returns the pgprot_t that should be used for
1000  * setting up a PTE with the caching model indicated by @c_state.
1001  */
1002 extern vm_memattr_t ttm_io_prot(uint32_t caching_flags);
1003 
1004 extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1005 
1006 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
1007 #define TTM_HAS_AGP
1008 #include <linux/agp_backend.h>
1009 
1010 /**
1011  * ttm_agp_tt_create
1012  *
1013  * @bdev: Pointer to a struct ttm_bo_device.
1014  * @bridge: The agp bridge this device is sitting on.
1015  * @size: Size of the data needed backing.
1016  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
1017  * @dummy_read_page: See struct ttm_bo_device.
1018  *
1019  *
1020  * Create a TTM backend that uses the indicated AGP bridge as an aperture
1021  * for TT memory. This function uses the linux agpgart interface to
1022  * bind and unbind memory backing a ttm_tt.
1023  */
1024 extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1025 					struct agp_bridge_data *bridge,
1026 					unsigned long size, uint32_t page_flags,
1027 					struct vm_page *dummy_read_page);
1028 int ttm_agp_tt_populate(struct ttm_tt *ttm);
1029 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1030 #endif
1031 
1032 
1033 int ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
1034         struct ttm_buffer_object *b);
1035 RB_PROTOTYPE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
1036     ttm_bo_cmp_rb_tree_items);
1037 
1038 
1039 #endif
1040