/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo_api.h 247835 2013-03-05 09:49:34Z kib $ */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drmP.h>
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
#include <linux/list.h>

struct ttm_bo_device;

struct drm_mm_node;


/**
 * struct ttm_placement
 *
 * @fpfn:		first valid page frame number to put the object
 * @lpfn:		last valid page frame number to put the object
 * @num_placement:	number of preferred placements
 * @placement:		preferred placements
 * @num_busy_placement:	number of preferred placements when eviction is needed
 * @busy_placement:	preferred placements when eviction is needed
 *
 * Structure indicating the placement you request for an object.
 */
struct ttm_placement {
	unsigned	fpfn;
	unsigned	lpfn;
	unsigned	num_placement;
	const uint32_t	*placement;
	unsigned	num_busy_placement;
	const uint32_t	*busy_placement;
};
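
/*
 * Example (illustrative sketch only, not part of this API): a driver that
 * prefers VRAM but accepts system memory under eviction pressure could
 * describe that roughly as follows, assuming the TTM_PL_FLAG_* placement
 * flags from drm/ttm/ttm_placement.h:
 *
 *	static const uint32_t vram_flags[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *	};
 *	static const uint32_t sys_flags[] = {
 *		TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.num_placement = 1,
 *		.placement = vram_flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = sys_flags,
 *	};
 *
 * An fpfn/lpfn of zero places no restriction on the page range used.
 */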

/**
 * struct ttm_bus_placement
 *
 * @addr:		mapped virtual address
 * @base:		bus base address
 * @is_iomem:		is this io memory?
 * @size:		size in bytes
 * @offset:		offset from the base address
 * @io_reserved_vm:     The VM system has a refcount in @io_reserved_count
 * @io_reserved_count:  Refcounting the number of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
struct ttm_bus_placement {
	void		*addr;
	unsigned long	base;
	unsigned long	size;
	unsigned long	offset;
	bool		is_iomem;
	bool		io_reserved_vm;
	uint64_t        io_reserved_count;
};


/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @start: Start of the allocation, in pages, within the memory type.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @mem_type: The memory type the region is placed in.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */

struct ttm_mem_reg {
	void *mm_node;
	unsigned long start;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	uint32_t placement;
	struct ttm_bus_placement bus;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device:	These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
 * driver.
 */

enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * The LRU lists, the delayed delete list and a non-zero kref each keep
 * one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @mem: structure describing current placement.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writes: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, together with the API visibility of this object,
 * makes it possible to derive driver-specific types.
 */

struct ttm_buffer_object {
	/**
	 * Members constant at init.
	 */

	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	enum ttm_bo_type type;
	void (*destroy) (struct ttm_buffer_object *);
	unsigned long num_pages;
	uint64_t addr_space_offset;
	size_t acc_size;

	/**
	 * Members not needing protection.
	 */

	struct kref kref;
	struct kref list_kref;

	wait_queue_head_t event_queue;

	/**
	 * Members protected by the bo::reserved lock.
	 */

	struct ttm_mem_reg mem;
	struct vm_object *persistent_swap_storage;
	struct ttm_tt *ttm;
	bool evicted;

	/**
	 * Members protected by the bo::reserved lock only when written to.
	 */

	atomic_t cpu_writers;

	/**
	 * Members protected by the bdev::lru_lock.
	 */

	struct list_head lru;
	struct list_head ddestroy;
	struct list_head swap;
	struct list_head io_reserve_lru;
	uint32_t val_seq;
	bool seq_valid;

	/**
	 * Members protected by the bdev::lru_lock
	 * only when written to.
	 */

	atomic_t reserved;

	/**
	 * Members protected by struct buffer_object_device::fence_lock.
	 * In addition, setting sync_obj to anything else
	 * than NULL requires bo::reserved to be held. This allows for
	 * checking NULL while reserved but not holding the mentioned lock.
	 */

	void *sync_obj;
	unsigned long priv_flags;

	/**
	 * Members protected by the bdev::vm_lock
	 */

	RB_ENTRY(ttm_buffer_object) vm_rb;
	struct drm_mm_node *vm_node;


	/**
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */

	unsigned long offset;
	uint32_t cur_placement;

	struct sg_table *sg;
};

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct vm_page *page;
	struct sf_buf *sf;
	int num_pages;
	unsigned long size;
	enum {
		ttm_bo_map_iomap        = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap         = 2,
		ttm_bo_map_kmap         = 3,
		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
	return bo;
}
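
/*
 * Usage sketch (illustrative only): a reference taken with ttm_bo_reference()
 * is dropped again with ttm_bo_unref() (declared below), which also clears
 * the caller's pointer:
 *
 *	struct ttm_buffer_object *tmp = ttm_bo_reference(bo);
 *
 *	use tmp while the extra reference keeps the object alive
 *
 *	ttm_bo_unref(&tmp);
 *
 * After ttm_bo_unref() returns, tmp is NULL.
 */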

/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo:  The buffer object.
 * @interruptible:  Use interruptible wait.
 * @no_wait:  Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
		       bool interruptible, bool no_wait);
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				bool interruptible,
				bool no_wait_gpu);
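
/*
 * Example (sketch only): move a bo, already reserved by the caller, into the
 * placement described by a struct ttm_placement (see above), sleeping
 * interruptibly and waiting for the GPU rather than failing with -EBUSY:
 *
 *	int ret;
 *
 *	ret = ttm_bo_validate(bo, &placement, true, false);
 *	if (ret)
 *		return ret;
 *
 * A return value of -ERESTARTSYS means the sleep was interrupted by a
 * signal and the operation may be retried.
 */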

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);


/**
 * ttm_bo_list_ref_sub
 *
 * @bo: The buffer object.
 * @count: The number of references with which to decrease @bo::list_kref.
 * @never_free: The refcount should not reach zero with this operation.
 *
 * Release @count lru list references to this buffer object.
 */
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
				bool never_free);

/**
 * ttm_bo_add_to_lru
 *
 * @bo: The buffer object.
 *
 * Add this bo to the relevant mem type lru and, if it's backed by
 * system pages (ttms), to the swap list.
 * This function must be called with struct ttm_bo_global::lru_lock held, and
 * is typically called immediately prior to unreserving a bo.
 */
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_del_from_lru
 *
 * @bo: The buffer object.
 *
 * Remove this bo from all lru lists used to lookup and reserve an object.
 * This function must be called with struct ttm_bo_global::lru_lock held,
 * and is usually called immediately after the bo has been reserved to
 * avoid recursive reservation from lru lists.
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);


/**
 * ttm_bo_lock_delayed_workqueue
 *
 * Prevent the delayed workqueue from running.
 * Returns
 * True if the workqueue was queued at the time.
 */
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);

/**
 * ttm_bo_unlock_delayed_workqueue
 *
 * Allows the delayed workqueue to run.
 */
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
					    int resched);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object.
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * command submission that affects the buffer will return -EBUSY
 * until ttm_bo_synccpu_write_release is called.
 *
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
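
/*
 * Usage sketch (illustrative only): bracket direct CPU access to the buffer
 * contents with a synccpu grab/release pair:
 *
 *	int ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret)
 *		return ret;
 *
 *	CPU reads and writes of the buffer contents go here.
 *
 *	ttm_bo_synccpu_write_release(bo);
 */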

/**
 * ttm_bo_acc_size
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo_size: size of the buffer object in bytes.
 * @struct_size: size of the structure holding buffer object data
 *
 * Returns size to account for a buffer object.
 */
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size);

/**
 * ttm_bo_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for the buffer object.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @sg: Scatter-gather table backing a ttm_bo_type_sg object, or NULL.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * If a failure occurs, the function will call the @destroy function, or
 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
 * illegal and will likely cause memory corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */

extern int ttm_bo_init(struct ttm_bo_device *bdev,
			struct ttm_buffer_object *bo,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct vm_object *persistent_swap_storage,
			size_t acc_size,
			struct sg_table *sg,
			void (*destroy) (struct ttm_buffer_object *));
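
/*
 * Example (sketch only; mydrv_bo, mydrv_bo_destroy, bdev, size and placement
 * are hypothetical driver-side names): embed a ttm_buffer_object in a larger
 * driver structure, account its size with ttm_bo_acc_size() and initialize it:
 *
 *	struct mydrv_bo {
 *		struct ttm_buffer_object base;
 *		driver private fields follow here
 *	};
 *
 *	static void mydrv_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		struct mydrv_bo *mbo =
 *			container_of(tbo, struct mydrv_bo, base);
 *		kfree(mbo);
 *	}
 *
 *	struct mydrv_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *	size_t acc_size = ttm_bo_acc_size(bdev, size,
 *					  sizeof(struct mydrv_bo));
 *	int ret;
 *
 *	if (mbo == NULL)
 *		return -ENOMEM;
 *	ret = ttm_bo_init(bdev, &mbo->base, size, ttm_bo_type_device,
 *			  &placement, 1, true, NULL, acc_size,
 *			  NULL, mydrv_bo_destroy);
 *
 * On failure ttm_bo_init() has already called mydrv_bo_destroy(), so the
 * caller must not touch mbo again; on success the object starts with
 * kref and list_kref set to 1.
 */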

/**
 * ttm_bo_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for the buffer object.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
 * on that object. The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
 */

extern int ttm_bo_create(struct ttm_bo_device *bdev,
				unsigned long size,
				enum ttm_bo_type type,
				struct ttm_placement *placement,
				uint32_t page_alignment,
				bool interruptible,
				struct vm_object *persistent_swap_storage,
				struct ttm_buffer_object **p_bo);
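
/*
 * Example (sketch only; bdev, size and placement are hypothetical driver-side
 * names): let TTM allocate and initialize the object in one call when no
 * driver-specific wrapper structure is needed:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *			    1, true, NULL, &bo);
 *	if (ret)
 *		return ret;
 *
 * The object is freed with kfree() once the last reference is dropped
 * through ttm_bo_unref(&bo).
 */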

/**
 * ttm_bo_check_placement
 *
 * @bo:		the buffer object.
 * @placement:	placements
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
					struct ttm_placement *placement);

/**
 * ttm_bo_init_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 * @p_size: size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
				unsigned long p_size);
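
/*
 * Example (sketch only): a driver would typically bring up its managed
 * memory types at load time.  TTM_PL_VRAM is assumed to come from
 * drm/ttm/ttm_placement.h and vram_size is a hypothetical driver variable:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 */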
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should have been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates whether the
 * virtual map is io memory or normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
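
/*
 * Usage sketch (illustrative only): temporarily map the first page of a
 * buffer object into the kernel and honour the io-memory indication
 * returned by ttm_kmap_obj_virtual():
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *	access through iowriteXX()/ioreadXX() if is_iomem is true,
 *	plain loads and stores otherwise
 *
 *	ttm_bo_kunmap(&map);
 */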

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma:       vma as input from the fbdev mmap method.
 * @bo:        The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

/* XXXKIB
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
			  struct ttm_buffer_object *bo);
*/
/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp:      filp as input from the mmap method.
 * @vma:       vma as input from the mmap method.
 * @bdev:      Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method
 * if the device address space is to be backed by the bo manager.
 */
/* XXXKIB
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		       struct ttm_bo_device *bdev);
*/
/**
 * ttm_bo_io
 *
 * @bdev:      Pointer to the struct ttm_bo_device.
 * @filp:      Pointer to the struct file attempting to read / write.
 * @wbuf:      User-space pointer to address of buffer to write. NULL on read.
 * @rbuf:      User-space pointer to address of buffer to read into.
 * NULL on write.
 * @count:     Number of bytes to read / write.
 * @f_pos:     Pointer to current file position.
 * @write:     1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to be called from the fops::read and fops::write method.
 * Returns:
 * See man (2) write, man(2) read. In particular,
 * the function may return -ERESTARTSYS if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 const char *wbuf, char *rbuf,
			 size_t count, off_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

/**
 * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
 *
 * @bo:     The buffer object to check.
 *
 * This function returns an indication of whether a bo is reserved or not, and
 * should only be used to print an error when it is not reserved as a result of
 * incorrect API usage, since there is no guarantee that the caller is the one
 * holding the reservation.
 */
static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
{
	return atomic_read(&bo->reserved);
}

#endif