/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

#include <uvm/uvm_extern.h>

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device:	These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object currently attached to this bo, if any.
 * @priority: LRU priority; bos with lower priority are evicted first.
 * @pin_count: Pin count. A pinned bo cannot be evicted.
 * @sg: The scatter-gather table backing a ttm_bo_type_sg object, if any.
 *
 * Base class for TTM buffer objects that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. Drivers implementing multiple
 * GPU memory manager contexts should manage the address space
 * of each context separately and use these objects to get the correct
 * placement and caching for those GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/*
	 * Special members that are protected by the reserve lock
	 * and the bo::lock when written to. Can be read with
	 * either of these locks held.
	 */
	struct sg_table *sg;
};
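
/*
 * Driver subclassing sketch (illustrative only, not part of this API):
 * drivers typically embed struct ttm_buffer_object as the first member
 * of their own bo type and recover the containing object in the destroy
 * callback. "struct my_bo" and my_bo_destroy() are hypothetical names.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *		// driver-private state goes here
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		struct my_bo *mbo = container_of(tbo, struct my_bo, tbo);
 *
 *		kfree(mbo);
 *	}
 */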

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM bo backing the mapping.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
	void *virtual;
	struct vm_page *page;
	enum {
		ttm_bo_map_iomap        = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap         = 2,
		ttm_bo_map_kmap         = 3,
		ttm_bo_map_premapped    = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @force_alloc: Don't check the memory account during suspend or CPU page
 * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	bool force_alloc;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
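
/*
 * Usage sketch (illustrative only): a typical context for a user-triggered
 * validation that may sleep and should back off on signals.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret = ttm_bo_validate(bo, placement, &ctx);
 */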

/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 */
static inline void ttm_bo_get(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
}

/**
 * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
 * its refcount has already reached zero.
 * @bo: The buffer object.
 *
 * Used to reference a TTM buffer object in lookups where the object is removed
 * from the lookup structure during the destructor and for RCU lookups.
 *
 * Returns: @bo if the referencing was successful, NULL otherwise.
 */
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}
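
/*
 * Lookup sketch (illustrative only): taking a reference on a bo found in
 * a driver lookup structure, while holding the lookup lock or within an
 * RCU read section. my_lookup() is a hypothetical driver helper.
 *
 *	struct ttm_buffer_object *bo;
 *
 *	bo = ttm_bo_get_unless_zero(my_lookup(handle));
 *	if (!bo)
 *		return -ENOENT;	// object is already being destroyed
 *	// ... use bo ...
 *	ttm_bo_put(bo);
 */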

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation. (Or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if a @ticket is provided.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
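
/*
 * Reservation sketch (illustrative only): lock a bo with deadlock
 * backoff. my_unreserve_all() is a hypothetical driver helper that
 * drops every other reservation held under @ticket.
 *
 *	int ret = ttm_bo_reserve(bo, true, false, ticket);
 *	if (ret == -EDEADLK) {
 *		my_unreserve_all(ticket);
 *		ret = ttm_bo_reserve_slowpath(bo, true, ticket);
 *	}
 *	if (ret)
 *		return ret;
 *	// ... validate, map or modify the bo ...
 *	ttm_bo_unreserve(bo);
 */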

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

/* Assign @new_mem to @bo, which must not already have a resource assigned. */
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates true if the
 * virtual map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
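
/*
 * Mapping sketch (illustrative only): kmap the first page of a reserved
 * bo and honor the iomem flag when accessing it.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	u32 *ptr;
 *
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		iowrite32(0, (void __iomem *)ptr);	// io memory
 *	else
 *		*ptr = 0;				// normal memory
 *	ttm_bo_kunmap(&map);
 */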

int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
#ifdef __linux__
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
#else
int ttm_bo_mmap_obj(struct ttm_buffer_object *bo);
#endif
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket);
#ifdef __linux__
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
#else
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo);
vm_fault_t ttm_bo_vm_fault_reserved(struct uvm_faultinfo *ufi,
				    vaddr_t vaddr,
				    pgoff_t num_prefault,
				    pgoff_t fault_page_size);
int ttm_bo_vm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *,
    int, int, vm_fault_t, vm_prot_t, int);
#endif /* !__linux__ */

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter,
		     bus_space_tag_t memt);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

#endif