/*	$NetBSD: radeon_object.h,v 1.4 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__

#include <drm/radeon_drm.h>
#include "radeon.h"

/**
 * radeon_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return RADEON_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return RADEON_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	return 0;
}

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
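
/*
 * Illustrative usage sketch (editorial example, not part of the original
 * header): callers pair radeon_bo_reserve() with radeon_bo_unreserve()
 * around any access that requires the reservation, such as a kernel CPU
 * mapping, and propagate -ERESTARTSYS so the operation can be restarted
 * after a signal.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r)
 *		return r;	(may be -ERESTARTSYS)
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		... access the buffer through ptr ...
 *		radeon_bo_kunmap(bo);
 *	}
 *	radeon_bo_unreserve(bo);
 *	return r;
 */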

/**
 * radeon_bo_gpu_offset - return GPU offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: the object should be either pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 */
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
	return bo->tbo.offset;
}
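
/*
 * Illustrative sketch (editorial example, not part of the original header):
 * the GPU offset is only stable while the object stays in place, so a caller
 * that needs a long-lived GPU address typically reserves the object, pins it
 * into the desired domain, and uses the address radeon_bo_pin() (declared
 * below) hands back.
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_unreserve(bo);
 *	return r;
 */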

static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}

static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}

/**
 * radeon_bo_mmap_offset - return mmap offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
			  bool no_wait);

extern int radeon_bo_create(struct radeon_device *rdev,
			    unsigned long size, int byte_align,
			    bool kernel, u32 domain, u32 flags,
			    struct sg_table *sg,
			    struct dma_resv *resv,
			    struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
				    u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
				   struct ww_acquire_ctx *ticket,
				   struct list_head *head, int ring);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				u32 tiling_flags, u32 pitch);
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				u32 *tiling_flags, u32 *pitch);
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
				  bool evict,
				  struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
			    bool shared);

/*
 * sub allocation
 */

static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
	return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
				     struct radeon_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain,
				     u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
				      struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
				      struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
					struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
			    struct radeon_sa_manager *sa_manager,
			    struct radeon_sa_bo **sa_bo,
			    unsigned size, unsigned align);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
			      struct radeon_sa_bo **sa_bo,
			      struct radeon_fence *fence);
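
/*
 * Illustrative sketch (editorial example, not part of the original header):
 * a sub-allocation is carved out of an already initialized and started
 * sa_manager with radeon_sa_bo_new(), addressed through the inline helpers
 * above, and released with radeon_sa_bo_free(), which defers reuse of the
 * range until the given fence (if any) signals.
 *
 *	struct radeon_sa_bo *sa_bo;
 *	int r;
 *
 *	r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, size, align);
 *	if (r)
 *		return r;
 *	... fill radeon_sa_bo_cpu_addr(sa_bo), point the GPU at
 *	    radeon_sa_bo_gpu_addr(sa_bo) ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 */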
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
					 struct seq_file *m);
#endif


#endif