/*	$NetBSD: radeon_ib.c,v 1.3 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_ib.c,v 1.3 2021/12/18 23:45:43 riastradh Exp $");

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>

#include "radeon.h"
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.  A sketch of
 * the typical IB life cycle follows below.
 */
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

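/*
 * Illustrative sketch only, not part of the driver (hence #if 0):
 * the typical IB life cycle as driven by a caller such as the CS
 * ioctl path.  The function name, the 64-byte size, and the single
 * NOP dword are made-up placeholders; real callers fill ib.ptr[]
 * with ASIC-specific packets and set ib.length_dw accordingly.
 */
#if 0
static int radeon_example_ib_submit(struct radeon_device *rdev)
{
	struct radeon_ib ib;
	int r;

	/* allocate an IB from the suballocator; no VM in this sketch */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
	if (r)
		return r;

	/* placeholder payload: the ring's NOP packet stands in for a
	 * real command stream; length_dw must be non-zero or
	 * radeon_ib_schedule() rejects the IB */
	ib.ptr[0] = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].nop;
	ib.length_dw = 1;

	/* put the IB on the ring; no const IB, flush the HDP cache */
	r = radeon_ib_schedule(rdev, &ib, NULL, true);
	if (r == 0)
		/* ib.fence now tracks completion of this IB */
		r = radeon_fence_wait(ib.fence, false);

	/* return the suballocation; freed once the fence retires it */
	radeon_ib_free(rdev, &ib);
	return r;
}
#endif
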
/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: vm to bind the IB to, or NULL for no vm
 * @size: requested IB size
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int r;

	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	radeon_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;

	return 0;
}

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_sync_free(rdev, &ib->sync, ib->fence);
	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib, bool hdp_flush)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB to schedule; we should report
		 * which of the two conditions failed. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	/* grab a vm id if necessary */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;
		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_sync_fence(&ib->sync, vm_id_fence);
	}

	/* sync with other rings */
	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring,
				ib->sync.last_vm_update);

	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_sync_free(rdev, &const_ib->sync, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}

	if (ib->vm)
		radeon_vm_fence(rdev, ib->vm, ib->fence);

	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
	return 0;
}
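
/*
 * Illustrative sketch only, not part of the driver (hence #if 0):
 * on SI, a CE IB is scheduled together with its DE IB by passing it
 * as const_ib above, so it lands on the ring ahead of the DE IB.
 * The function name is hypothetical; both IBs are assumed to have
 * been allocated and filled in by the caller already.
 */
#if 0
static int radeon_example_schedule_with_ce(struct radeon_device *rdev,
					   struct radeon_ib *de_ib,
					   struct radeon_ib *ce_ib)
{
	/* mark the CE IB so the ASIC code emits it on the constant engine */
	ce_ib->is_const_ib = true;

	/* the CONST_IB goes on the ring prior to the DE IB */
	return radeon_ib_schedule(rdev, de_ib, ce_ib, true);
}
#endif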

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}

	if (rdev->family >= CHIP_BONAIRE) {
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT,
					      RADEON_GEM_GTT_WC);
	} else {
		/* Before CIK, it's better to stick to cacheable GTT due
		 * to the command stream checking
		 */
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024,
					      RADEON_GPU_PAGE_SIZE,
					      RADEON_GEM_DOMAIN_GTT, 0);
	}
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	if (radeon_debugfs_sa_init(rdev)) {
		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			radeon_fence_driver_force_completion(rdev, i);
			ring->ready = false;
			rdev->needs_reset = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}
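
/*
 * Illustrative sketch only, not part of the driver (hence #if 0):
 * how the pool setup and the ring tests are typically ordered during
 * ASIC startup.  The function name is hypothetical; real startup
 * lives in the per-ASIC init code.
 */
#if 0
static int radeon_example_startup(struct radeon_device *rdev)
{
	int r;

	/* create the suballocator pool before any IB can be requested */
	r = radeon_ib_pool_init(rdev);
	if (r)
		return r;

	/* once the rings are up, verify that each one executes IBs */
	r = radeon_ib_ring_tests(rdev);
	if (r)
		return r;

	/* ... on teardown, radeon_ib_pool_fini(rdev) releases the pool */
	return 0;
}
#endif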

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list radeon_debugfs_sa_list[] = {
	{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
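
/*
 * Note: with debugfs enabled, this node is registered under the DRM
 * minor's debugfs directory (e.g. <debugfs>/dri/<minor>/radeon_sa_info)
 * and dumps the suballocator state via radeon_sa_bo_dump_debug_info().
 */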

#endif

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
	return 0;
#endif
}