/* $NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ib.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
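/*
 * Illustrative sketch (not part of the driver, compiled out below): the
 * typical IB lifecycle of get -> fill -> schedule -> free, roughly as the
 * ring IB tests use it. The helper name example_submit_nop() is
 * hypothetical; amdgpu_ib_get(), amdgpu_ib_schedule() and amdgpu_ib_free()
 * are the entry points defined in this file.
 */
#if 0
static int example_submit_nop(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_ib ib;
	int r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);	/* kernel IB: no VM */
	if (r)
		return r;

	ib.ptr[0] = ring->funcs->nop;	/* fill with a single NOP packet */
	ib.length_dw = 1;

	/* no job: this is the ring-test style of submission */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (!r && dma_fence_wait_timeout(f, false,
					 AMDGPU_IB_TEST_TIMEOUT) <= 0)
		r = -ETIMEDOUT;

	/* the suballocation is only reused once the fence f signals */
	amdgpu_ib_free(adev, &ib, f);
	dma_fence_put(f);
	return r;
}
#endif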

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the IB will be used with, or NULL for a kernel IB
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the SA bo must wait on before the IB's memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_mcbp &&
		    !amdgpu_sriov_vf(adev)) /* for SR-IOV preemption, the preamble CE IB must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with a fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
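
/*
 * Illustrative fragment (compiled out): how the two-IB CE + DE submission
 * described in the comment above looks from the caller's side. The flag
 * values are the real ones from amdgpu_drm.h; the surrounding job setup is
 * elided and the variable names are hypothetical.
 */
#if 0
	struct amdgpu_ib ibs[2];	/* filled in by the CS path */
	struct dma_fence *fence;
	int r;

	/* ibs[0] is the CONST_IB: mark it for the CE and as a preamble so
	 * the loop above may drop it when the context has not switched. */
	ibs[0].flags |= AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;

	/* ibs[1] is the DE IB; the CE IB is emitted onto the ring first */
	r = amdgpu_ib_schedule(ring, 2, ibs, job, &fence);
#endif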

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
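
/*
 * Illustrative fragment (compiled out): where the pool init/fini pair sits
 * in a device bring-up/teardown path. The ordering is the point here; the
 * surrounding init steps are elided and not taken from this file.
 */
#if 0
	r = amdgpu_ib_pool_init(adev);
	if (r)
		return r;	/* without the suballocator no IBs can be requested */

	/* ... bring up the rings, then run amdgpu_ib_ring_tests(adev) ... */

	amdgpu_ib_pool_fini(adev);	/* on teardown, once all IBs are freed */
#endif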

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If a test fails, that ring is disabled.
 * Returns 0 on success, or an error if any IB test fails;
 * a failure on the primary GFX ring additionally disables
 * acceleration and aborts the remaining tests.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* on the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be running on another VF. The
		 * IB test timeout for MM engines under SR-IOV therefore has
		 * to be long; 8 seconds should be enough for the MM engine
		 * to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* the CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
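
/*
 * Illustrative fragment (compiled out): how a caller in the late-init path
 * might consume the return value. A non-zero return means some ring failed
 * its IB test; if it was the primary GFX ring, adev->accel_working has
 * already been cleared by the function above.
 */
#if 0
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "IB ring tests failed (%d)\n", r);
#endif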

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
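
/*
 * Usage note: the entry registered above dumps the suballocator state and,
 * under the Linux debugfs layout, typically appears as
 * /sys/kernel/debug/dri/<minor>/amdgpu_sa_info; the exact path on NetBSD
 * may differ.
 */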