/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

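/*
 * Poll a fence value in memory until it reaches wait_seq or the timeout
 * (in microseconds) expires.  Returns the remaining timeout on success,
 * 0 on timeout.
 */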
signed long amdgpu_mes_fence_wait_polling(u64 *fence,
					  u64 wait_seq,
					  signed long timeout)
{
	while ((s64)(wait_seq - *fence) > 0 && timeout > 0) {
		udelay(2);
		timeout -= 2;
	}
	return timeout > 0 ? timeout : 0;
}

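/*
 * One doorbell slice per process: enough doorbells for the maximum queue
 * count per process, rounded up to a whole page.
 */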
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

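/*
 * Allocate a kernel doorbell from the MES doorbell bitmap.  SDMA rings start
 * the search at the first SDMA doorbell offset, everything else at 0.  On
 * success, the absolute dword offset on the doorbell BAR is returned through
 * doorbell_index.
 */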
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

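/*
 * Set up the doorbell bitmap and reserve the first doorbells as the
 * aggregated doorbells, one per MES priority level.
 */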
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

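/* Create the MES firmware event log buffer when MES logging is enabled. */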
static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_mes_log_enable)
		return 0;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mes.event_log_gpu_obj,
				    &adev->mes.event_log_gpu_addr,
				    &adev->mes.event_log_cpu_addr);
	if (r) {
		dev_warn(adev->dev, "failed to create MES event log buffer (%d)\n", r);
		return r;
	}

	memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

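/*
 * One-time MES software init: set up the pasid/gang/queue idrs and locks,
 * program the per-pipe hqd masks, allocate writeback slots for the scheduler
 * context, query-status fence and register reads, then init doorbells and
 * the optional event log.
 */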
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
		    IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	r = amdgpu_mes_event_log_init(adev);
	if (r)
		goto error_doorbell;

	return 0;

error_doorbell:
	amdgpu_mes_doorbell_free(adev);
error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

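/* Tear down everything amdgpu_mes_init() created. */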
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
			      &adev->mes.event_log_gpu_addr,
			      &adev->mes.event_log_cpu_addr);

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

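/*
 * Create a MES process for a pasid: allocate and clear the process context
 * BO, then publish the process in the pasid idr under the MES lock.
 */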
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to add pasid %d to pasid idr\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

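/*
 * Destroy a MES process: unmap all of its hardware queues and drop the idr
 * entries under the MES lock, then free the queue MQDs, gang contexts and
 * the process context outside the lock.
 */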
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

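/*
 * Create a gang (a group of queues scheduled as one unit) under an existing
 * process: allocate and clear the gang context BO, then register the gang in
 * the gang idr and on the process's gang list under the MES lock.
 */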
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

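/* Remove a gang that no longer has queues and free its context BO. */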
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

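/* Ask the MES firmware to suspend every gang of every known process. */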
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

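/* Ask the MES firmware to resume every gang of every known process. */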
int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d\n",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

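/*
 * Allocate and clear the MQD BO for a queue and leave it reserved;
 * amdgpu_mes_queue_init_mqd() fills it in and unreserves it.
 */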
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)\n", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

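/*
 * Fill the MQD from the queue properties through the per-type mqd manager.
 * GFX/compute MQD init has to run with the target me/pipe selected, hence
 * the srbm_mutex around the select_me_pipe_q window.
 */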
static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

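/*
 * Create a hardware queue on an existing gang: allocate an MQD and a queue
 * id, take a kernel doorbell, initialize the MQD, then hand the queue to the
 * MES firmware through the add_hw_queue backend op.
 */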
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

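/*
 * Remove a hardware queue: drop it from the queue idr, ask the MES firmware
 * to remove it, then release its doorbell and MQD.
 */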
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

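/*
 * Map a "legacy" kernel ring (one the driver owns, rather than a
 * MES-created queue) through the MES firmware.
 */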
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	struct mes_map_legacy_queue_input queue_input;
	int r;

	memset(&queue_input, 0, sizeof(queue_input));

	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	queue_input.wptr_addr = ring->wptr_gpu_addr;

	r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to map legacy queue\n");

	return r;
}

int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

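/*
 * Register access routed through the MES firmware.  Reads land in the
 * read_val writeback slot set up in amdgpu_mes_init(); writes and
 * write/wait combinations are submitted as misc ops.
 */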
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

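/*
 * Program the per-process shader debugger state through a MES misc op.
 * trap_en is only honored from MES API version 14 on; the process_ctx_flush
 * flag is rejected here, use amdgpu_mes_flush_shader_debugger() instead.
 */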
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to flush_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

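/* Translate an amdgpu_ring into the queue properties MES needs to add it. */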
static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

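/*
 * Resolve the byte offset of a per-ring slot inside the MES context meta
 * data for engine _eng, given a slot index or one of the special
 * RING/IB/PADDING offset ids.
 */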
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

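/*
 * Create a software ring on top of a MES hardware queue.  The ring borrows
 * funcs/me/pipe from an existing ring of the same type and is backed by the
 * per-context meta data BO.
 */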
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before add_hw_queue failed */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

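/* Tear down a MES software ring together with its hardware queue. */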
void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	del_timer_sync(&ring->fence_drv.fallback_timer);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

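/* Allocate and clear the per-context MES meta-data BO in GTT. */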
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

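/*
 * Map the meta-data BO into the VM at meta_data_gpu_addr: lock the BO and
 * the page directory with drm_exec, create and map the bo_va, update page
 * tables, and wait for the updates before MES uses the mapping.
 */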
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

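/*
 * Unmap the meta-data BO: delete its bo_va, fence the BO against the
 * remaining bookkeeping fences, and wait for the freed mappings to be
 * cleared from the page tables.
 */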
int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

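/* Self-test helper: create one gang plus num_queue test rings on it. */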
static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

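/* Run ring and IB tests on every ring the self test created. */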
static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

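/*
 * MES self test: create a temporary VM and process, spawn one gang per queue
 * type with a single queue each, then ring/IB-test every ring that was
 * successfully added.  Always returns 0; failures are only logged.
 */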
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1} };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!\n");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues. */
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
		    IP_VERSION(10, 3, 0) &&
		    amdgpu_ip_version(adev, GC_HWIP, 0) <
		    IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

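/*
 * Request the MES firmware image for the given pipe, record the ucode and
 * data start addresses from its header, and register the image with the PSP
 * front-door loader when that load type is in use.
 */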
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[50];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

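/* Dump the raw MES event log buffer as a hex dump. */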
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);

	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
		     mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);

#endif

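/* Register the MES event log debugfs file when MES and its log are enabled. */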
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (adev->enable_mes && amdgpu_mes_log_enable)
		debugfs_create_file("amdgpu_mes_event_log", 0444, root,
				    adev, &amdgpu_debugfs_mes_event_log_fops);
#endif
}