1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25 #include <linux/ratelimit.h>
26 #include <linux/printk.h>
27 #include <linux/slab.h>
28 #include <linux/list.h>
29 #include <linux/types.h>
30 #include <linux/bitops.h>
31 #include <linux/sched.h>
32 #include "kfd_priv.h"
33 #include "kfd_device_queue_manager.h"
34 #include "kfd_mqd_manager.h"
35 #include "cik_regs.h"
36 #include "kfd_kernel_queue.h"
37 #include "amdgpu_amdkfd.h"
38 #include "mes_api_def.h"
39 #include "kfd_debug.h"
40
41 /* Size of the per-pipe EOP queue */
42 #define CIK_HPD_EOP_BYTES_LOG2 11
43 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
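/* i.e. 1 << 11 = 2048 bytes (2 KiB) of EOP buffer per pipe */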
44
45 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
46 u32 pasid, unsigned int vmid);
47
48 static int execute_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param,
51 uint32_t grace_period);
52 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
53 enum kfd_unmap_queues_filter filter,
54 uint32_t filter_param,
55 uint32_t grace_period,
56 bool reset);
57
58 static int map_queues_cpsch(struct device_queue_manager *dqm);
59
60 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62
63 static inline void deallocate_hqd(struct device_queue_manager *dqm,
64 struct queue *q);
65 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
66 static int allocate_sdma_queue(struct device_queue_manager *dqm,
67 struct queue *q, const uint32_t *restore_sdma_id);
68 static void kfd_process_hw_exception(struct work_struct *work);
69
70 static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
72 {
73 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
74 return KFD_MQD_TYPE_SDMA;
75 return KFD_MQD_TYPE_CP;
76 }
77
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
79 {
80 int i;
81 int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
82 + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
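/*
* Illustrative example with an assumed topology: with 4 pipes per MEC and
* 8 queues per pipe, (mec = 0, pipe = 2) gives pipe_offset = 16, so bits
* 16..23 of cp_queue_bitmap describe that pipe's queues.
*/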
83
84 /* queue is available for KFD usage if bit is 1 */
85 for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
86 if (test_bit(pipe_offset + i,
87 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
88 return true;
89 return false;
90 }
91
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
93 {
94 return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
95 AMDGPU_MAX_QUEUES);
96 }
97
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
99 {
100 return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
101 }
102
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
104 {
105 return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
106 }
107
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
109 {
110 return kfd_get_num_sdma_engines(dqm->dev) +
111 kfd_get_num_xgmi_sdma_engines(dqm->dev);
112 }
113
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
115 {
116 return kfd_get_num_sdma_engines(dqm->dev) *
117 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
118 }
119
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
121 {
122 return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
123 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
124 }
125
static void init_sdma_bitmaps(struct device_queue_manager *dqm)
127 {
128 bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
129 bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
130
131 bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
132 bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
133
134 /* Mask out the reserved queues */
135 bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
136 dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
137 KFD_MAX_SDMA_QUEUES);
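/*
* Example (hypothetical reservation): if bit 0 of
* reserved_sdma_queues_bitmap is set, SDMA queue 0 is cleared here and is
* never handed out by allocate_sdma_queue().
*/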
138 }
139
void program_sh_mem_settings(struct device_queue_manager *dqm,
141 struct qcm_process_device *qpd)
142 {
143 uint32_t xcc_mask = dqm->dev->xcc_mask;
144 int xcc_id;
145
146 for_each_inst(xcc_id, xcc_mask)
147 dqm->dev->kfd2kgd->program_sh_mem_settings(
148 dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
149 qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
150 qpd->sh_mem_bases, xcc_id);
151 }
152
static void kfd_hws_hang(struct device_queue_manager *dqm)
154 {
155 /*
156 * Issue a GPU reset if HWS is unresponsive
157 */
158 dqm->is_hws_hang = true;
159
160 /* It's possible we're detecting a HWS hang in the
161 * middle of a GPU reset. No need to schedule another
162 * reset in this case.
163 */
164 if (!dqm->is_resetting)
165 schedule_work(&dqm->hw_exception_work);
166 }
167
static int convert_to_mes_queue_type(int queue_type)
169 {
170 int mes_queue_type;
171
172 switch (queue_type) {
173 case KFD_QUEUE_TYPE_COMPUTE:
174 mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
175 break;
176 case KFD_QUEUE_TYPE_SDMA:
177 mes_queue_type = MES_QUEUE_TYPE_SDMA;
178 break;
179 default:
180 WARN(1, "Invalid queue type %d", queue_type);
181 mes_queue_type = -EINVAL;
182 break;
183 }
184
185 return mes_queue_type;
186 }
187
static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
189 struct qcm_process_device *qpd)
190 {
191 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
192 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
193 struct mes_add_queue_input queue_input;
194 int r, queue_type;
195 uint64_t wptr_addr_off;
196
197 if (dqm->is_hws_hang)
198 return -EIO;
199
200 memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
201 queue_input.process_id = qpd->pqm->process->pasid;
202 queue_input.page_table_base_addr = qpd->page_table_base;
203 queue_input.process_va_start = 0;
204 queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
205 /* MES unit for quantum is 100ns */
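/* so the 10 ms process quantum below is 10,000,000 ns / 100 ns = 100000 units */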
206 queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
207 queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
208 queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
209 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
210 queue_input.inprocess_gang_priority = q->properties.priority;
211 queue_input.gang_global_priority_level =
212 AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
213 queue_input.doorbell_offset = q->properties.doorbell_off;
214 queue_input.mqd_addr = q->gart_mqd_addr;
215 queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
216
217 if (q->wptr_bo) {
218 wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
219 queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
220 }
221
222 queue_input.is_kfd_process = 1;
223 queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
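/*
* queue_size is tracked in bytes in q->properties; the >> 2 below appears
* to convert it to dwords for MES (assumption based on the shift).
*/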
224 queue_input.queue_size = q->properties.queue_size >> 2;
225
226 queue_input.paging = false;
227 queue_input.tba_addr = qpd->tba_addr;
228 queue_input.tma_addr = qpd->tma_addr;
229 queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
230 queue_input.skip_process_ctx_clear =
231 qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED &&
232 (qpd->pqm->process->debug_trap_enabled ||
233 kfd_dbg_has_ttmps_always_setup(q->device));
234
235 queue_type = convert_to_mes_queue_type(q->properties.type);
236 if (queue_type < 0) {
237 dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
238 q->properties.type);
239 return -EINVAL;
240 }
241 queue_input.queue_type = (uint32_t)queue_type;
242
243 queue_input.exclusively_scheduled = q->properties.is_gws;
244
245 amdgpu_mes_lock(&adev->mes);
246 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
247 amdgpu_mes_unlock(&adev->mes);
248 if (r) {
249 dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
250 q->properties.doorbell_off);
251 dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
252 kfd_hws_hang(dqm);
253 }
254
255 return r;
256 }
257
static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
259 struct qcm_process_device *qpd)
260 {
261 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
262 int r;
263 struct mes_remove_queue_input queue_input;
264
265 if (dqm->is_hws_hang)
266 return -EIO;
267
268 memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
269 queue_input.doorbell_offset = q->properties.doorbell_off;
270 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
271
272 amdgpu_mes_lock(&adev->mes);
273 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
274 amdgpu_mes_unlock(&adev->mes);
275
276 if (r) {
277 dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
278 q->properties.doorbell_off);
279 dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
280 kfd_hws_hang(dqm);
281 }
282
283 return r;
284 }
285
static int remove_all_queues_mes(struct device_queue_manager *dqm)
287 {
288 struct device_process_node *cur;
289 struct device *dev = dqm->dev->adev->dev;
290 struct qcm_process_device *qpd;
291 struct queue *q;
292 int retval = 0;
293
294 list_for_each_entry(cur, &dqm->queues, list) {
295 qpd = cur->qpd;
296 list_for_each_entry(q, &qpd->queues_list, list) {
297 if (q->properties.is_active) {
298 retval = remove_queue_mes(dqm, q, qpd);
299 if (retval) {
300 dev_err(dev, "%s: Failed to remove queue %d for dev %d",
301 __func__,
302 q->properties.queue_id,
303 dqm->dev->id);
304 return retval;
305 }
306 }
307 }
308 }
309
310 return retval;
311 }
312
static void increment_queue_count(struct device_queue_manager *dqm,
314 struct qcm_process_device *qpd,
315 struct queue *q)
316 {
317 dqm->active_queue_count++;
318 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
319 q->properties.type == KFD_QUEUE_TYPE_DIQ)
320 dqm->active_cp_queue_count++;
321
322 if (q->properties.is_gws) {
323 dqm->gws_queue_count++;
324 qpd->mapped_gws_queue = true;
325 }
326 }
327
static void decrement_queue_count(struct device_queue_manager *dqm,
329 struct qcm_process_device *qpd,
330 struct queue *q)
331 {
332 dqm->active_queue_count--;
333 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
334 q->properties.type == KFD_QUEUE_TYPE_DIQ)
335 dqm->active_cp_queue_count--;
336
337 if (q->properties.is_gws) {
338 dqm->gws_queue_count--;
339 qpd->mapped_gws_queue = false;
340 }
341 }
342
343 /*
344 * Allocate a doorbell ID to this queue.
345 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
346 */
static int allocate_doorbell(struct qcm_process_device *qpd,
348 struct queue *q,
349 uint32_t const *restore_id)
350 {
351 struct kfd_node *dev = qpd->dqm->dev;
352
353 if (!KFD_IS_SOC15(dev)) {
354 /* On pre-SOC15 chips we need to use the queue ID to
355 * preserve the user mode ABI.
356 */
357
358 if (restore_id && *restore_id != q->properties.queue_id)
359 return -EINVAL;
360
361 q->doorbell_id = q->properties.queue_id;
362 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
363 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
364 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
365 * doorbell assignments based on the engine and queue id.
* The doorbell index distance between RLC (2*i) and (2*i+1)
* for an SDMA engine is 512.
368 */
369
370 uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
371
372 /*
373 * q->properties.sdma_engine_id corresponds to the virtual
374 * sdma engine number. However, for doorbell allocation,
375 * we need the physical sdma engine id in order to get the
376 * correct doorbell offset.
377 */
378 uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
379 get_num_all_sdma_engines(qpd->dqm) +
380 q->properties.sdma_engine_id]
381 + (q->properties.sdma_queue_id & 1)
382 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
383 + (q->properties.sdma_queue_id >> 1);
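/*
* Worked example with hypothetical values, taking
* KFD_QUEUE_DOORBELL_MIRROR_OFFSET as the 512 mentioned above: if
* idx_offset[...] for this engine is 0x200 and sdma_queue_id is 5, then
* valid_id = 0x200 + (5 & 1) * 512 + (5 >> 1) = 0x200 + 512 + 2.
*/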
384
385 if (restore_id && *restore_id != valid_id)
386 return -EINVAL;
387 q->doorbell_id = valid_id;
388 } else {
389 /* For CP queues on SOC15 */
390 if (restore_id) {
391 /* make sure that ID is free */
392 if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
393 return -EINVAL;
394
395 q->doorbell_id = *restore_id;
396 } else {
397 /* or reserve a free doorbell ID */
398 unsigned int found;
399
400 found = find_first_zero_bit(qpd->doorbell_bitmap,
401 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
402 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
403 pr_debug("No doorbells available");
404 return -EBUSY;
405 }
406 set_bit(found, qpd->doorbell_bitmap);
407 q->doorbell_id = found;
408 }
409 }
410
411 q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
412 qpd->proc_doorbells,
413 q->doorbell_id,
414 dev->kfd->device_info.doorbell_size);
415 return 0;
416 }
417
static void deallocate_doorbell(struct qcm_process_device *qpd,
419 struct queue *q)
420 {
421 unsigned int old;
422 struct kfd_node *dev = qpd->dqm->dev;
423
424 if (!KFD_IS_SOC15(dev) ||
425 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
426 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
427 return;
428
429 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
430 WARN_ON(!old);
431 }
432
static void program_trap_handler_settings(struct device_queue_manager *dqm,
434 struct qcm_process_device *qpd)
435 {
436 uint32_t xcc_mask = dqm->dev->xcc_mask;
437 int xcc_id;
438
439 if (dqm->dev->kfd2kgd->program_trap_handler_settings)
440 for_each_inst(xcc_id, xcc_mask)
441 dqm->dev->kfd2kgd->program_trap_handler_settings(
442 dqm->dev->adev, qpd->vmid, qpd->tba_addr,
443 qpd->tma_addr, xcc_id);
444 }
445
static int allocate_vmid(struct device_queue_manager *dqm,
447 struct qcm_process_device *qpd,
448 struct queue *q)
449 {
450 struct device *dev = dqm->dev->adev->dev;
451 int allocated_vmid = -1, i;
452
453 for (i = dqm->dev->vm_info.first_vmid_kfd;
454 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
455 if (!dqm->vmid_pasid[i]) {
456 allocated_vmid = i;
457 break;
458 }
459 }
460
461 if (allocated_vmid < 0) {
462 dev_err(dev, "no more vmid to allocate\n");
463 return -ENOSPC;
464 }
465
466 pr_debug("vmid allocated: %d\n", allocated_vmid);
467
468 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
469
470 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
471
472 qpd->vmid = allocated_vmid;
473 q->properties.vmid = allocated_vmid;
474
475 program_sh_mem_settings(dqm, qpd);
476
477 if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
478 program_trap_handler_settings(dqm, qpd);
479
480 /* qpd->page_table_base is set earlier when register_process()
481 * is called, i.e. when the first queue is created.
482 */
483 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
484 qpd->vmid,
485 qpd->page_table_base);
486 /* invalidate the VM context after pasid and vmid mapping is set up */
487 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
488
489 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
490 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
491 qpd->sh_hidden_private_base, qpd->vmid);
492
493 return 0;
494 }
495
static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
497 struct qcm_process_device *qpd)
498 {
499 const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
500 int ret;
501
502 if (!qpd->ib_kaddr)
503 return -ENOMEM;
504
505 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
506 if (ret)
507 return ret;
508
509 return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
510 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
511 pmf->release_mem_size / sizeof(uint32_t));
512 }
513
static void deallocate_vmid(struct device_queue_manager *dqm,
515 struct qcm_process_device *qpd,
516 struct queue *q)
517 {
518 struct device *dev = dqm->dev->adev->dev;
519
520 /* On GFX v7, CP doesn't flush TC at dequeue */
521 if (q->device->adev->asic_type == CHIP_HAWAII)
522 if (flush_texture_cache_nocpsch(q->device, qpd))
523 dev_err(dev, "Failed to flush TC\n");
524
525 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
526
527 /* Release the vmid mapping */
528 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
529 dqm->vmid_pasid[qpd->vmid] = 0;
530
531 qpd->vmid = 0;
532 q->properties.vmid = 0;
533 }
534
static int create_queue_nocpsch(struct device_queue_manager *dqm,
536 struct queue *q,
537 struct qcm_process_device *qpd,
538 const struct kfd_criu_queue_priv_data *qd,
539 const void *restore_mqd, const void *restore_ctl_stack)
540 {
541 struct mqd_manager *mqd_mgr;
542 int retval;
543
544 dqm_lock(dqm);
545
546 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
547 pr_warn("Can't create new usermode queue because %d queues were already created\n",
548 dqm->total_queue_count);
549 retval = -EPERM;
550 goto out_unlock;
551 }
552
553 if (list_empty(&qpd->queues_list)) {
554 retval = allocate_vmid(dqm, qpd, q);
555 if (retval)
556 goto out_unlock;
557 }
558 q->properties.vmid = qpd->vmid;
559 /*
560 * Eviction state logic: mark all queues as evicted, even ones
561 * not currently active. Restoring inactive queues later only
562 * updates the is_evicted flag but is a no-op otherwise.
563 */
564 q->properties.is_evicted = !!qpd->evicted;
565
566 q->properties.tba_addr = qpd->tba_addr;
567 q->properties.tma_addr = qpd->tma_addr;
568
569 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
570 q->properties.type)];
571 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
572 retval = allocate_hqd(dqm, q);
573 if (retval)
574 goto deallocate_vmid;
575 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
576 q->pipe, q->queue);
577 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
578 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
579 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
580 if (retval)
581 goto deallocate_vmid;
582 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
583 }
584
585 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
586 if (retval)
587 goto out_deallocate_hqd;
588
589 /* Temporarily release dqm lock to avoid a circular lock dependency */
590 dqm_unlock(dqm);
591 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
592 dqm_lock(dqm);
593
594 if (!q->mqd_mem_obj) {
595 retval = -ENOMEM;
596 goto out_deallocate_doorbell;
597 }
598
599 if (qd)
600 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
601 &q->properties, restore_mqd, restore_ctl_stack,
602 qd->ctl_stack_size);
603 else
604 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
605 &q->gart_mqd_addr, &q->properties);
606
607 if (q->properties.is_active) {
608 if (!dqm->sched_running) {
609 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
610 goto add_queue_to_list;
611 }
612
613 if (WARN(q->process->mm != current->mm,
614 "should only run in user thread"))
615 retval = -EFAULT;
616 else
617 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
618 q->queue, &q->properties, current->mm);
619 if (retval)
620 goto out_free_mqd;
621 }
622
623 add_queue_to_list:
624 list_add(&q->list, &qpd->queues_list);
625 qpd->queue_count++;
626 if (q->properties.is_active)
627 increment_queue_count(dqm, qpd, q);
628
629 /*
630 * Unconditionally increment this counter, regardless of the queue's
631 * type or whether the queue is active.
632 */
633 dqm->total_queue_count++;
634 pr_debug("Total of %d queues are accountable so far\n",
635 dqm->total_queue_count);
636 goto out_unlock;
637
638 out_free_mqd:
639 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
640 out_deallocate_doorbell:
641 deallocate_doorbell(qpd, q);
642 out_deallocate_hqd:
643 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
644 deallocate_hqd(dqm, q);
645 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
646 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
647 deallocate_sdma_queue(dqm, q);
648 deallocate_vmid:
649 if (list_empty(&qpd->queues_list))
650 deallocate_vmid(dqm, qpd, q);
651 out_unlock:
652 dqm_unlock(dqm);
653 return retval;
654 }
655
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
657 {
658 bool set;
659 int pipe, bit, i;
660
661 set = false;
662
663 for (pipe = dqm->next_pipe_to_allocate, i = 0;
664 i < get_pipes_per_mec(dqm);
665 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
666
667 if (!is_pipe_enabled(dqm, 0, pipe))
668 continue;
669
670 if (dqm->allocated_queues[pipe] != 0) {
671 bit = ffs(dqm->allocated_queues[pipe]) - 1;
672 dqm->allocated_queues[pipe] &= ~(1 << bit);
673 q->pipe = pipe;
674 q->queue = bit;
675 set = true;
676 break;
677 }
678 }
679
680 if (!set)
681 return -EBUSY;
682
683 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
684 /* horizontal hqd allocation */
685 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
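/*
* E.g. with 4 pipes per MEC, successive allocations start their search on
* pipes 0, 1, 2, 3, 0, ... so HQD slots spread evenly across the enabled
* pipes (pipe count assumed for illustration).
*/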
686
687 return 0;
688 }
689
static inline void deallocate_hqd(struct device_queue_manager *dqm,
691 struct queue *q)
692 {
693 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
694 }
695
696 #define SQ_IND_CMD_CMD_KILL 0x00000003
697 #define SQ_IND_CMD_MODE_BROADCAST 0x00000001
698
static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
700 {
701 int status = 0;
702 unsigned int vmid;
703 uint16_t queried_pasid;
704 union SQ_CMD_BITS reg_sq_cmd;
705 union GRBM_GFX_INDEX_BITS reg_gfx_index;
706 struct kfd_process_device *pdd;
707 int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
708 int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
709 uint32_t xcc_mask = dev->xcc_mask;
710 int xcc_id;
711
712 reg_sq_cmd.u32All = 0;
713 reg_gfx_index.u32All = 0;
714
715 pr_debug("Killing all process wavefronts\n");
716
717 if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
718 dev_err(dev->adev->dev, "no vmid pasid mapping supported\n");
719 return -EOPNOTSUPP;
720 }
721
722 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
723 * ATC_VMID15_PASID_MAPPING
724 * to check which VMID the current process is mapped to.
725 */
726
727 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
728 status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
729 (dev->adev, vmid, &queried_pasid);
730
731 if (status && queried_pasid == p->pasid) {
732 pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
733 vmid, p->pasid);
734 break;
735 }
736 }
737
738 if (vmid > last_vmid_to_scan) {
739 dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
740 return -EFAULT;
741 }
742
/* Take the VMID for that process the safe way, using the PDD */
744 pdd = kfd_get_process_device_data(dev, p);
745 if (!pdd)
746 return -EFAULT;
747
748 reg_gfx_index.bits.sh_broadcast_writes = 1;
749 reg_gfx_index.bits.se_broadcast_writes = 1;
750 reg_gfx_index.bits.instance_broadcast_writes = 1;
751 reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
752 reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
753 reg_sq_cmd.bits.vm_id = vmid;
754
755 for_each_inst(xcc_id, xcc_mask)
756 dev->kfd2kgd->wave_control_execute(
757 dev->adev, reg_gfx_index.u32All,
758 reg_sq_cmd.u32All, xcc_id);
759
760 return 0;
761 }
762
763 /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
* to avoid unsynchronized access
765 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
767 struct qcm_process_device *qpd,
768 struct queue *q)
769 {
770 int retval;
771 struct mqd_manager *mqd_mgr;
772
773 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
774 q->properties.type)];
775
776 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
777 deallocate_hqd(dqm, q);
778 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
779 deallocate_sdma_queue(dqm, q);
780 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
781 deallocate_sdma_queue(dqm, q);
782 else {
783 pr_debug("q->properties.type %d is invalid\n",
784 q->properties.type);
785 return -EINVAL;
786 }
787 dqm->total_queue_count--;
788
789 deallocate_doorbell(qpd, q);
790
791 if (!dqm->sched_running) {
792 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
793 return 0;
794 }
795
796 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
797 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
798 KFD_UNMAP_LATENCY_MS,
799 q->pipe, q->queue);
800 if (retval == -ETIME)
801 qpd->reset_wavefronts = true;
802
803 list_del(&q->list);
804 if (list_empty(&qpd->queues_list)) {
805 if (qpd->reset_wavefronts) {
806 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
807 dqm->dev);
808 /* dbgdev_wave_reset_wavefronts has to be called before
809 * deallocate_vmid(), i.e. when vmid is still in use.
810 */
811 dbgdev_wave_reset_wavefronts(dqm->dev,
812 qpd->pqm->process);
813 qpd->reset_wavefronts = false;
814 }
815
816 deallocate_vmid(dqm, qpd, q);
817 }
818 qpd->queue_count--;
819 if (q->properties.is_active)
820 decrement_queue_count(dqm, qpd, q);
821
822 return retval;
823 }
824
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
826 struct qcm_process_device *qpd,
827 struct queue *q)
828 {
829 int retval;
830 uint64_t sdma_val = 0;
831 struct device *dev = dqm->dev->adev->dev;
832 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
833 struct mqd_manager *mqd_mgr =
834 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
835
836 /* Get the SDMA queue stats */
837 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
838 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
839 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
840 &sdma_val);
841 if (retval)
842 dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
843 q->properties.queue_id);
844 }
845
846 dqm_lock(dqm);
847 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
848 if (!retval)
849 pdd->sdma_past_activity_counter += sdma_val;
850 dqm_unlock(dqm);
851
852 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
853
854 return retval;
855 }
856
static int update_queue(struct device_queue_manager *dqm, struct queue *q,
858 struct mqd_update_info *minfo)
859 {
860 int retval = 0;
861 struct device *dev = dqm->dev->adev->dev;
862 struct mqd_manager *mqd_mgr;
863 struct kfd_process_device *pdd;
864 bool prev_active = false;
865
866 dqm_lock(dqm);
867 pdd = kfd_get_process_device_data(q->device, q->process);
868 if (!pdd) {
869 retval = -ENODEV;
870 goto out_unlock;
871 }
872 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
873 q->properties.type)];
874
875 /* Save previous activity state for counters */
876 prev_active = q->properties.is_active;
877
878 /* Make sure the queue is unmapped before updating the MQD */
879 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
880 if (!dqm->dev->kfd->shared_resources.enable_mes)
881 retval = unmap_queues_cpsch(dqm,
882 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
883 else if (prev_active)
884 retval = remove_queue_mes(dqm, q, &pdd->qpd);
885
886 if (retval) {
887 dev_err(dev, "unmap queue failed\n");
888 goto out_unlock;
889 }
890 } else if (prev_active &&
891 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
892 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
893 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
894
895 if (!dqm->sched_running) {
896 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
897 goto out_unlock;
898 }
899
900 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
901 (dqm->dev->kfd->cwsr_enabled ?
902 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
903 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
904 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
905 if (retval) {
906 dev_err(dev, "destroy mqd failed\n");
907 goto out_unlock;
908 }
909 }
910
911 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
912
913 /*
914 * check active state vs. the previous state and modify
915 * counter accordingly. map_queues_cpsch uses the
916 * dqm->active_queue_count to determine whether a new runlist must be
917 * uploaded.
918 */
919 if (q->properties.is_active && !prev_active) {
920 increment_queue_count(dqm, &pdd->qpd, q);
921 } else if (!q->properties.is_active && prev_active) {
922 decrement_queue_count(dqm, &pdd->qpd, q);
923 } else if (q->gws && !q->properties.is_gws) {
924 if (q->properties.is_active) {
925 dqm->gws_queue_count++;
926 pdd->qpd.mapped_gws_queue = true;
927 }
928 q->properties.is_gws = true;
929 } else if (!q->gws && q->properties.is_gws) {
930 if (q->properties.is_active) {
931 dqm->gws_queue_count--;
932 pdd->qpd.mapped_gws_queue = false;
933 }
934 q->properties.is_gws = false;
935 }
936
937 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
938 if (!dqm->dev->kfd->shared_resources.enable_mes)
939 retval = map_queues_cpsch(dqm);
940 else if (q->properties.is_active)
941 retval = add_queue_mes(dqm, q, &pdd->qpd);
942 } else if (q->properties.is_active &&
943 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
944 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
945 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
946 if (WARN(q->process->mm != current->mm,
947 "should only run in user thread"))
948 retval = -EFAULT;
949 else
950 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
951 q->pipe, q->queue,
952 &q->properties, current->mm);
953 }
954
955 out_unlock:
956 dqm_unlock(dqm);
957 return retval;
958 }
959
960 /* suspend_single_queue does not lock the dqm like the
961 * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should
962 * lock the dqm before calling, and unlock after calling.
963 *
964 * The reason we don't lock the dqm is because this function may be
965 * called on multiple queues in a loop, so rather than locking/unlocking
966 * multiple times, we will just keep the dqm locked for all of the calls.
967 */
static int suspend_single_queue(struct device_queue_manager *dqm,
969 struct kfd_process_device *pdd,
970 struct queue *q)
971 {
972 bool is_new;
973
974 if (q->properties.is_suspended)
975 return 0;
976
977 pr_debug("Suspending PASID %u queue [%i]\n",
978 pdd->process->pasid,
979 q->properties.queue_id);
980
981 is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
982
983 if (is_new || q->properties.is_being_destroyed) {
984 pr_debug("Suspend: skip %s queue id %i\n",
985 is_new ? "new" : "destroyed",
986 q->properties.queue_id);
987 return -EBUSY;
988 }
989
990 q->properties.is_suspended = true;
991 if (q->properties.is_active) {
992 if (dqm->dev->kfd->shared_resources.enable_mes) {
993 int r = remove_queue_mes(dqm, q, &pdd->qpd);
994
995 if (r)
996 return r;
997 }
998
999 decrement_queue_count(dqm, &pdd->qpd, q);
1000 q->properties.is_active = false;
1001 }
1002
1003 return 0;
1004 }
1005
1006 /* resume_single_queue does not lock the dqm like the functions
1007 * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should
1008 * lock the dqm before calling, and unlock after calling.
1009 *
1010 * The reason we don't lock the dqm is because this function may be
1011 * called on multiple queues in a loop, so rather than locking/unlocking
1012 * multiple times, we will just keep the dqm locked for all of the calls.
1013 */
static int resume_single_queue(struct device_queue_manager *dqm,
1015 struct qcm_process_device *qpd,
1016 struct queue *q)
1017 {
1018 struct kfd_process_device *pdd;
1019
1020 if (!q->properties.is_suspended)
1021 return 0;
1022
1023 pdd = qpd_to_pdd(qpd);
1024
1025 pr_debug("Restoring from suspend PASID %u queue [%i]\n",
1026 pdd->process->pasid,
1027 q->properties.queue_id);
1028
1029 q->properties.is_suspended = false;
1030
1031 if (QUEUE_IS_ACTIVE(q->properties)) {
1032 if (dqm->dev->kfd->shared_resources.enable_mes) {
1033 int r = add_queue_mes(dqm, q, &pdd->qpd);
1034
1035 if (r)
1036 return r;
1037 }
1038
1039 q->properties.is_active = true;
1040 increment_queue_count(dqm, qpd, q);
1041 }
1042
1043 return 0;
1044 }
1045
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
1047 struct qcm_process_device *qpd)
1048 {
1049 struct queue *q;
1050 struct mqd_manager *mqd_mgr;
1051 struct kfd_process_device *pdd;
1052 int retval, ret = 0;
1053
1054 dqm_lock(dqm);
1055 if (qpd->evicted++ > 0) /* already evicted, do nothing */
1056 goto out;
1057
1058 pdd = qpd_to_pdd(qpd);
1059 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
1060 pdd->process->pasid);
1061
1062 pdd->last_evict_timestamp = get_jiffies_64();
1063 /* Mark all queues as evicted. Deactivate all active queues on
1064 * the qpd.
1065 */
1066 list_for_each_entry(q, &qpd->queues_list, list) {
1067 q->properties.is_evicted = true;
1068 if (!q->properties.is_active)
1069 continue;
1070
1071 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1072 q->properties.type)];
1073 q->properties.is_active = false;
1074 decrement_queue_count(dqm, qpd, q);
1075
1076 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
1077 continue;
1078
1079 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
1080 (dqm->dev->kfd->cwsr_enabled ?
1081 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
1082 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
1083 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
1084 if (retval && !ret)
1085 /* Return the first error, but keep going to
1086 * maintain a consistent eviction state
1087 */
1088 ret = retval;
1089 }
1090
1091 out:
1092 dqm_unlock(dqm);
1093 return ret;
1094 }
1095
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
1097 struct qcm_process_device *qpd)
1098 {
1099 struct queue *q;
1100 struct device *dev = dqm->dev->adev->dev;
1101 struct kfd_process_device *pdd;
1102 int retval = 0;
1103
1104 dqm_lock(dqm);
1105 if (qpd->evicted++ > 0) /* already evicted, do nothing */
1106 goto out;
1107
1108 pdd = qpd_to_pdd(qpd);
1109
1110 /* The debugger creates processes that temporarily have not acquired
1111 * all VMs for all devices and has no VMs itself.
1112 * Skip queue eviction on process eviction.
1113 */
1114 if (!pdd->drm_priv)
1115 goto out;
1116
1117 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
1118 pdd->process->pasid);
1119
1120 /* Mark all queues as evicted. Deactivate all active queues on
1121 * the qpd.
1122 */
1123 list_for_each_entry(q, &qpd->queues_list, list) {
1124 q->properties.is_evicted = true;
1125 if (!q->properties.is_active)
1126 continue;
1127
1128 q->properties.is_active = false;
1129 decrement_queue_count(dqm, qpd, q);
1130
1131 if (dqm->dev->kfd->shared_resources.enable_mes) {
1132 retval = remove_queue_mes(dqm, q, qpd);
1133 if (retval) {
1134 dev_err(dev, "Failed to evict queue %d\n",
1135 q->properties.queue_id);
1136 goto out;
1137 }
1138 }
1139 }
1140 pdd->last_evict_timestamp = get_jiffies_64();
1141 if (!dqm->dev->kfd->shared_resources.enable_mes)
1142 retval = execute_queues_cpsch(dqm,
1143 qpd->is_debug ?
1144 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
1145 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1146 USE_DEFAULT_GRACE_PERIOD);
1147
1148 out:
1149 dqm_unlock(dqm);
1150 return retval;
1151 }
1152
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1154 struct qcm_process_device *qpd)
1155 {
1156 struct mm_struct *mm = NULL;
1157 struct queue *q;
1158 struct mqd_manager *mqd_mgr;
1159 struct kfd_process_device *pdd;
1160 uint64_t pd_base;
1161 uint64_t eviction_duration;
1162 int retval, ret = 0;
1163
1164 pdd = qpd_to_pdd(qpd);
1165 /* Retrieve PD base */
1166 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1167
1168 dqm_lock(dqm);
1169 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1170 goto out;
1171 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1172 qpd->evicted--;
1173 goto out;
1174 }
1175
1176 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1177 pdd->process->pasid);
1178
1179 /* Update PD Base in QPD */
1180 qpd->page_table_base = pd_base;
1181 pr_debug("Updated PD address to 0x%llx\n", pd_base);
1182
1183 if (!list_empty(&qpd->queues_list)) {
1184 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
1185 dqm->dev->adev,
1186 qpd->vmid,
1187 qpd->page_table_base);
1188 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1189 }
1190
1191 /* Take a safe reference to the mm_struct, which may otherwise
1192 * disappear even while the kfd_process is still referenced.
1193 */
1194 mm = get_task_mm(pdd->process->lead_thread);
1195 if (!mm) {
1196 ret = -EFAULT;
1197 goto out;
1198 }
1199
1200 /* Remove the eviction flags. Activate queues that are not
1201 * inactive for other reasons.
1202 */
1203 list_for_each_entry(q, &qpd->queues_list, list) {
1204 q->properties.is_evicted = false;
1205 if (!QUEUE_IS_ACTIVE(q->properties))
1206 continue;
1207
1208 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1209 q->properties.type)];
1210 q->properties.is_active = true;
1211 increment_queue_count(dqm, qpd, q);
1212
1213 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1214 continue;
1215
1216 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1217 q->queue, &q->properties, mm);
1218 if (retval && !ret)
1219 /* Return the first error, but keep going to
1220 * maintain a consistent eviction state
1221 */
1222 ret = retval;
1223 }
1224 qpd->evicted = 0;
1225 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1226 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1227 out:
1228 if (mm)
1229 mmput(mm);
1230 dqm_unlock(dqm);
1231 return ret;
1232 }
1233
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1235 struct qcm_process_device *qpd)
1236 {
1237 struct queue *q;
1238 struct device *dev = dqm->dev->adev->dev;
1239 struct kfd_process_device *pdd;
1240 uint64_t eviction_duration;
1241 int retval = 0;
1242
1243 pdd = qpd_to_pdd(qpd);
1244
1245 dqm_lock(dqm);
1246 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1247 goto out;
1248 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1249 qpd->evicted--;
1250 goto out;
1251 }
1252
1253 /* The debugger creates processes that temporarily have not acquired
1254 * all VMs for all devices and has no VMs itself.
1255 * Skip queue restore on process restore.
1256 */
1257 if (!pdd->drm_priv)
1258 goto vm_not_acquired;
1259
1260 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1261 pdd->process->pasid);
1262
1263 /* Update PD Base in QPD */
1264 qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1265 pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
1266
1267 /* activate all active queues on the qpd */
1268 list_for_each_entry(q, &qpd->queues_list, list) {
1269 q->properties.is_evicted = false;
1270 if (!QUEUE_IS_ACTIVE(q->properties))
1271 continue;
1272
1273 q->properties.is_active = true;
1274 increment_queue_count(dqm, &pdd->qpd, q);
1275
1276 if (dqm->dev->kfd->shared_resources.enable_mes) {
1277 retval = add_queue_mes(dqm, q, qpd);
1278 if (retval) {
1279 dev_err(dev, "Failed to restore queue %d\n",
1280 q->properties.queue_id);
1281 goto out;
1282 }
1283 }
1284 }
1285 if (!dqm->dev->kfd->shared_resources.enable_mes)
1286 retval = execute_queues_cpsch(dqm,
1287 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1288 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1289 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1290 vm_not_acquired:
1291 qpd->evicted = 0;
1292 out:
1293 dqm_unlock(dqm);
1294 return retval;
1295 }
1296
static int register_process(struct device_queue_manager *dqm,
1298 struct qcm_process_device *qpd)
1299 {
1300 struct device_process_node *n;
1301 struct kfd_process_device *pdd;
1302 uint64_t pd_base;
1303 int retval;
1304
1305 n = kzalloc(sizeof(*n), GFP_KERNEL);
1306 if (!n)
1307 return -ENOMEM;
1308
1309 n->qpd = qpd;
1310
1311 pdd = qpd_to_pdd(qpd);
1312 /* Retrieve PD base */
1313 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1314
1315 dqm_lock(dqm);
1316 list_add(&n->list, &dqm->queues);
1317
1318 /* Update PD Base in QPD */
1319 qpd->page_table_base = pd_base;
1320 pr_debug("Updated PD address to 0x%llx\n", pd_base);
1321
1322 retval = dqm->asic_ops.update_qpd(dqm, qpd);
1323
1324 dqm->processes_count++;
1325
1326 dqm_unlock(dqm);
1327
1328 /* Outside the DQM lock because under the DQM lock we can't do
1329 * reclaim or take other locks that others hold while reclaiming.
1330 */
1331 kfd_inc_compute_active(dqm->dev);
1332
1333 return retval;
1334 }
1335
static int unregister_process(struct device_queue_manager *dqm,
1337 struct qcm_process_device *qpd)
1338 {
1339 int retval;
1340 struct device_process_node *cur, *next;
1341
1342 pr_debug("qpd->queues_list is %s\n",
1343 list_empty(&qpd->queues_list) ? "empty" : "not empty");
1344
1345 retval = 0;
1346 dqm_lock(dqm);
1347
1348 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1349 if (qpd == cur->qpd) {
1350 list_del(&cur->list);
1351 kfree(cur);
1352 dqm->processes_count--;
1353 goto out;
1354 }
1355 }
1356 /* qpd not found in dqm list */
1357 retval = 1;
1358 out:
1359 dqm_unlock(dqm);
1360
1361 /* Outside the DQM lock because under the DQM lock we can't do
1362 * reclaim or take other locks that others hold while reclaiming.
1363 */
1364 if (!retval)
1365 kfd_dec_compute_active(dqm->dev);
1366
1367 return retval;
1368 }
1369
1370 static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
1372 unsigned int vmid)
1373 {
1374 uint32_t xcc_mask = dqm->dev->xcc_mask;
1375 int xcc_id, ret;
1376
1377 for_each_inst(xcc_id, xcc_mask) {
1378 ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
1379 dqm->dev->adev, pasid, vmid, xcc_id);
1380 if (ret)
1381 break;
1382 }
1383
1384 return ret;
1385 }
1386
static void init_interrupts(struct device_queue_manager *dqm)
1388 {
1389 uint32_t xcc_mask = dqm->dev->xcc_mask;
1390 unsigned int i, xcc_id;
1391
1392 for_each_inst(xcc_id, xcc_mask) {
1393 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
1394 if (is_pipe_enabled(dqm, 0, i)) {
1395 dqm->dev->kfd2kgd->init_interrupts(
1396 dqm->dev->adev, i, xcc_id);
1397 }
1398 }
1399 }
1400 }
1401
static int initialize_nocpsch(struct device_queue_manager *dqm)
1403 {
1404 int pipe, queue;
1405
1406 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1407
1408 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1409 sizeof(unsigned int), GFP_KERNEL);
1410 if (!dqm->allocated_queues)
1411 return -ENOMEM;
1412
1413 mutex_init(&dqm->lock_hidden);
1414 INIT_LIST_HEAD(&dqm->queues);
1415 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
1416 dqm->active_cp_queue_count = 0;
1417 dqm->gws_queue_count = 0;
1418
1419 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1420 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1421
1422 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1423 if (test_bit(pipe_offset + queue,
1424 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
1425 dqm->allocated_queues[pipe] |= 1 << queue;
1426 }
1427
1428 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1429
1430 init_sdma_bitmaps(dqm);
1431
1432 return 0;
1433 }
1434
static void uninitialize(struct device_queue_manager *dqm)
1436 {
1437 int i;
1438
1439 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
1440
1441 kfree(dqm->allocated_queues);
1442 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
1443 kfree(dqm->mqd_mgrs[i]);
1444 mutex_destroy(&dqm->lock_hidden);
1445 }
1446
static int start_nocpsch(struct device_queue_manager *dqm)
1448 {
1449 int r = 0;
1450
1451 pr_info("SW scheduler is used");
1452 init_interrupts(dqm);
1453
1454 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1455 r = pm_init(&dqm->packet_mgr, dqm);
1456 if (!r)
1457 dqm->sched_running = true;
1458
1459 return r;
1460 }
1461
static int stop_nocpsch(struct device_queue_manager *dqm)
1463 {
1464 dqm_lock(dqm);
1465 if (!dqm->sched_running) {
1466 dqm_unlock(dqm);
1467 return 0;
1468 }
1469
1470 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1471 pm_uninit(&dqm->packet_mgr, false);
1472 dqm->sched_running = false;
1473 dqm_unlock(dqm);
1474
1475 return 0;
1476 }
1477
static void pre_reset(struct device_queue_manager *dqm)
1479 {
1480 dqm_lock(dqm);
1481 dqm->is_resetting = true;
1482 dqm_unlock(dqm);
1483 }
1484
static int allocate_sdma_queue(struct device_queue_manager *dqm,
1486 struct queue *q, const uint32_t *restore_sdma_id)
1487 {
1488 struct device *dev = dqm->dev->adev->dev;
1489 int bit;
1490
1491 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1492 if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
1493 dev_err(dev, "No more SDMA queue to allocate\n");
1494 return -ENOMEM;
1495 }
1496
1497 if (restore_sdma_id) {
1498 /* Re-use existing sdma_id */
1499 if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
1500 dev_err(dev, "SDMA queue already in use\n");
1501 return -EBUSY;
1502 }
1503 clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
1504 q->sdma_id = *restore_sdma_id;
1505 } else {
1506 /* Find first available sdma_id */
1507 bit = find_first_bit(dqm->sdma_bitmap,
1508 get_num_sdma_queues(dqm));
1509 clear_bit(bit, dqm->sdma_bitmap);
1510 q->sdma_id = bit;
1511 }
1512
1513 q->properties.sdma_engine_id =
1514 q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
1515 q->properties.sdma_queue_id = q->sdma_id /
1516 kfd_get_num_sdma_engines(dqm->dev);
1517 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1518 if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
1519 dev_err(dev, "No more XGMI SDMA queue to allocate\n");
1520 return -ENOMEM;
1521 }
1522 if (restore_sdma_id) {
1523 /* Re-use existing sdma_id */
1524 if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
1525 dev_err(dev, "SDMA queue already in use\n");
1526 return -EBUSY;
1527 }
1528 clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
1529 q->sdma_id = *restore_sdma_id;
1530 } else {
1531 bit = find_first_bit(dqm->xgmi_sdma_bitmap,
1532 get_num_xgmi_sdma_queues(dqm));
1533 clear_bit(bit, dqm->xgmi_sdma_bitmap);
1534 q->sdma_id = bit;
1535 }
/* sdma_engine_id is a global engine id that counts both
* PCIe-optimized SDMAs and XGMI-optimized SDMAs. The
* calculation below assumes the first N engines are always
* the PCIe-optimized ones.
*/
1542 q->properties.sdma_engine_id =
1543 kfd_get_num_sdma_engines(dqm->dev) +
1544 q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1545 q->properties.sdma_queue_id = q->sdma_id /
1546 kfd_get_num_xgmi_sdma_engines(dqm->dev);
1547 }
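/*
* Illustrative mapping with assumed engine counts: with 2 PCIe-optimized
* engines, sdma_id 5 gives sdma_engine_id = 5 % 2 = 1 and
* sdma_queue_id = 5 / 2 = 2; XGMI queues map the same way, except their
* engine ids start after the PCIe-optimized ones.
*/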
1548
1549 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1550 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1551
1552 return 0;
1553 }
1554
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1556 struct queue *q)
1557 {
1558 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1559 if (q->sdma_id >= get_num_sdma_queues(dqm))
1560 return;
1561 set_bit(q->sdma_id, dqm->sdma_bitmap);
1562 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1563 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1564 return;
1565 set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
1566 }
1567 }
1568
1569 /*
1570 * Device Queue Manager implementation for cp scheduler
1571 */
1572
static int set_sched_resources(struct device_queue_manager *dqm)
1574 {
1575 int i, mec;
1576 struct scheduling_resources res;
1577 struct device *dev = dqm->dev->adev->dev;
1578
1579 res.vmid_mask = dqm->dev->compute_vmid_bitmap;
1580
1581 res.queue_mask = 0;
1582 for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
1583 mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
1584 / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
1585
1586 if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
1587 continue;
1588
1589 /* only acquire queues from the first MEC */
1590 if (mec > 0)
1591 continue;
1592
1593 /* This situation may be hit in the future if a new HW
1594 * generation exposes more than 64 queues. If so, the
1595 * definition of res.queue_mask needs updating
1596 */
1597 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1598 dev_err(dev, "Invalid queue enabled by amdgpu: %d\n", i);
1599 break;
1600 }
1601
1602 res.queue_mask |= 1ull
1603 << amdgpu_queue_mask_bit_to_set_resource_bit(
1604 dqm->dev->adev, i);
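/*
* amdgpu_queue_mask_bit_to_set_resource_bit() translates the linear CP
* queue index into the bit position expected by the HWS SET_RESOURCES
* packet; the 64-bit queue_mask is why only the first 64 queues can be
* exposed (see the WARN_ON above).
*/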
1605 }
1606 res.gws_mask = ~0ull;
1607 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1608
1609 pr_debug("Scheduling resources:\n"
1610 "vmid mask: 0x%8X\n"
1611 "queue mask: 0x%8llX\n",
1612 res.vmid_mask, res.queue_mask);
1613
1614 return pm_send_set_resources(&dqm->packet_mgr, &res);
1615 }
1616
static int initialize_cpsch(struct device_queue_manager *dqm)
1618 {
1619 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1620
1621 mutex_init(&dqm->lock_hidden);
1622 INIT_LIST_HEAD(&dqm->queues);
1623 dqm->active_queue_count = dqm->processes_count = 0;
1624 dqm->active_cp_queue_count = 0;
1625 dqm->gws_queue_count = 0;
1626 dqm->active_runlist = false;
1627 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1628 dqm->trap_debug_vmid = 0;
1629
1630 init_sdma_bitmaps(dqm);
1631
1632 if (dqm->dev->kfd2kgd->get_iq_wait_times)
1633 dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
1634 &dqm->wait_times,
1635 ffs(dqm->dev->xcc_mask) - 1);
1636 return 0;
1637 }
1638
static int start_cpsch(struct device_queue_manager *dqm)
1640 {
1641 struct device *dev = dqm->dev->adev->dev;
1642 int retval;
1643
1644 retval = 0;
1645
1646 dqm_lock(dqm);
1647
1648 if (!dqm->dev->kfd->shared_resources.enable_mes) {
1649 retval = pm_init(&dqm->packet_mgr, dqm);
1650 if (retval)
1651 goto fail_packet_manager_init;
1652
1653 retval = set_sched_resources(dqm);
1654 if (retval)
1655 goto fail_set_sched_resources;
1656 }
1657 pr_debug("Allocating fence memory\n");
1658
1659 /* allocate fence memory on the gart */
1660 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1661 &dqm->fence_mem);
1662
1663 if (retval)
1664 goto fail_allocate_vidmem;
1665
1666 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
1667 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1668
1669 init_interrupts(dqm);
1670
/* clear hang status when the driver tries to start the hw scheduler */
1672 dqm->is_hws_hang = false;
1673 dqm->is_resetting = false;
1674 dqm->sched_running = true;
1675
1676 if (!dqm->dev->kfd->shared_resources.enable_mes)
1677 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1678
1679 /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
1680 if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
1681 (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
1682 uint32_t reg_offset = 0;
1683 uint32_t grace_period = 1;
1684
1685 retval = pm_update_grace_period(&dqm->packet_mgr,
1686 grace_period);
1687 if (retval)
1688 dev_err(dev, "Setting grace timeout failed\n");
1689 else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
1690 /* Update dqm->wait_times maintained in software */
1691 dqm->dev->kfd2kgd->build_grace_period_packet_info(
1692 dqm->dev->adev, dqm->wait_times,
grace_period, &reg_offset,
1694 &dqm->wait_times);
1695 }
1696
1697 dqm_unlock(dqm);
1698
1699 return 0;
1700 fail_allocate_vidmem:
1701 fail_set_sched_resources:
1702 if (!dqm->dev->kfd->shared_resources.enable_mes)
1703 pm_uninit(&dqm->packet_mgr, false);
1704 fail_packet_manager_init:
1705 dqm_unlock(dqm);
1706 return retval;
1707 }
1708
static int stop_cpsch(struct device_queue_manager *dqm)
1710 {
1711 bool hanging;
1712
1713 dqm_lock(dqm);
1714 if (!dqm->sched_running) {
1715 dqm_unlock(dqm);
1716 return 0;
1717 }
1718
1719 if (!dqm->is_hws_hang) {
1720 if (!dqm->dev->kfd->shared_resources.enable_mes)
1721 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
1722 else
1723 remove_all_queues_mes(dqm);
1724 }
1725
1726 hanging = dqm->is_hws_hang || dqm->is_resetting;
1727 dqm->sched_running = false;
1728
1729 if (!dqm->dev->kfd->shared_resources.enable_mes)
1730 pm_release_ib(&dqm->packet_mgr);
1731
1732 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1733 if (!dqm->dev->kfd->shared_resources.enable_mes)
1734 pm_uninit(&dqm->packet_mgr, hanging);
1735 dqm_unlock(dqm);
1736
1737 return 0;
1738 }
1739
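/*
 * Register a kernel queue with the scheduler, mark the process as
 * debugged and re-execute the runlist so the new queue gets mapped.
 */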
1740 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1741 struct kernel_queue *kq,
1742 struct qcm_process_device *qpd)
1743 {
1744 dqm_lock(dqm);
1745 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1746 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1747 dqm->total_queue_count);
1748 dqm_unlock(dqm);
1749 return -EPERM;
1750 }
1751
1752 /*
1753 * Unconditionally increment this counter, regardless of the queue's
1754 * type or whether the queue is active.
1755 */
1756 dqm->total_queue_count++;
1757 pr_debug("Total of %d queues are accountable so far\n",
1758 dqm->total_queue_count);
1759
1760 list_add(&kq->list, &qpd->priv_queue_list);
1761 increment_queue_count(dqm, qpd, kq->queue);
1762 qpd->is_debug = true;
1763 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1764 USE_DEFAULT_GRACE_PERIOD);
1765 dqm_unlock(dqm);
1766
1767 return 0;
1768 }
1769
1770 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1771 struct kernel_queue *kq,
1772 struct qcm_process_device *qpd)
1773 {
1774 dqm_lock(dqm);
1775 list_del(&kq->list);
1776 decrement_queue_count(dqm, qpd, kq->queue);
1777 qpd->is_debug = false;
1778 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
1779 USE_DEFAULT_GRACE_PERIOD);
1780 /*
1781 * Unconditionally decrement this counter, regardless of the queue's
1782 * type.
1783 */
1784 dqm->total_queue_count--;
1785 pr_debug("Total of %d queues are accountable so far\n",
1786 dqm->total_queue_count);
1787 dqm_unlock(dqm);
1788 }
1789
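/*
 * Create a user mode queue under HWS: allocate SDMA and doorbell
 * resources, initialize (or restore, for CRIU) the MQD, and map the
 * queue either through a runlist update or through MES.
 */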
1790 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1791 struct qcm_process_device *qpd,
1792 const struct kfd_criu_queue_priv_data *qd,
1793 const void *restore_mqd, const void *restore_ctl_stack)
1794 {
1795 int retval;
1796 struct mqd_manager *mqd_mgr;
1797
1798 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1799 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1800 dqm->total_queue_count);
1801 retval = -EPERM;
1802 goto out;
1803 }
1804
1805 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1806 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1807 dqm_lock(dqm);
1808 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
1809 dqm_unlock(dqm);
1810 if (retval)
1811 goto out;
1812 }
1813
1814 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
1815 if (retval)
1816 goto out_deallocate_sdma_queue;
1817
1818 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1819 q->properties.type)];
1820
1821 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1822 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1823 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1824 q->properties.tba_addr = qpd->tba_addr;
1825 q->properties.tma_addr = qpd->tma_addr;
1826 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1827 if (!q->mqd_mem_obj) {
1828 retval = -ENOMEM;
1829 goto out_deallocate_doorbell;
1830 }
1831
1832 dqm_lock(dqm);
1833 /*
1834 * Eviction state logic: mark all queues as evicted, even ones
1835 * not currently active. Restoring inactive queues later only
1836 * updates the is_evicted flag but is a no-op otherwise.
1837 */
1838 q->properties.is_evicted = !!qpd->evicted;
1839 q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
1840 kfd_dbg_has_cwsr_workaround(q->device);
1841
1842 if (qd)
1843 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
1844 &q->properties, restore_mqd, restore_ctl_stack,
1845 qd->ctl_stack_size);
1846 else
1847 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1848 &q->gart_mqd_addr, &q->properties);
1849
1850 list_add(&q->list, &qpd->queues_list);
1851 qpd->queue_count++;
1852
1853 if (q->properties.is_active) {
1854 increment_queue_count(dqm, qpd, q);
1855
1856 if (!dqm->dev->kfd->shared_resources.enable_mes)
1857 retval = execute_queues_cpsch(dqm,
1858 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1859 else
1860 retval = add_queue_mes(dqm, q, qpd);
1861 if (retval)
1862 goto cleanup_queue;
1863 }
1864
1865 /*
1866 * Unconditionally increment this counter, regardless of the queue's
1867 * type or whether the queue is active.
1868 */
1869 dqm->total_queue_count++;
1870
1871 pr_debug("Total of %d queues are accountable so far\n",
1872 dqm->total_queue_count);
1873
1874 dqm_unlock(dqm);
1875 return retval;
1876
1877 cleanup_queue:
1878 qpd->queue_count--;
1879 list_del(&q->list);
1880 if (q->properties.is_active)
1881 decrement_queue_count(dqm, qpd, q);
1882 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1883 dqm_unlock(dqm);
1884 out_deallocate_doorbell:
1885 deallocate_doorbell(qpd, q);
1886 out_deallocate_sdma_queue:
1887 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1888 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1889 dqm_lock(dqm);
1890 deallocate_sdma_queue(dqm, q);
1891 dqm_unlock(dqm);
1892 }
1893 out:
1894 return retval;
1895 }
1896
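/*
 * Poll (with scheduling) until the fence written by the CP reaches
 * fence_value or timeout_ms expires. Returns -EIO on a fatal error and
 * -ETIME on timeout.
 */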
1897 int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
1898 uint64_t fence_value,
1899 unsigned int timeout_ms)
1900 {
1901 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1902 struct device *dev = dqm->dev->adev->dev;
1903 uint64_t *fence_addr = dqm->fence_addr;
1904
1905 while (*fence_addr != fence_value) {
1906 /* Fatal err detected, this response won't come */
1907 if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
1908 return -EIO;
1909
1910 if (time_after(jiffies, end_jiffies)) {
1911 dev_err(dev, "qcm fence wait loop timeout expired\n");
1912 /* In HWS case, this is used to halt the driver thread
1913 * in order not to mess up CP states before doing
1914 * scandumps for FW debugging.
1915 */
1916 while (halt_if_hws_hang)
1917 schedule();
1918
1919 return -ETIME;
1920 }
1921 schedule();
1922 }
1923
1924 return 0;
1925 }
1926
1927 /* dqm->lock mutex has to be locked before calling this function */
1928 static int map_queues_cpsch(struct device_queue_manager *dqm)
1929 {
1930 struct device *dev = dqm->dev->adev->dev;
1931 int retval;
1932
1933 if (!dqm->sched_running)
1934 return 0;
1935 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1936 return 0;
1937 if (dqm->active_runlist)
1938 return 0;
1939
1940 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
1941 pr_debug("%s sent runlist\n", __func__);
1942 if (retval) {
1943 dev_err(dev, "failed to execute runlist\n");
1944 return retval;
1945 }
1946 dqm->active_runlist = true;
1947
1948 return retval;
1949 }
1950
1951 /* dqm->lock mutex has to be locked before calling this function */
1952 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1953 enum kfd_unmap_queues_filter filter,
1954 uint32_t filter_param,
1955 uint32_t grace_period,
1956 bool reset)
1957 {
1958 struct device *dev = dqm->dev->adev->dev;
1959 struct mqd_manager *mqd_mgr;
1960 int retval = 0;
1961
1962 if (!dqm->sched_running)
1963 return 0;
1964 if (dqm->is_hws_hang || dqm->is_resetting)
1965 return -EIO;
1966 if (!dqm->active_runlist)
1967 return retval;
1968
1969 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1970 retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
1971 if (retval)
1972 return retval;
1973 }
1974
1975 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
1976 if (retval)
1977 return retval;
1978
1979 *dqm->fence_addr = KFD_FENCE_INIT;
1980 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
1981 KFD_FENCE_COMPLETED);
1982 /* Wait for the fence, with a timeout in case preemption hangs */
1983 retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED,
1984 queue_preemption_timeout_ms);
1985 if (retval) {
1986 dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1987 kfd_hws_hang(dqm);
1988 return retval;
1989 }
1990
1991 /* In the current MEC firmware implementation, if a compute queue
1992 * doesn't respond to the preemption request in time, HIQ will
1993 * abandon the unmap request without returning any timeout error
1994 * to the driver. Instead, MEC firmware will log the doorbell of the
1995 * unresponsive compute queue to the HIQ.MQD.queue_doorbell_id fields.
1996 * To make sure the queue unmap was successful, the driver needs to
1997 * check those fields.
1998 */
1999 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
2000 if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
2001 while (halt_if_hws_hang)
2002 schedule();
2003 kfd_hws_hang(dqm);
2004 return -ETIME;
2005 }
2006
2007 /* We need to reset the grace period value for this device */
2008 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
2009 if (pm_update_grace_period(&dqm->packet_mgr,
2010 USE_DEFAULT_GRACE_PERIOD))
2011 dev_err(dev, "Failed to reset grace period\n");
2012 }
2013
2014 pm_release_ib(&dqm->packet_mgr);
2015 dqm->active_runlist = false;
2016
2017 return retval;
2018 }
2019
2020 /* only for compute queues */
2021 static int reset_queues_cpsch(struct device_queue_manager *dqm,
2022 uint16_t pasid)
2023 {
2024 int retval;
2025
2026 dqm_lock(dqm);
2027
2028 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
2029 pasid, USE_DEFAULT_GRACE_PERIOD, true);
2030
2031 dqm_unlock(dqm);
2032 return retval;
2033 }
2034
2035 /* dqm->lock mutex has to be locked before calling this function */
2036 static int execute_queues_cpsch(struct device_queue_manager *dqm,
2037 enum kfd_unmap_queues_filter filter,
2038 uint32_t filter_param,
2039 uint32_t grace_period)
2040 {
2041 int retval;
2042
2043 if (dqm->is_hws_hang)
2044 return -EIO;
2045 retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
2046 if (retval)
2047 return retval;
2048
2049 return map_queues_cpsch(dqm);
2050 }
2051
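/*
 * If the queue is suspended by the debugger, drop the process and DQM
 * locks and wait until it is resumed before allowing destruction.
 */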
2052 static int wait_on_destroy_queue(struct device_queue_manager *dqm,
2053 struct queue *q)
2054 {
2055 struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
2056 q->process);
2057 int ret = 0;
2058
2059 if (pdd->qpd.is_debug)
2060 return ret;
2061
2062 q->properties.is_being_destroyed = true;
2063
2064 if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
2065 dqm_unlock(dqm);
2066 mutex_unlock(&q->process->mutex);
2067 ret = wait_event_interruptible(dqm->destroy_wait,
2068 !q->properties.is_suspended);
2069
2070 mutex_lock(&q->process->mutex);
2071 dqm_lock(dqm);
2072 }
2073
2074 return ret;
2075 }
2076
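/*
 * Destroy a user mode queue under HWS: read back the SDMA activity
 * counter, release doorbell and SDMA resources, unmap the queue (runlist
 * update or MES) and free the MQD outside the DQM lock.
 */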
2077 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
2078 struct qcm_process_device *qpd,
2079 struct queue *q)
2080 {
2081 int retval;
2082 struct mqd_manager *mqd_mgr;
2083 uint64_t sdma_val = 0;
2084 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
2085 struct device *dev = dqm->dev->adev->dev;
2086
2087 /* Get the SDMA queue stats */
2088 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2089 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2090 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
2091 &sdma_val);
2092 if (retval)
2093 dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
2094 q->properties.queue_id);
2095 }
2096
2097 /* remove queue from list to prevent rescheduling after preemption */
2098 dqm_lock(dqm);
2099
2100 retval = wait_on_destroy_queue(dqm, q);
2101
2102 if (retval) {
2103 dqm_unlock(dqm);
2104 return retval;
2105 }
2106
2107 if (qpd->is_debug) {
2108 /*
2109 * error, we currently do not allow destroying a queue
2110 * of a process that is being debugged
2111 */
2112 retval = -EBUSY;
2113 goto failed_try_destroy_debugged_queue;
2114
2115 }
2116
2117 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2118 q->properties.type)];
2119
2120 deallocate_doorbell(qpd, q);
2121
2122 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2123 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2124 deallocate_sdma_queue(dqm, q);
2125 pdd->sdma_past_activity_counter += sdma_val;
2126 }
2127
2128 list_del(&q->list);
2129 qpd->queue_count--;
2130 if (q->properties.is_active) {
2131 decrement_queue_count(dqm, qpd, q);
2132 if (!dqm->dev->kfd->shared_resources.enable_mes) {
2133 retval = execute_queues_cpsch(dqm,
2134 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
2135 USE_DEFAULT_GRACE_PERIOD);
2136 if (retval == -ETIME)
2137 qpd->reset_wavefronts = true;
2138 } else {
2139 retval = remove_queue_mes(dqm, q, qpd);
2140 }
2141 }
2142
2143 /*
2144 * Unconditionally decrement this counter, regardless of the queue's
2145 * type
2146 */
2147 dqm->total_queue_count--;
2148 pr_debug("Total of %d queues are accountable so far\n",
2149 dqm->total_queue_count);
2150
2151 dqm_unlock(dqm);
2152
2153 /*
2154 * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
2155 * circular locking
2156 */
2157 kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
2158 qpd->pqm->process, q->device,
2159 -1, false, NULL, 0);
2160
2161 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2162
2163 return retval;
2164
2165 failed_try_destroy_debugged_queue:
2166
2167 dqm_unlock(dqm);
2168 return retval;
2169 }
2170
2171 /*
2172 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
2173 * stay in user mode.
2174 */
2175 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
2176 /* APE1 limit is inclusive and 64K aligned. */
2177 #define APE1_LIMIT_ALIGNMENT 0xFFFF
2178
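/*
 * Program the default/alternate cache policy and the APE1 aperture for a
 * process. An aperture size of 0 disables APE1 (base > limit).
 */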
2179 static bool set_cache_memory_policy(struct device_queue_manager *dqm,
2180 struct qcm_process_device *qpd,
2181 enum cache_policy default_policy,
2182 enum cache_policy alternate_policy,
2183 void __user *alternate_aperture_base,
2184 uint64_t alternate_aperture_size)
2185 {
2186 bool retval = true;
2187
2188 if (!dqm->asic_ops.set_cache_memory_policy)
2189 return retval;
2190
2191 dqm_lock(dqm);
2192
2193 if (alternate_aperture_size == 0) {
2194 /* base > limit disables APE1 */
2195 qpd->sh_mem_ape1_base = 1;
2196 qpd->sh_mem_ape1_limit = 0;
2197 } else {
2198 /*
2199 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
2200 * SH_MEM_APE1_BASE[31:0], 0x0000 }
2201 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
2202 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
2203 * Verify that the base and size parameters can be
2204 * represented in this format and convert them.
2205 * Additionally restrict APE1 to user-mode addresses.
2206 */
2207
2208 uint64_t base = (uintptr_t)alternate_aperture_base;
2209 uint64_t limit = base + alternate_aperture_size - 1;
2210
2211 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
2212 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
2213 retval = false;
2214 goto out;
2215 }
2216
2217 qpd->sh_mem_ape1_base = base >> 16;
2218 qpd->sh_mem_ape1_limit = limit >> 16;
2219 }
2220
2221 retval = dqm->asic_ops.set_cache_memory_policy(
2222 dqm,
2223 qpd,
2224 default_policy,
2225 alternate_policy,
2226 alternate_aperture_base,
2227 alternate_aperture_size);
2228
2229 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
2230 program_sh_mem_settings(dqm, qpd);
2231
2232 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
2233 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
2234 qpd->sh_mem_ape1_limit);
2235
2236 out:
2237 dqm_unlock(dqm);
2238 return retval;
2239 }
2240
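/*
 * Process teardown without HWS: destroy all remaining user mode queues
 * and unregister the process from the DQM.
 */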
2241 static int process_termination_nocpsch(struct device_queue_manager *dqm,
2242 struct qcm_process_device *qpd)
2243 {
2244 struct queue *q;
2245 struct device_process_node *cur, *next_dpn;
2246 int retval = 0;
2247 bool found = false;
2248
2249 dqm_lock(dqm);
2250
2251 /* Clear all user mode queues */
2252 while (!list_empty(&qpd->queues_list)) {
2253 struct mqd_manager *mqd_mgr;
2254 int ret;
2255
2256 q = list_first_entry(&qpd->queues_list, struct queue, list);
2257 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2258 q->properties.type)];
2259 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
2260 if (ret)
2261 retval = ret;
2262 dqm_unlock(dqm);
2263 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2264 dqm_lock(dqm);
2265 }
2266
2267 /* Unregister process */
2268 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2269 if (qpd == cur->qpd) {
2270 list_del(&cur->list);
2271 kfree(cur);
2272 dqm->processes_count--;
2273 found = true;
2274 break;
2275 }
2276 }
2277
2278 dqm_unlock(dqm);
2279
2280 /* Outside the DQM lock because under the DQM lock we can't do
2281 * reclaim or take other locks that others hold while reclaiming.
2282 */
2283 if (found)
2284 kfd_dec_compute_active(dqm->dev);
2285
2286 return retval;
2287 }
2288
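/*
 * Copy the control stack of an inactive, CWSR-enabled compute queue to
 * user space and report the used control stack and save area sizes.
 */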
2289 static int get_wave_state(struct device_queue_manager *dqm,
2290 struct queue *q,
2291 void __user *ctl_stack,
2292 u32 *ctl_stack_used_size,
2293 u32 *save_area_used_size)
2294 {
2295 struct mqd_manager *mqd_mgr;
2296
2297 dqm_lock(dqm);
2298
2299 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
2300
2301 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
2302 q->properties.is_active || !q->device->kfd->cwsr_enabled ||
2303 !mqd_mgr->get_wave_state) {
2304 dqm_unlock(dqm);
2305 return -EINVAL;
2306 }
2307
2308 dqm_unlock(dqm);
2309
2310 /*
2311 * get_wave_state is outside the dqm lock to prevent circular locking
2312 * and the queue should be protected against destruction by the process
2313 * lock.
2314 */
2315 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
2316 ctl_stack, ctl_stack_used_size, save_area_used_size);
2317 }
2318
2319 static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
2320 const struct queue *q,
2321 u32 *mqd_size,
2322 u32 *ctl_stack_size)
2323 {
2324 struct mqd_manager *mqd_mgr;
2325 enum KFD_MQD_TYPE mqd_type =
2326 get_mqd_type_from_queue_type(q->properties.type);
2327
2328 dqm_lock(dqm);
2329 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2330 *mqd_size = mqd_mgr->mqd_size;
2331 *ctl_stack_size = 0;
2332
2333 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
2334 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
2335
2336 dqm_unlock(dqm);
2337 }
2338
2339 static int checkpoint_mqd(struct device_queue_manager *dqm,
2340 const struct queue *q,
2341 void *mqd,
2342 void *ctl_stack)
2343 {
2344 struct mqd_manager *mqd_mgr;
2345 int r = 0;
2346 enum KFD_MQD_TYPE mqd_type =
2347 get_mqd_type_from_queue_type(q->properties.type);
2348
2349 dqm_lock(dqm);
2350
2351 if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
2352 r = -EINVAL;
2353 goto dqm_unlock;
2354 }
2355
2356 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2357 if (!mqd_mgr->checkpoint_mqd) {
2358 r = -EOPNOTSUPP;
2359 goto dqm_unlock;
2360 }
2361
2362 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
2363
2364 dqm_unlock:
2365 dqm_unlock(dqm);
2366 return r;
2367 }
2368
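/*
 * Process teardown under HWS: remove kernel and user mode queues,
 * unregister the process, reset wavefronts if preemption failed, and
 * free all MQDs outside the DQM lock.
 */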
2369 static int process_termination_cpsch(struct device_queue_manager *dqm,
2370 struct qcm_process_device *qpd)
2371 {
2372 int retval;
2373 struct queue *q;
2374 struct device *dev = dqm->dev->adev->dev;
2375 struct kernel_queue *kq, *kq_next;
2376 struct mqd_manager *mqd_mgr;
2377 struct device_process_node *cur, *next_dpn;
2378 enum kfd_unmap_queues_filter filter =
2379 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
2380 bool found = false;
2381
2382 retval = 0;
2383
2384 dqm_lock(dqm);
2385
2386 /* Clean all kernel queues */
2387 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
2388 list_del(&kq->list);
2389 decrement_queue_count(dqm, qpd, kq->queue);
2390 qpd->is_debug = false;
2391 dqm->total_queue_count--;
2392 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
2393 }
2394
2395 /* Clear all user mode queues */
2396 list_for_each_entry(q, &qpd->queues_list, list) {
2397 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
2398 deallocate_sdma_queue(dqm, q);
2399 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
2400 deallocate_sdma_queue(dqm, q);
2401
2402 if (q->properties.is_active) {
2403 decrement_queue_count(dqm, qpd, q);
2404
2405 if (dqm->dev->kfd->shared_resources.enable_mes) {
2406 retval = remove_queue_mes(dqm, q, qpd);
2407 if (retval)
2408 dev_err(dev, "Failed to remove queue %d\n",
2409 q->properties.queue_id);
2410 }
2411 }
2412
2413 dqm->total_queue_count--;
2414 }
2415
2416 /* Unregister process */
2417 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2418 if (qpd == cur->qpd) {
2419 list_del(&cur->list);
2420 kfree(cur);
2421 dqm->processes_count--;
2422 found = true;
2423 break;
2424 }
2425 }
2426
2427 if (!dqm->dev->kfd->shared_resources.enable_mes)
2428 retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
2429
2430 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
2431 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
2432 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
2433 qpd->reset_wavefronts = false;
2434 }
2435
2436 /* Lastly, free mqd resources.
2437 * Do free_mqd() after dqm_unlock to avoid circular locking.
2438 */
2439 while (!list_empty(&qpd->queues_list)) {
2440 q = list_first_entry(&qpd->queues_list, struct queue, list);
2441 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2442 q->properties.type)];
2443 list_del(&q->list);
2444 qpd->queue_count--;
2445 dqm_unlock(dqm);
2446 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2447 dqm_lock(dqm);
2448 }
2449 dqm_unlock(dqm);
2450
2451 /* Outside the DQM lock because under the DQM lock we can't do
2452 * reclaim or take other locks that others hold while reclaiming.
2453 */
2454 if (found)
2455 kfd_dec_compute_active(dqm->dev);
2456
2457 return retval;
2458 }
2459
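/* Create one MQD manager per MQD type; clean up on partial failure. */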
2460 static int init_mqd_managers(struct device_queue_manager *dqm)
2461 {
2462 int i, j;
2463 struct device *dev = dqm->dev->adev->dev;
2464 struct mqd_manager *mqd_mgr;
2465
2466 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
2467 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
2468 if (!mqd_mgr) {
2469 dev_err(dev, "mqd manager [%d] initialization failed\n", i);
2470 goto out_free;
2471 }
2472 dqm->mqd_mgrs[i] = mqd_mgr;
2473 }
2474
2475 return 0;
2476
2477 out_free:
2478 for (j = 0; j < i; j++) {
2479 kfree(dqm->mqd_mgrs[j]);
2480 dqm->mqd_mgrs[j] = NULL;
2481 }
2482
2483 return -ENOMEM;
2484 }
2485
2486 /* Allocate one hiq mqd (HWS) and all SDMA mqds in a contiguous chunk */
2487 static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
2488 {
2489 int retval;
2490 struct kfd_node *dev = dqm->dev;
2491 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
2492 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
2493 get_num_all_sdma_engines(dqm) *
2494 dev->kfd->device_info.num_sdma_queues_per_engine +
2495 (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
2496 NUM_XCC(dqm->dev->xcc_mask));
2497
2498 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
2499 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
2500 (void *)&(mem_obj->cpu_ptr), false);
2501
2502 return retval;
2503 }
2504
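/*
 * Allocate and initialize the device queue manager: pick the scheduling
 * policy, fill in the per-policy and per-ASIC ops tables, create the MQD
 * managers and the HIQ/SDMA MQD buffer.
 */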
2505 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
2506 {
2507 struct device_queue_manager *dqm;
2508
2509 pr_debug("Loading device queue manager\n");
2510
2511 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
2512 if (!dqm)
2513 return NULL;
2514
2515 switch (dev->adev->asic_type) {
2516 /* HWS is not available on Hawaii. */
2517 case CHIP_HAWAII:
2518 /* HWS depends on CWSR for timely dequeue. CWSR is not
2519 * available on Tonga.
2520 *
2521 * FIXME: This argument also applies to Kaveri.
2522 */
2523 case CHIP_TONGA:
2524 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
2525 break;
2526 default:
2527 dqm->sched_policy = sched_policy;
2528 break;
2529 }
2530
2531 dqm->dev = dev;
2532 switch (dqm->sched_policy) {
2533 case KFD_SCHED_POLICY_HWS:
2534 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
2535 /* initialize dqm for cp scheduling */
2536 dqm->ops.create_queue = create_queue_cpsch;
2537 dqm->ops.initialize = initialize_cpsch;
2538 dqm->ops.start = start_cpsch;
2539 dqm->ops.stop = stop_cpsch;
2540 dqm->ops.pre_reset = pre_reset;
2541 dqm->ops.destroy_queue = destroy_queue_cpsch;
2542 dqm->ops.update_queue = update_queue;
2543 dqm->ops.register_process = register_process;
2544 dqm->ops.unregister_process = unregister_process;
2545 dqm->ops.uninitialize = uninitialize;
2546 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
2547 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
2548 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2549 dqm->ops.process_termination = process_termination_cpsch;
2550 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
2551 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
2552 dqm->ops.get_wave_state = get_wave_state;
2553 dqm->ops.reset_queues = reset_queues_cpsch;
2554 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2555 dqm->ops.checkpoint_mqd = checkpoint_mqd;
2556 break;
2557 case KFD_SCHED_POLICY_NO_HWS:
2558 /* initialize dqm for no cp scheduling */
2559 dqm->ops.start = start_nocpsch;
2560 dqm->ops.stop = stop_nocpsch;
2561 dqm->ops.pre_reset = pre_reset;
2562 dqm->ops.create_queue = create_queue_nocpsch;
2563 dqm->ops.destroy_queue = destroy_queue_nocpsch;
2564 dqm->ops.update_queue = update_queue;
2565 dqm->ops.register_process = register_process;
2566 dqm->ops.unregister_process = unregister_process;
2567 dqm->ops.initialize = initialize_nocpsch;
2568 dqm->ops.uninitialize = uninitialize;
2569 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2570 dqm->ops.process_termination = process_termination_nocpsch;
2571 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
2572 dqm->ops.restore_process_queues =
2573 restore_process_queues_nocpsch;
2574 dqm->ops.get_wave_state = get_wave_state;
2575 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2576 dqm->ops.checkpoint_mqd = checkpoint_mqd;
2577 break;
2578 default:
2579 dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy);
2580 goto out_free;
2581 }
2582
2583 switch (dev->adev->asic_type) {
2584 case CHIP_KAVERI:
2585 case CHIP_HAWAII:
2586 device_queue_manager_init_cik(&dqm->asic_ops);
2587 break;
2588
2589 case CHIP_CARRIZO:
2590 case CHIP_TONGA:
2591 case CHIP_FIJI:
2592 case CHIP_POLARIS10:
2593 case CHIP_POLARIS11:
2594 case CHIP_POLARIS12:
2595 case CHIP_VEGAM:
2596 device_queue_manager_init_vi(&dqm->asic_ops);
2597 break;
2598
2599 default:
2600 if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
2601 device_queue_manager_init_v11(&dqm->asic_ops);
2602 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
2603 device_queue_manager_init_v10(&dqm->asic_ops);
2604 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
2605 device_queue_manager_init_v9(&dqm->asic_ops);
2606 else {
2607 WARN(1, "Unexpected ASIC family %u",
2608 dev->adev->asic_type);
2609 goto out_free;
2610 }
2611 }
2612
2613 if (init_mqd_managers(dqm))
2614 goto out_free;
2615
2616 if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
2617 dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd trunk buffer\n");
2618 goto out_free;
2619 }
2620
2621 if (!dqm->ops.initialize(dqm)) {
2622 init_waitqueue_head(&dqm->destroy_wait);
2623 return dqm;
2624 }
2625
2626 out_free:
2627 kfree(dqm);
2628 return NULL;
2629 }
2630
2631 static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
2632 struct kfd_mem_obj *mqd)
2633 {
2634 WARN(!mqd, "No hiq sdma mqd trunk to free");
2635
2636 amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
2637 }
2638
2639 void device_queue_manager_uninit(struct device_queue_manager *dqm)
2640 {
2641 dqm->ops.stop(dqm);
2642 dqm->ops.uninitialize(dqm);
2643 if (!dqm->dev->kfd->shared_resources.enable_mes)
2644 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
2645 kfree(dqm);
2646 }
2647
2648 int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
2649 {
2650 struct kfd_process_device *pdd;
2651 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
2652 int ret = 0;
2653
2654 if (!p)
2655 return -EINVAL;
2656 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2657 pdd = kfd_get_process_device_data(dqm->dev, p);
2658 if (pdd)
2659 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2660 kfd_unref_process(p);
2661
2662 return ret;
2663 }
2664
2665 static void kfd_process_hw_exception(struct work_struct *work)
2666 {
2667 struct device_queue_manager *dqm = container_of(work,
2668 struct device_queue_manager, hw_exception_work);
2669 amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
2670 }
2671
2672 int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
2673 struct qcm_process_device *qpd)
2674 {
2675 int r;
2676 struct device *dev = dqm->dev->adev->dev;
2677 int updated_vmid_mask;
2678
2679 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2680 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2681 return -EINVAL;
2682 }
2683
2684 dqm_lock(dqm);
2685
2686 if (dqm->trap_debug_vmid != 0) {
2687 dev_err(dev, "Trap debug id already reserved\n");
2688 r = -EBUSY;
2689 goto out_unlock;
2690 }
2691
2692 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
2693 USE_DEFAULT_GRACE_PERIOD, false);
2694 if (r)
2695 goto out_unlock;
2696
2697 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
2698 updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
2699
2700 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
2701 dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
2702 r = set_sched_resources(dqm);
2703 if (r)
2704 goto out_unlock;
2705
2706 r = map_queues_cpsch(dqm);
2707 if (r)
2708 goto out_unlock;
2709
2710 pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
2711
2712 out_unlock:
2713 dqm_unlock(dqm);
2714 return r;
2715 }
2716
2717 /*
2718 * Releases vmid for the trap debugger
2719 */
2720 int release_debug_trap_vmid(struct device_queue_manager *dqm,
2721 struct qcm_process_device *qpd)
2722 {
2723 struct device *dev = dqm->dev->adev->dev;
2724 int r;
2725 int updated_vmid_mask;
2726 uint32_t trap_debug_vmid;
2727
2728 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2729 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2730 return -EINVAL;
2731 }
2732
2733 dqm_lock(dqm);
2734 trap_debug_vmid = dqm->trap_debug_vmid;
2735 if (dqm->trap_debug_vmid == 0) {
2736 dev_err(dev, "Trap debug id is not reserved\n");
2737 r = -EINVAL;
2738 goto out_unlock;
2739 }
2740
2741 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
2742 USE_DEFAULT_GRACE_PERIOD, false);
2743 if (r)
2744 goto out_unlock;
2745
2746 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
2747 updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
2748
2749 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
2750 dqm->trap_debug_vmid = 0;
2751 r = set_sched_resources(dqm);
2752 if (r)
2753 goto out_unlock;
2754
2755 r = map_queues_cpsch(dqm);
2756 if (r)
2757 goto out_unlock;
2758
2759 pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
2760
2761 out_unlock:
2762 dqm_unlock(dqm);
2763 return r;
2764 }
2765
2766 #define QUEUE_NOT_FOUND -1
2767 /* mark all queue IDs in the array as invalid */
2768 static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
2769 {
2770 int i;
2771
2772 for (i = 0; i < num_queues; i++)
2773 queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
2774 }
2775
2776 /* find queue index in array */
2777 static int q_array_get_index(unsigned int queue_id,
2778 uint32_t num_queues,
2779 uint32_t *queue_ids)
2780 {
2781 int i;
2782
2783 for (i = 0; i < num_queues; i++)
2784 if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
2785 return i;
2786
2787 return QUEUE_NOT_FOUND;
2788 }
2789
2790 struct copy_context_work_handler_workarea {
2791 struct work_struct copy_context_work;
2792 struct kfd_process *p;
2793 };
2794
2795 static void copy_context_work_handler(struct work_struct *work)
2796 {
2797 struct copy_context_work_handler_workarea *workarea;
2798 struct mqd_manager *mqd_mgr;
2799 struct queue *q;
2800 struct mm_struct *mm;
2801 struct kfd_process *p;
2802 uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
2803 int i;
2804
2805 workarea = container_of(work,
2806 struct copy_context_work_handler_workarea,
2807 copy_context_work);
2808
2809 p = workarea->p;
2810 mm = get_task_mm(p->lead_thread);
2811
2812 if (!mm)
2813 return;
2814
2815 kthread_use_mm(mm);
2816 for (i = 0; i < p->n_pdds; i++) {
2817 struct kfd_process_device *pdd = p->pdds[i];
2818 struct device_queue_manager *dqm = pdd->dev->dqm;
2819 struct qcm_process_device *qpd = &pdd->qpd;
2820
2821 list_for_each_entry(q, &qpd->queues_list, list) {
2822 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
2823
2824 /* We ignore the return value from get_wave_state
2825 * because
2826 * i) right now, it always returns 0, and
2827 * ii) if we hit an error, we would continue to the
2828 * next queue anyway.
2829 */
2830 mqd_mgr->get_wave_state(mqd_mgr,
2831 q->mqd,
2832 &q->properties,
2833 (void __user *) q->properties.ctx_save_restore_area_address,
2834 &tmp_ctl_stack_used_size,
2835 &tmp_save_area_used_size);
2836 }
2837 }
2838 kthread_unuse_mm(mm);
2839 mmput(mm);
2840 }
2841
2842 static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
2843 {
2844 size_t array_size = num_queues * sizeof(uint32_t);
2845
2846 if (!usr_queue_id_array)
2847 return NULL;
2848
2849 return memdup_user(usr_queue_id_array, array_size);
2850 }
2851
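/*
 * Resume the given queues (or all queues of the process if no array is
 * given) on every device. Per-queue status is reported back through the
 * user queue ID array. Returns the number of queues resumed.
 */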
2852 int resume_queues(struct kfd_process *p,
2853 uint32_t num_queues,
2854 uint32_t *usr_queue_id_array)
2855 {
2856 uint32_t *queue_ids = NULL;
2857 int total_resumed = 0;
2858 int i;
2859
2860 if (usr_queue_id_array) {
2861 queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
2862
2863 if (IS_ERR(queue_ids))
2864 return PTR_ERR(queue_ids);
2865
2866 /* mask all queues as invalid. unmask per successful request */
2867 q_array_invalidate(num_queues, queue_ids);
2868 }
2869
2870 for (i = 0; i < p->n_pdds; i++) {
2871 struct kfd_process_device *pdd = p->pdds[i];
2872 struct device_queue_manager *dqm = pdd->dev->dqm;
2873 struct device *dev = dqm->dev->adev->dev;
2874 struct qcm_process_device *qpd = &pdd->qpd;
2875 struct queue *q;
2876 int r, per_device_resumed = 0;
2877
2878 dqm_lock(dqm);
2879
2880 /* unmask queues that resume or already resumed as valid */
2881 list_for_each_entry(q, &qpd->queues_list, list) {
2882 int q_idx = QUEUE_NOT_FOUND;
2883
2884 if (queue_ids)
2885 q_idx = q_array_get_index(
2886 q->properties.queue_id,
2887 num_queues,
2888 queue_ids);
2889
2890 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
2891 int err = resume_single_queue(dqm, &pdd->qpd, q);
2892
2893 if (queue_ids) {
2894 if (!err) {
2895 queue_ids[q_idx] &=
2896 ~KFD_DBG_QUEUE_INVALID_MASK;
2897 } else {
2898 queue_ids[q_idx] |=
2899 KFD_DBG_QUEUE_ERROR_MASK;
2900 break;
2901 }
2902 }
2903
2904 if (dqm->dev->kfd->shared_resources.enable_mes) {
2905 wake_up_all(&dqm->destroy_wait);
2906 if (!err)
2907 total_resumed++;
2908 } else {
2909 per_device_resumed++;
2910 }
2911 }
2912 }
2913
2914 if (!per_device_resumed) {
2915 dqm_unlock(dqm);
2916 continue;
2917 }
2918
2919 r = execute_queues_cpsch(dqm,
2920 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
2921 0,
2922 USE_DEFAULT_GRACE_PERIOD);
2923 if (r) {
2924 dev_err(dev, "Failed to resume process queues\n");
2925 if (queue_ids) {
2926 list_for_each_entry(q, &qpd->queues_list, list) {
2927 int q_idx = q_array_get_index(
2928 q->properties.queue_id,
2929 num_queues,
2930 queue_ids);
2931
2932 /* mask queue as error on resume fail */
2933 if (q_idx != QUEUE_NOT_FOUND)
2934 queue_ids[q_idx] |=
2935 KFD_DBG_QUEUE_ERROR_MASK;
2936 }
2937 }
2938 } else {
2939 wake_up_all(&dqm->destroy_wait);
2940 total_resumed += per_device_resumed;
2941 }
2942
2943 dqm_unlock(dqm);
2944 }
2945
2946 if (queue_ids) {
2947 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
2948 num_queues * sizeof(uint32_t)))
2949 pr_err("copy_to_user failed on queue resume\n");
2950
2951 kfree(queue_ids);
2952 }
2953
2954 return total_resumed;
2955 }
2956
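/*
 * Suspend the given queues on every device using the requested grace
 * period, optionally clearing exception status, then copy the saved wave
 * context back to user space. Returns the number of queues suspended.
 */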
2957 int suspend_queues(struct kfd_process *p,
2958 uint32_t num_queues,
2959 uint32_t grace_period,
2960 uint64_t exception_clear_mask,
2961 uint32_t *usr_queue_id_array)
2962 {
2963 uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
2964 int total_suspended = 0;
2965 int i;
2966
2967 if (IS_ERR(queue_ids))
2968 return PTR_ERR(queue_ids);
2969
2970 /* mask all queues as invalid. unmask on successful request */
2971 q_array_invalidate(num_queues, queue_ids);
2972
2973 for (i = 0; i < p->n_pdds; i++) {
2974 struct kfd_process_device *pdd = p->pdds[i];
2975 struct device_queue_manager *dqm = pdd->dev->dqm;
2976 struct device *dev = dqm->dev->adev->dev;
2977 struct qcm_process_device *qpd = &pdd->qpd;
2978 struct queue *q;
2979 int r, per_device_suspended = 0;
2980
2981 mutex_lock(&p->event_mutex);
2982 dqm_lock(dqm);
2983
2984 /* unmask queues that suspend or already suspended */
2985 list_for_each_entry(q, &qpd->queues_list, list) {
2986 int q_idx = q_array_get_index(q->properties.queue_id,
2987 num_queues,
2988 queue_ids);
2989
2990 if (q_idx != QUEUE_NOT_FOUND) {
2991 int err = suspend_single_queue(dqm, pdd, q);
2992 bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
2993
2994 if (!err) {
2995 queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
2996 if (exception_clear_mask && is_mes)
2997 q->properties.exception_status &=
2998 ~exception_clear_mask;
2999
3000 if (is_mes)
3001 total_suspended++;
3002 else
3003 per_device_suspended++;
3004 } else if (err != -EBUSY) {
3005 r = err;
3006 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
3007 break;
3008 }
3009 }
3010 }
3011
3012 if (!per_device_suspended) {
3013 dqm_unlock(dqm);
3014 mutex_unlock(&p->event_mutex);
3015 if (total_suspended)
3016 amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
3017 continue;
3018 }
3019
3020 r = execute_queues_cpsch(dqm,
3021 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
3022 grace_period);
3023
3024 if (r)
3025 dev_err(dev, "Failed to suspend process queues.\n");
3026 else
3027 total_suspended += per_device_suspended;
3028
3029 list_for_each_entry(q, &qpd->queues_list, list) {
3030 int q_idx = q_array_get_index(q->properties.queue_id,
3031 num_queues, queue_ids);
3032
3033 if (q_idx == QUEUE_NOT_FOUND)
3034 continue;
3035
3036 /* mask queue as error on suspend fail */
3037 if (r)
3038 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
3039 else if (exception_clear_mask)
3040 q->properties.exception_status &=
3041 ~exception_clear_mask;
3042 }
3043
3044 dqm_unlock(dqm);
3045 mutex_unlock(&p->event_mutex);
3046 amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
3047 }
3048
3049 if (total_suspended) {
3050 struct copy_context_work_handler_workarea copy_context_worker;
3051
3052 INIT_WORK_ONSTACK(
3053 &copy_context_worker.copy_context_work,
3054 copy_context_work_handler);
3055
3056 copy_context_worker.p = p;
3057
3058 schedule_work(&copy_context_worker.copy_context_work);
3059
3060
3061 flush_work(&copy_context_worker.copy_context_work);
3062 destroy_work_on_stack(&copy_context_worker.copy_context_work);
3063 }
3064
3065 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
3066 num_queues * sizeof(uint32_t)))
3067 pr_err("copy_to_user failed on queue suspend\n");
3068
3069 kfree(queue_ids);
3070
3071 return total_suspended;
3072 }
3073
3074 static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
3075 {
3076 switch (q_props->type) {
3077 case KFD_QUEUE_TYPE_COMPUTE:
3078 return q_props->format == KFD_QUEUE_FORMAT_PM4
3079 ? KFD_IOC_QUEUE_TYPE_COMPUTE
3080 : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
3081 case KFD_QUEUE_TYPE_SDMA:
3082 return KFD_IOC_QUEUE_TYPE_SDMA;
3083 case KFD_QUEUE_TYPE_SDMA_XGMI:
3084 return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
3085 default:
3086 WARN_ONCE(true, "queue type not recognized!");
3087 return 0xffffffff;
3088 };
3089 }
3090
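/* Fill one debugger snapshot entry from the queue properties. */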
3091 void set_queue_snapshot_entry(struct queue *q,
3092 uint64_t exception_clear_mask,
3093 struct kfd_queue_snapshot_entry *qss_entry)
3094 {
3095 qss_entry->ring_base_address = q->properties.queue_address;
3096 qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
3097 qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
3098 qss_entry->ctx_save_restore_address =
3099 q->properties.ctx_save_restore_area_address;
3100 qss_entry->ctx_save_restore_area_size =
3101 q->properties.ctx_save_restore_area_size;
3102 qss_entry->exception_status = q->properties.exception_status;
3103 qss_entry->queue_id = q->properties.queue_id;
3104 qss_entry->gpu_id = q->device->id;
3105 qss_entry->ring_size = (uint32_t)q->properties.queue_size;
3106 qss_entry->queue_type = set_queue_type_for_user(&q->properties);
3107 q->properties.exception_status &= ~exception_clear_mask;
3108 }
3109
3110 int debug_lock_and_unmap(struct device_queue_manager *dqm)
3111 {
3112 struct device *dev = dqm->dev->adev->dev;
3113 int r;
3114
3115 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
3116 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
3117 return -EINVAL;
3118 }
3119
3120 if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
3121 return 0;
3122
3123 dqm_lock(dqm);
3124
3125 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
3126 if (r)
3127 dqm_unlock(dqm);
3128
3129 return r;
3130 }
3131
3132 int debug_map_and_unlock(struct device_queue_manager *dqm)
3133 {
3134 struct device *dev = dqm->dev->adev->dev;
3135 int r;
3136
3137 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
3138 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
3139 return -EINVAL;
3140 }
3141
3142 if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
3143 return 0;
3144
3145 r = map_queues_cpsch(dqm);
3146
3147 dqm_unlock(dqm);
3148
3149 return r;
3150 }
3151
3152 int debug_refresh_runlist(struct device_queue_manager *dqm)
3153 {
3154 int r = debug_lock_and_unmap(dqm);
3155
3156 if (r)
3157 return r;
3158
3159 return debug_map_and_unlock(dqm);
3160 }
3161
3162 #if defined(CONFIG_DEBUG_FS)
3163
3164 static void seq_reg_dump(struct seq_file *m,
3165 uint32_t (*dump)[2], uint32_t n_regs)
3166 {
3167 uint32_t i, count;
3168
3169 for (i = 0, count = 0; i < n_regs; i++) {
3170 if (count == 0 ||
3171 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
3172 seq_printf(m, "%s %08x: %08x",
3173 i ? "\n" : "",
3174 dump[i][0], dump[i][1]);
3175 count = 7;
3176 } else {
3177 seq_printf(m, " %08x", dump[i][1]);
3178 count--;
3179 }
3180 }
3181
3182 seq_puts(m, "\n");
3183 }
3184
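/* Dump HIQ, CP and SDMA HQD registers for this node to debugfs. */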
3185 int dqm_debugfs_hqds(struct seq_file *m, void *data)
3186 {
3187 struct device_queue_manager *dqm = data;
3188 uint32_t xcc_mask = dqm->dev->xcc_mask;
3189 uint32_t (*dump)[2], n_regs;
3190 int pipe, queue;
3191 int r = 0, xcc_id;
3192 uint32_t sdma_engine_start;
3193
3194 if (!dqm->sched_running) {
3195 seq_puts(m, " Device is stopped\n");
3196 return 0;
3197 }
3198
3199 for_each_inst(xcc_id, xcc_mask) {
3200 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
3201 KFD_CIK_HIQ_PIPE,
3202 KFD_CIK_HIQ_QUEUE, &dump,
3203 &n_regs, xcc_id);
3204 if (!r) {
3205 seq_printf(
3206 m,
3207 " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
3208 xcc_id,
3209 KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
3210 KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
3211 KFD_CIK_HIQ_QUEUE);
3212 seq_reg_dump(m, dump, n_regs);
3213
3214 kfree(dump);
3215 }
3216
3217 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
3218 int pipe_offset = pipe * get_queues_per_pipe(dqm);
3219
3220 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
3221 if (!test_bit(pipe_offset + queue,
3222 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
3223 continue;
3224
3225 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
3226 pipe, queue,
3227 &dump, &n_regs,
3228 xcc_id);
3229 if (r)
3230 break;
3231
3232 seq_printf(m,
3233 " Inst %d, CP Pipe %d, Queue %d\n",
3234 xcc_id, pipe, queue);
3235 seq_reg_dump(m, dump, n_regs);
3236
3237 kfree(dump);
3238 }
3239 }
3240 }
3241
3242 sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
3243 for (pipe = sdma_engine_start;
3244 pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
3245 pipe++) {
3246 for (queue = 0;
3247 queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
3248 queue++) {
3249 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
3250 dqm->dev->adev, pipe, queue, &dump, &n_regs);
3251 if (r)
3252 break;
3253
3254 seq_printf(m, " SDMA Engine %d, RLC %d\n",
3255 pipe, queue);
3256 seq_reg_dump(m, dump, n_regs);
3257
3258 kfree(dump);
3259 }
3260 }
3261
3262 return r;
3263 }
3264
3265 int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
3266 {
3267 int r = 0;
3268
3269 dqm_lock(dqm);
3270 r = pm_debugfs_hang_hws(&dqm->packet_mgr);
3271 if (r) {
3272 dqm_unlock(dqm);
3273 return r;
3274 }
3275 dqm->active_runlist = true;
3276 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
3277 0, USE_DEFAULT_GRACE_PERIOD);
3278 dqm_unlock(dqm);
3279
3280 return r;
3281 }
3282
3283 #endif
3284