Cross-reference matches for "dqm" under /linux/drivers/gpu/drm/amd/amdkfd/:

kfd_device_queue_manager.c
   631  dqm->asic_ops.init_sdma_vm(dqm, q, qpd);   in create_queue_nocpsch()
  1510  r = pm_init(&dqm->packet_mgr, dqm);   in start_nocpsch()
  1786  retval = pm_init(&dqm->packet_mgr, dqm);   in start_cpsch()
  1827  dqm->dev->adev, dqm->wait_times,   in start_cpsch()
  1849  kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);   in start_cpsch()
  1877  kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);   in stop_cpsch()
  1971  dqm->asic_ops.init_sdma_vm(dqm, q, qpd);   in create_queue_cpsch()
  2796  dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);   in device_queue_manager_init()
  2908  if (!dqm->ops.initialize(dqm)) {   in device_queue_manager_init()
  2928  dqm->ops.stop(dqm);   in device_queue_manager_uninit()
  [all …]

kfd_device_queue_manager.h
   138  int (*create_queue)(struct device_queue_manager *dqm,
   158  int (*initialize)(struct device_queue_manager *dqm);
   159  int (*start)(struct device_queue_manager *dqm);
   160  int (*stop)(struct device_queue_manager *dqm);
   162  int (*halt)(struct device_queue_manager *dqm);
   163  int (*unhalt)(struct device_queue_manager *dqm);
   206  int (*update_qpd)(struct device_queue_manager *dqm,
   348  mutex_lock(&dqm->lock_hidden);   in dqm_lock()
   349  dqm->saved_flags = memalloc_noreclaim_save();   in dqm_lock()
   353  memalloc_noreclaim_restore(dqm->saved_flags);   in dqm_unlock()
  [all …]

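The header hits above outline the ops table that each dqm backend fills in (initialize, start, stop, halt, unhalt, create_queue, update_qpd, ...) and a dqm_lock()/dqm_unlock() pair that takes a hidden mutex bracketed by memalloc_noreclaim_save()/restore(). Below is a minimal user-space sketch of that dispatch pattern only, not the kernel code: the my_dqm, my_dqm_ops and cpsch_* names are invented for illustration, and a plain pthread mutex stands in for lock_hidden and the noreclaim bracketing.

/* Hypothetical stand-alone model of the dqm ops-table pattern; not kernel code. */
#include <stdio.h>
#include <pthread.h>

struct my_dqm;                                  /* stand-in for device_queue_manager */

struct my_dqm_ops {                             /* stand-in for the ops table in the header */
	int (*initialize)(struct my_dqm *dqm);
	int (*start)(struct my_dqm *dqm);
	int (*stop)(struct my_dqm *dqm);
};

struct my_dqm {
	const struct my_dqm_ops *ops;
	pthread_mutex_t lock;                   /* models dqm->lock_hidden */
	int started;
};

/* models dqm_lock()/dqm_unlock(); the real helpers additionally toggle
 * memalloc_noreclaim_save()/restore() around the critical section */
static void my_dqm_lock(struct my_dqm *dqm)   { pthread_mutex_lock(&dqm->lock); }
static void my_dqm_unlock(struct my_dqm *dqm) { pthread_mutex_unlock(&dqm->lock); }

static int cpsch_initialize(struct my_dqm *dqm) { dqm->started = 0; return 0; }

static int cpsch_start(struct my_dqm *dqm)
{
	my_dqm_lock(dqm);
	dqm->started = 1;
	my_dqm_unlock(dqm);
	return 0;
}

static int cpsch_stop(struct my_dqm *dqm)
{
	my_dqm_lock(dqm);
	dqm->started = 0;
	my_dqm_unlock(dqm);
	return 0;
}

static const struct my_dqm_ops cpsch_ops = {
	.initialize = cpsch_initialize,
	.start      = cpsch_start,
	.stop       = cpsch_stop,
};

int main(void)
{
	struct my_dqm dqm = { .ops = &cpsch_ops };

	pthread_mutex_init(&dqm.lock, NULL);

	/* callers go through the table, e.g. node->dqm->ops.start(node->dqm) */
	if (dqm.ops->initialize(&dqm) || dqm.ops->start(&dqm))
		return 1;
	printf("started=%d\n", dqm.started);
	return dqm.ops->stop(&dqm);
}

The real table also carries the queue-management entry points (update_queue, destroy_queue, evict_process_queues, ...) that the other files in this listing invoke through dev->dqm->ops.
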
kfd_packet_manager.c
    48  struct kfd_node *node = pm->dqm->dev;   in pm_calc_rlib_size()
    94  struct kfd_node *node = pm->dqm->dev;   in pm_allocate_runlist_ib()
   130  struct kfd_node *node = pm->dqm->dev;   in pm_create_runlist_ib()
   150  pm->dqm->processes_count, pm->dqm->active_queue_count);   in pm_create_runlist_ib()
   235  switch (dqm->dev->adev->asic_type) {   in pm_init()
   257  dqm->dev->adev->asic_type);   in pm_init()
   262  pm->dqm = dqm;   in pm_init()
   284  struct kfd_node *node = pm->dqm->dev;   in pm_send_set_resources()
   357  struct kfd_node *node = pm->dqm->dev;   in pm_send_query_status()
   388  struct kfd_node *node = pm->dqm->dev;   in pm_update_grace_period()
  [all …]

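The pm_init() hits show the packet manager being tied to its device: it switches on dqm->dev->adev->asic_type to pick the packet-format helpers and then remembers its owning queue manager (pm->dqm = dqm). A small standalone sketch of that select-a-function-table-per-ASIC idea, with invented names (my_pm, my_pm_funcs, ASIC_VI/ASIC_V9) rather than the real enums and tables:

/* Hypothetical sketch: choose a per-ASIC function table at init time. */
#include <stdio.h>

enum my_asic { ASIC_VI, ASIC_V9, ASIC_UNKNOWN };

struct my_pm_funcs {
	const char *name;
	int (*runlist)(void);
};

static int runlist_vi(void) { return 0; }
static int runlist_v9(void) { return 0; }

static const struct my_pm_funcs pm_funcs_vi = { "vi", runlist_vi };
static const struct my_pm_funcs pm_funcs_v9 = { "v9", runlist_v9 };

struct my_pm {
	const struct my_pm_funcs *funcs;
};

static int my_pm_init(struct my_pm *pm, enum my_asic asic)
{
	switch (asic) {                 /* models the switch on asic_type in pm_init() */
	case ASIC_VI: pm->funcs = &pm_funcs_vi; break;
	case ASIC_V9: pm->funcs = &pm_funcs_v9; break;
	default:
		fprintf(stderr, "unsupported ASIC %d\n", asic);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct my_pm pm;

	if (my_pm_init(&pm, ASIC_V9))
		return 1;
	printf("packet funcs: %s\n", pm.funcs->name);
	return pm.funcs->runlist();
}
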
kfd_device_queue_manager_v9.c
    29  static int update_qpd_v9(struct device_queue_manager *dqm,
    31  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
    51  static int update_qpd_v9(struct device_queue_manager *dqm,   in update_qpd_v9() argument
    63  if (dqm->dev->kfd->noretry)   in update_qpd_v9()
    66  if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||   in update_qpd_v9()
    67  KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))   in update_qpd_v9()
    75  if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {   in update_qpd_v9()
    90  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v9() argument

kfd_process_queue_manager.c
    90  dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);   in kfd_process_dequeue_from_device()
   159  return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,   in pqm_set_gws()
   359  dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);   in pqm_create_queue()
   420  retval = dev->dqm->ops.create_kernel_queue(dev->dqm,   in pqm_create_queue()
   472  dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);   in pqm_create_queue()
   484  dqm = NULL;   in pqm_destroy_queue()
   510  dqm = pqn->kq->dev->dqm;   in pqm_destroy_queue()
   511  dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);   in pqm_destroy_queue()
   520  dqm = pqn->q->device->dqm;   in pqm_destroy_queue()
   521  retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);   in pqm_destroy_queue()
  [all …]

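These hits show the per-process queue manager delegating to the per-device dqm ops, and pqm_destroy_queue() choosing destroy_kernel_queue or destroy_queue depending on whether the node wraps a kernel queue (pqn->kq) or a user queue (pqn->q). A toy sketch of that branching, with simplified types (my_pqn, my_dqm) standing in for process_queue_node and device_queue_manager:

/* Hypothetical sketch of the destroy-queue routing; types are simplified. */
#include <stdio.h>

struct my_dqm { const char *name; };

static int destroy_kernel_queue(struct my_dqm *dqm) { printf("%s: kq gone\n", dqm->name); return 0; }
static int destroy_user_queue(struct my_dqm *dqm)   { printf("%s: q gone\n", dqm->name);  return 0; }

struct my_pqn {                     /* models process_queue_node */
	struct my_dqm *kq_dqm;      /* set if this node wraps a kernel queue */
	struct my_dqm *q_dqm;       /* set if this node wraps a user queue */
};

static int my_pqm_destroy_queue(struct my_pqn *pqn)
{
	if (pqn->kq_dqm)
		return destroy_kernel_queue(pqn->kq_dqm);
	if (pqn->q_dqm)
		return destroy_user_queue(pqn->q_dqm);
	return -1;                  /* node owns no queue */
}

int main(void)
{
	struct my_dqm dqm = { "dqm0" };
	struct my_pqn user_node = { .q_dqm = &dqm };

	return my_pqm_destroy_queue(&user_node);
}
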
kfd_device_queue_manager_vi.c
    30  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
    36  static int update_qpd_vi(struct device_queue_manager *dqm,
    38  static void init_sdma_vm(struct device_queue_manager *dqm,
    79  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,   in set_cache_memory_policy_vi() argument
   106  static int update_qpd_vi(struct device_queue_manager *dqm,   in update_qpd_vi() argument
   140  static void init_sdma_vm(struct device_queue_manager *dqm,   in init_sdma_vm() argument

kfd_mqd_manager.c
    57  mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;   in allocate_hiq_mqd()
    58  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;   in allocate_hiq_mqd()
    59  mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;   in allocate_hiq_mqd()
    77  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;   in allocate_sdma_mqd()
    79  offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *   in allocate_sdma_mqd()
    84  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;   in allocate_sdma_mqd()
    86  dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);   in allocate_sdma_mqd()
   273  return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;   in kfd_hiq_mqd_stride()
   284  dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;   in kfd_get_hiq_xcc_mqd()
   285  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;   in kfd_get_hiq_xcc_mqd()
  [all …]

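The allocate_hiq_mqd()/allocate_sdma_mqd() hits suggest that HIQ and SDMA MQDs are carved by offset out of a single shared allocation kept in dqm->hiq_sdma_mqd, with SDMA slots placed after the HIQ slot(s). A rough standalone sketch of that offset arithmetic; the sizes, slot counts and names (HIQ_MQD_SIZE, get_sdma_slot) are made up for illustration:

/* Hypothetical sketch: sub-allocate fixed-size MQD slots from one shared buffer. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define HIQ_MQD_SIZE   512u      /* assumed sizes, not the real mqd_size values */
#define SDMA_MQD_SIZE  256u
#define NUM_HIQ_SLOTS  1u

struct mqd_slot {
	uint64_t gpu_addr;           /* base + offset, as in the kernel snippets */
	void    *cpu_ptr;
};

static struct mqd_slot get_sdma_slot(uint64_t base_gpu, void *base_cpu,
				     unsigned int sdma_idx)
{
	/* SDMA slots live after the HIQ slot(s) in the shared buffer */
	size_t off = NUM_HIQ_SLOTS * HIQ_MQD_SIZE + (size_t)sdma_idx * SDMA_MQD_SIZE;
	struct mqd_slot s = { base_gpu + off, (char *)base_cpu + off };

	return s;
}

int main(void)
{
	void *buf = calloc(1, NUM_HIQ_SLOTS * HIQ_MQD_SIZE + 8 * SDMA_MQD_SIZE);
	struct mqd_slot s = get_sdma_slot(0x100000, buf, 3);

	printf("sdma mqd 3 at gpu 0x%llx\n", (unsigned long long)s.gpu_addr);
	free(buf);
	return 0;
}
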
kfd_device_queue_manager_cik.c
    30  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
    36  static int update_qpd_cik(struct device_queue_manager *dqm,
    38  static void init_sdma_vm(struct device_queue_manager *dqm,
    78  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,   in set_cache_memory_policy_cik() argument
   104  static int update_qpd_cik(struct device_queue_manager *dqm,   in update_qpd_cik() argument
   134  static void init_sdma_vm(struct device_queue_manager *dqm,   in init_sdma_vm() argument

kfd_device_queue_manager_v11.c
    29  static int update_qpd_v11(struct device_queue_manager *dqm,
    31  static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
    51  static int update_qpd_v11(struct device_queue_manager *dqm,   in update_qpd_v11() argument
    76  static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v11() argument

kfd_device_queue_manager_v10.c
    30  static int update_qpd_v10(struct device_queue_manager *dqm,
    32  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
    52  static int update_qpd_v10(struct device_queue_manager *dqm,   in update_qpd_v10() argument
    76  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v10() argument

kfd_device_queue_manager_v12.c
    29  static int update_qpd_v12(struct device_queue_manager *dqm,
    31  static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
    51  static int update_qpd_v12(struct device_queue_manager *dqm,   in update_qpd_v12() argument
    76  static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,   in init_sdma_vm_v12() argument

kfd_packet_manager_v9.c
    37  struct kfd_node *kfd = pm->dqm->dev;   in pm_map_process_v9()
    60  packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;   in pm_map_process_v9()
    94  struct kfd_dev *kfd = pm->dqm->dev->kfd;   in pm_map_process_aldebaran()
    95  struct kfd_node *knode = pm->dqm->dev;   in pm_map_process_aldebaran()
   153  struct kfd_node *kfd = pm->dqm->dev;   in pm_runlist_v9()
   169  1 : min(pm->dqm->processes_count,   in pm_runlist_v9()
   308  pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(   in pm_set_grace_period_v9()
   309  pm->dqm->dev->adev,   in pm_set_grace_period_v9()
   310  pm->dqm->wait_times,   in pm_set_grace_period_v9()
   316  reg_data = pm->dqm->wait_times;   in pm_set_grace_period_v9()
  [all …]

kfd_debug.c
   241  kfd_dqm_evict_pasid(dev->dqm, p->pasid);   in kfd_set_dbg_ev_from_interrupt()
   316  err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);   in kfd_dbg_set_queue_workaround()
   422  r = debug_lock_and_unmap(pdd->dev->dqm);   in kfd_dbg_trap_clear_dev_address_watch()
   434  r = debug_map_and_unlock(pdd->dev->dqm);   in kfd_dbg_trap_clear_dev_address_watch()
   456  r = debug_lock_and_unmap(pdd->dev->dqm);   in kfd_dbg_trap_set_dev_address_watch()
   476  r = debug_map_and_unlock(pdd->dev->dqm);   in kfd_dbg_trap_set_dev_address_watch()
   528  r = debug_refresh_runlist(pdd->dev->dqm);   in kfd_dbg_trap_set_flags()
   551  debug_refresh_runlist(pdd->dev->dqm);   in kfd_dbg_trap_set_flags()
   613  debug_refresh_runlist(pdd->dev->dqm);   in kfd_dbg_trap_deactivate()
   730  r = debug_refresh_runlist(pdd->dev->dqm);   in kfd_dbg_trap_activate()
  [all …]

kfd_device.c
   593  node->dqm = device_queue_manager_init(node);   in kfd_init_node()
   594  if (!node->dqm) {   in kfd_init_node()
   620  device_queue_manager_uninit(node->dqm);   in kfd_init_node()
   639  device_queue_manager_uninit(knode->dqm);   in kfd_cleanup_nodes()
   900  node->dqm->sched_policy);   in kgd2kfd_device_init()
  1016  node->dqm->ops.stop(node->dqm);   in kgd2kfd_suspend()
  1049  err = node->dqm->ops.start(node->dqm);   in kfd_resume()
  1464  ret = node->dqm->ops.unhalt(node->dqm);   in kgd2kfd_start_sched()
  1485  return node->dqm->ops.halt(node->dqm);   in kgd2kfd_stop_sched()
  1495  if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {   in kfd_debugfs_hang_hws()
  [all …]

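The kfd_device.c hits trace the dqm lifecycle: device_queue_manager_init() at node init (with uninit on the error path and at cleanup), dqm->ops.stop() on suspend, dqm->ops.start() on resume, and halt/unhalt for the scheduler start/stop entry points. A compact sketch of that flow under invented names (my_node, my_dqm_init, my_dqm_start/stop), not the real kgd2kfd entry points:

/* Hypothetical sketch of the init / suspend / resume flow; names simplified. */
#include <stdio.h>

struct my_dqm { int running; };

/* stand-in for device_queue_manager_init(); the real one can return NULL */
static struct my_dqm *my_dqm_init(void)
{
	static struct my_dqm d;
	return &d;
}

static int my_dqm_start(struct my_dqm *d) { d->running = 1; return 0; }
static int my_dqm_stop(struct my_dqm *d)  { d->running = 0; return 0; }

struct my_node { struct my_dqm *dqm; };

static int my_init_node(struct my_node *node)
{
	node->dqm = my_dqm_init();
	if (!node->dqm)
		return -1;                   /* kfd_init_node() unwinds on this path */
	return 0;
}

int main(void)
{
	struct my_node node;

	if (my_init_node(&node))
		return 1;
	my_dqm_start(node.dqm);              /* resume path: dqm->ops.start(dqm) */
	my_dqm_stop(node.dqm);               /* suspend path: dqm->ops.stop(dqm) */
	printf("running=%d\n", node.dqm->running);
	return 0;
}
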
kfd_process.c
   118  dqm = pdd->dev->dqm;   in kfd_sdma_activity_worker()
   120  if (!dqm || !qpd)   in kfd_sdma_activity_worker()
   149  dqm_lock(dqm);   in kfd_sdma_activity_worker()
   158  dqm_unlock(dqm);   in kfd_sdma_activity_worker()
   175  dqm_unlock(dqm);   in kfd_sdma_activity_worker()
   179  dqm_unlock(dqm);   in kfd_sdma_activity_worker()
   209  dqm_lock(dqm);   in kfd_sdma_activity_worker()
   231  dqm_unlock(dqm);   in kfd_sdma_activity_worker()
  1620  pdd->qpd.dqm = dev->dqm;   in kfd_create_process_device_data()
  1872  r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,   in kfd_process_evict_queues()
  [all …]

kfd_priv.h
   288  struct device_queue_manager *dqm;   member
   652  struct device_queue_manager *dqm;   member
  1330  void device_queue_manager_uninit(struct device_queue_manager *dqm);
  1334  int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
  1379  int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
  1393  struct device_queue_manager *dqm;   member
  1438  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
  1562  int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

kfd_int_process_v11.c
   213  if (dev->dqm->ops.reset_queues)   in event_interrupt_poison_consumption_v11()
   214  ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);   in event_interrupt_poison_consumption_v11()

kfd_packet_manager_vi.c
    80  struct kfd_node *kfd = pm->dqm->dev;   in pm_runlist_vi()
    94  concurrent_proc_cnt = min(pm->dqm->processes_count,   in pm_runlist_vi()

cik_event_interrupt.c
   113  kfd_dqm_evict_pasid(dev->dqm, pasid);   in cik_event_interrupt_wq()

kfd_kernel_queue.c
    66  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];   in kq_initialize()
    69  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];   in kq_initialize()

kfd_int_process_v9.c
   301  if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {   in event_interrupt_isr_v9()
   308  pasid = dev->dqm->vmid_pasid[vmid];   in event_interrupt_isr_v9()

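These two hits show the v9 interrupt path recovering a PASID from the VMID when the scheduling policy is KFD_SCHED_POLICY_NO_HWS, by indexing dqm->vmid_pasid[vmid]. A tiny sketch of that lookup; the table size MY_NUM_VMIDS and the helper name pasid_from_vmid are assumptions, not kernel identifiers:

/* Hypothetical sketch of a per-VMID pasid table consulted from interrupt code. */
#include <stdio.h>
#include <stdint.h>

#define MY_NUM_VMIDS 16

struct my_dqm {
	uint32_t vmid_pasid[MY_NUM_VMIDS];   /* models dqm->vmid_pasid[] */
};

static uint32_t pasid_from_vmid(const struct my_dqm *dqm, unsigned int vmid)
{
	return vmid < MY_NUM_VMIDS ? dqm->vmid_pasid[vmid] : 0;
}

int main(void)
{
	struct my_dqm dqm = { .vmid_pasid = { [8] = 0x1234 } };

	printf("vmid 8 -> pasid 0x%x\n", pasid_from_vmid(&dqm, 8));
	return 0;
}
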
kfd_topology.c
  2104  dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?   in kfd_topology_add_device()
  2106  dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);   in kfd_topology_add_device()
  2356  r = dqm_debugfs_hqds(m, dev->gpu->dqm);   in kfd_debugfs_hqds_by_device()
  2381  r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr);   in kfd_debugfs_rls_by_device()

kfd_chardev.c
   594  if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,   in kfd_ioctl_set_memory_policy()
   908  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&   in kfd_ioctl_set_scratch_backing_va()
  1472  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {   in kfd_ioctl_alloc_queue_gws()
  2867  debug_refresh_runlist(pdd->dev->dqm);   in runtime_disable()

kfd_events.c
  1260  if (dev->dqm->detect_hang_count && !pdd->has_reset_queue)   in kfd_signal_reset_event()
  1263  if (dev->dqm->detect_hang_count) {   in kfd_signal_reset_event()

kfd_mqd_manager_v9.c
   658  uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;   in init_mqd_v9_4_3()
|