
Searched refs:pdd (Results 1 – 20 of 20) sorted by relevance

/openbsd/sys/dev/pci/drm/amd/amdkfd/
kfd_flat_memory.c
323 pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); in kfd_init_apertures_vi()
330 pdd->gpuvm_limit = in kfd_init_apertures_vi()
334 pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); in kfd_init_apertures_vi()
340 pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); in kfd_init_apertures_v9()
349 pdd->gpuvm_limit = in kfd_init_apertures_v9()
353 pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); in kfd_init_apertures_v9()
360 struct kfd_process_device *pdd; in kfd_init_apertures() local
375 if (!pdd) { in kfd_init_apertures()
385 pdd->lds_base = pdd->lds_limit = 0; in kfd_init_apertures()
386 pdd->gpuvm_base = pdd->gpuvm_limit = 0; in kfd_init_apertures()
[all …]
kfd_debug.c
271 if (!pdd) in kfd_dbg_send_exception_to_runtime()
426 pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch( in kfd_dbg_trap_clear_dev_address_watch()
464 pdd->dev->adev, in kfd_dbg_trap_set_dev_address_watch()
591 pdd->dev->adev, in kfd_dbg_trap_deactivate()
597 release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd)) in kfd_dbg_trap_deactivate()
675 r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd); in kfd_dbg_trap_activate()
696 pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true, in kfd_dbg_trap_activate()
699 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( in kfd_dbg_trap_activate()
755 if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) || in kfd_dbg_trap_enable()
841 pdd->dev->adev, in kfd_dbg_trap_set_wave_launch_override()
[all …]
kfd_process.c
115 pdd = workarea->pdd; in kfd_sdma_activity_worker()
116 if (!pdd) in kfd_sdma_activity_worker()
318 sdma_activity_work_handler.pdd = pdd; in kfd_procfs_show()
554 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats); in kfd_procfs_add_sysfs_stats()
991 if (!pdd) in kfd_process_kunmap_signal_bo()
1028 pdd->dev->adev, pdd->drm_priv); in kfd_process_destroy_pdds()
1032 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) in kfd_process_destroy_pdds()
1038 kfd_free_process_doorbells(pdd->dev->kfd, pdd); in kfd_process_destroy_pdds()
1569 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL); in kfd_create_process_device_data()
1570 if (!pdd) in kfd_create_process_device_data()
[all …]
kfd_chardev.c
76 if (pdd) in kfd_lock_pdd_by_id()
77 return pdd; in kfd_lock_pdd_by_id()
323 if (!pdd) { in kfd_ioctl_create_queue()
591 if (!pdd) { in kfd_ioctl_set_memory_policy()
597 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_memory_policy()
640 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_trap_handler()
689 if (pdd) in kfd_ioctl_get_clock_counters()
948 if (!pdd) in kfd_ioctl_get_tile_config()
1048 if (!pdd) in kfd_ioctl_get_available_memory()
1580 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_import_dmabuf()
[all …]
kfd_process_queue_manager.c
86 if (pdd->already_dequeued) in kfd_process_dequeue_from_device()
92 pdd->already_dequeued = true; in kfd_process_dequeue_from_device()
100 struct kfd_process_device *pdd; in pqm_set_gws() local
116 if (!pdd) { in pqm_set_gws()
122 if (gws && pdd->qpd.num_gws) in pqm_set_gws()
183 if (!pdd) { in pqm_clean_queue_resource()
193 pdd->qpd.num_gws = 0; in pqm_clean_queue_resource()
298 if (!pdd) { in pqm_create_queue()
387 kq, &pdd->qpd); in pqm_create_queue()
469 if (!pdd) { in pqm_destroy_queue()
[all …]
kfd_doorbell.c
110 struct kfd_process_device *pdd; in kfd_doorbell_mmap() local
119 pdd = kfd_get_process_device_data(dev, process); in kfd_doorbell_mmap()
120 if (!pdd) in kfd_doorbell_mmap()
124 address = kfd_get_process_doorbells(pdd); in kfd_doorbell_mmap()
237 struct amdgpu_device *adev = pdd->dev->adev; in kfd_get_process_doorbells()
240 if (!pdd->qpd.proc_doorbells) { in kfd_get_process_doorbells()
241 if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd)) in kfd_get_process_doorbells()
247 pdd->qpd.proc_doorbells, in kfd_get_process_doorbells()
256 struct qcm_process_device *qpd = &pdd->qpd; in kfd_alloc_process_doorbells()
266 r = init_doorbell_bitmap(&pdd->qpd, kfd); in kfd_alloc_process_doorbells()
[all …]
kfd_device_queue_manager_v9.c
42 static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) in compute_sh_mem_bases_64bit() argument
44 uint32_t shared_base = pdd->lds_base >> 48; in compute_sh_mem_bases_64bit()
45 uint32_t private_base = pdd->scratch_base >> 48; in compute_sh_mem_bases_64bit()
54 struct kfd_process_device *pdd; in update_qpd_v9() local
56 pdd = qpd_to_pdd(qpd); in update_qpd_v9()
75 if (!pdd->process->xnack_enabled) in update_qpd_v9()
81 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); in update_qpd_v9()
kfd_device_queue_manager_v11.c
42 static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) in compute_sh_mem_bases_64bit() argument
44 uint32_t shared_base = pdd->lds_base >> 48; in compute_sh_mem_bases_64bit()
45 uint32_t private_base = pdd->scratch_base >> 48; in compute_sh_mem_bases_64bit()
54 struct kfd_process_device *pdd; in update_qpd_v11() local
56 pdd = qpd_to_pdd(qpd); in update_qpd_v11()
69 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); in update_qpd_v11()
kfd_device_queue_manager_v10.c
43 static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) in compute_sh_mem_bases_64bit() argument
45 uint32_t shared_base = pdd->lds_base >> 48; in compute_sh_mem_bases_64bit()
46 uint32_t private_base = pdd->scratch_base >> 48; in compute_sh_mem_bases_64bit()
55 struct kfd_process_device *pdd; in update_qpd_v10() local
57 pdd = qpd_to_pdd(qpd); in update_qpd_v10()
69 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); in update_qpd_v10()
kfd_device_queue_manager.c
739 if (!pdd) in dbgdev_wave_reset_wavefronts()
860 if (!pdd) { in update_queue()
970 pdd->process->pasid, in suspend_single_queue()
1015 pdd = qpd_to_pdd(qpd); in resume_single_queue()
1050 pdd = qpd_to_pdd(qpd); in evict_process_queues_nocpsch()
1099 pdd = qpd_to_pdd(qpd); in evict_process_queues_cpsch()
1105 if (!pdd->drm_priv) in evict_process_queues_cpsch()
1155 pdd = qpd_to_pdd(qpd); in restore_process_queues_nocpsch()
1233 pdd = qpd_to_pdd(qpd); in restore_process_queues_cpsch()
1247 if (!pdd->drm_priv) in restore_process_queues_cpsch()
[all …]
kfd_svm.c
220 if (!pdd) { in svm_range_dma_map()
268 if (!pdd) { in svm_range_free_dma_mappings()
645 if (!pdd) { in svm_range_get_node_by_id()
1330 if (!pdd) { in svm_range_unmap_from_gpus()
1453 if (!pdd) { in svm_range_map_to_gpus()
1458 pdd = kfd_bind_process_to_device(pdd->dev, p); in svm_range_map_to_gpus()
1528 if (!pdd) { in svm_range_reserve_bos()
1560 if (!pdd) in kfd_svm_page_owner()
2264 if (!pdd) in svm_range_drain_retry_fault()
2869 if (pdd) in svm_range_count_fault()
[all …]
kfd_packet_manager_v9.c
38 struct kfd_process_device *pdd = in pm_map_process_v9() local
55 if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && in pm_map_process_v9()
56 pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { in pm_map_process_v9()
92 struct kfd_process_device *pdd = in pm_map_process_aldebaran() local
109 packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override | in pm_map_process_aldebaran()
110 pdd->spi_dbg_launch_mode; in pm_map_process_aldebaran()
112 if (pdd->process->debug_trap_enabled) { in pm_map_process_aldebaran()
114 packet->tcp_watch_cntl[i] = pdd->watch_points[i]; in pm_map_process_aldebaran()
117 !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP); in pm_map_process_aldebaran()
kfd_device_queue_manager_vi.c
109 struct kfd_process_device *pdd; in update_qpd_vi() local
112 pdd = qpd_to_pdd(qpd); in update_qpd_vi()
131 temp = get_sh_mem_bases_nybble_64(pdd); in update_qpd_vi()
kfd_device_queue_manager_cik.c
107 struct kfd_process_device *pdd; in update_qpd_cik() local
110 pdd = qpd_to_pdd(qpd); in update_qpd_cik()
125 temp = get_sh_mem_bases_nybble_64(pdd); in update_qpd_cik()
kfd_debug.h
58 int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
60 int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
129 int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en);
kfd_device_queue_manager.h
306 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) in get_sh_mem_bases_32() argument
308 return (pdd->lds_base >> 16) & 0xFF; in get_sh_mem_bases_32()
312 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) in get_sh_mem_bases_nybble_64() argument
314 return (pdd->lds_base >> 60) & 0x0E; in get_sh_mem_bases_nybble_64()
kfd_migrate.c
401 struct kfd_process_device *pdd; in svm_migrate_vma_to_vram() local
469 pdd = svm_range_get_pdd_by_node(prange, node); in svm_migrate_vma_to_vram()
470 if (pdd) in svm_migrate_vma_to_vram()
471 WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); in svm_migrate_vma_to_vram()
680 struct kfd_process_device *pdd; in svm_migrate_vma_to_ram() local
754 pdd = svm_range_get_pdd_by_node(prange, node); in svm_migrate_vma_to_ram()
755 if (pdd) in svm_migrate_vma_to_ram()
756 WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); in svm_migrate_vma_to_ram()
kfd_priv.h
1042 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1057 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1061 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1086 struct kfd_process_device *pdd,
1088 phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
1090 struct kfd_process_device *pdd);
1092 struct kfd_process_device *pdd);
1155 int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
1310 void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
1465 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
kfd_events.c
352 struct kfd_process_device *pdd; in kfd_kmap_event_page() local
362 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset)); in kfd_kmap_event_page()
363 if (!pdd) { in kfd_kmap_event_page()
367 kfd = pdd->dev; in kfd_kmap_event_page()
369 pdd = kfd_bind_process_to_device(kfd, p); in kfd_kmap_event_page()
370 if (IS_ERR(pdd)) in kfd_kmap_event_page()
371 return PTR_ERR(pdd); in kfd_kmap_event_page()
373 mem = kfd_process_device_translate_handle(pdd, in kfd_kmap_event_page()
kfd_smi_events.c
317 struct kfd_process_device *pdd = p->pdds[i]; in kfd_smi_event_queue_restore_rescheduled() local
319 kfd_smi_event_add(p->lead_thread->pid, pdd->dev, in kfd_smi_event_queue_restore_rescheduled()
322 p->lead_thread->pid, pdd->dev->id, 'R'); in kfd_smi_event_queue_restore_rescheduled()