/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;
int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

static const unsigned int compute_vmid_bitmap = 0xFF00;
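
/**
 * amdgpu_amdkfd_init - Resolve the kgd2kfd interface at module load
 *
 * Obtains kgd2kfd_init() (through symbol_request() when amdkfd is built
 * as a module) and calls it to fetch amdkfd's kgd2kfd function table.
 * On failure kgd2kfd stays NULL, which turns every KFD hook below into
 * a no-op. Also initializes the KFD memory limits when amdkfd is
 * available.
 */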
34
int amdgpu_amdkfd_init(void)
{
	int ret;

#if defined(CONFIG_HSA_AMD_MODULE)
	int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return -ENOENT;

	ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret) {
#if 0
		symbol_put(kgd2kfd_init);
#endif
		kgd2kfd = NULL;
	}

#elif defined(CONFIG_HSA_AMD)

	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;

#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD)
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#endif

	return ret;
}

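/**
 * amdgpu_amdkfd_fini - Release the kgd2kfd interface at module unload
 */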
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
#if 0
		symbol_put(kgd2kfd_init);
#endif
	}
}

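/**
 * amdgpu_amdkfd_device_probe - Register a GPU with amdkfd
 *
 * @adev: amdgpu_device pointer
 *
 * Picks the kfd2kgd function table matching the ASIC generation and
 * asks amdkfd to probe the device. Returns without registering when
 * KFD is unavailable or the ASIC is not supported.
 */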
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

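/**
 * amdgpu_amdkfd_device_init - Pass shared hardware resources to amdkfd
 *
 * @adev: amdgpu_device pointer
 *
 * Describes the resources amdkfd may use: the compute VMIDs, the MEC
 * queues left over by amdgpu (the KIQ and everything outside the first
 * MEC is masked off), the GPUVM aperture size, the doorbell range and,
 * on SOC15 ASICs, the fixed SDMA doorbell routing.
 */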
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_VA_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);
		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
			/* Doorbells 0x0f0-0x0ff and 0x2f0-0x2ff are reserved
			 * for SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val = 0x0f0;
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

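/**
 * amdgpu_amdkfd_device_fini - Unregister a GPU from amdkfd
 *
 * @adev: amdgpu_device pointer
 */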
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

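/**
 * amdgpu_amdkfd_interrupt - Forward an interrupt ring entry to amdkfd
 *
 * @adev: amdgpu_device pointer
 * @ih_ring_entry: raw IH ring entry to hand over
 */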
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

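/**
 * amdgpu_amdkfd_suspend - Suspend amdkfd processing for a device
 *
 * @adev: amdgpu_device pointer
 */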
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

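/**
 * amdgpu_amdkfd_resume - Resume amdkfd processing for a device
 *
 * @adev: amdgpu_device pointer
 *
 * Returns 0 on success or if KFD is not in use.
 */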
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

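/**
 * amdgpu_amdkfd_pre_reset - Notify amdkfd that a GPU reset is imminent
 *
 * @adev: amdgpu_device pointer
 */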
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->pre_reset(adev->kfd);

	return r;
}

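/**
 * amdgpu_amdkfd_post_reset - Notify amdkfd that a GPU reset has completed
 *
 * @adev: amdgpu_device pointer
 */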
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->post_reset(adev->kfd);

	return r;
}

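/**
 * amdgpu_amdkfd_gpu_reset - Trigger GPU recovery on behalf of amdkfd
 *
 * @kgd: kgd_dev pointer (really an amdgpu_device)
 */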
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_device_gpu_recover(adev, NULL, false);
}

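/**
 * alloc_gtt_mem - Allocate a pinned, kernel-mapped GTT buffer for amdkfd
 *
 * @kgd: kgd_dev pointer
 * @size: requested buffer size in bytes
 * @mem_obj: output returning the amdgpu_bo
 * @gpu_addr: output returning the buffer's GPU address
 * @cpu_ptr: output returning the kernel mapping
 * @mqd_gfx9: apply the GFX9 MQD placement restriction
 *
 * Used for small kernel-owned buffers such as MQDs that must stay
 * resident.
 */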
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
		  void **mem_obj, uint64_t *gpu_addr,
		  void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

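/**
 * free_gtt_mem - Free a buffer allocated by alloc_gtt_mem()
 *
 * @kgd: kgd_dev pointer
 * @mem_obj: the amdgpu_bo returned by alloc_gtt_mem()
 */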
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

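/**
 * get_local_mem_info - Report local memory sizes and clock to amdkfd
 *
 * @kgd: kgd_dev pointer
 * @mem_info: output structure describing local memory
 *
 * Currently a stub in this port. The Linux implementation, kept under
 * #if 0 for reference, splits VRAM into a CPU-visible (public) and a
 * private portion and reports the maximum memory clock.
 */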
void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	STUB();
#if 0
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%lx private 0x%lx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
#endif
}

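/**
 * get_gpu_clock_counter - Sample the free-running GPU clock counter
 *
 * @kgd: kgd_dev pointer
 *
 * Returns 0 when the ASIC provides no counter callback.
 */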
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

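/**
 * get_max_engine_clock_in_mhz - Report the maximum shader clock in MHz
 *
 * @kgd: kgd_dev pointer
 */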
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* The sclk is in quanta of 10 kHz; divide by 100 to get MHz. */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

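/**
 * get_cu_info - Copy the compute-unit topology into amdkfd's format
 *
 * @kgd: kgd_dev pointer
 * @cu_info: output structure filled from adev->gfx.cu_info
 */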
void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

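/**
 * amdgpu_amdkfd_get_vram_usage - Report how much VRAM is currently in use
 *
 * @kgd: kgd_dev pointer
 */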
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

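/**
 * amdgpu_amdkfd_submit_ib - Submit an IB for amdkfd and wait for it
 *
 * @kgd: kgd_dev pointer
 * @engine: target engine (first MEC or one of the two SDMA instances)
 * @vmid: VMID the IB runs under (works for NO_HWS only)
 * @gpu_addr: GPU address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Schedules the IB on the selected ring and synchronously waits on the
 * resulting fence before freeing the job.
 */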
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

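/**
 * amdgpu_amdkfd_set_compute_idle - Toggle the COMPUTE power profile
 *
 * @kgd: kgd_dev pointer
 * @idle: true when compute work has drained, false when it starts
 *
 * The profile is enabled while compute is busy (!idle) and disabled
 * again once it goes idle.
 */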
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

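/**
 * amdgpu_amdkfd_is_kfd_vmid - Check whether a VMID belongs to KFD
 *
 * @adev: amdgpu_device pointer
 * @vmid: hardware VMID to test
 *
 * KFD owns the VMIDs in compute_vmid_bitmap (8-15); everything else
 * belongs to amdgpu.
 */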
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

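/*
 * Stubs for builds without amdkfd: fences never belong to KFD, the
 * GPUVM and userptr callbacks do nothing and no kfd2kgd function
 * tables exist.
 */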
#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif