// SPDX-License-Identifier: MIT
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <drm/ttm/ttm_tt.h>
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

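/**
 * amdgpu_amdkfd_init - One-time initialization of the amdgpu<->amdkfd glue
 *
 * Takes a snapshot of the system memory size, used later to estimate the
 * worst-case page-table reservation, and initializes the KFD module via
 * kgd2kfd_init(). Per-device VRAM is added to the running total as each
 * device is initialized.
 *
 * Return: 0 on success, the error code from kgd2kfd_init() otherwise.
 */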
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

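/**
 * amdgpu_amdkfd_device_probe - Probe a KFD device for an amdgpu device
 * @adev: amdgpu_device pointer
 *
 * Creates the kgd2kfd device handle for @adev if the KFD module was
 * initialized successfully, passing through whether the device is an
 * SR-IOV virtual function.
 */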
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_kernel_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
						sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}


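/*
 * Worker scheduled by amdgpu_amdkfd_gpu_reset(). Runs the generic GPU
 * recovery path for the device that owns the kfd.reset_work item.
 */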
static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

static const struct drm_client_funcs kfd_client_funcs = {
	.unregister	= drm_client_release,
};

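/**
 * amdgpu_amdkfd_drm_client_create - Register a DRM client for KFD
 * @adev: amdgpu_device pointer
 *
 * Creates and registers the per-device DRM client that KFD uses for
 * buffer sharing. The client is created at most once, and only after KFD
 * device init has completed; otherwise this is a no-op.
 *
 * Return: 0 on success or if nothing had to be done, the error code from
 * drm_client_init() otherwise.
 */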
int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
	int ret;

	if (!adev->kfd.init_complete || adev->kfd.client.dev)
		return 0;

	ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
			      &kfd_client_funcs);
	if (ret) {
		dev_err(adev->dev, "Failed to init DRM client: %d\n",
			ret);
		return ret;
	}

	drm_client_register(&adev->kfd.client);

	return 0;
}

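/**
 * amdgpu_amdkfd_device_init - Report shared GPU resources to amdkfd
 * @adev: amdgpu_device pointer
 *
 * Collects the resources amdgpu shares with amdkfd (compute VMIDs, MEC
 * queues, doorbell aperture, GPUVM address range) into a
 * kgd2kfd_shared_resources struct and hands it to kgd2kfd_device_init().
 * Also accounts this device's VRAM in the global memory total and sets
 * up the GPU-reset worker.
 */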
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	amdgpu_amdkfd_gpuvm_init_mem_limits();

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec_bitmap[0].queue_bitmap,
				  AMDGPU_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address have to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
							      &gpu_resources);

		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
		amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

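/*
 * Schedule the GPU-reset worker if recovery is enabled for this device,
 * e.g. after KFD detects a hang that requires a full GPU reset.
 */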
void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

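/**
 * amdgpu_amdkfd_alloc_gtt_mem - Allocate kernel GTT memory for KFD
 * @adev: amdgpu_device pointer
 * @size: allocation size in bytes
 * @mem_obj: output returning the buffer object handle
 * @gpu_addr: output returning the GPU (GART) address of the buffer
 * @cpu_ptr: output returning the kernel CPU mapping of the buffer
 * @cp_mqd_gfx9: true if the buffer will hold a GFX9 CP MQD (sets the
 *               corresponding placement flag)
 *
 * Allocates a pinned, kernel-mapped GTT buffer object, binds it to GART
 * and returns both its GPU address and a CPU pointer.
 *
 * Return: 0 on success, negative error code on failure.
 */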
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

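/*
 * Allocate a GWS (global wave sync) buffer object for KFD. GWS BOs have
 * no CPU-accessible backing; the returned handle is only used to map the
 * GWS resource into user queues.
 */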
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

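/**
 * amdgpu_amdkfd_get_local_mem_info - Report local (VRAM) memory configuration
 * @adev: amdgpu_device pointer
 * @mem_info: output structure filled with public/private sizes, VRAM width
 *            and maximum memory clock
 * @xcp: partition to report on, or NULL for the whole device
 *
 * "Public" memory is CPU-visible VRAM, "private" is the remainder. For
 * APUs the TTM page limit is reported instead, since system memory backs
 * KFD allocations there.
 */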
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp)
{
	memset(mem_info, 0, sizeof(*mem_info));

	if (xcp) {
		if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
			mem_info->local_mem_size_public =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
		else
			mem_info->local_mem_size_private =
					KFD_XCP_MEMORY_SIZE(adev, xcp->id);
	} else if (adev->flags & AMD_IS_APU) {
		mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
		mem_info->local_mem_size_private = 0;
	} else {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is in quanta of 10kHz */
	if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

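/**
 * amdgpu_amdkfd_get_dmabuf_info - Look up KFD-relevant info for a dma-buf
 * @adev: device the buffer is being imported to
 * @dma_buf_fd: file descriptor of the dma-buf
 * @dmabuf_adev: optional output returning the exporting device
 * @bo_size: optional output returning the buffer size
 * @metadata_buffer: optional buffer to receive the BO metadata
 * @buffer_size: size of @metadata_buffer
 * @metadata_size: optional output returning the metadata size
 * @flags: optional output returning KFD allocation flags for the buffer
 * @xcp_id: optional output returning the partition the BO belongs to
 *
 * Only amdgpu-exported VRAM and GTT buffers are supported; anything else
 * fails with -EINVAL.
 */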
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}
	if (xcp_id)
		*xcp_id = bo->xcp_id;

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
	struct amdgpu_device *peer_adev = src;
	struct amdgpu_device *adev = dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

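/*
 * Estimate XGMI link bandwidth in MB/s between two devices. For the
 * minimum estimate a single link is assumed; otherwise the number of
 * direct links is queried. Indirect peers report 0 links since the
 * routing is unknown.
 */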
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min)
{
	struct amdgpu_device *adev = dst, *peer_adev;
	int num_links;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
		return 0;

	if (src)
		peer_adev = src;

	/* num links returns 0 for indirect peers since indirect route is unknown. */
	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
	if (num_links < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, num_links);
		num_links = 0;
	}

	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
	return (num_links * 16 * 25000)/BITS_PER_BYTE;
}

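/*
 * Estimate PCIe bandwidth in MB/s from the supported lane-width and
 * link-speed masks: the lowest supported lane count and generation for
 * the minimum estimate, the highest for the maximum. Bandwidth is
 * lanes * per-lane signalling rate in Mbit/s, divided down to bytes.
 */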
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
					fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
					fls(adev->pm.pcie_gen_mask &
					    CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}

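/**
 * amdgpu_amdkfd_submit_ib - Submit an indirect buffer on behalf of KFD
 * @adev: amdgpu_device pointer
 * @engine: engine to submit on (first compute ring or one of the first
 *          two SDMA instances)
 * @vmid: VMID the IB executes under (no-HWS mode only)
 * @gpu_addr: GPU virtual address of the IB
 * @ib_cmd: CPU pointer to the IB contents
 * @ib_len: IB length in dwords
 *
 * Schedules the IB on the selected kernel ring and waits for it to
 * complete before returning.
 */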
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;
	job->num_ibs = 1;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}

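/*
 * Toggle power management for compute workloads. On GFX11 with older MES
 * firmware, GFXOFF is toggled directly; on GFX9 APUs, power gating is
 * toggled as a temporary workaround for compute issues. In all cases the
 * COMPUTE power profile is enabled while busy and disabled when idle.
 */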
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;

	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
	    ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
		pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
		amdgpu_gfx_off_ctrl(adev, idle);
	} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
		   (adev->flags & AMD_IS_APU)) {
		/* Disable GFXOFF and PG. Temporary workaround
		 * to fix some compute applications issue on GFX9.
		 */
		adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
	}
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
{
	amdgpu_device_flush_hdp(adev, NULL);
}

bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
{
	return amdgpu_ras_get_fed_status(adev);
}

void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
}

int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
			uint32_t *payload)
{
	int ret;

	/* Device or IH ring is not ready so bail. */
	ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
	if (ret)
		return ret;

	/* Send payload to fence KFD interrupts */
	amdgpu_amdkfd_interrupt(adev, payload);

	return 0;
}

bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
			int hub_inst, int hub_type)
{
	if (!hub_type) {
		if (adev->gfxhub.funcs->query_utcl2_poison_status)
			return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
		else
			return false;
	} else {
		if (adev->mmhub.funcs->query_utcl2_poison_status)
			return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
		else
			return false;
	}
}

int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
	return kgd2kfd_check_and_lock_kfd();
}

void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
	kgd2kfd_unlock_kfd();
}


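/**
 * amdgpu_amdkfd_xcp_memory_size - VRAM size available to one partition
 * @adev: amdgpu_device pointer
 * @xcp_id: compute partition to query
 *
 * Returns the memory-partition size divided among the compute partitions
 * that share it. On app APUs in NPS1 mode, and on other APUs, the TTM
 * page limit is used instead, since KFD allocations come from system
 * memory there.
 */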
u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
{
	s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
	u64 tmp;

	if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
		if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
			/* In NPS1 mode, we should restrict the vram reporting
			 * tied to the ttm_pages_limit which is 1/2 of the system
			 * memory. For other partition modes, the HBM is uniformly
			 * divided already per numa node reported. If user wants to
			 * go beyond the default ttm limit and maximize the ROCm
			 * allocations, they can go up to max ttm and sysmem limits.
			 */

			tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
		} else {
			tmp = adev->gmc.mem_partitions[mem_id].size;
		}
		do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
		return ALIGN_DOWN(tmp, PAGE_SIZE);
	} else if (adev->flags & AMD_IS_APU) {
		return (ttm_tt_pages_limit() << PAGE_SHIFT);
	} else {
		return adev->gmc.real_vram_size;
	}
}

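/*
 * Unmap a HIQ, identified by its doorbell offset, through the KIQ. A
 * temporary ring structure carrying only the doorbell index and the
 * compute ring type is allocated so the KIQ packet-manager callback can
 * address the queue.
 */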
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_ring_funcs *ring_funcs;
	struct amdgpu_ring *ring;
	int r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
	if (!ring_funcs)
		return -ENOMEM;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		r = -ENOMEM;
		goto free_ring_funcs;
	}

	ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
	ring->doorbell_index = doorbell_off;
	ring->funcs = ring_funcs;

	spin_lock(&kiq->ring_lock);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock(&kiq->ring_lock);
		r = -ENOMEM;
		goto free_ring;
	}

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);

	spin_unlock(&kiq->ring_lock);

free_ring:
	kfree(ring);

free_ring_funcs:
	kfree(ring_funcs);

	return r;
}