/* $NetBSD: amdgpu_atomfirmware.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_atomfirmware.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

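/*
 * Check the firmwareinfo data table for the GPU virtualization capability
 * flag. Returns true if the vbios reports
 * ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION, false otherwise.
 */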
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
			ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}

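/*
 * Cache the start address of the BIOS scratch registers from the
 * firmwareinfo data table in adev->bios_scratch_reg_offset.
 */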
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

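/*
 * Parse the vram_usagebyfirmware data table to size the ATOM scratch buffer.
 * If the vbios requests an SR-IOV VRAM reservation, record its offset and
 * size in adev->fw_vram_usage and fall back to the default scratch size.
 * Returns 0 on success or -ENOMEM if the scratch buffer cannot be allocated.
 */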
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests VRAM reservation for SR-IOV */
			adev->fw_vram_usage.start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->fw_vram_usage.size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
};

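/*
 * Map an ATOM memory type code to the corresponding AMDGPU_VRAM_TYPE_*
 * value. APUs use the (Lp)DDR codes, dGPUs the GDDR/HBM codes; anything
 * else maps to AMDGPU_VRAM_TYPE_UNKNOWN.
 */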
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

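/*
 * Query vram width, type and vendor from the vbios. APUs read the
 * integratedsysteminfo table; dGPUs read the vram_info table and walk the
 * vram module list selected via the BIOS scratch registers. Any of the
 * output pointers may be NULL. Returns 0 on success or -EINVAL for an
 * unsupported table revision.
 */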
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 11:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v11.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}

	}

	return 0;
}

/*
 * Return true if the vbios enabled ECC by default and the umc info table is
 * available, or false if ECC is not enabled or the umc info table is not
 * available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support umc_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			ecc_default_enabled =
				(le32_to_cpu(umc_info->v31.umc_config) &
				 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
		}
	}

	return ecc_default_enabled;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
};

/*
 * Return true if the vbios supports SRAM ECC, or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
};

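/*
 * Fill in the default sclk/mclk and the spll/mpll parameters from the
 * firmwareinfo, smu_info and umc_info data tables, and cache the firmware
 * capability flags. Returns 0 if at least one of the tables was parsed,
 * otherwise -EINVAL.
 */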
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};

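/*
 * Populate adev->gfx.config and adev->gfx.cu_info from the gfx_info data
 * table. Only the v2.4 table layout (crev 4) is handled; anything else
 * returns -EINVAL.
 */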
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}

	}
	return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
			ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}

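/*
 * Decide whether GDDR6 memory training is usable: returns 1 if both the
 * vbios and the MP0 hardware revision support it, 0 if the vbios does not
 * advertise it, and -1 if the vbios advertises it but the psp hardware
 * does not support it.
 */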
static int gddr6_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Treat revision 0 as a special case: the MP0 and MMHUB registers
		 * are missing on some Navi10 A0 parts, which prevents the driver
		 * from discovering the hwip information. Since none of the
		 * functions will be initialized in that case, it should not cause
		 * any problems.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training vbios supports but psp hw(%08x)"
				  " doesn't support!\n", hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}

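/*
 * Determine whether memory training data can be saved/restored on this asic
 * and set adev->fw_vram_usage.mem_train_support accordingly. Requires
 * Navi10/Navi14, bare metal, vbios support and a vram_usagebyfirmware table
 * of revision 2.1 or newer.
 */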
int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset, size;
	int ret;

	adev->fw_vram_usage.mem_train_support = false;

	if (adev->asic_type != CHIP_NAVI10 &&
	    adev->asic_type != CHIP_NAVI14)
		return 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	ret = gddr6_mem_train_support(adev);
	if (ret == -1)
		return -EINVAL;
	else if (ret == 0)
		return 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    vram_usagebyfirmware);
	ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
					    &data_offset);
	if (ret == 0) {
		DRM_ERROR("parse data header failed.\n");
		return -EINVAL;
	}

	DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
		  " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
	/* only support 2.1+ */
	if (((uint16_t)frev << 8 | crev) < 0x0201) {
		DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
		return -EINVAL;
	}

	adev->fw_vram_usage.mem_train_support = true;
	return 0;
}