/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

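/*
 * Check the firmwareinfo table for the GPU virtualization capability flag.
 * Returns true only if the table parses successfully and the flag is set.
 */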
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}

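/*
 * Cache the BIOS scratch register base reported by the firmwareinfo table
 * so later code can access the scratch registers at the right offset.
 */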
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

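/*
 * Allocate the VBIOS scratch buffer. If the vram_usagebyfirmware table
 * flags an SR-IOV reservation, record the firmware VRAM region and fall
 * back to the default scratch size; otherwise use the driver size from
 * the table.
 */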
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV */
			adev->fw_vram_usage.start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->fw_vram_usage.size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

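/*
 * These unions overlay the versioned ATOM table layouts on a single type;
 * callers pick the member that matches the table's format/content revision.
 */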
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
};

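/*
 * Map an ATOM memory type id to the driver's AMDGPU_VRAM_TYPE_* value;
 * APUs use the DDR type ids while dGPUs use the GDDR/HBM type ids.
 */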
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

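/*
 * Read VRAM width, type and vendor from integratedsysteminfo (APU) or
 * vram_info (dGPU). For dGPUs the active vram module index comes from the
 * BIOS scratch registers. Any output pointer may be NULL to skip that field.
 */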
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 11:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v11.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the VBIOS enables ECC by default (requires the umc info
 * table); return false if ECC is disabled or the table is unavailable.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support umc_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			ecc_default_enabled =
				(le32_to_cpu(umc_info->v31.umc_config) &
				 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
		}
	}

	return ecc_default_enabled;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
};

/*
 * Return true if the VBIOS supports SRAM ECC, false otherwise.
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
};

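/*
 * Pull bootup sclk/mclk and the firmware capability flags from the
 * firmwareinfo table, plus the spll/mpll reference clocks from smu_info
 * and umc_info. Returns 0 if at least one table parsed, -EINVAL otherwise.
 */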
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};

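/*
 * Fill adev->gfx.config and adev->gfx.cu_info from the gfx_info table;
 * only content revision 4 is handled here.
 */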
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Check if VBIOS supports GDDR6 training data save/restore
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}

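/*
 * Returns 1 if both the vbios and the psp hw support memory training,
 * 0 if the vbios does not support it, and -1 on a vbios/hw mismatch.
 */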
static int gddr6_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Treat revision 0 as a special case: on some Navi10 A0 parts
		 * the MP0 and MMHUB registers are missing, so the driver cannot
		 * discover the hwip information and none of the IP functions
		 * get initialized. This should not cause any problems here.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training supported by vbios but not by psp hw(%08x)!\n",
				  hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}

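/*
 * Decide whether GDDR6 memory training data should be saved/restored for
 * this asic and record the result in adev->fw_vram_usage.mem_train_support.
 * Only bare-metal Navi10/Navi14 with vbios and psp support qualify.
 */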
int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset, size;
	int ret;

	adev->fw_vram_usage.mem_train_support = false;

	if (adev->asic_type != CHIP_NAVI10 &&
	    adev->asic_type != CHIP_NAVI14)
		return 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	ret = gddr6_mem_train_support(adev);
	if (ret == -1)
		return -EINVAL;
	else if (ret == 0)
		return 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    vram_usagebyfirmware);
	ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
					    &data_offset);
	if (ret == 0) {
		DRM_ERROR("parse data header failed.\n");
		return -EINVAL;
	}

	DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
		  " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
	/* only support 2.1+ */
	if (((uint16_t)frev << 8 | crev) < 0x0201) {
		DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
		return -EINVAL;
	}

	adev->fw_vram_usage.mem_train_support = true;
	return 0;
}