1 /* $NetBSD: amdgpu_ppatomctrl.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $ */
2
3 /*
4 * Copyright 2015 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_ppatomctrl.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");
27
28 #include "pp_debug.h"
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include "atom.h"
33 #include "ppatomctrl.h"
34 #include "atombios.h"
35 #include "cgs_common.h"
36 #include "ppevvmath.h"
37
38 #define MEM_ID_MASK 0xff000000
39 #define MEM_ID_SHIFT 24
40 #define CLOCK_RANGE_MASK 0x00ffffff
41 #define CLOCK_RANGE_SHIFT 0
42 #define LOW_NIBBLE_MASK 0xf
43 #define DATA_EQU_PREV 0
44 #define DATA_FROM_TABLE 4
45
/*
 * Overlay of the three VBIOS VoltageObjectInfo table layouts; the
 * table's format/content revision selects which member is valid.
 */
union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};
51
/*
 * atomctrl_retrieve_ac_timing - copy the AC timing register values for one
 * memory module out of the VBIOS MemClkPatch table.
 *
 * @index:     memory module ID to match against each data block's MEM_ID field
 * @reg_block: ATOM_INIT_REG_BLOCK holding the register index table followed
 *             by the per-clock-range data blocks
 * @table:     destination MC register table; table->last and
 *             table->mc_reg_address[] must already have been filled in
 *             (by atomctrl_set_mc_reg_address_table)
 *
 * Returns 0 on success, -1 if the data blocks are not terminated by
 * END_OF_REG_DATA_BLOCK within VBIOS_MAX_AC_TIMING_ENTRIES ranges.
 */
static int atomctrl_retrieve_ac_timing(
		uint8_t index,
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint32_t i, j;
	uint8_t tmem_id;
	/* Data blocks start right after the two u16 size fields and the
	 * register index table. */
	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));

	uint8_t num_ranges = 0;

	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
		/* First dword of each block: memory module ID in bits 31:24,
		 * max clock of the range in bits 23:0. */
		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);

		if (index == tmem_id) {
			table->mc_reg_table_entry[num_ranges].mclk_max =
				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
						CLOCK_RANGE_SHIFT);

			for (i = 0, j = 1; i < table->last; i++) {
				if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
					/* Value comes from the next dword of the block;
					 * j tracks how many dwords have been consumed. */
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						(uint32_t)*((uint32_t *)reg_data + j);
					j++;
				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
					/* Register repeats the previous register's value. */
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
				}
			}
			num_ranges++;
		}

		/* Advance to the next fixed-size data block. */
		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)) ;
	}

	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
			"Invalid VramInfo table.", return -1);
	table->num_entries = num_ranges;

	return 0;
}
98
99 /**
100 * Get memory clock AC timing registers index from VBIOS table
101 * VBIOS set end of memory clock AC timing registers by ucPreRegDataLength bit6 = 1
102 * @param reg_block the address ATOM_INIT_REG_BLOCK
103 * @param table the address of MCRegTable
104 * @return 0
105 */
static int atomctrl_set_mc_reg_address_table(
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	uint8_t count = 0;

	num_entries--; /* subtract 1 data end mark entry */

	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -1);

	/* ucPreRegDataLength bit6 = 1 marks the end of the AC timing registers */
	while (count < num_entries &&
			!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) {
		table->mc_reg_address[count].s1 =
			(uint16_t)(le16_to_cpu(format->usRegIndex));
		table->mc_reg_address[count].uc_pre_reg_data =
			format->ucPreRegDataLength;

		format = (ATOM_INIT_REG_INDEX_FORMAT *)
			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
		count++;
	}

	table->last = count;
	return 0;
}
136
/**
 * atomctrl_initialize_mc_reg_table - build the MC register table for one
 * VRAM module from the VBIOS VRAM_Info data table.
 *
 * @hwmgr:        hardware manager handle (provides the adev/BIOS accessor)
 * @module_index: index of the VRAM module to extract AC timings for
 * @table:        output MC register table
 *
 * Returns 0 on success, -1 when the VramInfo table is missing or invalid.
 */
int atomctrl_initialize_mc_reg_table(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
		smu_atom_get_data_table(hwmgr->adev,
				GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/* smu_atom_get_data_table() can fail; guard before dereferencing
	 * (the other table lookups in this file all perform this check). */
	if (vram_info == NULL) {
		pr_err("Invalid VramInfo table.");
		return -1;
	}

	if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
173
174 /**
175 * Set DRAM timings based on engine clock and memory clock.
176 */
int atomctrl_set_engine_dram_timings_rv770(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
	uint32_t target;

	/* Engine clock (10 kHz units) with COMPUTE_ENGINE_PLL_PARAM in the
	 * top byte. */
	target = (engine_clock & SET_CLOCK_FREQ_MASK) |
		(COMPUTE_ENGINE_PLL_PARAM << 24);
	engine_clock_parameters.ulTargetEngineClock = cpu_to_le32(target);

	/* Memory clock, also in 10 kHz units. */
	engine_clock_parameters.sReserved.ulClock =
		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&engine_clock_parameters);
}
199
/**
 * Private Function to get the PowerPlay Table Address.
 * WARNING: The table returned by this function is in
 * dynamically allocated memory.
 * The caller has to release it by calling kfree.
 */
get_voltage_info_table(void * device)206 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
207 {
208 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
209 u8 frev, crev;
210 u16 size;
211 union voltage_object_info *voltage_info;
212
213 voltage_info = (union voltage_object_info *)
214 smu_atom_get_data_table(device, index,
215 &size, &frev, &crev);
216
217 if (voltage_info != NULL)
218 return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
219 else
220 return NULL;
221 }
222
atomctrl_lookup_voltage_type_v3(const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,uint8_t voltage_type,uint8_t voltage_mode)223 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
224 const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
225 uint8_t voltage_type, uint8_t voltage_mode)
226 {
227 unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
228 unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
229 const uint8_t *start = (const uint8_t *)voltage_object_info_table;
230
231 while (offset < size) {
232 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
233 (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
234
235 if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
236 voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
237 return voltage_object;
238
239 offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
240 }
241
242 return NULL;
243 }
244
/** atomctrl_get_memory_pll_dividers_si().
 *
 * @param hwmgr input parameter: pointer to HwMgr
 * @param clock_value input parameter: memory clock
 * @param mpll_param output parameter: memory PLL dividers
 * @param strobe_mode input parameter: 1 for strobe mode, 0 for performance mode
 */
int atomctrl_get_memory_pll_dividers_si(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param *mpll_param,
		bool strobe_mode)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	mpll_parameters.ulClock = cpu_to_le32(clock_value);
	mpll_parameters.ucInputFlag = strobe_mode ? (uint8_t)1 : (uint8_t)0;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
		(uint32_t *)&mpll_parameters);

	if (result != 0)
		return result;

	/* Unpack the command-table output into the caller's structure. */
	mpll_param->mpll_fb_divider.clk_frac =
		le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
	mpll_param->mpll_fb_divider.cl_kf =
		le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
	mpll_param->mpll_post_divider = (uint32_t)mpll_parameters.ucPostDiv;
	mpll_param->vco_mode = (uint32_t)(mpll_parameters.ucPllCntlFlag &
			MPLL_CNTL_FLAG_VCO_MODE_MASK);
	mpll_param->yclk_sel = (uint32_t)((mpll_parameters.ucPllCntlFlag &
			MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0);
	mpll_param->qdr = (uint32_t)((mpll_parameters.ucPllCntlFlag &
			MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0);
	mpll_param->half_rate = (uint32_t)((mpll_parameters.ucPllCntlFlag &
			MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0);
	mpll_param->dll_speed = (uint32_t)(mpll_parameters.ucDllSpeed);
	mpll_param->bw_ctrl = (uint32_t)(mpll_parameters.ucBWCntl);

	return 0;
}
296
297 /** atomctrl_get_memory_pll_dividers_vi().
298 *
299 * @param hwmgr input parameter: pointer to HwMgr
300 * @param clock_value input parameter: memory clock
301 * @param dividers output parameter: memory PLL dividers
302 */
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
		(uint32_t *)&mpll_parameters);

	/* Only the post divider is reported by the V2_2 interface. */
	if (result == 0)
		mpll_param->mpll_post_divider =
			(uint32_t)mpll_parameters.ulClock.ucPostDiv;

	return result;
}
322
/**
 * Compute memory PLL parameters (fractional FCW layout, V2_3 interface)
 * for the requested memory clock via the ComputeMemoryClockParam table.
 */
int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param_ai *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0};
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
		(uint32_t *)&mpll_parameters);

	/* VEGAM's mpll takes sometime to finish computing */
	udelay(10);

	if (result != 0)
		return result;

	mpll_param->ulMclk_fcw_int =
		le16_to_cpu(mpll_parameters.usMclk_fcw_int);
	mpll_param->ulMclk_fcw_frac =
		le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
	mpll_param->ulClock =
		le32_to_cpu(mpll_parameters.ulClock.ulClock);
	mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;

	return 0;
}
352
/**
 * Query engine PLL dividers (V4 "Kong" parameter layout) for the
 * requested clock via the ComputeMemoryEnginePLL command table.
 */
int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_kong *dividers)
{
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	pll_parameters.ulClock = cpu_to_le32(clock_value);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_parameters);

	if (result != 0)
		return result;

	dividers->pll_post_divider = pll_parameters.ucPostDiv;
	dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);

	return 0;
}
374
/**
 * Query engine (SCLK) PLL dividers via the ComputeMemoryEnginePLL
 * command table, V1_6 parameter layout.
 */
int atomctrl_get_engine_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_params;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	pll_params.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_params.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_params);

	if (result != 0)
		return result;

	dividers->pll_post_divider = pll_params.ulClock.ucPostDiv;
	dividers->real_clock = le32_to_cpu(pll_params.ulClock.ulClock);

	dividers->ul_fb_div.ul_fb_div_frac =
		le16_to_cpu(pll_params.ulFbDiv.usFbDivFrac);
	dividers->ul_fb_div.ul_fb_div =
		le16_to_cpu(pll_params.ulFbDiv.usFbDiv);

	dividers->uc_pll_ref_div = pll_params.ucPllRefDiv;
	dividers->uc_pll_post_div = pll_params.ucPllPostDiv;
	dividers->uc_pll_cntl_flag = pll_params.ucPllCntlFlag;

	return 0;
}
412
/**
 * Query engine (SCLK) PLL parameters via the ComputeMemoryEnginePLL
 * command table, V1_7 fractional-FCW parameter layout.
 */
int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_ai *dividers)
{
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_params;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	pll_params.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_params.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_params);

	if (result != 0)
		return result;

	dividers->usSclk_fcw_frac = le16_to_cpu(pll_params.usSclk_fcw_frac);
	dividers->usSclk_fcw_int = le16_to_cpu(pll_params.usSclk_fcw_int);
	dividers->ucSclkPostDiv = pll_params.ucSclkPostDiv;
	dividers->ucSclkVcoMode = pll_params.ucSclkVcoMode;
	dividers->ucSclkPllRange = pll_params.ucSclkPllRange;
	dividers->ucSscEnable = pll_params.ucSscEnable;
	dividers->usSsc_fcw1_frac = le16_to_cpu(pll_params.usSsc_fcw1_frac);
	dividers->usSsc_fcw1_int = le16_to_cpu(pll_params.usSsc_fcw1_int);
	dividers->usPcc_fcw_int = le16_to_cpu(pll_params.usPcc_fcw_int);
	dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_params.usSsc_fcw_slew_frac);
	dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_params.usPcc_fcw_slew_frac);

	return 0;
}
443
/**
 * Query DFS PLL dividers via the ComputeMemoryEnginePLL command table,
 * using the default-GPUCLK input flag (V1_6 parameter layout).
 */
int atomctrl_get_dfs_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_params;
	struct amdgpu_device *adev = hwmgr->adev;
	int result;

	pll_params.ulClock.ulClock = cpu_to_le32(clock_value);
	pll_params.ulClock.ucPostDiv =
		COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
		 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
		(uint32_t *)&pll_params);

	if (result != 0)
		return result;

	dividers->pll_post_divider = pll_params.ulClock.ucPostDiv;
	dividers->real_clock = le32_to_cpu(pll_params.ulClock.ulClock);

	dividers->ul_fb_div.ul_fb_div_frac =
		le16_to_cpu(pll_params.ulFbDiv.usFbDivFrac);
	dividers->ul_fb_div.ul_fb_div =
		le16_to_cpu(pll_params.ulFbDiv.usFbDiv);

	dividers->uc_pll_ref_div = pll_params.ucPllRefDiv;
	dividers->uc_pll_post_div = pll_params.ucPllPostDiv;
	dividers->uc_pll_cntl_flag = pll_params.ucPllCntlFlag;

	return 0;
}
482
483 /**
484 * Get the reference clock in 10KHz
485 */
atomctrl_get_reference_clock(struct pp_hwmgr * hwmgr)486 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
487 {
488 ATOM_FIRMWARE_INFO *fw_info;
489 u8 frev, crev;
490 u16 size;
491 uint32_t clock;
492
493 fw_info = (ATOM_FIRMWARE_INFO *)
494 smu_atom_get_data_table(hwmgr->adev,
495 GetIndexIntoMasterTable(DATA, FirmwareInfo),
496 &size, &frev, &crev);
497
498 if (fw_info == NULL)
499 clock = 2700;
500 else
501 clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
502
503 return clock;
504 }
505
506 /**
507 * Returns true if the given voltage type is controlled by GPIO pins.
508 * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
509 * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
510 * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
511 */
atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint8_t voltage_mode)512 bool atomctrl_is_voltage_controlled_by_gpio_v3(
513 struct pp_hwmgr *hwmgr,
514 uint8_t voltage_type,
515 uint8_t voltage_mode)
516 {
517 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
518 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
519 bool ret;
520
521 PP_ASSERT_WITH_CODE((NULL != voltage_info),
522 "Could not find Voltage Table in BIOS.", return false;);
523
524 ret = (NULL != atomctrl_lookup_voltage_type_v3
525 (voltage_info, voltage_type, voltage_mode)) ? true : false;
526
527 return ret;
528 }
529
/**
 * Fill a pp_atomctrl_voltage_table from the matching GPIO voltage object
 * in the VBIOS v3.1 VoltageObjectInfo table.  Returns 0 on success,
 * -1 when the table or a matching object is missing or oversized.
 */
int atomctrl_get_voltage_table_v3(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint8_t voltage_mode,
		pp_atomctrl_voltage_table *voltage_table)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
	unsigned int i, count;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -1;);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, voltage_mode);

	if (NULL == voltage_object)
		return -1;

	count = voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	PP_ASSERT_WITH_CODE(
			(count <= PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
			"Too many voltage entries!",
			return -1;
			);

	/* Copy the voltage/SMIO lookup entries. */
	for (i = 0; i < count; i++) {
		voltage_table->entries[i].value =
			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
		voltage_table->entries[i].smio_low =
			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
	}

	voltage_table->mask_low =
		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
	voltage_table->count =
		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	voltage_table->phase_delay =
		voltage_object->asGpioVoltageObj.ucPhaseDelay;

	return 0;
}
573
/*
 * atomctrl_lookup_gpio_pin - scan the VBIOS GPIO pin LUT for the entry
 * whose ucGPIO_ID equals pinId.
 *
 * On a match, fills gpio_pin_assignment with the pin's bit shift and
 * register A-index and returns true; returns false when the table is
 * exhausted without a match.
 */
static bool atomctrl_lookup_gpio_pin(
		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
		const uint32_t pinId,
		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
{
	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
	uint8_t *start = (uint8_t *)gpio_lookup_table;

	while (offset < size) {
		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);

		if (pinId == pin_assignment->ucGPIO_ID) {
			gpio_pin_assignment->uc_gpio_pin_bit_shift =
				pin_assignment->ucGpioPinBitShift;
			gpio_pin_assignment->us_gpio_pin_aindex =
				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
			return true;
		}

		/* Advance by the offset of the byte just past ucGPIO_ID rather
		 * than sizeof(ATOM_GPIO_PIN_ASSIGNMENT) -- presumably the LUT
		 * entries are packed up to the GPIO ID byte; NOTE(review):
		 * confirm against the atombios.h table layout. */
		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
	}

	return false;
}
600
/**
 * Private Function to get the PowerPlay Table Address.
 * WARNING: The table returned by this function is in
 * dynamically allocated memory.
 * The caller has to release it by calling kfree.
 */
get_gpio_lookup_table(void * device)607 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
608 {
609 u8 frev, crev;
610 u16 size;
611 void *table_address;
612
613 table_address = (ATOM_GPIO_PIN_LUT *)
614 smu_atom_get_data_table(device,
615 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
616 &size, &frev, &crev);
617
618 PP_ASSERT_WITH_CODE((NULL != table_address),
619 "Error retrieving BIOS Table Address!", return NULL;);
620
621 return (ATOM_GPIO_PIN_LUT *)table_address;
622 }
623
/**
 * Returns true if the given pin id is found in the lookup table.
 */
atomctrl_get_pp_assign_pin(struct pp_hwmgr * hwmgr,const uint32_t pinId,pp_atomctrl_gpio_pin_assignment * gpio_pin_assignment)627 bool atomctrl_get_pp_assign_pin(
628 struct pp_hwmgr *hwmgr,
629 const uint32_t pinId,
630 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
631 {
632 bool bRet = false;
633 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
634 get_gpio_lookup_table(hwmgr->adev);
635
636 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
637 "Could not find GPIO lookup Table in BIOS.", return false);
638
639 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
640 gpio_pin_assignment);
641
642 return bRet;
643 }
644
atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint32_t sclk,uint16_t virtual_voltage_Id,uint16_t * voltage,uint16_t dpm_level,bool debug)645 int atomctrl_calculate_voltage_evv_on_sclk(
646 struct pp_hwmgr *hwmgr,
647 uint8_t voltage_type,
648 uint32_t sclk,
649 uint16_t virtual_voltage_Id,
650 uint16_t *voltage,
651 uint16_t dpm_level,
652 bool debug)
653 {
654 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
655 struct amdgpu_device *adev = hwmgr->adev;
656 EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
657 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
658 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
659 EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
660 EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
661 EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
662 EFUSE_INPUT_PARAMETER sInput_FuseValues;
663 READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
664
665 uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
666 fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
667 fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
668 fInt fLkg_FT, repeat;
669 fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
670 fInt fRLL_LoadLine, fPowerDPMx __unused, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
671 fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
672 fInt fSclk_margin, fSclk, fEVV_V;
673 fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
674 uint32_t ul_FT_Lkg_V0NORM;
675 fInt fLn_MaxDivMin, fMin, fAverage, fRange;
676 fInt fRoots[2];
677 fInt fStepSize = GetScaledFraction(625, 100000);
678
679 int result;
680
681 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
682 smu_atom_get_data_table(hwmgr->adev,
683 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
684 NULL, NULL, NULL);
685
686 if (!getASICProfilingInfo)
687 return -1;
688
689 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
690 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
691 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
692 return -1;
693
694 /*-----------------------------------------------------------
695 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
696 *-----------------------------------------------------------
697 */
698 fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
699
700 switch (dpm_level) {
701 case 1:
702 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
703 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
704 break;
705 case 2:
706 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
707 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
708 break;
709 case 3:
710 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
711 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
712 break;
713 case 4:
714 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
715 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
716 break;
717 case 5:
718 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
719 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
720 break;
721 case 6:
722 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
723 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
724 break;
725 case 7:
726 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
727 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
728 break;
729 default:
730 pr_err("DPM Level not supported\n");
731 fPowerDPMx = Convert_ULONG_ToFraction(1);
732 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
733 }
734
735 /*-------------------------
736 * DECODING FUSE VALUES
737 * ------------------------
738 */
739 /*Decode RO_Fused*/
740 sRO_fuse = getASICProfilingInfo->sRoFuse;
741
742 sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
743 sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
744 sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
745
746 sOutput_FuseValues.sEfuse = sInput_FuseValues;
747
748 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
749 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
750 (uint32_t *)&sOutput_FuseValues);
751
752 if (result)
753 return result;
754
755 /* Finally, the actual fuse value */
756 ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
757 fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
758 fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
759 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
760
761 sCACm_fuse = getASICProfilingInfo->sCACm;
762
763 sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
764 sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
765 sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
766
767 sOutput_FuseValues.sEfuse = sInput_FuseValues;
768
769 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
770 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
771 (uint32_t *)&sOutput_FuseValues);
772
773 if (result)
774 return result;
775
776 ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
777 fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
778 fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
779
780 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
781
782 sCACb_fuse = getASICProfilingInfo->sCACb;
783
784 sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
785 sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
786 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
787 sOutput_FuseValues.sEfuse = sInput_FuseValues;
788
789 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
790 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
791 (uint32_t *)&sOutput_FuseValues);
792
793 if (result)
794 return result;
795
796 ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
797 fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
798 fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
799
800 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
801
802 sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
803
804 sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
805 sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
806 sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
807
808 sOutput_FuseValues.sEfuse = sInput_FuseValues;
809
810 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
811 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
812 (uint32_t *)&sOutput_FuseValues);
813
814 if (result)
815 return result;
816
817 ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
818 fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
819 fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
820
821 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
822 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
823
824 sKv_m_fuse = getASICProfilingInfo->sKv_m;
825
826 sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
827 sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
828 sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
829
830 sOutput_FuseValues.sEfuse = sInput_FuseValues;
831
832 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
833 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
834 (uint32_t *)&sOutput_FuseValues);
835 if (result)
836 return result;
837
838 ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
839 fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
840 fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
841 fRange = fMultiply(fRange, ConvertToFraction(-1));
842
843 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
844 fAverage, fRange, sKv_m_fuse.ucEfuseLength);
845
846 sKv_b_fuse = getASICProfilingInfo->sKv_b;
847
848 sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
849 sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
850 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
851 sOutput_FuseValues.sEfuse = sInput_FuseValues;
852
853 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
854 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
855 (uint32_t *)&sOutput_FuseValues);
856
857 if (result)
858 return result;
859
860 ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
861 fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
862 fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
863
864 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
865 fAverage, fRange, sKv_b_fuse.ucEfuseLength);
866
867 /* Decoding the Leakage - No special struct container */
868 /*
869 * usLkgEuseIndex=56
870 * ucLkgEfuseBitLSB=6
871 * ucLkgEfuseLength=10
872 * ulLkgEncodeLn_MaxDivMin=69077
873 * ulLkgEncodeMax=1000000
874 * ulLkgEncodeMin=1000
875 * ulEfuseLogisticAlpha=13
876 */
877
878 sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
879 sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
880 sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
881
882 sOutput_FuseValues.sEfuse = sInput_FuseValues;
883
884 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
885 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
886 (uint32_t *)&sOutput_FuseValues);
887
888 if (result)
889 return result;
890
891 ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
892 fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
893 fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
894
895 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
896 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
897 fLkg_FT = fFT_Lkg_V0NORM;
898
899 /*-------------------------------------------
900 * PART 2 - Grabbing all required values
901 *-------------------------------------------
902 */
903 fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
904 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
905 fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
906 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
907 fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
908 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
909 fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
910 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
911 fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
912 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
913 fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
914 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
915 fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
916 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
917 fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
918 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
919
920 fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
921 fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
922 fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
923
924 fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
925
926 fMargin_FMAX_mean = GetScaledFraction(
927 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
928 fMargin_Plat_mean = GetScaledFraction(
929 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
930 fMargin_FMAX_sigma = GetScaledFraction(
931 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
932 fMargin_Plat_sigma = GetScaledFraction(
933 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
934
935 fMargin_DC_sigma = GetScaledFraction(
936 le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
937 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
938
939 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
940 fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
941 fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
942 fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
943 fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
944
945 fSclk = GetScaledFraction(sclk, 100);
946
947 fV_max = fDivide(GetScaledFraction(
948 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
949 fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
950 fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
951 fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
952 fV_FT = fDivide(GetScaledFraction(
953 le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
954 fV_min = fDivide(GetScaledFraction(
955 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
956
957 /*-----------------------
958 * PART 3
959 *-----------------------
960 */
961
962 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
963 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
964 fC_Term = fAdd(fMargin_RO_c,
965 fAdd(fMultiply(fSM_A0, fLkg_FT),
966 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
967 fAdd(fMultiply(fSM_A3, fSclk),
968 fSubtract(fSM_A7, fRO_fused)))));
969
970 fVDDC_base = fSubtract(fRO_fused,
971 fSubtract(fMargin_RO_c,
972 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
973 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
974
975 repeat = fSubtract(fVDDC_base,
976 fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
977
978 fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
979 fGetSquare(repeat)),
980 fAdd(fMultiply(fMargin_RO_b, repeat),
981 fMargin_RO_c));
982
983 fDC_SCLK = fSubtract(fRO_fused,
984 fSubtract(fRO_DC_margin,
985 fSubtract(fSM_A3,
986 fMultiply(fSM_A2, repeat))));
987 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
988
989 fSigma_DC = fSubtract(fSclk, fDC_SCLK);
990
991 fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
992 fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
993 fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
994 fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
995
996 fSquared_Sigma_DC = fGetSquare(fSigma_DC);
997 fSquared_Sigma_CR = fGetSquare(fSigma_CR);
998 fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
999
1000 fSclk_margin = fAdd(fMicro_FMAX,
1001 fAdd(fMicro_CR,
1002 fAdd(fMargin_fixed,
1003 fSqrt(fAdd(fSquared_Sigma_FMAX,
1004 fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
1005 /*
1006 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
1007 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
1008 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
1009 */
1010
1011 fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
1012 fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
1013 fC_Term = fAdd(fRO_DC_margin,
1014 fAdd(fMultiply(fSM_A0, fLkg_FT),
1015 fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
1016 fAdd(fSclk, fSclk_margin)),
1017 fAdd(fMultiply(fSM_A3,
1018 fAdd(fSclk, fSclk_margin)),
1019 fSubtract(fSM_A7, fRO_fused)))));
1020
1021 SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
1022
1023 if (GreaterThan(fRoots[0], fRoots[1]))
1024 fEVV_V = fRoots[1];
1025 else
1026 fEVV_V = fRoots[0];
1027
1028 if (GreaterThan(fV_min, fEVV_V))
1029 fEVV_V = fV_min;
1030 else if (GreaterThan(fEVV_V, fV_max))
1031 fEVV_V = fSubtract(fV_max, fStepSize);
1032
1033 fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
1034
1035 /*-----------------
1036 * PART 4
1037 *-----------------
1038 */
1039
1040 fV_x = fV_min;
1041
1042 while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
1043 fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
1044 fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
1045 fGetSquare(fV_x)), fDerateTDP);
1046
1047 fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
1048 fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
1049 fT_prod), fKv_b_fused), fV_x)), fV_x)));
1050 fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
1051 fKt_Beta_fused, fT_prod)));
1052 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1053 fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
1054 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1055 fKt_Beta_fused, fT_FT)));
1056
1057 fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
1058
1059 fTDP_Current = fDivide(fTDP_Power, fV_x);
1060
1061 fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
1062 ConvertToFraction(10)));
1063
1064 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
1065
1066 if (GreaterThan(fV_max, fV_NL) &&
1067 (GreaterThan(fV_NL, fEVV_V) ||
1068 Equal(fV_NL, fEVV_V))) {
1069 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1070
1071 *voltage = (uint16_t)fV_NL.partial.real;
1072 break;
1073 } else
1074 fV_x = fAdd(fV_x, fStepSize);
1075 }
1076
1077 return result;
1078 }
1079
1080 /** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
1081 * @param hwmgr input: pointer to hwManager
1082 * @param voltage_type input: type of EVV voltage VDDC or VDDGFX
 * @param sclk             input: in 10KHz unit. DPM state SCLK frequency
 *		which is defined in the PPTable SCLK/VDDC dependence
 *		table associated with this virtual_voltage_Id
1086 * @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1087 * @param voltage output: real voltage level in unit of mv
1088 */
int atomctrl_get_voltage_evv_on_sclk(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id,
		uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 evv_param;
	int result;

	/*
	 * Build the EVV voltage query; the ATOM parameter space is
	 * little-endian, so multi-byte inputs are swapped on the way in.
	 */
	evv_param.ucVoltageType = voltage_type;
	evv_param.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_param.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_param.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_param);

	/* On failure report 0 mV; otherwise decode the output overlay. */
	if (result)
		*voltage = 0;
	else
		*voltage = le16_to_cpu(
			((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&evv_param))->usVoltageLevel);

	return result;
}
1118
1119 /**
1120 * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table.
1121 * @param hwmgr input: pointer to hwManager
1122 * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1123 * @param voltage output: real voltage level in unit of mv
1124 */
int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
			     uint16_t virtual_voltage_id,
			     uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
	int result;
	int entry_id;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
			/* found */
			break;
		}
	}

	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
		return -EINVAL;
	}

	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	/*
	 * Fix: the ATOM parameter space is little-endian, so the voltage
	 * level must be converted with cpu_to_le16() — exactly as
	 * atomctrl_get_voltage_evv_on_sclk() already does.  The previous
	 * raw assignment was wrong on big-endian hosts.
	 */
	get_voltage_info_param_space.usVoltageLevel =
		cpu_to_le16(virtual_voltage_id);
	get_voltage_info_param_space.ulSCLKFreq =
		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&get_voltage_info_param_space);

	if (0 != result)
		return result;

	/* Re-interpret the parameter space as the command's output overlay. */
	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&get_voltage_info_param_space))->usVoltageLevel);

	return result;
}
1165
1166 /**
1167 * Get the mpll reference clock in 10KHz
1168 */
atomctrl_get_mpll_reference_clock(struct pp_hwmgr * hwmgr)1169 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1170 {
1171 ATOM_COMMON_TABLE_HEADER *fw_info;
1172 uint32_t clock;
1173 u8 frev, crev;
1174 u16 size;
1175
1176 fw_info = (ATOM_COMMON_TABLE_HEADER *)
1177 smu_atom_get_data_table(hwmgr->adev,
1178 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1179 &size, &frev, &crev);
1180
1181 if (fw_info == NULL)
1182 clock = 2700;
1183 else {
1184 if ((fw_info->ucTableFormatRevision == 2) &&
1185 (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1186 ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1187 (ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1188 clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1189 } else {
1190 ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1191 (ATOM_FIRMWARE_INFO *)fw_info;
1192 clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1193 }
1194 }
1195
1196 return clock;
1197 }
1198
1199 /**
1200 * Get the asic internal spread spectrum table
1201 */
asic_internal_ss_get_ss_table(void * device)1202 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1203 {
1204 ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1205 u8 frev, crev;
1206 u16 size;
1207
1208 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1209 smu_atom_get_data_table(device,
1210 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1211 &size, &frev, &crev);
1212
1213 return table;
1214 }
1215
1216 /**
1217 * Get the asic internal spread spectrum assignment
1218 */
/*
 * Scan the ASIC-internal spread-spectrum table for the first assignment
 * matching @clockSource whose target clock range covers @clockSpeed, and
 * translate it into @ssEntry.
 *
 * Returns 0 when a matching entry was found, 1 when none matched, and -1
 * when the SS table itself is missing from the BIOS.  Note the
 * non-standard positive "not found" code — callers must compare against 0.
 */
static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
		const uint8_t clockSource,
		const uint32_t clockSpeed,
		pp_atomctrl_internal_ss_info *ssEntry)
{
	ATOM_ASIC_INTERNAL_SS_INFO *table;
	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
	int entry_found = 0;

	/* Report all-zero info if we bail out early or find no match. */
	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));

	table = asic_internal_ss_get_ss_table(hwmgr->adev);

	if (NULL == table)
		return -1;

	ssInfo = &table->asSpreadSpectrum[0];

	/*
	 * Walk the variable-length assignment array; the table header's
	 * usStructureSize bounds the whole table, entries are fixed-size.
	 */
	while (((uint8_t *)ssInfo - (uint8_t *)table) <
		le16_to_cpu(table->sHeader.usStructureSize)) {
		if ((clockSource == ssInfo->ucClockIndication) &&
			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
			entry_found = 1;
			break;
		}

		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
	}

	if (entry_found) {
		ssEntry->speed_spectrum_percentage =
			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);

		/*
		 * Table revisions >= 2.2 (and all of major revision 3) encode
		 * the spread rate scaled by 100; normalize it here.
		 */
		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
			ssEntry->speed_spectrum_rate /= 100;
		}

		/* Mode 1 is center-spread; anything else maps to down-spread. */
		switch (ssInfo->ucSpreadSpectrumMode) {
		case 0:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		case 1:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_center;
			break;
		default:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		}
	}

	return entry_found ? 0 : 1;
}
1278
1279 /**
1280 * Get the memory clock spread spectrum info
1281 */
int atomctrl_get_memory_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	/* Memory-clock SS is looked up under the internal-memory SS source. */
	const uint8_t source = ASIC_INTERNAL_MEMORY_SS;

	return asic_internal_ss_get_ss_asignment(hwmgr, source,
			memory_clock, ssInfo);
}
1290 /**
1291 * Get the engine clock spread spectrum info
1292 */
int atomctrl_get_engine_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t engine_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	/* Engine-clock SS is looked up under the internal-engine SS source. */
	const uint8_t source = ASIC_INTERNAL_ENGINE_SS;

	return asic_internal_ss_get_ss_asignment(hwmgr, source,
			engine_clock, ssInfo);
}
1301
int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
		uint16_t end_index, uint32_t mask, uint32_t *efuse)
{
	struct amdgpu_device *adev = hwmgr->adev;
	READ_EFUSE_VALUE_PARAMETER efuse_param;
	int result;

	/*
	 * Efuse bits are addressed as 32-bit words; the index is the byte
	 * offset of the containing word, the shift is the bit position
	 * within that word, and the length spans [start_index, end_index].
	 */
	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
	efuse_param.sEfuse.ucBitShift = (uint8_t)(start_index % 32);
	efuse_param.sEfuse.ucBitLength = (uint8_t)(end_index - start_index + 1);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&efuse_param);

	if (result)
		*efuse = 0;
	else
		*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;

	return result;
}
1322
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
			      uint8_t level)
{
	struct amdgpu_device *adev = hwmgr->adev;
	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 mc_param;

	/*
	 * Request an MC register adjustment for the given memory clock and
	 * MCLK DPM level via the DynamicMemorySettings command table.
	 */
	mc_param.asDPMMCReg.ulClock.ulClockFreq =
		memory_clock & SET_CLOCK_FREQ_MASK;
	mc_param.asDPMMCReg.ulClock.ulComputeClockFlag =
		ADJUST_MC_SETTING_PARAM;
	mc_param.asDPMMCReg.ucMclkDPMState = level;

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&mc_param);
}
1342
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 evv_param;
	int result;

	/* V1_3 query: same EVV lookup as V1_2, but a 32-bit output level. */
	evv_param.ucVoltageType = voltage_type;
	evv_param.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_param.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_param.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_param);

	if (result)
		*voltage = 0;
	else
		*voltage = le32_to_cpu(
			((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
				(&evv_param))->ulVoltageLevel);

	return result;
}
1364
atomctrl_get_smc_sclk_range_table(struct pp_hwmgr * hwmgr,struct pp_atom_ctrl_sclk_range_table * table)1365 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1366 {
1367
1368 int i;
1369 u8 frev, crev;
1370 u16 size;
1371
1372 ATOM_SMU_INFO_V2_1 *psmu_info =
1373 (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1374 GetIndexIntoMasterTable(DATA, SMU_Info),
1375 &size, &frev, &crev);
1376
1377
1378 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1379 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1380 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1381 table->entry[i].usFcw_pcc =
1382 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1383 table->entry[i].usFcw_trans_upper =
1384 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1385 table->entry[i].usRcw_trans_lower =
1386 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1387 }
1388
1389 return 0;
1390 }
1391
/*
 * Populate @param with the AVFS coefficients from the BIOS
 * ASIC_ProfilingInfo V3_6 data table, converting all multi-byte fields
 * from little-endian to host order.
 *
 * Returns 0 on success, -EINVAL for a NULL @param, and -1 when the
 * profiling table is absent from the BIOS.
 */
int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
		struct pp_atom_ctrl__avfs_parameters *param)
{
	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;

	if (param == NULL)
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
		smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
			NULL, NULL, NULL);
	if (!profile)
		return -1;

	/* Straight field-by-field copy; ul* are 32-bit LE, us* 16-bit LE,
	 * uc* single bytes needing no conversion. */
	param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
	param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
	param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
	param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
	param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
	param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
	param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
	param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
	param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
	param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
	param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
	param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;

	return 0;
}
1435
/*
 * Look up the SVID2 voltage object for @voltage_type and report its SVD/SVC
 * GPIO ids and load-line setting.  Returns 0 on success, -EINVAL when the
 * BIOS voltage table or the SVID2 object is missing.
 */
int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
				uint16_t *load_line)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);

	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -EINVAL);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

	/*
	 * Fix: the lookup can fail when the BIOS has no SVID2 object for
	 * this voltage type; dereferencing the NULL result crashed.
	 */
	PP_ASSERT_WITH_CODE((NULL != voltage_object),
			"Could not find SVID2 voltage object in BIOS.",
			return -EINVAL);

	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
	/*
	 * Fix: usLoadLine_PSI is stored little-endian in the BIOS image;
	 * convert like every other 16-bit BIOS field in this file.
	 */
	*load_line = le16_to_cpu(voltage_object->asSVID2Obj.usLoadLine_PSI);

	return 0;
}
1457
/*
 * Query the leakage (virtual voltage) id from the efuse via the ATOM
 * SetVoltage command table in ATOM_GET_LEAKAGE_ID mode.  Returns the
 * command-table result; *virtual_voltage_id holds the id on success.
 */
int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_VOLTAGE_PS_ALLOCATION allocation;
	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
		(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
	int result;

	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, SetVoltage),
			(uint32_t *)voltage_parameters);

	/*
	 * Fix: the ATOM parameter space is little-endian; convert the
	 * returned level like the other usVoltageLevel reads in this file.
	 */
	*virtual_voltage_id = le16_to_cpu(voltage_parameters->usVoltageLevel);

	return result;
}
1476
/*
 * Translate a virtual (leakage) voltage id plus the efuse-derived leakage
 * bin into real VDDC/VDDCI levels using the ASIC_ProfilingInfo V2_1 table.
 *
 * *vddc/*vddci are zeroed first and stay 0 when no match is found or the
 * table is too old/small.  Returns 0 unless the profiling table is missing
 * (-EINVAL).
 */
int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
					uint16_t *vddc, uint16_t *vddci,
					uint16_t virtual_voltage_id,
					uint16_t efuse_voltage_id)
{
	int i, j;
	int ix;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;

	*vddc = 0;
	*vddci = 0;

	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
					ix,
					NULL, NULL, NULL);
	if (!profile)
		return -EINVAL;

	/* Only table layout >= 2.1 carries the leakage-bin arrays. */
	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
		(profile->asHeader.ucTableContentRevision >= 1) &&
		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
		/*
		 * The arrays live at byte offsets relative to the start of
		 * the table.  NOTE(review): the us*Offset fields are used
		 * without le16_to_cpu here (matches upstream) — presumably
		 * only exercised on little-endian hosts; verify before
		 * relying on this path on big-endian machines.
		 */
		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
		if (profile->ucElbVDDC_Num > 0) {
			/* Find the column for this virtual voltage id... */
			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
				if (vddc_id_buf[i] == virtual_voltage_id) {
					/* ...then the first leakage bin covering the efuse id. */
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							/* Levels are stored bin-major: row j, column i. */
							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
							break;
						}
					}
					break;
				}
			}
		}

		/* Same two-level lookup for VDDCI. */
		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
		vddci_buf   = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
		if (profile->ucElbVDDCI_Num > 0) {
			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
				if (vddci_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
							break;
						}
					}
					break;
				}
			}
		}
	}

	return 0;
}
1538
void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
				uint32_t *min_vddc)
{
	void *profile = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
			NULL, NULL, NULL);

	/* Default: report 0/0 when the table is absent or chip unhandled. */
	*max_vddc = 0;
	*min_vddc = 0;

	if (profile == NULL)
		return;

	switch (hwmgr->chip_id) {
	case CHIP_TONGA:
	case CHIP_FIJI: {
		/* V3_3 layout stores the limits in units of 0.25 mV. */
		ATOM_ASIC_PROFILING_INFO_V3_3 *p = profile;

		*max_vddc = le32_to_cpu(p->ulMaxVddc) / 4;
		*min_vddc = le32_to_cpu(p->ulMinVddc) / 4;
		break;
	}
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12: {
		/* V3_6 layout stores the limits in units of 0.01 mV. */
		ATOM_ASIC_PROFILING_INFO_V3_6 *p = profile;

		*max_vddc = le32_to_cpu(p->ulMaxVddc) / 100;
		*min_vddc = le32_to_cpu(p->ulMinVddc) / 100;
		break;
	}
	default:
		break;
	}
}
1568