1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "pp_debug.h"
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/fb.h>
28 #include "atom.h"
29 #include "ppatomctrl.h"
30 #include "atombios.h"
31 #include "cgs_common.h"
32 #include "ppevvmath.h"
33
34 #define MEM_ID_MASK 0xff000000
35 #define MEM_ID_SHIFT 24
36 #define CLOCK_RANGE_MASK 0x00ffffff
37 #define CLOCK_RANGE_SHIFT 0
38 #define LOW_NIBBLE_MASK 0xf
39 #define DATA_EQU_PREV 0
40 #define DATA_FROM_TABLE 4
41
/* Per-revision views of the VBIOS VOLTAGE_OBJECT_INFO data table; the
 * caller inspects the table header to decide which member is valid. */
union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};
47
/*
 * atomctrl_retrieve_ac_timing - copy the AC-timing register values for one
 * memory module out of the VBIOS MemClkPatch data blocks.
 * @index: memory module id to match against each data block's MEM_ID field
 * @reg_block: ATOM_INIT_REG_BLOCK holding the register index table followed
 *             by per-clock-range data blocks
 * @table: destination table; table->last and table->mc_reg_address[] must
 *         already have been filled in (see atomctrl_set_mc_reg_address_table)
 *
 * Returns 0 on success, -1 when the data blocks are not terminated by
 * END_OF_REG_DATA_BLOCK (i.e. the VramInfo table is malformed).
 */
static int atomctrl_retrieve_ac_timing(
		uint8_t index,
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint32_t i, j;
	uint8_t tmem_id;
	/* Data blocks start right after the two u16 header fields plus the
	 * register index table. */
	ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
		((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize));

	uint8_t num_ranges = 0;

	while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK &&
			num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) {
		/* The first dword of each block packs the module id (top byte)
		 * and the mclk range (low 24 bits). */
		tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT);

		if (index == tmem_id) {
			table->mc_reg_table_entry[num_ranges].mclk_max =
				(uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >>
						CLOCK_RANGE_SHIFT);

			/* j walks the packed data words: only DATA_FROM_TABLE
			 * registers consume a word; DATA_EQU_PREV repeats the
			 * previous register's value instead. */
			for (i = 0, j = 1; i < table->last; i++) {
				if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						(uint32_t)*((uint32_t *)reg_data + j);
					j++;
				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
					table->mc_reg_table_entry[num_ranges].mc_data[i] =
						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
				}
			}
			num_ranges++;
		}

		reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
			((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)) ;
	}

	/* If we bailed out on VBIOS_MAX_AC_TIMING_ENTRIES without seeing the
	 * terminator, the table is invalid. */
	PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK),
			"Invalid VramInfo table.", return -1);
	table->num_entries = num_ranges;

	return 0;
}
94
/**
 * Get the memory-clock AC-timing register index table from the VBIOS.
 * The VBIOS marks the end of the AC-timing registers by setting
 * ucPreRegDataLength bit 6 to 1.
 * @param reg_block the address of the ATOM_INIT_REG_BLOCK
 * @param table the address of the MCRegTable
 * @return 0 on success, -1 on an invalid table
 */
static int atomctrl_set_mc_reg_address_table(
		ATOM_INIT_REG_BLOCK *reg_block,
		pp_atomctrl_mc_reg_table *table)
{
	uint8_t i = 0;
	uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize))
			/ sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	/* The index table is embedded directly after the block header. */
	ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];

	num_entries--;        /* subtract 1 data-end-mark entry */

	PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE),
			"Invalid VramInfo table.", return -1);

	/* ucPreRegDataLength bit6 = 1 is the end of the memory-clock
	 * AC-timing registers. */
	while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) &&
			(i < num_entries)) {
		table->mc_reg_address[i].s1 =
			(uint16_t)(le16_to_cpu(format->usRegIndex));
		table->mc_reg_address[i].uc_pre_reg_data =
			format->ucPreRegDataLength;

		i++;
		format = (ATOM_INIT_REG_INDEX_FORMAT *)
			((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
	}

	table->last = i;
	return 0;
}
132
/*
 * atomctrl_initialize_mc_reg_table - build the MC register table for one
 * VRAM module from the VBIOS VRAM_Info data table.
 * @hwmgr: hardware manager (hwmgr->adev is used for VBIOS table access)
 * @module_index: VRAM module to read; must be < ucNumOfVRAMModule
 * @table: destination MC register table
 *
 * Returns 0 on success, -1 when the VRAM_Info table is missing or invalid.
 */
int atomctrl_initialize_mc_reg_table(
		struct pp_hwmgr *hwmgr,
		uint8_t module_index,
		pp_atomctrl_mc_reg_table *table)
{
	ATOM_VRAM_INFO_HEADER_V2_1 *vram_info;
	ATOM_INIT_REG_BLOCK *reg_block;
	int result = 0;
	u8 frev, crev;
	u16 size;

	vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);

	/* smu_atom_get_data_table() can return NULL; the original code
	 * dereferenced vram_info without checking. */
	if (!vram_info) {
		pr_err("Could not retrieve the VramInfo table!");
		result = -1;
	} else if (module_index >= vram_info->ucNumOfVRAMModule) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	} else if (vram_info->sHeader.ucTableFormatRevision < 2) {
		pr_err("Invalid VramInfo table.");
		result = -1;
	}

	if (0 == result) {
		reg_block = (ATOM_INIT_REG_BLOCK *)
			((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset));
		result = atomctrl_set_mc_reg_address_table(reg_block, table);
	}

	if (0 == result) {
		result = atomctrl_retrieve_ac_timing(module_index,
					reg_block, table);
	}

	return result;
}
169
170 /**
171 * Set DRAM timings based on engine clock and memory clock.
172 */
int atomctrl_set_engine_dram_timings_rv770(
		struct pp_hwmgr *hwmgr,
		uint32_t engine_clock,
		uint32_t memory_clock)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_ENGINE_CLOCK_PS_ALLOCATION dram_timing_args;

	/* Engine clock is in 10 kHz units, with the compute-PLL action code
	 * packed into the top byte. */
	dram_timing_args.ulTargetEngineClock =
		cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
			    (COMPUTE_ENGINE_PLL_PARAM << 24));

	/* Memory clock, also in 10 kHz units. */
	dram_timing_args.sReserved.ulClock =
		cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&dram_timing_args);
}
195
/**
 * Private function to get the VoltageObjectInfo table address.
 * WARNING: The table returned by this function is in
 * dynamically allocated memory.
 * The caller has to release it by calling kfree.
 */
get_voltage_info_table(void * device)202 static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
203 {
204 int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
205 u8 frev, crev;
206 u16 size;
207 union voltage_object_info *voltage_info;
208
209 voltage_info = (union voltage_object_info *)
210 smu_atom_get_data_table(device, index,
211 &size, &frev, &crev);
212
213 if (voltage_info != NULL)
214 return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3);
215 else
216 return NULL;
217 }
218
atomctrl_lookup_voltage_type_v3(const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,uint8_t voltage_type,uint8_t voltage_mode)219 static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3(
220 const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table,
221 uint8_t voltage_type, uint8_t voltage_mode)
222 {
223 unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize);
224 unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
225 uint8_t *start = (uint8_t *)voltage_object_info_table;
226
227 while (offset < size) {
228 const ATOM_VOLTAGE_OBJECT_V3 *voltage_object =
229 (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
230
231 if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType &&
232 voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode)
233 return voltage_object;
234
235 offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize);
236 }
237
238 return NULL;
239 }
240
/** atomctrl_get_memory_pll_dividers_si().
 *
 * @param hwmgr       input parameter: pointer to HwMgr
 * @param clock_value input parameter: memory clock
 * @param mpll_param  output parameter: memory PLL dividers
 * @param strobe_mode input parameter: 1 for strobe mode, 0 for performance mode
 */
int atomctrl_get_memory_pll_dividers_si(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param *mpll_param,
		bool strobe_mode)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
	int ret;

	args.ulClock = cpu_to_le32(clock_value);
	args.ucInputFlag = strobe_mode ? 1 : 0;

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&args);
	if (ret != 0)
		return ret;

	/* Unpack the divider/flag fields computed by the VBIOS. */
	mpll_param->mpll_fb_divider.clk_frac =
		le16_to_cpu(args.ulFbDiv.usFbDivFrac);
	mpll_param->mpll_fb_divider.cl_kf =
		le16_to_cpu(args.ulFbDiv.usFbDiv);
	mpll_param->mpll_post_divider = (uint32_t)args.ucPostDiv;
	mpll_param->vco_mode =
		(uint32_t)(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
	mpll_param->yclk_sel =
		(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
	mpll_param->qdr =
		(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
	mpll_param->half_rate =
		(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
	mpll_param->dll_speed = (uint32_t)args.ucDllSpeed;
	mpll_param->bw_ctrl = (uint32_t)args.ucBWCntl;

	return 0;
}
292
/** atomctrl_get_memory_pll_dividers_vi().
 *
 * @param hwmgr       input parameter: pointer to HwMgr
 * @param clock_value input parameter: memory clock
 * @param mpll_param  output parameter: memory PLL dividers
 */
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
		uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 args;
	int ret;

	args.ulClock.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&args);

	/* On VI only the post divider is reported back. */
	if (ret == 0)
		mpll_param->mpll_post_divider =
				(uint32_t)args.ulClock.ucPostDiv;

	return ret;
}
318
/* Compute memory PLL dividers via the VBIOS (AI/VEGAM parameter layout). */
int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_memory_clock_param_ai *mpll_param)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 args = {{0}, 0, 0};
	int ret;

	args.ulClock.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
			(uint32_t *)&args);

	/* VEGAM's mpll takes some time to finish computing. */
	udelay(10);

	if (ret == 0) {
		mpll_param->ulMclk_fcw_int =
			le16_to_cpu(args.usMclk_fcw_int);
		mpll_param->ulMclk_fcw_frac =
			le16_to_cpu(args.usMclk_fcw_frac);
		mpll_param->ulClock =
			le32_to_cpu(args.ulClock.ulClock);
		mpll_param->ulPostDiv = args.ulClock.ucPostDiv;
	}

	return ret;
}
348
/* Compute engine PLL dividers via the VBIOS (Kong parameter layout). */
int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_kong *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 args;
	int ret;

	args.ulClock = cpu_to_le32(clock_value);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&args);
	if (ret != 0)
		return ret;

	dividers->pll_post_divider = args.ucPostDiv;
	dividers->real_clock = le32_to_cpu(args.ulClock);

	return 0;
}
370
/* Compute engine (sclk) PLL dividers via the VBIOS (VI parameter layout). */
int atomctrl_get_engine_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 args;
	int ret;

	args.ulClock.ulClock = cpu_to_le32(clock_value);
	args.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&args);
	if (ret != 0)
		return ret;

	dividers->pll_post_divider = args.ulClock.ucPostDiv;
	dividers->real_clock = le32_to_cpu(args.ulClock.ulClock);

	dividers->ul_fb_div.ul_fb_div_frac =
		le16_to_cpu(args.ulFbDiv.usFbDivFrac);
	dividers->ul_fb_div.ul_fb_div =
		le16_to_cpu(args.ulFbDiv.usFbDiv);

	dividers->uc_pll_ref_div = args.ucPllRefDiv;
	dividers->uc_pll_post_div = args.ucPllPostDiv;
	dividers->uc_pll_cntl_flag = args.ucPllCntlFlag;

	return 0;
}
408
/* Compute engine (sclk) PLL dividers via the VBIOS (AI parameter layout). */
int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_ai *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 args;
	int ret;

	args.ulClock.ulClock = cpu_to_le32(clock_value);
	args.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&args);
	if (ret != 0)
		return ret;

	dividers->usSclk_fcw_frac = le16_to_cpu(args.usSclk_fcw_frac);
	dividers->usSclk_fcw_int = le16_to_cpu(args.usSclk_fcw_int);
	dividers->ucSclkPostDiv = args.ucSclkPostDiv;
	dividers->ucSclkVcoMode = args.ucSclkVcoMode;
	dividers->ucSclkPllRange = args.ucSclkPllRange;
	dividers->ucSscEnable = args.ucSscEnable;
	dividers->usSsc_fcw1_frac = le16_to_cpu(args.usSsc_fcw1_frac);
	dividers->usSsc_fcw1_int = le16_to_cpu(args.usSsc_fcw1_int);
	dividers->usPcc_fcw_int = le16_to_cpu(args.usPcc_fcw_int);
	dividers->usSsc_fcw_slew_frac = le16_to_cpu(args.usSsc_fcw_slew_frac);
	dividers->usPcc_fcw_slew_frac = le16_to_cpu(args.usPcc_fcw_slew_frac);

	return 0;
}
439
/* Compute DFS (display/default gpuclk) PLL dividers via the VBIOS. */
int atomctrl_get_dfs_pll_dividers_vi(
		struct pp_hwmgr *hwmgr,
		uint32_t clock_value,
		pp_atomctrl_clock_dividers_vi *dividers)
{
	struct amdgpu_device *adev = hwmgr->adev;
	COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 args;
	int ret;

	args.ulClock.ulClock = cpu_to_le32(clock_value);
	args.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
			(uint32_t *)&args);
	if (ret != 0)
		return ret;

	dividers->pll_post_divider = args.ulClock.ucPostDiv;
	dividers->real_clock = le32_to_cpu(args.ulClock.ulClock);

	dividers->ul_fb_div.ul_fb_div_frac =
		le16_to_cpu(args.ulFbDiv.usFbDivFrac);
	dividers->ul_fb_div.ul_fb_div =
		le16_to_cpu(args.ulFbDiv.usFbDiv);

	dividers->uc_pll_ref_div = args.ucPllRefDiv;
	dividers->uc_pll_post_div = args.ucPllPostDiv;
	dividers->uc_pll_cntl_flag = args.ucPllCntlFlag;

	return 0;
}
478
479 /**
480 * Get the reference clock in 10KHz
481 */
atomctrl_get_reference_clock(struct pp_hwmgr * hwmgr)482 uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
483 {
484 ATOM_FIRMWARE_INFO *fw_info;
485 u8 frev, crev;
486 u16 size;
487 uint32_t clock;
488
489 fw_info = (ATOM_FIRMWARE_INFO *)
490 smu_atom_get_data_table(hwmgr->adev,
491 GetIndexIntoMasterTable(DATA, FirmwareInfo),
492 &size, &frev, &crev);
493
494 if (fw_info == NULL)
495 clock = 2700;
496 else
497 clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock));
498
499 return clock;
500 }
501
502 /**
503 * Returns true if the given voltage type is controlled by GPIO pins.
504 * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC,
505 * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
506 * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
507 */
atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint8_t voltage_mode)508 bool atomctrl_is_voltage_controlled_by_gpio_v3(
509 struct pp_hwmgr *hwmgr,
510 uint8_t voltage_type,
511 uint8_t voltage_mode)
512 {
513 ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
514 (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
515 bool ret;
516
517 PP_ASSERT_WITH_CODE((NULL != voltage_info),
518 "Could not find Voltage Table in BIOS.", return false;);
519
520 ret = (NULL != atomctrl_lookup_voltage_type_v3
521 (voltage_info, voltage_type, voltage_mode)) ? true : false;
522
523 return ret;
524 }
525
/*
 * atomctrl_get_voltage_table_v3 - fill @voltage_table from the GPIO voltage
 * object matching @voltage_type/@voltage_mode in the VBIOS.
 * Returns 0 on success, -1 when the voltage info table or a matching object
 * is missing, or when the object holds more than
 * PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES entries.
 */
int atomctrl_get_voltage_table_v3(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint8_t voltage_mode,
		pp_atomctrl_voltage_table *voltage_table)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
	unsigned int i;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -1;);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, voltage_mode);

	if (voltage_object == NULL)
		return -1;

	/* Bound check before copying into the fixed-size entries array. */
	PP_ASSERT_WITH_CODE(
			(voltage_object->asGpioVoltageObj.ucGpioEntryNum <=
			PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES),
			"Too many voltage entries!",
			return -1;
			);

	/* Copy each LUT entry: voltage value plus the SMIO pattern that
	 * selects it. */
	for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
		voltage_table->entries[i].value =
			le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
		voltage_table->entries[i].smio_low =
			le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
	}

	voltage_table->mask_low =
		le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
	voltage_table->count =
		voltage_object->asGpioVoltageObj.ucGpioEntryNum;
	voltage_table->phase_delay =
		voltage_object->asGpioVoltageObj.ucPhaseDelay;

	return 0;
}
569
/*
 * atomctrl_lookup_gpio_pin - scan the VBIOS GPIO pin lookup table for the
 * pin with id @pinId and, when found, copy its bit shift and register index
 * into @gpio_pin_assignment.
 * Returns true when the pin was found, false otherwise.
 */
static bool atomctrl_lookup_gpio_pin(
		ATOM_GPIO_PIN_LUT * gpio_lookup_table,
		const uint32_t pinId,
		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
{
	unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize);
	unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]);
	uint8_t *start = (uint8_t *)gpio_lookup_table;

	while (offset < size) {
		const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment =
			(const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset);

		if (pinId == pin_assignment->ucGPIO_ID) {
			gpio_pin_assignment->uc_gpio_pin_bit_shift =
				pin_assignment->ucGpioPinBitShift;
			gpio_pin_assignment->us_gpio_pin_aindex =
				le16_to_cpu(pin_assignment->usGpioPin_AIndex);
			return true;
		}

		/* Entry stride: this assumes ucGPIO_ID is the last byte of
		 * ATOM_GPIO_PIN_ASSIGNMENT, so offsetof(...) + 1 equals the
		 * struct size — NOTE(review): confirm against atombios.h. */
		offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
	}

	return false;
}
596
/**
 * Private function to get the GPIO pin lookup table address.
 * WARNING: The table returned by this function is in
 * dynamically allocated memory.
 * The caller has to release it by calling kfree.
 */
get_gpio_lookup_table(void * device)603 static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
604 {
605 u8 frev, crev;
606 u16 size;
607 void *table_address;
608
609 table_address = (ATOM_GPIO_PIN_LUT *)
610 smu_atom_get_data_table(device,
611 GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
612 &size, &frev, &crev);
613
614 PP_ASSERT_WITH_CODE((NULL != table_address),
615 "Error retrieving BIOS Table Address!", return NULL;);
616
617 return (ATOM_GPIO_PIN_LUT *)table_address;
618 }
619
/**
 * Returns true if the given pin id is found in the lookup table.
 */
atomctrl_get_pp_assign_pin(struct pp_hwmgr * hwmgr,const uint32_t pinId,pp_atomctrl_gpio_pin_assignment * gpio_pin_assignment)623 bool atomctrl_get_pp_assign_pin(
624 struct pp_hwmgr *hwmgr,
625 const uint32_t pinId,
626 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
627 {
628 bool bRet = false;
629 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
630 get_gpio_lookup_table(hwmgr->adev);
631
632 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
633 "Could not find GPIO lookup Table in BIOS.", return false);
634
635 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
636 gpio_pin_assignment);
637
638 return bRet;
639 }
640
atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr * hwmgr,uint8_t voltage_type,uint32_t sclk,uint16_t virtual_voltage_Id,uint16_t * voltage,uint16_t dpm_level,bool debug)641 int atomctrl_calculate_voltage_evv_on_sclk(
642 struct pp_hwmgr *hwmgr,
643 uint8_t voltage_type,
644 uint32_t sclk,
645 uint16_t virtual_voltage_Id,
646 uint16_t *voltage,
647 uint16_t dpm_level,
648 bool debug)
649 {
650 ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
651 struct amdgpu_device *adev = hwmgr->adev;
652 EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
653 EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
654 EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
655 EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
656 EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
657 EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
658 EFUSE_INPUT_PARAMETER sInput_FuseValues;
659 READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
660
661 uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
662 fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
663 fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
664 fInt fLkg_FT, repeat;
665 fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
666 fInt fRLL_LoadLine, fPowerDPMx, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
667 fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
668 fInt fSclk_margin, fSclk, fEVV_V;
669 fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
670 uint32_t ul_FT_Lkg_V0NORM;
671 fInt fLn_MaxDivMin, fMin, fAverage, fRange;
672 fInt fRoots[2];
673 fInt fStepSize = GetScaledFraction(625, 100000);
674
675 int result;
676
677 getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
678 smu_atom_get_data_table(hwmgr->adev,
679 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
680 NULL, NULL, NULL);
681
682 if (!getASICProfilingInfo)
683 return -1;
684
685 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
686 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
687 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
688 return -1;
689
690 /*-----------------------------------------------------------
691 *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
692 *-----------------------------------------------------------
693 */
694 fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
695
696 switch (dpm_level) {
697 case 1:
698 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
699 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
700 break;
701 case 2:
702 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
703 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
704 break;
705 case 3:
706 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
707 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
708 break;
709 case 4:
710 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
711 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
712 break;
713 case 5:
714 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
715 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
716 break;
717 case 6:
718 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
719 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
720 break;
721 case 7:
722 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
723 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
724 break;
725 default:
726 pr_err("DPM Level not supported\n");
727 fPowerDPMx = Convert_ULONG_ToFraction(1);
728 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
729 }
730
731 /*-------------------------
732 * DECODING FUSE VALUES
733 * ------------------------
734 */
735 /*Decode RO_Fused*/
736 sRO_fuse = getASICProfilingInfo->sRoFuse;
737
738 sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
739 sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
740 sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
741
742 sOutput_FuseValues.sEfuse = sInput_FuseValues;
743
744 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
745 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
746 (uint32_t *)&sOutput_FuseValues);
747
748 if (result)
749 return result;
750
751 /* Finally, the actual fuse value */
752 ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
753 fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
754 fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
755 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
756
757 sCACm_fuse = getASICProfilingInfo->sCACm;
758
759 sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
760 sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
761 sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
762
763 sOutput_FuseValues.sEfuse = sInput_FuseValues;
764
765 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
766 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
767 (uint32_t *)&sOutput_FuseValues);
768
769 if (result)
770 return result;
771
772 ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
773 fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
774 fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
775
776 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
777
778 sCACb_fuse = getASICProfilingInfo->sCACb;
779
780 sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
781 sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
782 sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
783 sOutput_FuseValues.sEfuse = sInput_FuseValues;
784
785 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
786 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
787 (uint32_t *)&sOutput_FuseValues);
788
789 if (result)
790 return result;
791
792 ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
793 fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
794 fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
795
796 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
797
798 sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
799
800 sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
801 sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
802 sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
803
804 sOutput_FuseValues.sEfuse = sInput_FuseValues;
805
806 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
807 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
808 (uint32_t *)&sOutput_FuseValues);
809
810 if (result)
811 return result;
812
813 ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
814 fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
815 fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
816
817 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
818 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
819
820 sKv_m_fuse = getASICProfilingInfo->sKv_m;
821
822 sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
823 sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
824 sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
825
826 sOutput_FuseValues.sEfuse = sInput_FuseValues;
827
828 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
829 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
830 (uint32_t *)&sOutput_FuseValues);
831 if (result)
832 return result;
833
834 ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
835 fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
836 fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
837 fRange = fMultiply(fRange, ConvertToFraction(-1));
838
839 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
840 fAverage, fRange, sKv_m_fuse.ucEfuseLength);
841
842 sKv_b_fuse = getASICProfilingInfo->sKv_b;
843
844 sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
845 sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
846 sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
847 sOutput_FuseValues.sEfuse = sInput_FuseValues;
848
849 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
850 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
851 (uint32_t *)&sOutput_FuseValues);
852
853 if (result)
854 return result;
855
856 ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
857 fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
858 fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
859
860 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
861 fAverage, fRange, sKv_b_fuse.ucEfuseLength);
862
863 /* Decoding the Leakage - No special struct container */
864 /*
865 * usLkgEuseIndex=56
866 * ucLkgEfuseBitLSB=6
867 * ucLkgEfuseLength=10
868 * ulLkgEncodeLn_MaxDivMin=69077
869 * ulLkgEncodeMax=1000000
870 * ulLkgEncodeMin=1000
871 * ulEfuseLogisticAlpha=13
872 */
873
874 sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
875 sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
876 sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
877
878 sOutput_FuseValues.sEfuse = sInput_FuseValues;
879
880 result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
881 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
882 (uint32_t *)&sOutput_FuseValues);
883
884 if (result)
885 return result;
886
887 ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
888 fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
889 fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
890
891 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
892 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
893 fLkg_FT = fFT_Lkg_V0NORM;
894
895 /*-------------------------------------------
896 * PART 2 - Grabbing all required values
897 *-------------------------------------------
898 */
899 fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
900 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
901 fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
902 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
903 fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
904 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
905 fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
906 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
907 fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
908 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
909 fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
910 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
911 fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
912 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
913 fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
914 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
915
916 fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
917 fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
918 fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
919
920 fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
921
922 fMargin_FMAX_mean = GetScaledFraction(
923 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
924 fMargin_Plat_mean = GetScaledFraction(
925 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
926 fMargin_FMAX_sigma = GetScaledFraction(
927 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
928 fMargin_Plat_sigma = GetScaledFraction(
929 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
930
931 fMargin_DC_sigma = GetScaledFraction(
932 le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
933 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
934
935 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
936 fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
937 fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
938 fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
939 fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
940
941 fSclk = GetScaledFraction(sclk, 100);
942
943 fV_max = fDivide(GetScaledFraction(
944 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
945 fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
946 fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
947 fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
948 fV_FT = fDivide(GetScaledFraction(
949 le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
950 fV_min = fDivide(GetScaledFraction(
951 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
952
953 /*-----------------------
954 * PART 3
955 *-----------------------
956 */
957
958 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
959 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
960 fC_Term = fAdd(fMargin_RO_c,
961 fAdd(fMultiply(fSM_A0, fLkg_FT),
962 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
963 fAdd(fMultiply(fSM_A3, fSclk),
964 fSubtract(fSM_A7, fRO_fused)))));
965
966 fVDDC_base = fSubtract(fRO_fused,
967 fSubtract(fMargin_RO_c,
968 fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
969 fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
970
971 repeat = fSubtract(fVDDC_base,
972 fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
973
974 fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
975 fGetSquare(repeat)),
976 fAdd(fMultiply(fMargin_RO_b, repeat),
977 fMargin_RO_c));
978
979 fDC_SCLK = fSubtract(fRO_fused,
980 fSubtract(fRO_DC_margin,
981 fSubtract(fSM_A3,
982 fMultiply(fSM_A2, repeat))));
983 fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
984
985 fSigma_DC = fSubtract(fSclk, fDC_SCLK);
986
987 fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
988 fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
989 fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
990 fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
991
992 fSquared_Sigma_DC = fGetSquare(fSigma_DC);
993 fSquared_Sigma_CR = fGetSquare(fSigma_CR);
994 fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
995
996 fSclk_margin = fAdd(fMicro_FMAX,
997 fAdd(fMicro_CR,
998 fAdd(fMargin_fixed,
999 fSqrt(fAdd(fSquared_Sigma_FMAX,
1000 fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
1001 /*
1002 fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
1003 fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
1004 fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
1005 */
1006
1007 fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
1008 fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
1009 fC_Term = fAdd(fRO_DC_margin,
1010 fAdd(fMultiply(fSM_A0, fLkg_FT),
1011 fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
1012 fAdd(fSclk, fSclk_margin)),
1013 fAdd(fMultiply(fSM_A3,
1014 fAdd(fSclk, fSclk_margin)),
1015 fSubtract(fSM_A7, fRO_fused)))));
1016
1017 SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
1018
1019 if (GreaterThan(fRoots[0], fRoots[1]))
1020 fEVV_V = fRoots[1];
1021 else
1022 fEVV_V = fRoots[0];
1023
1024 if (GreaterThan(fV_min, fEVV_V))
1025 fEVV_V = fV_min;
1026 else if (GreaterThan(fEVV_V, fV_max))
1027 fEVV_V = fSubtract(fV_max, fStepSize);
1028
1029 fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
1030
1031 /*-----------------
1032 * PART 4
1033 *-----------------
1034 */
1035
1036 fV_x = fV_min;
1037
1038 while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
1039 fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
1040 fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
1041 fGetSquare(fV_x)), fDerateTDP);
1042
1043 fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
1044 fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
1045 fT_prod), fKv_b_fused), fV_x)), fV_x)));
1046 fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
1047 fKt_Beta_fused, fT_prod)));
1048 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1049 fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
1050 fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
1051 fKt_Beta_fused, fT_FT)));
1052
1053 fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
1054
1055 fTDP_Current = fDivide(fTDP_Power, fV_x);
1056
1057 fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
1058 ConvertToFraction(10)));
1059
1060 fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
1061
1062 if (GreaterThan(fV_max, fV_NL) &&
1063 (GreaterThan(fV_NL, fEVV_V) ||
1064 Equal(fV_NL, fEVV_V))) {
1065 fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
1066
1067 *voltage = (uint16_t)fV_NL.partial.real;
1068 break;
1069 } else
1070 fV_x = fAdd(fV_x, fStepSize);
1071 }
1072
1073 return result;
1074 }
1075
/** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
 * @param hwmgr              input: pointer to hwManager
 * @param voltage_type       input: type of EVV voltage, VDDC or VDDGFX
 * @param sclk               input: in 10KHz unit. DPM state SCLK frequency
 *                           which is defined in the PPTable SCLK/VDDC dependence
 *                           table associated with this virtual_voltage_Id
 * @param virtual_voltage_Id input: voltage id which matches a per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
 * @param voltage            output: real voltage level in unit of mv
 */
int atomctrl_get_voltage_evv_on_sclk(
		struct pp_hwmgr *hwmgr,
		uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id,
		uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 evv_input;
	GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *evv_output;
	int result;

	/* Query the VBIOS GetVoltageInfo command table for the EVV voltage
	 * matching this virtual voltage id at the given SCLK.
	 */
	evv_input.ucVoltageType = voltage_type;
	evv_input.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_input.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_input.ulSCLKFreq = cpu_to_le32(sclk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_input);

	/* The command table writes its output over the input buffer. */
	evv_output = (GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)&evv_input;
	if (result)
		*voltage = 0;
	else
		*voltage = le16_to_cpu(evv_output->usVoltageLevel);

	return result;
}
1114
1115 /**
1116 * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table.
1117 * @param hwmgr input: pointer to hwManager
1118 * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1119 * @param voltage output: real voltage level in unit of mv
1120 */
int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
			     uint16_t virtual_voltage_id,
			     uint16_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
	int result;
	int entry_id;

	/* Search for leakage voltage ID 0xff01 ~ 0xff08 and its sclk. */
	for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
		if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
			/* found */
			break;
		}
	}

	if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
		return -EINVAL;
	}

	get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
	get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	/* BUG FIX: the ATOM parameter space is little-endian; convert the
	 * voltage level like atomctrl_get_voltage_evv_on_sclk() does. The
	 * original stored the CPU-order value, which is wrong on big-endian
	 * hosts. */
	get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
	get_voltage_info_param_space.ulSCLKFreq =
		cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&get_voltage_info_param_space);

	if (0 != result)
		return result;

	/* Output overlays the input buffer; convert the returned level (mV). */
	*voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
				(&get_voltage_info_param_space))->usVoltageLevel);

	return result;
}
1161
1162 /**
1163 * Get the mpll reference clock in 10KHz
1164 */
atomctrl_get_mpll_reference_clock(struct pp_hwmgr * hwmgr)1165 uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
1166 {
1167 ATOM_COMMON_TABLE_HEADER *fw_info;
1168 uint32_t clock;
1169 u8 frev, crev;
1170 u16 size;
1171
1172 fw_info = (ATOM_COMMON_TABLE_HEADER *)
1173 smu_atom_get_data_table(hwmgr->adev,
1174 GetIndexIntoMasterTable(DATA, FirmwareInfo),
1175 &size, &frev, &crev);
1176
1177 if (fw_info == NULL)
1178 clock = 2700;
1179 else {
1180 if ((fw_info->ucTableFormatRevision == 2) &&
1181 (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) {
1182 ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 =
1183 (ATOM_FIRMWARE_INFO_V2_1 *)fw_info;
1184 clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock));
1185 } else {
1186 ATOM_FIRMWARE_INFO *fwInfo_0_0 =
1187 (ATOM_FIRMWARE_INFO *)fw_info;
1188 clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock));
1189 }
1190 }
1191
1192 return clock;
1193 }
1194
1195 /**
1196 * Get the asic internal spread spectrum table
1197 */
asic_internal_ss_get_ss_table(void * device)1198 static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
1199 {
1200 ATOM_ASIC_INTERNAL_SS_INFO *table = NULL;
1201 u8 frev, crev;
1202 u16 size;
1203
1204 table = (ATOM_ASIC_INTERNAL_SS_INFO *)
1205 smu_atom_get_data_table(device,
1206 GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
1207 &size, &frev, &crev);
1208
1209 return table;
1210 }
1211
1212 /**
1213 * Get the asic internal spread spectrum assignment
1214 */
/*
 * Walk the ASIC internal SS table and fill *ssEntry with the first
 * assignment whose clock source matches and whose target clock range
 * covers clockSpeed.
 *
 * Returns 0 when an entry was found, 1 when no entry matched, and -1
 * when the SS table itself is missing from the VBIOS.
 */
static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
		const uint8_t clockSource,
		const uint32_t clockSpeed,
		pp_atomctrl_internal_ss_info *ssEntry)
{
	ATOM_ASIC_INTERNAL_SS_INFO *table;
	ATOM_ASIC_SS_ASSIGNMENT *ssInfo;
	int entry_found = 0;

	/* Callers see zeroed output when nothing matches. */
	memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));

	table = asic_internal_ss_get_ss_table(hwmgr->adev);

	if (NULL == table)
		return -1;

	ssInfo = &table->asSpreadSpectrum[0];

	/* The number of assignments is not stored explicitly; iterate until
	 * the byte offset from the table start reaches the structure size
	 * recorded in the header. */
	while (((uint8_t *)ssInfo - (uint8_t *)table) <
		le16_to_cpu(table->sHeader.usStructureSize)) {
		if ((clockSource == ssInfo->ucClockIndication) &&
			((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) {
			entry_found = 1;
			break;
		}

		ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo +
				sizeof(ATOM_ASIC_SS_ASSIGNMENT));
	}

	if (entry_found) {
		ssEntry->speed_spectrum_percentage =
			le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
		ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);

		/* Newer table revisions store the rate in 10 Hz units;
		 * scale down to match the older kHz convention. */
		if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
			(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
			(GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) {
			ssEntry->speed_spectrum_rate /= 100;
		}

		/* Map the BIOS mode byte onto the driver enum; unknown modes
		 * deliberately fall back to down-spread. */
		switch (ssInfo->ucSpreadSpectrumMode) {
		case 0:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		case 1:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_center;
			break;
		default:
			ssEntry->speed_spectrum_mode =
				pp_atomctrl_spread_spectrum_mode_down;
			break;
		}
	}

	return entry_found ? 0 : 1;
}
1274
1275 /**
1276 * Get the memory clock spread spectrum info
1277 */
/* Memory-clock SS entries live under the ASIC_INTERNAL_MEMORY_SS source. */
int atomctrl_get_memory_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t memory_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	return asic_internal_ss_get_ss_asignment(hwmgr, ASIC_INTERNAL_MEMORY_SS,
						 memory_clock, ssInfo);
}
1286 /**
1287 * Get the engine clock spread spectrum info
1288 */
/* Engine-clock SS entries live under the ASIC_INTERNAL_ENGINE_SS source. */
int atomctrl_get_engine_clock_spread_spectrum(
		struct pp_hwmgr *hwmgr,
		const uint32_t engine_clock,
		pp_atomctrl_internal_ss_info *ssInfo)
{
	return asic_internal_ss_get_ss_asignment(hwmgr, ASIC_INTERNAL_ENGINE_SS,
						 engine_clock, ssInfo);
}
1297
int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
		uint16_t end_index, uint32_t mask, uint32_t *efuse)
{
	struct amdgpu_device *adev = hwmgr->adev;
	READ_EFUSE_VALUE_PARAMETER efuse_param;
	int result;

	/* Efuse words are addressed in bytes: 4 bytes per 32-bit word. */
	efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
	/* Bit position of start_index within its 32-bit word. */
	efuse_param.sEfuse.ucBitShift = (uint8_t)(start_index % 32);
	/* Field width, inclusive of both endpoints. */
	efuse_param.sEfuse.ucBitLength = (uint8_t)(end_index - start_index + 1);

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&efuse_param);

	if (result)
		*efuse = 0;
	else
		*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;

	return result;
}
1318
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
			      uint8_t level)
{
	struct amdgpu_device *adev = hwmgr->adev;
	DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 mc_param;

	/* Ask the VBIOS DynamicMemorySettings command table to adjust the MC
	 * AC-timing registers for this memory clock / DPM level pair. */
	mc_param.asDPMMCReg.ulClock.ulClockFreq =
		memory_clock & SET_CLOCK_FREQ_MASK;
	mc_param.asDPMMCReg.ulClock.ulComputeClockFlag =
		ADJUST_MC_SETTING_PARAM;
	mc_param.asDPMMCReg.ucMclkDPMState = level;

	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
			(uint32_t *)&mc_param);
}
1338
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
		uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
	struct amdgpu_device *adev = hwmgr->adev;
	GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 evv_input;
	int status;

	/* V1_3 flavour of the EVV query: same GetVoltageInfo command table,
	 * but a 32-bit voltage result. */
	evv_input.ucVoltageType = voltage_type;
	evv_input.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	evv_input.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
	evv_input.ulSCLKFreq = cpu_to_le32(sclk);

	status = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
			(uint32_t *)&evv_input);

	/* Output overlays the input buffer. */
	if (status)
		*voltage = 0;
	else
		*voltage = le32_to_cpu(
			((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
				(&evv_input))->ulVoltageLevel);

	return status;
}
1360
atomctrl_get_smc_sclk_range_table(struct pp_hwmgr * hwmgr,struct pp_atom_ctrl_sclk_range_table * table)1361 int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
1362 {
1363
1364 int i;
1365 u8 frev, crev;
1366 u16 size;
1367
1368 ATOM_SMU_INFO_V2_1 *psmu_info =
1369 (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
1370 GetIndexIntoMasterTable(DATA, SMU_Info),
1371 &size, &frev, &crev);
1372
1373
1374 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1375 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1376 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1377 table->entry[i].usFcw_pcc =
1378 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1379 table->entry[i].usFcw_trans_upper =
1380 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1381 table->entry[i].usRcw_trans_lower =
1382 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1383 }
1384
1385 return 0;
1386 }
1387
atomctrl_get_avfs_information(struct pp_hwmgr * hwmgr,struct pp_atom_ctrl__avfs_parameters * param)1388 int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1389 struct pp_atom_ctrl__avfs_parameters *param)
1390 {
1391 ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1392
1393 if (param == NULL)
1394 return -EINVAL;
1395
1396 profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
1397 smu_atom_get_data_table(hwmgr->adev,
1398 GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
1399 NULL, NULL, NULL);
1400 if (!profile)
1401 return -1;
1402
1403 param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
1404 param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
1405 param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
1406 param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
1407 param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
1408 param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
1409 param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
1410 param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
1411 param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
1412 param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
1413 param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
1414 param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
1415 param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1416 param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1417 param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1418 param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
1419 param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
1420 param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
1421 param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
1422 param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1423 param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1424 param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1425 param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1426 param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
1427 param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1428
1429 return 0;
1430 }
1431
/*
 * Read the SVI2 GPIO assignments and load-line value for a voltage rail
 * from the VBIOS voltage object table.
 * Returns 0 on success or -EINVAL when the table or SVID2 object is missing.
 */
int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
			uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
			uint16_t *load_line)
{
	ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
		(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);

	const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;

	PP_ASSERT_WITH_CODE((NULL != voltage_info),
			"Could not find Voltage Table in BIOS.", return -EINVAL);

	voltage_object = atomctrl_lookup_voltage_type_v3
		(voltage_info, voltage_type, VOLTAGE_OBJ_SVID2);

	/* BUG FIX: the lookup can fail when no SVID2 object exists for this
	 * voltage type; the original dereferenced a possible NULL pointer. */
	if (!voltage_object)
		return -EINVAL;

	*svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId;
	*svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId;
	/* BUG FIX: VBIOS u16 fields are little-endian; convert like every
	 * other usXxx read in this file. */
	*load_line = le16_to_cpu(voltage_object->asSVID2Obj.usLoadLine_PSI);

	return 0;
}
1453
/*
 * Query the VBIOS SetVoltage command table (in ATOM_GET_LEAKAGE_ID mode)
 * for the leakage voltage id fused into this part.
 * Returns the command-table status; *virtual_voltage_id is only valid on 0.
 */
int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
{
	struct amdgpu_device *adev = hwmgr->adev;
	SET_VOLTAGE_PS_ALLOCATION allocation;
	SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
		(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
	int result;

	voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, SetVoltage),
			(uint32_t *)voltage_parameters);

	/* CONSISTENCY FIX: the ATOM output buffer is little-endian; the
	 * original copied usVoltageLevel raw, unlike every other usXxx read
	 * in this file (no-op on little-endian hosts). */
	*virtual_voltage_id = le16_to_cpu(voltage_parameters->usVoltageLevel);

	return result;
}
1472
/*
 * Translate a virtual (leakage) voltage id into real VDDC/VDDCI levels by
 * matching the efuse leakage bin against the ELB arrays of the VBIOS
 * ASIC profiling table (V2_1). Outputs stay 0 when no match is found.
 */
int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *vddc, uint16_t *vddci,
		uint16_t virtual_voltage_id,
		uint16_t efuse_voltage_id)
{
	int i, j;
	int ix;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;

	/* Default to "not found"; callers must treat 0 as invalid. */
	*vddc = 0;
	*vddci = 0;

	ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
			smu_atom_get_data_table(hwmgr->adev,
					ix,
					NULL, NULL, NULL);
	if (!profile)
		return -EINVAL;

	/* Only table revision >= 2.1 with a full-size structure carries the
	 * ELB leakage-bin arrays. */
	if ((profile->asHeader.ucTableFormatRevision >= 2) &&
		(profile->asHeader.ucTableContentRevision >= 1) &&
		(profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
		/* NOTE(review): the usXxxOffset fields and the u16 array
		 * contents are read without le16_to_cpu, unlike the rest of
		 * this file -- harmless on little-endian hosts, but confirm
		 * if big-endian support matters. */
		leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
		vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
		vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
		if (profile->ucElbVDDC_Num > 0) {
			/* Find the column for this virtual id, then pick the
			 * level from the first leakage bin that covers the
			 * fused value; levels are stored bin-major. */
			for (i = 0; i < profile->ucElbVDDC_Num; i++) {
				if (vddc_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
							break;
						}
					}
					break;
				}
			}
		}

		/* Same lookup for the VDDCI rail. */
		vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
		vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
		if (profile->ucElbVDDCI_Num > 0) {
			for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
				if (vddci_id_buf[i] == virtual_voltage_id) {
					for (j = 0; j < profile->ucLeakageBinNum; j++) {
						if (efuse_voltage_id <= leakage_bin[j]) {
							*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
							break;
						}
					}
					break;
				}
			}
		}
	}

	return 0;
}
1534
void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
		uint32_t *min_vddc)
{
	void *profile = smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
			NULL, NULL, NULL);

	if (profile) {
		switch (hwmgr->chip_id) {
		case CHIP_TONGA:
		case CHIP_FIJI: {
			/* V3_3 layout; raw value divided by 4 (presumably
			 * 0.25 mV units -- confirm against atombios.h). */
			ATOM_ASIC_PROFILING_INFO_V3_3 *v3_3 = profile;

			*max_vddc = le32_to_cpu(v3_3->ulMaxVddc) / 4;
			*min_vddc = le32_to_cpu(v3_3->ulMinVddc) / 4;
			return;
		}
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12: {
			/* V3_6 layout; raw value divided by 100 (presumably
			 * 0.01 mV units -- confirm against atombios.h). */
			ATOM_ASIC_PROFILING_INFO_V3_6 *v3_6 = profile;

			*max_vddc = le32_to_cpu(v3_6->ulMaxVddc) / 100;
			*min_vddc = le32_to_cpu(v3_6->ulMinVddc) / 100;
			return;
		}
		default:
			break;
		}
	}

	/* Unknown chip or missing table: report an empty range. */
	*max_vddc = 0;
	*min_vddc = 0;
}
1564