/* $NetBSD: amdgpu_smu_helper.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $ */

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu_helper.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $");

#include <linux/pci.h>

#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"

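/*
 * Convert between a voltage in mV and the SVI2 VID code used by the SMU.
 * Assuming the usual VOLTAGE_SCALE of 4, VID 0 corresponds to 1.55 V
 * (6200 / VOLTAGE_SCALE mV) and each VID step is 6.25 mV.
 */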
uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

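/*
 * Make a kernel-owned copy of a little-endian uint32_t array from the
 * powerplay table, converting each entry to CPU byte order.
 */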
int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (NULL == table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

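/*
 * Write an 8- or 16-bit field into a 32-bit register image.  The byte
 * offset within the dword selects the shift; any other size leaves the
 * mask at zero, so the field is OR'd in without clearing old bits.
 */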
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}

/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value.
 */
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

/**
 * Returns once the part of the register indicated by the mask has
 * reached the given value. The indirect space is described by giving
 * the memory-mapped index of the indirect index register.
 */
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}

int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
					uint32_t index,
					uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
					uint32_t indirect_port,
					uint32_t index,
					uint32_t value,
					uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
						value, mask);
}

bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating);
}

bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating);
}

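/*
 * Collapse duplicate voltage values out of a voltage table in place,
 * preserving the order of first occurrence of each value.
 */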
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);

	if (NULL == table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);

	return 0;
}

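/*
 * Build an SVI2 MVDD voltage table from a clock/voltage dependency
 * table, then trim out duplicate voltage levels.
 */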
int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}

int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}

int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	int i = 0;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;

	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}

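/*
 * If the voltage table has more entries than the state table can hold,
 * drop the leading entries so that only the last max_vol_steps entries
 * (the highest voltages, for an ascending table) remain.
 */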
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
		struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}

int phm_reset_single_dpm_table(void *table,
		uint32_t count, int max)
{
	int i;

	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}

void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = true;
}

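/*
 * Pack the per-level enabled flags into a bitmask, with bit i set when
 * DPM level i is enabled.
 */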
int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = dpm_table->count; i > 0; i--) {
		mask = mask << 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
	}

	return mask;
}

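/*
 * Return the index of the first lookup-table entry whose voltage is at
 * least the requested value, or the last index when the request exceeds
 * every entry.
 */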
uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);

	count = (uint8_t) (lookup_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < lookup_table->count; i++) {
		/* find first voltage equal or bigger than requested */
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}
	/* voltage is bigger than max voltage in the table */
	return i - 1;
}

uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count;
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Table empty.", return 0;);

	count = (uint8_t) (voltage_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Voltage Table empty.", return 0;);

	for (i = 0; i < count; i++) {
		/* find first voltage bigger than requested */
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	/* voltage is bigger than max voltage in the table */
	return i - 1;
}

uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i-1].value;
}

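/*
 * Find the DPM level whose value matches the boot clock; reports the
 * index of the last matching level through *boot_level.
 */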
int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
		}
	}

	return result;
}

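/*
 * Look up the engine clock associated with a leakage (virtual) voltage
 * ID by scanning the sclk/voltage dependency table.
 */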
int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}

/**
 * Initialize Dynamic State Adjustment Rule Settings
 *
 * @param hwmgr the address of the powerplay hardware manager.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	uint32_t table_size;
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* initialize vddc_dep_on_dal_pwrl table */
	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 4;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
	table_clk_vlt->entries[1].v = 720;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
	table_clk_vlt->entries[2].v = 810;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
	table_clk_vlt->entries[3].v = 900;
	if (pptable_info != NULL)
		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

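/*
 * Return the index of the lowest set bit in the DPM enable mask.  The
 * caller must pass a nonzero mask; with a zero mask the loop never
 * terminates (and shifts past bit 31 are undefined).
 */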
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	while (0 == (mask & (1 << level)))
		level++;

	return level;
}

void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
				table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request, req_volt);
			return;
		}
	}
	pr_err("Cannot find an available voltage in the VDDC DPM table for the DAL requested level\n");
}

int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
	uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol/100);
	}
	return ret;
}

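/*
 * Common thermal interrupt handler: log over-temperature,
 * under-temperature, and critical (CTF) events with the PCI address of
 * the offending GPU.
 */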
int phm_irq_process(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
					PCI_BUS_NUM(adev->pdev->devfn),
					PCI_SLOT(adev->pdev->devfn),
					PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
					PCI_BUS_NUM(adev->pdev->devfn),
					PCI_SLOT(adev->pdev->devfn),
					PCI_FUNC(adev->pdev->devfn));
		else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
			pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
					PCI_BUS_NUM(adev->pdev->devfn),
					PCI_SLOT(adev->pdev->devfn),
					PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == 0)
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
					PCI_BUS_NUM(adev->pdev->devfn),
					PCI_SLOT(adev->pdev->devfn),
					PCI_FUNC(adev->pdev->devfn));
		else
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
					PCI_BUS_NUM(adev->pdev->devfn),
					PCI_SLOT(adev->pdev->devfn),
					PCI_FUNC(adev->pdev->devfn));
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
		pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));

	return 0;
}

static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
	.process = phm_irq_process,
};

int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu9_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_L2H,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_H2L,
			source);

	/* Register CTF(GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_ROM_SMUIO,
			SMUIO_9_0__SRCID__SMUIO_GPIO19,
			source);

	return 0;
}

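/*
 * Return a pointer to the named ATOM BIOS data table, along with its
 * size and format/content revisions, or NULL if the header cannot be
 * parsed.
 */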
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
		uint8_t *frev, uint8_t *crev)
{
	struct amdgpu_device *adev = dev;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

int smu_get_voltage_dependency_table_ppt_v1(
		const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
			"Voltage Dependency Table empty",
			return -EINVAL);

	dep_table->count = allowed_dep_table->count;
	for (i = 0; i < dep_table->count; i++) {
		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
	}

	return 0;
}

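/*
 * Copy DMIF and MCIF watermark clock ranges into the SMU watermarks
 * table, converting kHz inputs to little-endian MHz fields.  At most
 * four sets of each kind are supported.
 */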
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	uint32_t i;
	struct watermarks *table = wt_table;

	if (!table || !wm_with_clock_ranges)
		return -EINVAL;

	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
				wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
				wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}
	return 0;
}