1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include "pp_debug.h"
25 #include <linux/delay.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 #include <linux/pci.h>
30 #include <drm/amdgpu_drm.h>
31 #include "power_state.h"
32 #include "hwmgr.h"
33 #include "ppsmc.h"
34 #include "amd_acpi.h"
35 #include "pp_psm.h"
36
37 extern const struct pp_smumgr_func ci_smu_funcs;
38 extern const struct pp_smumgr_func smu8_smu_funcs;
39 extern const struct pp_smumgr_func iceland_smu_funcs;
40 extern const struct pp_smumgr_func tonga_smu_funcs;
41 extern const struct pp_smumgr_func fiji_smu_funcs;
42 extern const struct pp_smumgr_func polaris10_smu_funcs;
43 extern const struct pp_smumgr_func vegam_smu_funcs;
44 extern const struct pp_smumgr_func vega10_smu_funcs;
45 extern const struct pp_smumgr_func vega12_smu_funcs;
46 extern const struct pp_smumgr_func smu10_smu_funcs;
47
48 extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
49 extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
50 extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
51 extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
52 extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
53
54 static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
55 static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
56 static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
57 static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
58 static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
59 static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
60 static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
61
62
hwmgr_init_workload_prority(struct pp_hwmgr * hwmgr)63 static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
64 {
65 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
66 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
70
71 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
72 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
73 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
74 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
75 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
76 }
77
/*
 * hwmgr_early_init - install chip specific smumgr/hwmgr function tables.
 *
 * Applies common defaults (timeout, pptable version, dpm level, platform
 * caps, workload priorities), then dispatches on chip_family/chip_id to
 * select the SMU manager function table, trim the feature mask and hook
 * up the hardware manager backend.
 *
 * Returns 0 on success, -EINVAL for a NULL hwmgr or an unknown chip.
 */
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr->reload_fw = 1;
	hwmgr_init_workload_prority(hwmgr);

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		/* Sea Islands: SMU7-class, legacy V0 powerplay tables. */
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					PP_ENABLE_GFX_CG_THRU_SMU |
					PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		/* Carrizo/Stoney APU: no overdrive, no gfxoff. */
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		/* Volcanic Islands: per-chip smumgr, shared SMU7 hwmgr. */
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			/* Topaz still uses the legacy V0 table format. */
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			/* VegaM shares the Polaris special caps. */
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
182
hwmgr_sw_init(struct pp_hwmgr * hwmgr)183 int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
184 {
185 if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
186 return -EINVAL;
187
188 phm_register_irq_handlers(hwmgr);
189
190 return hwmgr->smumgr_funcs->smu_init(hwmgr);
191 }
192
193
hwmgr_sw_fini(struct pp_hwmgr * hwmgr)194 int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
195 {
196 if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
197 hwmgr->smumgr_funcs->smu_fini(hwmgr);
198
199 return 0;
200 }
201
hwmgr_hw_init(struct pp_hwmgr * hwmgr)202 int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
203 {
204 int ret = 0;
205
206 if (!hwmgr || !hwmgr->smumgr_funcs)
207 return -EINVAL;
208
209 if (hwmgr->smumgr_funcs->start_smu) {
210 ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
211 if (ret) {
212 pr_err("smc start failed\n");
213 return -EINVAL;
214 }
215 }
216
217 if (!hwmgr->pm_en)
218 return 0;
219
220 if (!hwmgr->pptable_func ||
221 !hwmgr->pptable_func->pptable_init ||
222 !hwmgr->hwmgr_func->backend_init) {
223 hwmgr->pm_en = false;
224 pr_info("dpm not supported \n");
225 return 0;
226 }
227
228 ret = hwmgr->pptable_func->pptable_init(hwmgr);
229 if (ret)
230 goto err;
231
232 ((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
233 hwmgr->thermal_controller.fanInfo.bNoFan;
234
235 ret = hwmgr->hwmgr_func->backend_init(hwmgr);
236 if (ret)
237 goto err1;
238 /* make sure dc limits are valid */
239 if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
240 (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
241 hwmgr->dyn_state.max_clock_voltage_on_dc =
242 hwmgr->dyn_state.max_clock_voltage_on_ac;
243
244 ret = psm_init_power_state_table(hwmgr);
245 if (ret)
246 goto err2;
247
248 ret = phm_setup_asic(hwmgr);
249 if (ret)
250 goto err2;
251
252 ret = phm_enable_dynamic_state_management(hwmgr);
253 if (ret)
254 goto err2;
255 ret = phm_start_thermal_controller(hwmgr);
256 ret |= psm_set_performance_states(hwmgr);
257 if (ret)
258 goto err2;
259
260 ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
261
262 return 0;
263 err2:
264 if (hwmgr->hwmgr_func->backend_fini)
265 hwmgr->hwmgr_func->backend_fini(hwmgr);
266 err1:
267 if (hwmgr->pptable_func->pptable_fini)
268 hwmgr->pptable_func->pptable_fini(hwmgr);
269 err:
270 return ret;
271 }
272
/*
 * hwmgr_hw_fini - tear down dynamic power management.
 *
 * Mirrors hwmgr_hw_init in reverse: stop the thermal controller, drop
 * back to the boot power state, disable dynamic state management and
 * clock/power gating, then release backend and pptable resources.
 * A NULL hwmgr or disabled power management is treated as success.
 */
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, false, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}
290
hwmgr_suspend(struct pp_hwmgr * hwmgr)291 int hwmgr_suspend(struct pp_hwmgr *hwmgr)
292 {
293 int ret = 0;
294
295 if (!hwmgr || !hwmgr->pm_en)
296 return 0;
297
298 phm_disable_smc_firmware_ctf(hwmgr);
299 ret = psm_set_boot_states(hwmgr);
300 if (ret)
301 return ret;
302 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
303 if (ret)
304 return ret;
305 ret = phm_power_down_asic(hwmgr);
306
307 return ret;
308 }
309
hwmgr_resume(struct pp_hwmgr * hwmgr)310 int hwmgr_resume(struct pp_hwmgr *hwmgr)
311 {
312 int ret = 0;
313
314 if (!hwmgr)
315 return -EINVAL;
316
317 if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
318 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
319 pr_err("smc start failed\n");
320 return -EINVAL;
321 }
322 }
323
324 if (!hwmgr->pm_en)
325 return 0;
326
327 ret = phm_setup_asic(hwmgr);
328 if (ret)
329 return ret;
330
331 ret = phm_enable_dynamic_state_management(hwmgr);
332 if (ret)
333 return ret;
334 ret = phm_start_thermal_controller(hwmgr);
335 ret |= psm_set_performance_states(hwmgr);
336 if (ret)
337 return ret;
338
339 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
340
341 return ret;
342 }
343
power_state_convert(enum amd_pm_state_type state)344 static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
345 {
346 switch (state) {
347 case POWER_STATE_TYPE_BATTERY:
348 return PP_StateUILabel_Battery;
349 case POWER_STATE_TYPE_BALANCED:
350 return PP_StateUILabel_Balanced;
351 case POWER_STATE_TYPE_PERFORMANCE:
352 return PP_StateUILabel_Performance;
353 default:
354 return PP_StateUILabel_None;
355 }
356 }
357
/*
 * hwmgr_handle_task - dispatch a power management task.
 *
 * @hwmgr:      hardware manager instance (must be non-NULL)
 * @task_id:    which task to run
 * @user_state: requested state; only used (and required) for
 *              AMD_PP_TASK_ENABLE_USER_STATE
 *
 * Unknown task ids are silently ignored (returns 0). Note that some
 * intermediate failures return directly while others break out and
 * return the accumulated ret.
 */
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		/* Re-evaluate power states after a display topology change. */
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		/* Translate the requested pm state into a UI label and
		 * switch to the matching user performance state. */
		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	default:
		break;
	}
	return ret;
}
405
hwmgr_init_default_caps(struct pp_hwmgr * hwmgr)406 void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
407 {
408 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
409
410 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
411 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
412
413 #if defined(CONFIG_ACPI)
414 if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
415 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
416 #endif
417
418 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
419 PHM_PlatformCaps_DynamicPatchPowerState);
420
421 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
422 PHM_PlatformCaps_EnableSMU7ThermalManagement);
423
424 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
425 PHM_PlatformCaps_DynamicPowerManagement);
426
427 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
428 PHM_PlatformCaps_SMC);
429
430 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
431 PHM_PlatformCaps_DynamicUVDState);
432
433 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
434 PHM_PlatformCaps_FanSpeedInTableIsRPM);
435 return;
436 }
437
hwmgr_set_user_specify_caps(struct pp_hwmgr * hwmgr)438 int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
439 {
440 if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
441 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
442 PHM_PlatformCaps_SclkDeepSleep);
443 else
444 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
445 PHM_PlatformCaps_SclkDeepSleep);
446
447 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
448 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
449 PHM_PlatformCaps_PowerContainment);
450 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
451 PHM_PlatformCaps_CAC);
452 } else {
453 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
454 PHM_PlatformCaps_PowerContainment);
455 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
456 PHM_PlatformCaps_CAC);
457 }
458
459 if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
460 hwmgr->od_enabled = true;
461
462 return 0;
463 }
464
polaris_set_asic_special_caps(struct pp_hwmgr * hwmgr)465 int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
466 {
467 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
468 PHM_PlatformCaps_EVV);
469 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
470 PHM_PlatformCaps_SQRamping);
471 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
472 PHM_PlatformCaps_RegulatorHot);
473
474 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
475 PHM_PlatformCaps_AutomaticDCTransition);
476
477 if (hwmgr->chip_id != CHIP_POLARIS10)
478 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
479 PHM_PlatformCaps_SPLLShutdownSupport);
480
481 if (hwmgr->chip_id != CHIP_POLARIS11) {
482 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
483 PHM_PlatformCaps_DBRamping);
484 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
485 PHM_PlatformCaps_TDRamping);
486 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
487 PHM_PlatformCaps_TCPRamping);
488 }
489 return 0;
490 }
491
fiji_set_asic_special_caps(struct pp_hwmgr * hwmgr)492 int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
493 {
494 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
495 PHM_PlatformCaps_EVV);
496 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
497 PHM_PlatformCaps_SQRamping);
498 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
499 PHM_PlatformCaps_DBRamping);
500 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
501 PHM_PlatformCaps_TDRamping);
502 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
503 PHM_PlatformCaps_TCPRamping);
504 return 0;
505 }
506
tonga_set_asic_special_caps(struct pp_hwmgr * hwmgr)507 int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
508 {
509 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
510 PHM_PlatformCaps_EVV);
511 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
512 PHM_PlatformCaps_SQRamping);
513 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
514 PHM_PlatformCaps_DBRamping);
515 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
516 PHM_PlatformCaps_TDRamping);
517 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
518 PHM_PlatformCaps_TCPRamping);
519
520 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
521 PHM_PlatformCaps_UVDPowerGating);
522 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
523 PHM_PlatformCaps_VCEPowerGating);
524 return 0;
525 }
526
topaz_set_asic_special_caps(struct pp_hwmgr * hwmgr)527 int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
528 {
529 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
530 PHM_PlatformCaps_EVV);
531 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
532 PHM_PlatformCaps_SQRamping);
533 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
534 PHM_PlatformCaps_DBRamping);
535 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
536 PHM_PlatformCaps_TDRamping);
537 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
538 PHM_PlatformCaps_TCPRamping);
539 return 0;
540 }
541
ci_set_asic_special_caps(struct pp_hwmgr * hwmgr)542 int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
543 {
544 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
545 PHM_PlatformCaps_SQRamping);
546 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
547 PHM_PlatformCaps_DBRamping);
548 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
549 PHM_PlatformCaps_TDRamping);
550 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
551 PHM_PlatformCaps_TCPRamping);
552 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
553 PHM_PlatformCaps_MemorySpreadSpectrumSupport);
554 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
555 PHM_PlatformCaps_EngineSpreadSpectrumSupport);
556 return 0;
557 }
558