/*	$NetBSD: amdgpu_hwmgr.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_hwmgr.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $");

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

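/*
 * Assign a fixed priority index to each SMC power profile and record the
 * default workload ordering, from boot default up to compute.
 */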
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}

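/*
 * Early init: set default timeout, table version, DPM levels and
 * capability flags, then pick the SMU manager functions and hwmgr
 * backend that match the chip family and chip id.
 */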
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr_init_workload_prority(hwmgr);
	hwmgr->gfxoff_state_changed_by_workload = false;

	adev = hwmgr->adev;

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA20:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega20_smu_funcs;
			vega20_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

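/* Software init: register IRQ handlers and initialize the SMU backend. */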
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);
	pr_info("hwmgr_sw_init smu backend is %s\n", hwmgr->smumgr_funcs->name);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}


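/* Software teardown: release SMU backend state, if a backend was set up. */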
int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

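/*
 * Hardware init: load the powerplay table, initialize the hwmgr backend,
 * build the power state table, set up the ASIC, and start dynamic power
 * management and the thermal controller.  Errors unwind the backend and
 * the powerplay table.
 */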
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
	hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
			? true : false;
	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported\n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
			(hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
			hwmgr->dyn_state.max_clock_voltage_on_dc =
					hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

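/*
 * Hardware teardown: stop the thermal controller, return to the boot
 * power state, disable dynamic power management and clock/power gating,
 * then free the backend and powerplay table state.
 */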
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

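/*
 * Suspend: disable SMC firmware CTF, switch to the boot power state and
 * power down the ASIC.
 */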
int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}

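/*
 * Resume: redo ASIC setup, re-enable dynamic power management and the
 * thermal controller, then readjust the power state.
 */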
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->not_vf || !hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

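/* Map a requested amd_pm_state_type onto the corresponding UI state label. */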
static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

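/*
 * Dispatch a powerplay task: a display configuration change, a
 * user-requested power state, or a plain power state readjustment.
 */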
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		if (!hwmgr->not_vf)
			return ret;
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (!hwmgr->not_vf)
			return ret;
		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
		break;
	default:
		break;
	}
	return ret;
}

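/* Set the default platform capability bits shared by all supported ASICs. */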
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_FanSpeedInTableIsRPM);
	return;
}

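/*
 * Apply user-configurable feature_mask bits: sclk deep sleep, power
 * containment/CAC, and overdrive.
 */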
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

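/* ASIC-specific capabilities for Polaris10/11/12 and VegaM. */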
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_AutomaticDCTransition);

	if (hwmgr->chip_id != CHIP_POLARIS10)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

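/* ASIC-specific capabilities for Fiji. */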
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

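/* ASIC-specific capabilities for Tonga. */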
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

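/* ASIC-specific capabilities for Topaz (Iceland). */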
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

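/* ASIC-specific capabilities for the CI (Sea Islands) family. */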
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}