1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
28 #include "atombios.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
31 #include "smumgr.h"
32 #include "hwmgr.h"
33 #include "hardwaremanager.h"
34 #include "rv_ppsmc.h"
35 #include "smu10_hwmgr.h"
36 #include "power_state.h"
37 #include "soc15_common.h"
38 
39 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID     5
40 #define SMU10_MINIMUM_ENGINE_CLOCK         800   /* 8Mhz, the low boundary of engine clock allowed on this chip */
41 #define SCLK_MIN_DIV_INTV_SHIFT         12
42 #define SMU10_DISPCLK_BYPASS_THRESHOLD     10000 /* 100Mhz */
43 #define SMC_RAM_END                     0x40000
44 
45 #define mmPWR_MISC_CNTL_STATUS					0x0183
46 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
47 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
48 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
49 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
50 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
51 
/* Magic tag stamped into pp_hw_power_state to validate SMU10 casts. */
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
53 
54 
55 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
56 		struct pp_display_clock_request *clock_req)
57 {
58 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
59 	enum amd_pp_clock_type clk_type = clock_req->clock_type;
60 	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
61 	PPSMC_Msg        msg;
62 
63 	switch (clk_type) {
64 	case amd_pp_dcf_clock:
65 		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
66 			return 0;
67 		msg =  PPSMC_MSG_SetHardMinDcefclkByFreq;
68 		smu10_data->dcf_actual_hard_min_freq = clk_freq;
69 		break;
70 	case amd_pp_soc_clock:
71 		 msg = PPSMC_MSG_SetHardMinSocclkByFreq;
72 		break;
73 	case amd_pp_f_clock:
74 		if (clk_freq == smu10_data->f_actual_hard_min_freq)
75 			return 0;
76 		smu10_data->f_actual_hard_min_freq = clk_freq;
77 		msg = PPSMC_MSG_SetHardMinFclkByFreq;
78 		break;
79 	default:
80 		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
81 		return -EINVAL;
82 	}
83 	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
84 
85 	return 0;
86 }
87 
88 static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
89 {
90 	if (SMU10_Magic != hw_ps->magic)
91 		return NULL;
92 
93 	return (struct smu10_power_state *)hw_ps;
94 }
95 
96 static const struct smu10_power_state *cast_const_smu10_ps(
97 				const struct pp_hw_power_state *hw_ps)
98 {
99 	if (SMU10_Magic != hw_ps->magic)
100 		return NULL;
101 
102 	return (struct smu10_power_state *)hw_ps;
103 }
104 
105 static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
106 {
107 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
108 
109 	smu10_data->dce_slow_sclk_threshold = 30000;
110 	smu10_data->thermal_auto_throttling_treshold = 0;
111 	smu10_data->is_nb_dpm_enabled = 1;
112 	smu10_data->dpm_flags = 1;
113 	smu10_data->need_min_deep_sleep_dcefclk = true;
114 	smu10_data->num_active_display = 0;
115 	smu10_data->deep_sleep_dcefclk = 0;
116 
117 	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
118 		smu10_data->gfx_off_controled_by_driver = true;
119 	else
120 		smu10_data->gfx_off_controled_by_driver = false;
121 
122 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
123 					PHM_PlatformCaps_SclkDeepSleep);
124 
125 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
126 				PHM_PlatformCaps_SclkThrottleLowNotification);
127 
128 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
129 				PHM_PlatformCaps_PowerPlaySupport);
130 	return 0;
131 }
132 
/*
 * Stub: SMU10 takes its clock/voltage limits from the SMC clock table,
 * so there is no AC max-power-limits table to construct here.
 */
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}
138 
139 static int smu10_init_dynamic_state_adjustment_rule_settings(
140 							struct pp_hwmgr *hwmgr)
141 {
142 	uint32_t table_size =
143 		sizeof(struct phm_clock_voltage_dependency_table) +
144 		(7 * sizeof(struct phm_clock_voltage_dependency_record));
145 
146 	struct phm_clock_voltage_dependency_table *table_clk_vlt =
147 					kzalloc(table_size, GFP_KERNEL);
148 
149 	if (NULL == table_clk_vlt) {
150 		pr_err("Can not allocate memory!\n");
151 		return -ENOMEM;
152 	}
153 
154 	table_clk_vlt->count = 8;
155 	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
156 	table_clk_vlt->entries[0].v = 0;
157 	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
158 	table_clk_vlt->entries[1].v = 1;
159 	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
160 	table_clk_vlt->entries[2].v = 2;
161 	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
162 	table_clk_vlt->entries[3].v = 3;
163 	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
164 	table_clk_vlt->entries[4].v = 4;
165 	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
166 	table_clk_vlt->entries[5].v = 5;
167 	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
168 	table_clk_vlt->entries[6].v = 6;
169 	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
170 	table_clk_vlt->entries[7].v = 7;
171 	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
172 
173 	return 0;
174 }
175 
176 static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
177 {
178 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;
179 
180 	smu10_data->sys_info.htc_hyst_lmt = 5;
181 	smu10_data->sys_info.htc_tmp_lmt = 203;
182 
183 	if (smu10_data->thermal_auto_throttling_treshold == 0)
184 		 smu10_data->thermal_auto_throttling_treshold = 203;
185 
186 	smu10_construct_max_power_limits_table (hwmgr,
187 				    &hwmgr->dyn_state.max_clock_voltage_on_ac);
188 
189 	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);
190 
191 	return 0;
192 }
193 
/* Stub: SMU10 needs no explicit boot-state construction. */
static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
198 
199 static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
200 {
201 	struct PP_Clocks clocks = {0};
202 	struct pp_display_clock_request clock_req;
203 
204 	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
205 	clock_req.clock_type = amd_pp_dcf_clock;
206 	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
207 
208 	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
209 				"Attempt to set DCF Clock Failed!", return -EINVAL);
210 
211 	return 0;
212 }
213 
214 static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
215 {
216 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
217 
218 	if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
219 		smu10_data->deep_sleep_dcefclk = clock/100;
220 		smum_send_msg_to_smc_with_parameter(hwmgr,
221 					PPSMC_MSG_SetMinDeepSleepDcefclk,
222 					smu10_data->deep_sleep_dcefclk);
223 	}
224 	return 0;
225 }
226 
227 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
228 {
229 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
230 
231 	if (smu10_data->num_active_display != count) {
232 		smu10_data->num_active_display = count;
233 		smum_send_msg_to_smc_with_parameter(hwmgr,
234 				PPSMC_MSG_SetDisplayCount,
235 				smu10_data->num_active_display);
236 	}
237 
238 	return 0;
239 }
240 
/* Applying a power state on SMU10 reduces to updating the DCEF clock limit. */
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}
245 
246 static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
247 {
248 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
249 	struct amdgpu_device *adev = hwmgr->adev;
250 
251 	smu10_data->vcn_power_gated = true;
252 	smu10_data->isp_tileA_power_gated = true;
253 	smu10_data->isp_tileB_power_gated = true;
254 
255 	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
256 		return smum_send_msg_to_smc_with_parameter(hwmgr,
257 							   PPSMC_MSG_SetGfxCGPG,
258 							   true);
259 	else
260 		return 0;
261 }
262 
263 
/* ASIC setup on SMU10 only needs the power-gate state initialized. */
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}
268 
269 static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
270 {
271 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
272 
273 	smu10_data->separation_time = 0;
274 	smu10_data->cc6_disable = false;
275 	smu10_data->pstate_disable = false;
276 	smu10_data->cc6_setting_changed = false;
277 
278 	return 0;
279 }
280 
/* Powering off the ASIC only requires discarding cached CC6 data. */
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}
285 
286 static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
287 {
288 	uint32_t reg;
289 	struct amdgpu_device *adev = hwmgr->adev;
290 
291 	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
292 	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
293 	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
294 		return true;
295 
296 	return false;
297 }
298 
299 static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
300 {
301 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
302 
303 	if (smu10_data->gfx_off_controled_by_driver) {
304 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
305 
306 		/* confirm gfx is back to "on" state */
307 		while (!smu10_is_gfx_on(hwmgr))
308 			msleep(1);
309 	}
310 
311 	return 0;
312 }
313 
/* Stub: DPM teardown is handled by the SMC firmware on SMU10. */
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
318 
319 static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
320 {
321 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
322 
323 	if (smu10_data->gfx_off_controled_by_driver)
324 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
325 
326 	return 0;
327 }
328 
/* Stub: DPM bring-up is handled by the SMC firmware on SMU10. */
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
333 
/* Dispatch to the matching GFXOFF enable/disable helper. */
static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	return enable ? smu10_enable_gfx_off(hwmgr)
		      : smu10_disable_gfx_off(hwmgr);
}
341 
/* Stub: SMU10 applies no software adjustment rules to requested states. */
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state  *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}
348 
/*
 * Temporary hardcoded clock/voltage breakdown tables, used as fallback
 * when the SMC clock table is empty (see smu10_populate_clock_table()).
 * Each entry is a { Freq, Vol } pair; Freq is scaled by 100 when copied
 * into the dependency tables.
 */
static const DpmClock_t VddDcfClk[]= {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[]= {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[]= {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[]= {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[]= {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[]= {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};
385 
386 static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
387 			struct smu10_voltage_dependency_table **pptable,
388 			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
389 {
390 	uint32_t table_size, i;
391 	struct smu10_voltage_dependency_table *ptable;
392 
393 	table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
394 	ptable = kzalloc(table_size, GFP_KERNEL);
395 
396 	if (NULL == ptable)
397 		return -ENOMEM;
398 
399 	ptable->count = num_entry;
400 
401 	for (i = 0; i < ptable->count; i++) {
402 		ptable->entries[i].clk         = pclk_dependency_table->Freq * 100;
403 		ptable->entries[i].vol         = pclk_dependency_table->Vol;
404 		pclk_dependency_table++;
405 	}
406 
407 	*pptable = ptable;
408 
409 	return 0;
410 }
411 
412 
413 static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
414 {
415 	uint32_t result;
416 
417 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
418 	DpmClocks_t  *table = &(smu10_data->clock_table);
419 	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
420 
421 	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);
422 
423 	PP_ASSERT_WITH_CODE((0 == result),
424 			"Attempt to copy clock table from smc failed",
425 			return result);
426 
427 	if (0 == result && table->DcefClocks[0].Freq != 0) {
428 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
429 						NUM_DCEFCLK_DPM_LEVELS,
430 						&smu10_data->clock_table.DcefClocks[0]);
431 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
432 						NUM_SOCCLK_DPM_LEVELS,
433 						&smu10_data->clock_table.SocClocks[0]);
434 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
435 						NUM_FCLK_DPM_LEVELS,
436 						&smu10_data->clock_table.FClocks[0]);
437 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
438 						NUM_MEMCLK_DPM_LEVELS,
439 						&smu10_data->clock_table.MemClocks[0]);
440 	} else {
441 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
442 						ARRAY_SIZE(VddDcfClk),
443 						&VddDcfClk[0]);
444 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
445 						ARRAY_SIZE(VddSocClk),
446 						&VddSocClk[0]);
447 		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
448 						ARRAY_SIZE(VddFClk),
449 						&VddFClk[0]);
450 	}
451 	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
452 					ARRAY_SIZE(VddDispClk),
453 					&VddDispClk[0]);
454 	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
455 					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
456 	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
457 					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
458 
459 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
460 	result = smum_get_argument(hwmgr);
461 	smu10_data->gfx_min_freq_limit = result / 10 * 1000;
462 
463 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
464 	result = smum_get_argument(hwmgr);
465 	smu10_data->gfx_max_freq_limit = result / 10 * 1000;
466 
467 	return 0;
468 }
469 
470 static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
471 {
472 	int result = 0;
473 	struct smu10_hwmgr *data;
474 
475 	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
476 	if (data == NULL)
477 		return -ENOMEM;
478 
479 	hwmgr->backend = data;
480 
481 	result = smu10_initialize_dpm_defaults(hwmgr);
482 	if (result != 0) {
483 		pr_err("smu10_initialize_dpm_defaults failed\n");
484 		return result;
485 	}
486 
487 	smu10_populate_clock_table(hwmgr);
488 
489 	result = smu10_get_system_info_data(hwmgr);
490 	if (result != 0) {
491 		pr_err("smu10_get_system_info_data failed\n");
492 		return result;
493 	}
494 
495 	smu10_construct_boot_state(hwmgr);
496 
497 	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
498 						SMU10_MAX_HARDWARE_POWERLEVELS;
499 
500 	hwmgr->platform_descriptor.hardwarePerformanceLevels =
501 						SMU10_MAX_HARDWARE_POWERLEVELS;
502 
503 	hwmgr->platform_descriptor.vbiosInterruptId = 0;
504 
505 	hwmgr->platform_descriptor.clockStep.engineClock = 500;
506 
507 	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
508 
509 	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
510 
511 	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
512 	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
513 
514 	return result;
515 }
516 
/*
 * Tear down the SMU10 backend: free every voltage dependency table, the
 * DAL power-level table, and the backend struct itself. Each pointer is
 * NULLed after freeing to guard against double-free on repeated calls.
 */
static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	/* Allocated in smu10_init_dynamic_state_adjustment_rule_settings(). */
	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
543 
/*
 * Force a DPM performance level by pinning GFX/FCLK/SOCCLK/VCN hard
 * minimums and soft maximums via SMC messages. Requires SMC firmware
 * >= 0x1E3700; older firmware is a silent no-op. MANUAL and
 * PROFILE_EXIT intentionally change nothing.
 */
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, can not set dpm level\n");
		return 0;
	}

	switch (level) {
	/* Peak: pin both hard min and soft max to the highest values. */
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	/* Min SCLK: clamp GFX clock (min and max) to the minimum limit. */
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		break;
	/* Min MCLK: clamp FCLK (min and max) to the minimum pstate. */
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		break;
	/* Standard: pin everything to the fixed UMD pstate values. */
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	/* Auto: open the full range (hard min low, soft max peak); FCLK
	 * hard min is raised to peak when more than 3 displays are lit. */
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						SMU10_UMD_PSTATE_PEAK_FCLK :
						SMU10_UMD_PSTATE_MIN_FCLK);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_MIN_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	/* Low: clamp GFX and FCLK (min and max) to their lowest values. */
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}
677 
678 static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
679 {
680 	struct smu10_hwmgr *data;
681 
682 	if (hwmgr == NULL)
683 		return -EINVAL;
684 
685 	data = (struct smu10_hwmgr *)(hwmgr->backend);
686 
687 	if (low)
688 		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
689 	else
690 		return data->clock_vol_info.vdd_dep_on_fclk->entries[
691 			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
692 }
693 
694 static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
695 {
696 	struct smu10_hwmgr *data;
697 
698 	if (hwmgr == NULL)
699 		return -EINVAL;
700 
701 	data = (struct smu10_hwmgr *)(hwmgr->backend);
702 
703 	if (low)
704 		return data->gfx_min_freq_limit;
705 	else
706 		return data->gfx_max_freq_limit;
707 }
708 
/* Stub: SMU10 boot states need no patching. */
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}
714 
715 static int smu10_dpm_get_pp_table_entry_callback(
716 						     struct pp_hwmgr *hwmgr,
717 					   struct pp_hw_power_state *hw_ps,
718 							  unsigned int index,
719 						     const void *clock_info)
720 {
721 	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);
722 
723 	smu10_ps->levels[index].engine_clock = 0;
724 
725 	smu10_ps->levels[index].vddc_index = 0;
726 	smu10_ps->level = index + 1;
727 
728 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
729 		smu10_ps->levels[index].ds_divider_index = 5;
730 		smu10_ps->levels[index].ss_divider_index = 5;
731 	}
732 
733 	return 0;
734 }
735 
/* Number of powerplay table entries; zero when the query fails. */
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	unsigned long count = 0;

	if (pp_tables_get_num_of_entries(hwmgr, &count))
		return 0;

	return count;
}
745 
746 static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
747 		    unsigned long entry, struct pp_power_state *ps)
748 {
749 	int result;
750 	struct smu10_power_state *smu10_ps;
751 
752 	ps->hardware.magic = SMU10_Magic;
753 
754 	smu10_ps = cast_smu10_ps(&(ps->hardware));
755 
756 	result = pp_tables_get_entry(hwmgr, entry, ps,
757 			smu10_dpm_get_pp_table_entry_callback);
758 
759 	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
760 	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
761 
762 	return result;
763 }
764 
/* Size the core allocates per power state for this backend. */
static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}
769 
/* Stub: no CPU power-state coupling on SMU10. */
static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
774 
775 
776 static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
777 			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
778 {
779 	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
780 
781 	if (separation_time != data->separation_time ||
782 			cc6_disable != data->cc6_disable ||
783 			pstate_disable != data->pstate_disable) {
784 		data->separation_time = separation_time;
785 		data->cc6_disable = cc6_disable;
786 		data->pstate_disable = pstate_disable;
787 		data->cc6_setting_changed = true;
788 	}
789 	return 0;
790 }
791 
/* Not supported on SMU10: DAL power level query always fails. */
static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}
797 
/*
 * Clamp a clock domain to the levels selected in @mask by setting the
 * SMC hard minimum (lowest set bit) and soft maximum (highest set bit).
 * SCLK exposes exactly 3 pseudo-levels (min / UMD pstate / max);
 * MCLK levels index the FCLK dependency table. PCIE is a no-op.
 */
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	/* Lowest/highest selected level; an empty mask means level 0. */
	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only support 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}
848 
/*
 * Print the clock levels for sysfs, marking the current level with "*".
 * SCLK reports 3 pseudo-levels (min / UMD pstate / max) since the
 * driver only knows the GFX min/max limits; MCLK walks the FCLK
 * dependency table. Returns the number of bytes written to @buf.
 */
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the current GFX clock. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		now = smum_get_argument(hwmgr);

	/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		/* Ask the SMC for the current FCLK. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
897 
898 static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
899 				PHM_PerformanceLevelDesignation designation, uint32_t index,
900 				PHM_PerformanceLevel *level)
901 {
902 	struct smu10_hwmgr *data;
903 
904 	if (level == NULL || hwmgr == NULL || state == NULL)
905 		return -EINVAL;
906 
907 	data = (struct smu10_hwmgr *)(hwmgr->backend);
908 
909 	if (index == 0) {
910 		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
911 		level->coreClock = data->gfx_min_freq_limit;
912 	} else {
913 		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
914 			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
915 		level->coreClock = data->gfx_max_freq_limit;
916 	}
917 
918 	level->nonLocalMemoryFreq = 0;
919 	level->nonLocalMemoryWidth = 0;
920 
921 	return 0;
922 }
923 
924 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
925 	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
926 {
927 	const struct smu10_power_state *ps = cast_const_smu10_ps(state);
928 
929 	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
930 	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));
931 
932 	return 0;
933 }
934 
#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000
#define MEM_LATENCY_HIGH            245
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF


/*
 * Map a memory clock to its latency bucket: ERR below the low band,
 * HIGH within [low, high), LOW at or above the high band.
 */
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	if (clock < MEM_FREQ_LOW_LATENCY)
		return MEM_LATENCY_ERR;
	if (clock < MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_HIGH;
	return MEM_LATENCY_LOW;
}
953 
954 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
955 		enum amd_pp_clock_type type,
956 		struct pp_clock_levels_with_latency *clocks)
957 {
958 	uint32_t i;
959 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
960 	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
961 	struct smu10_voltage_dependency_table *pclk_vol_table;
962 	bool latency_required = false;
963 
964 	if (pinfo == NULL)
965 		return -EINVAL;
966 
967 	switch (type) {
968 	case amd_pp_mem_clock:
969 		pclk_vol_table = pinfo->vdd_dep_on_mclk;
970 		latency_required = true;
971 		break;
972 	case amd_pp_f_clock:
973 		pclk_vol_table = pinfo->vdd_dep_on_fclk;
974 		latency_required = true;
975 		break;
976 	case amd_pp_dcf_clock:
977 		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
978 		break;
979 	case amd_pp_disp_clock:
980 		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
981 		break;
982 	case amd_pp_phy_clock:
983 		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
984 		break;
985 	case amd_pp_dpp_clock:
986 		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
993 		return -EINVAL;
994 
995 	clocks->num_levels = 0;
996 	for (i = 0; i < pclk_vol_table->count; i++) {
997 		if (pclk_vol_table->entries[i].clk) {
998 			clocks->data[clocks->num_levels].clocks_in_khz =
999 				pclk_vol_table->entries[i].clk * 10;
1000 			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
1001 				smu10_get_mem_latency(hwmgr,
1002 						      pclk_vol_table->entries[i].clk) :
1003 				0;
1004 			clocks->num_levels++;
1005 		}
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1012 		enum amd_pp_clock_type type,
1013 		struct pp_clock_levels_with_voltage *clocks)
1014 {
1015 	uint32_t i;
1016 	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1017 	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
1018 	struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
1019 
1020 	if (pinfo == NULL)
1021 		return -EINVAL;
1022 
1023 	switch (type) {
1024 	case amd_pp_mem_clock:
1025 		pclk_vol_table = pinfo->vdd_dep_on_mclk;
1026 		break;
1027 	case amd_pp_f_clock:
1028 		pclk_vol_table = pinfo->vdd_dep_on_fclk;
1029 		break;
1030 	case amd_pp_dcf_clock:
1031 		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
1032 		break;
1033 	case amd_pp_soc_clock:
1034 		pclk_vol_table = pinfo->vdd_dep_on_socclk;
1035 		break;
1036 	case amd_pp_disp_clock:
1037 		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
1038 		break;
1039 	case amd_pp_phy_clock:
1040 		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
1041 		break;
1042 	default:
1043 		return -EINVAL;
1044 	}
1045 
1046 	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
1047 		return -EINVAL;
1048 
1049 	clocks->num_levels = 0;
1050 	for (i = 0; i < pclk_vol_table->count; i++) {
1051 		if (pclk_vol_table->entries[i].clk) {
1052 			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk  * 10;
1053 			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
1054 			clocks->num_levels++;
1055 		}
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 
1062 
1063 static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1064 {
1065 	clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
1066 	return 0;
1067 }
1068 
/*
 * Read the current die temperature from the THM thermal controller.
 *
 * Returns the temperature scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES
 * (i.e. in sub-degree units, not plain degrees C).
 */
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	/* extract the CUR_TEMP bitfield from the raw register value */
	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
	int cur_temp =
		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

	/*
	 * NOTE(review): RANGE_SEL is tested against the already-shifted
	 * CUR_TEMP field rather than the raw register value; this matches
	 * other SOC15 thermal code, but confirm the RANGE_SEL bit actually
	 * falls inside the shifted field on this ASIC.
	 *
	 * Hardware reports in 1/8-degree steps; the extended range applies
	 * an additional -49 degree offset.
	 */
	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return cur_temp;
}
1083 
1084 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1085 			  void *value, int *size)
1086 {
1087 	uint32_t sclk, mclk;
1088 	int ret = 0;
1089 
1090 	switch (idx) {
1091 	case AMDGPU_PP_SENSOR_GFX_SCLK:
1092 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
1093 		sclk = smum_get_argument(hwmgr);
1094 			/* in units of 10KHZ */
1095 		*((uint32_t *)value) = sclk * 100;
1096 		*size = 4;
1097 		break;
1098 	case AMDGPU_PP_SENSOR_GFX_MCLK:
1099 		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
1100 		mclk = smum_get_argument(hwmgr);
1101 			/* in units of 10KHZ */
1102 		*((uint32_t *)value) = mclk * 100;
1103 		*size = 4;
1104 		break;
1105 	case AMDGPU_PP_SENSOR_GPU_TEMP:
1106 		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1107 		break;
1108 	default:
1109 		ret = -EINVAL;
1110 		break;
1111 	}
1112 
1113 	return ret;
1114 }
1115 
1116 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1117 		void *clock_ranges)
1118 {
1119 	struct smu10_hwmgr *data = hwmgr->backend;
1120 	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
1121 	Watermarks_t *table = &(data->water_marks_table);
1122 	int result = 0;
1123 
1124 	smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
1125 	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
1126 	data->water_marks_exist = true;
1127 	return result;
1128 }
1129 
1130 static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
1131 {
1132 
1133 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
1134 }
1135 
1136 static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
1137 {
1138 	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
1139 }
1140 
1141 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
1142 {
1143 	if (bgate) {
1144 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1145 						AMD_IP_BLOCK_TYPE_VCN,
1146 						AMD_PG_STATE_GATE);
1147 		smum_send_msg_to_smc_with_parameter(hwmgr,
1148 					PPSMC_MSG_PowerDownVcn, 0);
1149 	} else {
1150 		smum_send_msg_to_smc_with_parameter(hwmgr,
1151 						PPSMC_MSG_PowerUpVcn, 0);
1152 		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1153 						AMD_IP_BLOCK_TYPE_VCN,
1154 						AMD_PG_STATE_UNGATE);
1155 	}
1156 }
1157 
1158 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1159 	.backend_init = smu10_hwmgr_backend_init,
1160 	.backend_fini = smu10_hwmgr_backend_fini,
1161 	.asic_setup = NULL,
1162 	.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1163 	.force_dpm_level = smu10_dpm_force_dpm_level,
1164 	.get_power_state_size = smu10_get_power_state_size,
1165 	.powerdown_uvd = NULL,
1166 	.powergate_uvd = smu10_powergate_vcn,
1167 	.powergate_vce = NULL,
1168 	.get_mclk = smu10_dpm_get_mclk,
1169 	.get_sclk = smu10_dpm_get_sclk,
1170 	.patch_boot_state = smu10_dpm_patch_boot_state,
1171 	.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1172 	.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1173 	.set_cpu_power_state = smu10_set_cpu_power_state,
1174 	.store_cc6_data = smu10_store_cc6_data,
1175 	.force_clock_level = smu10_force_clock_level,
1176 	.print_clock_levels = smu10_print_clock_levels,
1177 	.get_dal_power_level = smu10_get_dal_power_level,
1178 	.get_performance_level = smu10_get_performance_level,
1179 	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1180 	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1181 	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1182 	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
1183 	.get_max_high_clocks = smu10_get_max_high_clocks,
1184 	.read_sensor = smu10_read_sensor,
1185 	.set_active_display_count = smu10_set_active_display_count,
1186 	.set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1187 	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
1188 	.power_off_asic = smu10_power_off_asic,
1189 	.asic_setup = smu10_setup_asic_task,
1190 	.power_state_set = smu10_set_power_state_tasks,
1191 	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
1192 	.powergate_mmhub = smu10_powergate_mmhub,
1193 	.smus_notify_pwe = smu10_smus_notify_pwe,
1194 	.gfx_off_control = smu10_gfx_off_control,
1195 	.display_clock_voltage_request = smu10_display_clock_voltage_request,
1196 	.powergate_gfx = smu10_gfx_off_control,
1197 };
1198 
1199 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1200 {
1201 	hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1202 	hwmgr->pptable_func = &pptable_funcs;
1203 	return 0;
1204 }
1205