1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34 
35 
36 static const struct amd_pm_funcs pp_dpm_funcs;
37 
38 static int amd_powerplay_create(struct amdgpu_device *adev)
39 {
40 	struct pp_hwmgr *hwmgr;
41 
42 	if (adev == NULL)
43 		return -EINVAL;
44 
45 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46 	if (hwmgr == NULL)
47 		return -ENOMEM;
48 
49 	hwmgr->adev = adev;
50 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51 	hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
52 	hwmgr->device = amdgpu_cgs_create_device(adev);
53 	lockinit(&hwmgr->smu_lock, "adhwmgrsmul", 0, LK_CANRECURSE);
54 	hwmgr->chip_family = adev->family;
55 	hwmgr->chip_id = adev->asic_type;
56 	hwmgr->feature_mask = adev->powerplay.pp_feature;
57 	hwmgr->display_config = &adev->pm.pm_display_cfg;
58 	adev->powerplay.pp_handle = hwmgr;
59 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
60 	return 0;
61 }
62 
63 
64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67 
68 	kfree(hwmgr->hardcode_pp_table);
69 	hwmgr->hardcode_pp_table = NULL;
70 
71 	kfree(hwmgr);
72 	hwmgr = NULL;
73 }
74 
75 static int pp_early_init(void *handle)
76 {
77 	int ret;
78 	struct amdgpu_device *adev = handle;
79 
80 	ret = amd_powerplay_create(adev);
81 
82 	if (ret != 0)
83 		return ret;
84 
85 	ret = hwmgr_early_init(adev->powerplay.pp_handle);
86 	if (ret)
87 		return -EINVAL;
88 
89 	return 0;
90 }
91 
92 static int pp_sw_init(void *handle)
93 {
94 	struct amdgpu_device *adev = handle;
95 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
96 	int ret = 0;
97 
98 	ret = hwmgr_sw_init(hwmgr);
99 
100 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
101 
102 	return ret;
103 }
104 
105 static int pp_sw_fini(void *handle)
106 {
107 	struct amdgpu_device *adev = handle;
108 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
109 
110 	hwmgr_sw_fini(hwmgr);
111 
112 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
113 		release_firmware(adev->pm.fw);
114 		adev->pm.fw = NULL;
115 		amdgpu_ucode_fini_bo(adev);
116 	}
117 
118 	return 0;
119 }
120 
121 static int pp_hw_init(void *handle)
122 {
123 	int ret = 0;
124 	struct amdgpu_device *adev = handle;
125 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
126 
127 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
128 		amdgpu_ucode_init_bo(adev);
129 
130 	ret = hwmgr_hw_init(hwmgr);
131 
132 	if (ret)
133 		pr_err("powerplay hw init failed\n");
134 
135 	return ret;
136 }
137 
138 static int pp_hw_fini(void *handle)
139 {
140 	struct amdgpu_device *adev = handle;
141 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
142 
143 	hwmgr_hw_fini(hwmgr);
144 
145 	return 0;
146 }
147 
/*
 * Carve out a GTT buffer of adev->pm.smu_prv_buffer_size bytes for the SMU's
 * private use and report its CPU/GPU addresses through the backend's
 * notify_cac_buffer_info hook.  Best effort: on any failure the buffer is
 * released again and an error is logged; nothing is returned.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;	/* stays -EINVAL when the notify hook is absent */
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						(u64 *)&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	/* Missing hook or failed notification: undo the reservation. */
	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
178 
179 static int pp_late_init(void *handle)
180 {
181 	struct amdgpu_device *adev = handle;
182 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
183 
184 	if (hwmgr && hwmgr->pm_en) {
185 		mutex_lock(&hwmgr->smu_lock);
186 		hwmgr_handle_task(hwmgr,
187 					AMD_PP_TASK_COMPLETE_INIT, NULL);
188 		mutex_unlock(&hwmgr->smu_lock);
189 	}
190 	if (adev->pm.smu_prv_buffer_size != 0)
191 		pp_reserve_vram_for_smu(adev);
192 
193 	return 0;
194 }
195 
196 static void pp_late_fini(void *handle)
197 {
198 	struct amdgpu_device *adev = handle;
199 
200 	if (adev->pm.smu_prv_buffer)
201 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
202 	amd_powerplay_destroy(adev);
203 }
204 
205 
206 static bool pp_is_idle(void *handle)
207 {
208 	return false;
209 }
210 
/* Nothing to wait for; always succeeds. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
215 
/* Soft reset is a no-op for powerplay. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
220 
221 static int pp_set_powergating_state(void *handle,
222 				    enum amd_powergating_state state)
223 {
224 	return 0;
225 }
226 
227 static int pp_suspend(void *handle)
228 {
229 	struct amdgpu_device *adev = handle;
230 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
231 
232 	return hwmgr_suspend(hwmgr);
233 }
234 
235 static int pp_resume(void *handle)
236 {
237 	struct amdgpu_device *adev = handle;
238 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
239 
240 	return hwmgr_resume(hwmgr);
241 }
242 
243 static int pp_set_clockgating_state(void *handle,
244 					  enum amd_clockgating_state state)
245 {
246 	return 0;
247 }
248 
/* IP-block dispatch table wiring the powerplay lifecycle callbacks above. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
266 
/* Exported SMC IP-block descriptor registered with the amdgpu core. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
275 
/* Firmware loading is driven by the hw-init path; nothing to do here. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}
280 
/* Firmware-loading completion is not tracked here; always succeeds. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
285 
286 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
287 {
288 	struct pp_hwmgr *hwmgr = handle;
289 
290 	if (!hwmgr || !hwmgr->pm_en)
291 		return -EINVAL;
292 
293 	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
294 		pr_info("%s was not implemented.\n", __func__);
295 		return 0;
296 	}
297 
298 	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
299 }
300 
/*
 * Handle entry to / exit from a UMD profiling pstate.  Profiling levels need
 * GFX clock/power gating disabled so measured clocks stay stable; normal
 * levels re-enable gating.  May rewrite *level when exiting via PROFILE_EXIT.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			/* PROFILE_EXIT means "go back to whatever was forced
			 * before profiling started". */
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
336 
337 static int pp_dpm_force_performance_level(void *handle,
338 					enum amd_dpm_forced_level level)
339 {
340 	struct pp_hwmgr *hwmgr = handle;
341 
342 	if (!hwmgr || !hwmgr->pm_en)
343 		return -EINVAL;
344 
345 	if (level == hwmgr->dpm_level)
346 		return 0;
347 
348 	mutex_lock(&hwmgr->smu_lock);
349 	pp_dpm_en_umd_pstate(hwmgr, &level);
350 	hwmgr->request_dpm_level = level;
351 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
352 	mutex_unlock(&hwmgr->smu_lock);
353 
354 	return 0;
355 }
356 
357 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
358 								void *handle)
359 {
360 	struct pp_hwmgr *hwmgr = handle;
361 	enum amd_dpm_forced_level level;
362 
363 	if (!hwmgr || !hwmgr->pm_en)
364 		return -EINVAL;
365 
366 	mutex_lock(&hwmgr->smu_lock);
367 	level = hwmgr->dpm_level;
368 	mutex_unlock(&hwmgr->smu_lock);
369 	return level;
370 }
371 
372 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
373 {
374 	struct pp_hwmgr *hwmgr = handle;
375 	uint32_t clk = 0;
376 
377 	if (!hwmgr || !hwmgr->pm_en)
378 		return 0;
379 
380 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
381 		pr_info("%s was not implemented.\n", __func__);
382 		return 0;
383 	}
384 	mutex_lock(&hwmgr->smu_lock);
385 	clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
386 	mutex_unlock(&hwmgr->smu_lock);
387 	return clk;
388 }
389 
390 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
391 {
392 	struct pp_hwmgr *hwmgr = handle;
393 	uint32_t clk = 0;
394 
395 	if (!hwmgr || !hwmgr->pm_en)
396 		return 0;
397 
398 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
399 		pr_info("%s was not implemented.\n", __func__);
400 		return 0;
401 	}
402 	mutex_lock(&hwmgr->smu_lock);
403 	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
404 	mutex_unlock(&hwmgr->smu_lock);
405 	return clk;
406 }
407 
408 static void pp_dpm_powergate_vce(void *handle, bool gate)
409 {
410 	struct pp_hwmgr *hwmgr = handle;
411 
412 	if (!hwmgr || !hwmgr->pm_en)
413 		return;
414 
415 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
416 		pr_info("%s was not implemented.\n", __func__);
417 		return;
418 	}
419 	mutex_lock(&hwmgr->smu_lock);
420 	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
421 	mutex_unlock(&hwmgr->smu_lock);
422 }
423 
424 static void pp_dpm_powergate_uvd(void *handle, bool gate)
425 {
426 	struct pp_hwmgr *hwmgr = handle;
427 
428 	if (!hwmgr || !hwmgr->pm_en)
429 		return;
430 
431 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
432 		pr_info("%s was not implemented.\n", __func__);
433 		return;
434 	}
435 	mutex_lock(&hwmgr->smu_lock);
436 	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
437 	mutex_unlock(&hwmgr->smu_lock);
438 }
439 
440 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
441 		enum amd_pm_state_type *user_state)
442 {
443 	int ret = 0;
444 	struct pp_hwmgr *hwmgr = handle;
445 
446 	if (!hwmgr || !hwmgr->pm_en)
447 		return -EINVAL;
448 
449 	mutex_lock(&hwmgr->smu_lock);
450 	ret = hwmgr_handle_task(hwmgr, task_id, user_state);
451 	mutex_unlock(&hwmgr->smu_lock);
452 
453 	return ret;
454 }
455 
456 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
457 {
458 	struct pp_hwmgr *hwmgr = handle;
459 	struct pp_power_state *state;
460 	enum amd_pm_state_type pm_type;
461 
462 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
463 		return -EINVAL;
464 
465 	mutex_lock(&hwmgr->smu_lock);
466 
467 	state = hwmgr->current_ps;
468 
469 	switch (state->classification.ui_label) {
470 	case PP_StateUILabel_Battery:
471 		pm_type = POWER_STATE_TYPE_BATTERY;
472 		break;
473 	case PP_StateUILabel_Balanced:
474 		pm_type = POWER_STATE_TYPE_BALANCED;
475 		break;
476 	case PP_StateUILabel_Performance:
477 		pm_type = POWER_STATE_TYPE_PERFORMANCE;
478 		break;
479 	default:
480 		if (state->classification.flags & PP_StateClassificationFlag_Boot)
481 			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
482 		else
483 			pm_type = POWER_STATE_TYPE_DEFAULT;
484 		break;
485 	}
486 	mutex_unlock(&hwmgr->smu_lock);
487 
488 	return pm_type;
489 }
490 
491 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
492 {
493 	struct pp_hwmgr *hwmgr = handle;
494 
495 	if (!hwmgr || !hwmgr->pm_en)
496 		return;
497 
498 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
499 		pr_info("%s was not implemented.\n", __func__);
500 		return;
501 	}
502 	mutex_lock(&hwmgr->smu_lock);
503 	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
504 	mutex_unlock(&hwmgr->smu_lock);
505 }
506 
507 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
508 {
509 	struct pp_hwmgr *hwmgr = handle;
510 	uint32_t mode = 0;
511 
512 	if (!hwmgr || !hwmgr->pm_en)
513 		return 0;
514 
515 	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
516 		pr_info("%s was not implemented.\n", __func__);
517 		return 0;
518 	}
519 	mutex_lock(&hwmgr->smu_lock);
520 	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
521 	mutex_unlock(&hwmgr->smu_lock);
522 	return mode;
523 }
524 
525 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
526 {
527 	struct pp_hwmgr *hwmgr = handle;
528 	int ret = 0;
529 
530 	if (!hwmgr || !hwmgr->pm_en)
531 		return -EINVAL;
532 
533 	if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
534 		pr_info("%s was not implemented.\n", __func__);
535 		return 0;
536 	}
537 	mutex_lock(&hwmgr->smu_lock);
538 	ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
539 	mutex_unlock(&hwmgr->smu_lock);
540 	return ret;
541 }
542 
543 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
544 {
545 	struct pp_hwmgr *hwmgr = handle;
546 	int ret = 0;
547 
548 	if (!hwmgr || !hwmgr->pm_en)
549 		return -EINVAL;
550 
551 	if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
552 		pr_info("%s was not implemented.\n", __func__);
553 		return 0;
554 	}
555 
556 	mutex_lock(&hwmgr->smu_lock);
557 	ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
558 	mutex_unlock(&hwmgr->smu_lock);
559 	return ret;
560 }
561 
562 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
563 {
564 	struct pp_hwmgr *hwmgr = handle;
565 	int ret = 0;
566 
567 	if (!hwmgr || !hwmgr->pm_en)
568 		return -EINVAL;
569 
570 	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
571 		return -EINVAL;
572 
573 	mutex_lock(&hwmgr->smu_lock);
574 	ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
575 	mutex_unlock(&hwmgr->smu_lock);
576 	return ret;
577 }
578 
579 static int pp_dpm_get_pp_num_states(void *handle,
580 		struct pp_states_info *data)
581 {
582 	struct pp_hwmgr *hwmgr = handle;
583 	int i;
584 
585 	memset(data, 0, sizeof(*data));
586 
587 	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
588 		return -EINVAL;
589 
590 	mutex_lock(&hwmgr->smu_lock);
591 
592 	data->nums = hwmgr->num_ps;
593 
594 	for (i = 0; i < hwmgr->num_ps; i++) {
595 		struct pp_power_state *state = (struct pp_power_state *)
596 				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
597 		switch (state->classification.ui_label) {
598 		case PP_StateUILabel_Battery:
599 			data->states[i] = POWER_STATE_TYPE_BATTERY;
600 			break;
601 		case PP_StateUILabel_Balanced:
602 			data->states[i] = POWER_STATE_TYPE_BALANCED;
603 			break;
604 		case PP_StateUILabel_Performance:
605 			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
606 			break;
607 		default:
608 			if (state->classification.flags & PP_StateClassificationFlag_Boot)
609 				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
610 			else
611 				data->states[i] = POWER_STATE_TYPE_DEFAULT;
612 		}
613 	}
614 	mutex_unlock(&hwmgr->smu_lock);
615 	return 0;
616 }
617 
618 static int pp_dpm_get_pp_table(void *handle, char **table)
619 {
620 	struct pp_hwmgr *hwmgr = handle;
621 	int size = 0;
622 
623 	if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
624 		return -EINVAL;
625 
626 	mutex_lock(&hwmgr->smu_lock);
627 	*table = (char *)hwmgr->soft_pp_table;
628 	size = hwmgr->soft_pp_table_size;
629 	mutex_unlock(&hwmgr->smu_lock);
630 	return size;
631 }
632 
633 static int amd_powerplay_reset(void *handle)
634 {
635 	struct pp_hwmgr *hwmgr = handle;
636 	int ret;
637 
638 	ret = hwmgr_hw_fini(hwmgr);
639 	if (ret)
640 		return ret;
641 
642 	ret = hwmgr_hw_init(hwmgr);
643 	if (ret)
644 		return ret;
645 
646 	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
647 }
648 
649 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
650 {
651 	struct pp_hwmgr *hwmgr = handle;
652 	int ret = -ENOMEM;
653 
654 	if (!hwmgr || !hwmgr->pm_en)
655 		return -EINVAL;
656 
657 	mutex_lock(&hwmgr->smu_lock);
658 	if (!hwmgr->hardcode_pp_table) {
659 		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
660 						   hwmgr->soft_pp_table_size,
661 						   GFP_KERNEL);
662 		if (!hwmgr->hardcode_pp_table)
663 			goto err;
664 	}
665 
666 	memcpy(hwmgr->hardcode_pp_table, buf, size);
667 
668 	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
669 
670 	ret = amd_powerplay_reset(handle);
671 	if (ret)
672 		goto err;
673 
674 	if (hwmgr->hwmgr_func->avfs_control) {
675 		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
676 		if (ret)
677 			goto err;
678 	}
679 	mutex_unlock(&hwmgr->smu_lock);
680 	return 0;
681 err:
682 	mutex_unlock(&hwmgr->smu_lock);
683 	return ret;
684 }
685 
686 static int pp_dpm_force_clock_level(void *handle,
687 		enum pp_clock_type type, uint32_t mask)
688 {
689 	struct pp_hwmgr *hwmgr = handle;
690 	int ret = 0;
691 
692 	if (!hwmgr || !hwmgr->pm_en)
693 		return -EINVAL;
694 
695 	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
696 		pr_info("%s was not implemented.\n", __func__);
697 		return 0;
698 	}
699 	mutex_lock(&hwmgr->smu_lock);
700 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
701 		ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
702 	else
703 		ret = -EINVAL;
704 	mutex_unlock(&hwmgr->smu_lock);
705 	return ret;
706 }
707 
708 static int pp_dpm_print_clock_levels(void *handle,
709 		enum pp_clock_type type, char *buf)
710 {
711 	struct pp_hwmgr *hwmgr = handle;
712 	int ret = 0;
713 
714 	if (!hwmgr || !hwmgr->pm_en)
715 		return -EINVAL;
716 
717 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
718 		pr_info("%s was not implemented.\n", __func__);
719 		return 0;
720 	}
721 	mutex_lock(&hwmgr->smu_lock);
722 	ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
723 	mutex_unlock(&hwmgr->smu_lock);
724 	return ret;
725 }
726 
727 static int pp_dpm_get_sclk_od(void *handle)
728 {
729 	struct pp_hwmgr *hwmgr = handle;
730 	int ret = 0;
731 
732 	if (!hwmgr || !hwmgr->pm_en)
733 		return -EINVAL;
734 
735 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
736 		pr_info("%s was not implemented.\n", __func__);
737 		return 0;
738 	}
739 	mutex_lock(&hwmgr->smu_lock);
740 	ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
741 	mutex_unlock(&hwmgr->smu_lock);
742 	return ret;
743 }
744 
745 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
746 {
747 	struct pp_hwmgr *hwmgr = handle;
748 	int ret = 0;
749 
750 	if (!hwmgr || !hwmgr->pm_en)
751 		return -EINVAL;
752 
753 	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
754 		pr_info("%s was not implemented.\n", __func__);
755 		return 0;
756 	}
757 
758 	mutex_lock(&hwmgr->smu_lock);
759 	ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
760 	mutex_unlock(&hwmgr->smu_lock);
761 	return ret;
762 }
763 
764 static int pp_dpm_get_mclk_od(void *handle)
765 {
766 	struct pp_hwmgr *hwmgr = handle;
767 	int ret = 0;
768 
769 	if (!hwmgr || !hwmgr->pm_en)
770 		return -EINVAL;
771 
772 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
773 		pr_info("%s was not implemented.\n", __func__);
774 		return 0;
775 	}
776 	mutex_lock(&hwmgr->smu_lock);
777 	ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
778 	mutex_unlock(&hwmgr->smu_lock);
779 	return ret;
780 }
781 
782 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
783 {
784 	struct pp_hwmgr *hwmgr = handle;
785 	int ret = 0;
786 
787 	if (!hwmgr || !hwmgr->pm_en)
788 		return -EINVAL;
789 
790 	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
791 		pr_info("%s was not implemented.\n", __func__);
792 		return 0;
793 	}
794 	mutex_lock(&hwmgr->smu_lock);
795 	ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
796 	mutex_unlock(&hwmgr->smu_lock);
797 	return ret;
798 }
799 
800 static int pp_dpm_read_sensor(void *handle, int idx,
801 			      void *value, int *size)
802 {
803 	struct pp_hwmgr *hwmgr = handle;
804 	int ret = 0;
805 
806 	if (!hwmgr || !hwmgr->pm_en || !value)
807 		return -EINVAL;
808 
809 	switch (idx) {
810 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
811 		*((uint32_t *)value) = hwmgr->pstate_sclk;
812 		return 0;
813 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
814 		*((uint32_t *)value) = hwmgr->pstate_mclk;
815 		return 0;
816 	default:
817 		mutex_lock(&hwmgr->smu_lock);
818 		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
819 		mutex_unlock(&hwmgr->smu_lock);
820 		return ret;
821 	}
822 }
823 
824 static struct amd_vce_state*
825 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
826 {
827 	struct pp_hwmgr *hwmgr = handle;
828 
829 	if (!hwmgr || !hwmgr->pm_en)
830 		return NULL;
831 
832 	if (idx < hwmgr->num_vce_state_tables)
833 		return &hwmgr->vce_states[idx];
834 	return NULL;
835 }
836 
837 static int pp_get_power_profile_mode(void *handle, char *buf)
838 {
839 	struct pp_hwmgr *hwmgr = handle;
840 
841 	if (!hwmgr || !hwmgr->pm_en || !buf)
842 		return -EINVAL;
843 
844 	if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
845 		pr_info("%s was not implemented.\n", __func__);
846 		return snprintf(buf, PAGE_SIZE, "\n");
847 	}
848 
849 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
850 }
851 
852 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
853 {
854 	struct pp_hwmgr *hwmgr = handle;
855 	int ret = -EINVAL;
856 
857 	if (!hwmgr || !hwmgr->pm_en)
858 		return ret;
859 
860 	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
861 		pr_info("%s was not implemented.\n", __func__);
862 		return ret;
863 	}
864 	mutex_lock(&hwmgr->smu_lock);
865 	if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
866 		ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
867 	mutex_unlock(&hwmgr->smu_lock);
868 	return ret;
869 }
870 
871 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
872 {
873 	struct pp_hwmgr *hwmgr = handle;
874 
875 	if (!hwmgr || !hwmgr->pm_en)
876 		return -EINVAL;
877 
878 	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
879 		pr_info("%s was not implemented.\n", __func__);
880 		return -EINVAL;
881 	}
882 
883 	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
884 }
885 
/*
 * Enable or disable one workload profile bit and apply the highest-priority
 * profile that remains selected.  workload_mask holds one bit per profile
 * (indexed by priority); fls() finds the highest set bit and workload_setting
 * translates that priority slot back to a profile id.  The new setting is
 * only pushed to hardware outside MANUAL mode.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM and beyond are not switchable through this interface. */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		/* fls() == 0 means no profile left; fall back to slot 0. */
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
924 
925 static int pp_set_power_limit(void *handle, uint32_t limit)
926 {
927 	struct pp_hwmgr *hwmgr = handle;
928 
929 	if (!hwmgr || !hwmgr->pm_en)
930 		return -EINVAL;
931 
932 	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
933 		pr_info("%s was not implemented.\n", __func__);
934 		return -EINVAL;
935 	}
936 
937 	if (limit == 0)
938 		limit = hwmgr->default_power_limit;
939 
940 	if (limit > hwmgr->default_power_limit)
941 		return -EINVAL;
942 
943 	mutex_lock(&hwmgr->smu_lock);
944 	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
945 	hwmgr->power_limit = limit;
946 	mutex_unlock(&hwmgr->smu_lock);
947 	return 0;
948 }
949 
950 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
951 {
952 	struct pp_hwmgr *hwmgr = handle;
953 
954 	if (!hwmgr || !hwmgr->pm_en ||!limit)
955 		return -EINVAL;
956 
957 	mutex_lock(&hwmgr->smu_lock);
958 
959 	if (default_limit)
960 		*limit = hwmgr->default_power_limit;
961 	else
962 		*limit = hwmgr->power_limit;
963 
964 	mutex_unlock(&hwmgr->smu_lock);
965 
966 	return 0;
967 }
968 
969 static int pp_display_configuration_change(void *handle,
970 	const struct amd_pp_display_configuration *display_config)
971 {
972 	struct pp_hwmgr *hwmgr = handle;
973 
974 	if (!hwmgr || !hwmgr->pm_en)
975 		return -EINVAL;
976 
977 	mutex_lock(&hwmgr->smu_lock);
978 	phm_store_dal_configuration_data(hwmgr, display_config);
979 	mutex_unlock(&hwmgr->smu_lock);
980 	return 0;
981 }
982 
983 static int pp_get_display_power_level(void *handle,
984 		struct amd_pp_simple_clock_info *output)
985 {
986 	struct pp_hwmgr *hwmgr = handle;
987 	int ret = 0;
988 
989 	if (!hwmgr || !hwmgr->pm_en ||!output)
990 		return -EINVAL;
991 
992 	mutex_lock(&hwmgr->smu_lock);
993 	ret = phm_get_dal_power_level(hwmgr, output);
994 	mutex_unlock(&hwmgr->smu_lock);
995 	return ret;
996 }
997 
/*
 * Fill @clocks with the current engine/memory clock ranges and bandwidth of
 * the active power state.  The clock-info query uses the PowerContainment
 * designation when that cap is enabled, Activity otherwise; shallow-sleep
 * limits override the *_in_sr fields when available.
 */
static int pp_get_current_clocks(void *handle,
		struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_info("Error in phm_get_clock_info \n");
		mutex_unlock(&hwmgr->smu_lock);
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	/* Default the shallow-sleep range to the plain engine range … */
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level 0 from DAL means "unknown"; assume the deepest level. */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	/* … then refine it when shallow-sleep clock data is available. */
	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1049 
1050 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1051 {
1052 	struct pp_hwmgr *hwmgr = handle;
1053 	int ret = 0;
1054 
1055 	if (!hwmgr || !hwmgr->pm_en)
1056 		return -EINVAL;
1057 
1058 	if (clocks == NULL)
1059 		return -EINVAL;
1060 
1061 	mutex_lock(&hwmgr->smu_lock);
1062 	ret = phm_get_clock_by_type(hwmgr, type, clocks);
1063 	mutex_unlock(&hwmgr->smu_lock);
1064 	return ret;
1065 }
1066 
1067 static int pp_get_clock_by_type_with_latency(void *handle,
1068 		enum amd_pp_clock_type type,
1069 		struct pp_clock_levels_with_latency *clocks)
1070 {
1071 	struct pp_hwmgr *hwmgr = handle;
1072 	int ret = 0;
1073 
1074 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1075 		return -EINVAL;
1076 
1077 	mutex_lock(&hwmgr->smu_lock);
1078 	ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1079 	mutex_unlock(&hwmgr->smu_lock);
1080 	return ret;
1081 }
1082 
1083 static int pp_get_clock_by_type_with_voltage(void *handle,
1084 		enum amd_pp_clock_type type,
1085 		struct pp_clock_levels_with_voltage *clocks)
1086 {
1087 	struct pp_hwmgr *hwmgr = handle;
1088 	int ret = 0;
1089 
1090 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1091 		return -EINVAL;
1092 
1093 	mutex_lock(&hwmgr->smu_lock);
1094 
1095 	ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1096 
1097 	mutex_unlock(&hwmgr->smu_lock);
1098 	return ret;
1099 }
1100 
1101 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1102 		void *clock_ranges)
1103 {
1104 	struct pp_hwmgr *hwmgr = handle;
1105 	int ret = 0;
1106 
1107 	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1108 		return -EINVAL;
1109 
1110 	mutex_lock(&hwmgr->smu_lock);
1111 	ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1112 			clock_ranges);
1113 	mutex_unlock(&hwmgr->smu_lock);
1114 
1115 	return ret;
1116 }
1117 
1118 static int pp_display_clock_voltage_request(void *handle,
1119 		struct pp_display_clock_request *clock)
1120 {
1121 	struct pp_hwmgr *hwmgr = handle;
1122 	int ret = 0;
1123 
1124 	if (!hwmgr || !hwmgr->pm_en ||!clock)
1125 		return -EINVAL;
1126 
1127 	mutex_lock(&hwmgr->smu_lock);
1128 	ret = phm_display_clock_voltage_request(hwmgr, clock);
1129 	mutex_unlock(&hwmgr->smu_lock);
1130 
1131 	return ret;
1132 }
1133 
1134 static int pp_get_display_mode_validation_clocks(void *handle,
1135 		struct amd_pp_simple_clock_info *clocks)
1136 {
1137 	struct pp_hwmgr *hwmgr = handle;
1138 	int ret = 0;
1139 
1140 	if (!hwmgr || !hwmgr->pm_en ||!clocks)
1141 		return -EINVAL;
1142 
1143 	clocks->level = PP_DAL_POWERLEVEL_7;
1144 
1145 	mutex_lock(&hwmgr->smu_lock);
1146 
1147 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1148 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1149 
1150 	mutex_unlock(&hwmgr->smu_lock);
1151 	return ret;
1152 }
1153 
1154 static int pp_dpm_powergate_mmhub(void *handle)
1155 {
1156 	struct pp_hwmgr *hwmgr = handle;
1157 
1158 	if (!hwmgr || !hwmgr->pm_en)
1159 		return -EINVAL;
1160 
1161 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1162 		pr_info("%s was not implemented.\n", __func__);
1163 		return 0;
1164 	}
1165 
1166 	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1167 }
1168 
1169 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1170 {
1171 	struct pp_hwmgr *hwmgr = handle;
1172 
1173 	if (!hwmgr || !hwmgr->pm_en)
1174 		return 0;
1175 
1176 	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1177 		pr_info("%s was not implemented.\n", __func__);
1178 		return 0;
1179 	}
1180 
1181 	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1182 }
1183 
1184 static int pp_set_powergating_by_smu(void *handle,
1185 				uint32_t block_type, bool gate)
1186 {
1187 	int ret = 0;
1188 
1189 	switch (block_type) {
1190 	case AMD_IP_BLOCK_TYPE_UVD:
1191 	case AMD_IP_BLOCK_TYPE_VCN:
1192 		pp_dpm_powergate_uvd(handle, gate);
1193 		break;
1194 	case AMD_IP_BLOCK_TYPE_VCE:
1195 		pp_dpm_powergate_vce(handle, gate);
1196 		break;
1197 	case AMD_IP_BLOCK_TYPE_GMC:
1198 		pp_dpm_powergate_mmhub(handle);
1199 		break;
1200 	case AMD_IP_BLOCK_TYPE_GFX:
1201 		ret = pp_dpm_powergate_gfx(handle, gate);
1202 		break;
1203 	default:
1204 		break;
1205 	}
1206 	return ret;
1207 }
1208 
1209 static int pp_notify_smu_enable_pwe(void *handle)
1210 {
1211 	struct pp_hwmgr *hwmgr = handle;
1212 
1213 	if (!hwmgr || !hwmgr->pm_en)
1214 		return -EINVAL;
1215 
1216 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1217 		pr_info("%s was not implemented.\n", __func__);
1218 		return -EINVAL;;
1219 	}
1220 
1221 	mutex_lock(&hwmgr->smu_lock);
1222 	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1223 	mutex_unlock(&hwmgr->smu_lock);
1224 
1225 	return 0;
1226 }
1227 
/*
 * Dispatch table exposed to amdgpu's generic power-management layer.
 * Every entry is a thin pp_* wrapper that validates the handle/pm_en state
 * and forwards to the hwmgr implementation.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* Entries below are consumed by the display core (DC) for mode validation
 * and display clock/voltage negotiation. */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};
1273