1 /*	$NetBSD: amdgpu_hardwaremanager.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2015 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_hardwaremanager.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $");
27 
28 #include "pp_debug.h"
29 #include <linux/errno.h>
30 #include "hwmgr.h"
31 #include "hardwaremanager.h"
32 #include "power_state.h"
33 
34 
/* Default thermal limits, in millidegrees Celsius (0..80C), used when
 * the backend does not report its own temperature range. */
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

/* Return -EINVAL from the *calling* function when the hardware manager
 * or its backend function table is missing.  Used as the first statement
 * of nearly every phm_* entry point below. */
#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)
43 
phm_setup_asic(struct pp_hwmgr * hwmgr)44 int phm_setup_asic(struct pp_hwmgr *hwmgr)
45 {
46 	PHM_FUNC_CHECK(hwmgr);
47 
48 	if (NULL != hwmgr->hwmgr_func->asic_setup)
49 		return hwmgr->hwmgr_func->asic_setup(hwmgr);
50 
51 	return 0;
52 }
53 
phm_power_down_asic(struct pp_hwmgr * hwmgr)54 int phm_power_down_asic(struct pp_hwmgr *hwmgr)
55 {
56 	PHM_FUNC_CHECK(hwmgr);
57 
58 	if (NULL != hwmgr->hwmgr_func->power_off_asic)
59 		return hwmgr->hwmgr_func->power_off_asic(hwmgr);
60 
61 	return 0;
62 }
63 
phm_set_power_state(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * pcurrent_state,const struct pp_hw_power_state * pnew_power_state)64 int phm_set_power_state(struct pp_hwmgr *hwmgr,
65 		    const struct pp_hw_power_state *pcurrent_state,
66 		    const struct pp_hw_power_state *pnew_power_state)
67 {
68 	struct phm_set_power_state_input states;
69 
70 	PHM_FUNC_CHECK(hwmgr);
71 
72 	states.pcurrent_state = pcurrent_state;
73 	states.pnew_state = pnew_power_state;
74 
75 	if (NULL != hwmgr->hwmgr_func->power_state_set)
76 		return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
77 
78 	return 0;
79 }
80 
phm_enable_dynamic_state_management(struct pp_hwmgr * hwmgr)81 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
82 {
83 	struct amdgpu_device *adev = NULL;
84 	int ret = -EINVAL;
85 	PHM_FUNC_CHECK(hwmgr);
86 	adev = hwmgr->adev;
87 
88 	/* Skip for suspend/resume case */
89 	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
90 	    && !amdgpu_passthrough(adev) && adev->in_suspend) {
91 		pr_info("dpm has been enabled\n");
92 		return 0;
93 	}
94 
95 	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
96 		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
97 
98 	return ret;
99 }
100 
phm_disable_dynamic_state_management(struct pp_hwmgr * hwmgr)101 int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
102 {
103 	int ret = -EINVAL;
104 
105 	PHM_FUNC_CHECK(hwmgr);
106 
107 	if (!hwmgr->not_vf)
108 		return 0;
109 
110 	if (!smum_is_dpm_running(hwmgr)) {
111 		pr_info("dpm has been disabled\n");
112 		return 0;
113 	}
114 
115 	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
116 		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
117 
118 	return ret;
119 }
120 
phm_force_dpm_levels(struct pp_hwmgr * hwmgr,enum amd_dpm_forced_level level)121 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
122 {
123 	int ret = 0;
124 
125 	PHM_FUNC_CHECK(hwmgr);
126 
127 	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
128 		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
129 
130 	return ret;
131 }
132 
phm_apply_state_adjust_rules(struct pp_hwmgr * hwmgr,struct pp_power_state * adjusted_ps,const struct pp_power_state * current_ps)133 int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
134 				   struct pp_power_state *adjusted_ps,
135 			     const struct pp_power_state *current_ps)
136 {
137 	PHM_FUNC_CHECK(hwmgr);
138 
139 	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
140 		return hwmgr->hwmgr_func->apply_state_adjust_rules(
141 									hwmgr,
142 								 adjusted_ps,
143 								 current_ps);
144 	return 0;
145 }
146 
phm_apply_clock_adjust_rules(struct pp_hwmgr * hwmgr)147 int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
148 {
149 	PHM_FUNC_CHECK(hwmgr);
150 
151 	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
152 		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
153 	return 0;
154 }
155 
phm_powerdown_uvd(struct pp_hwmgr * hwmgr)156 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
157 {
158 	PHM_FUNC_CHECK(hwmgr);
159 
160 	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
161 		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
162 	return 0;
163 }
164 
165 
phm_disable_clock_power_gatings(struct pp_hwmgr * hwmgr)166 int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
167 {
168 	PHM_FUNC_CHECK(hwmgr);
169 
170 	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
171 		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
172 
173 	return 0;
174 }
175 
phm_pre_display_configuration_changed(struct pp_hwmgr * hwmgr)176 int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
177 {
178 	PHM_FUNC_CHECK(hwmgr);
179 
180 	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
181 		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
182 
183 	return 0;
184 
185 }
186 
phm_display_configuration_changed(struct pp_hwmgr * hwmgr)187 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
188 {
189 	PHM_FUNC_CHECK(hwmgr);
190 
191 	if (NULL != hwmgr->hwmgr_func->display_config_changed)
192 		hwmgr->hwmgr_func->display_config_changed(hwmgr);
193 
194 	return 0;
195 }
196 
phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr * hwmgr)197 int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
198 {
199 	PHM_FUNC_CHECK(hwmgr);
200 
201 	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
202 			hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
203 
204 	return 0;
205 }
206 
phm_stop_thermal_controller(struct pp_hwmgr * hwmgr)207 int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
208 {
209 	PHM_FUNC_CHECK(hwmgr);
210 
211 	if (!hwmgr->not_vf)
212 		return 0;
213 
214 	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
215 		return -EINVAL;
216 
217 	return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
218 }
219 
phm_register_irq_handlers(struct pp_hwmgr * hwmgr)220 int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
221 {
222 	PHM_FUNC_CHECK(hwmgr);
223 
224 	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
225 		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);
226 
227 	return 0;
228 }
229 
/**
 * Initialize the thermal controller subsystem.
 *
 * Queries the backend for its supported temperature range (falling back
 * to the TEMP_RANGE_MIN/MAX defaults), starts the thermal controller if
 * the platform advertises one, and publishes the resulting limits into
 * the amdgpu DPM thermal state.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return 0 on success or when skipped (virtual function), otherwise
 *         the backend start_thermal_controller() result.
 *
 * NOTE(review): unlike the other entry points, hwmgr is dereferenced
 * before any NULL check here (no PHM_FUNC_CHECK) -- callers are assumed
 * to pass a valid hwmgr.
 */
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	/* Positional initializers; order appears to follow the nine range
	 * fields read back below (min/max, edge, hotspot, mem) -- confirm
	 * against struct PP_TemperatureRange if it changes. */
	struct PP_TemperatureRange range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = hwmgr->adev;

	/* Virtual functions do not run the thermal controller. */
	if (!hwmgr->not_vf)
		return 0;

	/* Let the backend override the default range when it can. */
	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
		hwmgr->hwmgr_func->get_thermal_temperature_range(
				hwmgr, &range);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)
			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

	/* Publish the (possibly backend-adjusted) limits for the rest of
	 * the driver to consume. */
	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}
275 
276 
phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr * hwmgr)277 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
278 {
279 	PHM_FUNC_CHECK(hwmgr);
280 	if (hwmgr->pp_one_vf)
281 		return false;
282 
283 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
284 		return false;
285 
286 	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
287 }
288 
289 
/* Compare two hardware power states via the backend; *equal receives
 * the result.  A backend without the hook is an error. */
int phm_check_states_equal(struct pp_hwmgr *hwmgr,
				 const struct pp_hw_power_state *pstate1,
				 const struct pp_hw_power_state *pstate2,
				 bool *equal)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_states_equal != NULL)
		return hwmgr->hwmgr_func->check_states_equal(hwmgr,
				pstate1, pstate2, equal);

	return -EINVAL;
}
302 
phm_store_dal_configuration_data(struct pp_hwmgr * hwmgr,const struct amd_pp_display_configuration * display_config)303 int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
304 		    const struct amd_pp_display_configuration *display_config)
305 {
306 	int index = 0;
307 	int number_of_active_display = 0;
308 
309 	PHM_FUNC_CHECK(hwmgr);
310 
311 	if (display_config == NULL)
312 		return -EINVAL;
313 
314 	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
315 		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
316 
317 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
318 		if (display_config->displays[index].controller_id != 0)
319 			number_of_active_display++;
320 	}
321 
322 	if (NULL != hwmgr->hwmgr_func->set_active_display_count)
323 		hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
324 
325 	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
326 		return -EINVAL;
327 
328 	/* TODO: pass other display configuration in the future */
329 
330 	if (hwmgr->hwmgr_func->store_cc6_data)
331 		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
332 				display_config->cpu_pstate_separation_time,
333 				display_config->cpu_cc6_disable,
334 				display_config->cpu_pstate_disable,
335 				display_config->nb_pstate_switch_disable);
336 
337 	return 0;
338 }
339 
phm_get_dal_power_level(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * info)340 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
341 		struct amd_pp_simple_clock_info *info)
342 {
343 	PHM_FUNC_CHECK(hwmgr);
344 
345 	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
346 		return -EINVAL;
347 	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
348 }
349 
phm_set_cpu_power_state(struct pp_hwmgr * hwmgr)350 int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
351 {
352 	PHM_FUNC_CHECK(hwmgr);
353 
354 	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
355 		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);
356 
357 	return 0;
358 }
359 
360 
/* Retrieve one performance level of a power state from the backend;
 * a backend without the hook is an error. */
int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_performance_level != NULL)
		return hwmgr->hwmgr_func->get_performance_level(hwmgr,
				state, designation, index, level);

	return -EINVAL;
}
373 
374 
/**
 * Get clock info for a power state.
 *
 * Fills *pclock_info with the minimum clocks (taken from performance
 * level 0 under the Activity designation) and the maximum clocks (taken
 * from the highest hardware activity performance level under the caller
 * supplied designation).
 *
 * @param hwmgr        the address of the powerplay hardware manager.
 * @param state        the address of the power state to inspect.
 * @param pclock_info  the address of the pp_clock_info to fill in.
 * @param designation  performance level designation for the max clocks.
 * @return -EINVAL on bad arguments, a phm_get_performance_level() error,
 *         or 0 on success.
 */
int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
			PHM_PerformanceLevelDesignation designation)
{
	int result;
	PHM_PerformanceLevel performance_level = {0};

	PHM_FUNC_CHECK(hwmgr);

	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);

	/* Level 0 is the lowest level -> minimum clocks. */
	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);


	pclock_info->min_mem_clk = performance_level.memory_clock;
	pclock_info->min_eng_clk = performance_level.coreClock;
	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;


	/* Highest level -> maximum clocks. */
	result = phm_get_performance_level(hwmgr, state, designation,
					(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);

	pclock_info->max_mem_clk = performance_level.memory_clock;
	pclock_info->max_eng_clk = performance_level.coreClock;
	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	return 0;
}
415 
phm_get_current_shallow_sleep_clocks(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * state,struct pp_clock_info * clock_info)416 int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
417 {
418 	PHM_FUNC_CHECK(hwmgr);
419 
420 	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
421 		return -EINVAL;
422 
423 	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
424 
425 }
426 
phm_get_clock_by_type(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)427 int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
428 {
429 	PHM_FUNC_CHECK(hwmgr);
430 
431 	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
432 		return -EINVAL;
433 
434 	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
435 
436 }
437 
phm_get_clock_by_type_with_latency(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)438 int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
439 		enum amd_pp_clock_type type,
440 		struct pp_clock_levels_with_latency *clocks)
441 {
442 	PHM_FUNC_CHECK(hwmgr);
443 
444 	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
445 		return -EINVAL;
446 
447 	return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
448 
449 }
450 
phm_get_clock_by_type_with_voltage(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)451 int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
452 		enum amd_pp_clock_type type,
453 		struct pp_clock_levels_with_voltage *clocks)
454 {
455 	PHM_FUNC_CHECK(hwmgr);
456 
457 	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
458 		return -EINVAL;
459 
460 	return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
461 
462 }
463 
phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr * hwmgr,void * clock_ranges)464 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
465 					void *clock_ranges)
466 {
467 	PHM_FUNC_CHECK(hwmgr);
468 
469 	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
470 		return -EINVAL;
471 
472 	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
473 								clock_ranges);
474 }
475 
phm_display_clock_voltage_request(struct pp_hwmgr * hwmgr,struct pp_display_clock_request * clock)476 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
477 		struct pp_display_clock_request *clock)
478 {
479 	PHM_FUNC_CHECK(hwmgr);
480 
481 	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
482 		return -EINVAL;
483 
484 	return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
485 }
486 
phm_get_max_high_clocks(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * clocks)487 int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
488 {
489 	PHM_FUNC_CHECK(hwmgr);
490 
491 	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
492 		return -EINVAL;
493 
494 	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
495 }
496 
phm_disable_smc_firmware_ctf(struct pp_hwmgr * hwmgr)497 int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
498 {
499 	PHM_FUNC_CHECK(hwmgr);
500 
501 	if (!hwmgr->not_vf)
502 		return 0;
503 
504 	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
505 		return -EINVAL;
506 
507 	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
508 }
509 
/* Tell the backend how many displays are active; a backend without the
 * hook is an error. */
int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_active_display_count == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}
519 
/* Set the deep sleep DCEF clock floor; a backend without the hook is
 * an error. */
int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
}
529 
/* Set the hard minimum DCEF clock; a backend without the hook is an
 * error. */
int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
}
539 
/* Set the hard minimum fabric clock; a backend without the hook is an
 * error. */
int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
}
549 
550