1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "pp_debug.h"
24 #include <linux/errno.h>
25 #include "hwmgr.h"
26 #include "hardwaremanager.h"
27 #include "power_state.h"
28
29
/*
 * Default thermal alert range handed to the backend before it has a chance
 * to report its own limits: 0 .. 80 * 1000 (presumably millidegrees Celsius,
 * matching the adev->pm.dpm.thermal fields filled in below — TODO confirm
 * against struct PP_TemperatureRange).
 */
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

/*
 * Guard used at the top of every dispatcher in this file: bail out with
 * -EINVAL when the hwmgr handle or its backend function table is missing.
 * Note this macro *returns* from the calling function.
 */
#define PHM_FUNC_CHECK(hw) \
	do { \
		if ((hw) == NULL || (hw)->hwmgr_func == NULL) \
			return -EINVAL; \
	} while (0)
38
phm_setup_asic(struct pp_hwmgr * hwmgr)39 int phm_setup_asic(struct pp_hwmgr *hwmgr)
40 {
41 PHM_FUNC_CHECK(hwmgr);
42
43 if (NULL != hwmgr->hwmgr_func->asic_setup)
44 return hwmgr->hwmgr_func->asic_setup(hwmgr);
45
46 return 0;
47 }
48
phm_power_down_asic(struct pp_hwmgr * hwmgr)49 int phm_power_down_asic(struct pp_hwmgr *hwmgr)
50 {
51 PHM_FUNC_CHECK(hwmgr);
52
53 if (NULL != hwmgr->hwmgr_func->power_off_asic)
54 return hwmgr->hwmgr_func->power_off_asic(hwmgr);
55
56 return 0;
57 }
58
phm_set_power_state(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * pcurrent_state,const struct pp_hw_power_state * pnew_power_state)59 int phm_set_power_state(struct pp_hwmgr *hwmgr,
60 const struct pp_hw_power_state *pcurrent_state,
61 const struct pp_hw_power_state *pnew_power_state)
62 {
63 struct phm_set_power_state_input states;
64
65 PHM_FUNC_CHECK(hwmgr);
66
67 states.pcurrent_state = pcurrent_state;
68 states.pnew_state = pnew_power_state;
69
70 if (NULL != hwmgr->hwmgr_func->power_state_set)
71 return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
72
73 return 0;
74 }
75
phm_enable_dynamic_state_management(struct pp_hwmgr * hwmgr)76 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
77 {
78 struct amdgpu_device *adev = NULL;
79 int ret = -EINVAL;
80 PHM_FUNC_CHECK(hwmgr);
81 adev = hwmgr->adev;
82
83 /* Skip for suspend/resume case */
84 if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
85 && !amdgpu_passthrough(adev) && adev->in_suspend
86 && adev->asic_type != CHIP_RAVEN) {
87 pr_info("dpm has been enabled\n");
88 return 0;
89 }
90
91 if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
92 ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
93
94 return ret;
95 }
96
phm_disable_dynamic_state_management(struct pp_hwmgr * hwmgr)97 int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
98 {
99 int ret = -EINVAL;
100
101 PHM_FUNC_CHECK(hwmgr);
102
103 if (!hwmgr->not_vf)
104 return 0;
105
106 if (!smum_is_dpm_running(hwmgr)) {
107 pr_info("dpm has been disabled\n");
108 return 0;
109 }
110
111 if (hwmgr->hwmgr_func->dynamic_state_management_disable)
112 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
113
114 return ret;
115 }
116
phm_force_dpm_levels(struct pp_hwmgr * hwmgr,enum amd_dpm_forced_level level)117 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
118 {
119 int ret = 0;
120
121 PHM_FUNC_CHECK(hwmgr);
122
123 if (hwmgr->hwmgr_func->force_dpm_level != NULL)
124 ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
125
126 return ret;
127 }
128
phm_apply_state_adjust_rules(struct pp_hwmgr * hwmgr,struct pp_power_state * adjusted_ps,const struct pp_power_state * current_ps)129 int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
130 struct pp_power_state *adjusted_ps,
131 const struct pp_power_state *current_ps)
132 {
133 PHM_FUNC_CHECK(hwmgr);
134
135 if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
136 return hwmgr->hwmgr_func->apply_state_adjust_rules(
137 hwmgr,
138 adjusted_ps,
139 current_ps);
140 return 0;
141 }
142
phm_apply_clock_adjust_rules(struct pp_hwmgr * hwmgr)143 int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
144 {
145 PHM_FUNC_CHECK(hwmgr);
146
147 if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
148 return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
149 return 0;
150 }
151
phm_powerdown_uvd(struct pp_hwmgr * hwmgr)152 int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
153 {
154 PHM_FUNC_CHECK(hwmgr);
155
156 if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
157 return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
158 return 0;
159 }
160
161
phm_disable_clock_power_gatings(struct pp_hwmgr * hwmgr)162 int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
163 {
164 PHM_FUNC_CHECK(hwmgr);
165
166 if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
167 return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
168
169 return 0;
170 }
171
phm_pre_display_configuration_changed(struct pp_hwmgr * hwmgr)172 int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
173 {
174 PHM_FUNC_CHECK(hwmgr);
175
176 if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
177 hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);
178
179 return 0;
180
181 }
182
phm_display_configuration_changed(struct pp_hwmgr * hwmgr)183 int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
184 {
185 PHM_FUNC_CHECK(hwmgr);
186
187 if (NULL != hwmgr->hwmgr_func->display_config_changed)
188 hwmgr->hwmgr_func->display_config_changed(hwmgr);
189
190 return 0;
191 }
192
phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr * hwmgr)193 int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
194 {
195 PHM_FUNC_CHECK(hwmgr);
196
197 if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
198 hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
199
200 return 0;
201 }
202
phm_stop_thermal_controller(struct pp_hwmgr * hwmgr)203 int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
204 {
205 PHM_FUNC_CHECK(hwmgr);
206
207 if (!hwmgr->not_vf)
208 return 0;
209
210 if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
211 return -EINVAL;
212
213 return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
214 }
215
phm_register_irq_handlers(struct pp_hwmgr * hwmgr)216 int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
217 {
218 PHM_FUNC_CHECK(hwmgr);
219
220 if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
221 return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);
222
223 return 0;
224 }
225
/**
 * phm_start_thermal_controller - Initializes the thermal controller subsystem.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the dispatcher.
 */
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	/*
	 * Positional initializer for struct PP_TemperatureRange — the value
	 * order must match the member declaration order (presumably min, max,
	 * edge_emergency_max, hotspot_min, hotspot_crit_max,
	 * hotspot_emergency_max, mem_min, mem_crit_max, mem_emergency_max,
	 * sw_ctf_threshold, mirroring the copy-out below; verify against the
	 * struct definition before reordering).
	 */
	struct PP_TemperatureRange range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		0};
	struct amdgpu_device *adev = hwmgr->adev;

	/* VFs do not control the thermal hardware; nothing to start. */
	if (!hwmgr->not_vf)
		return 0;

	/* Let the ASIC backend replace the default range with real limits. */
	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
		hwmgr->hwmgr_func->get_thermal_temperature_range(
				hwmgr, &range);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)
			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

	/* Publish the (possibly backend-adjusted) limits to the core DPM state. */
	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
	adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;

	return ret;
}
273
274
phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr * hwmgr)275 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
276 {
277 if (hwmgr == NULL ||
278 hwmgr->hwmgr_func == NULL)
279 return false;
280
281 if (hwmgr->pp_one_vf)
282 return false;
283
284 if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
285 return false;
286
287 return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
288 }
289
290
/*
 * Compare two hardware power states via the backend; the result is written
 * through @equal.  The hook is mandatory for this query.
 */
int phm_check_states_equal(struct pp_hwmgr *hwmgr,
				const struct pp_hw_power_state *pstate1,
				const struct pp_hw_power_state *pstate2,
				bool *equal)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_states_equal)
		return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1,
							     pstate2, equal);

	return -EINVAL;
}
303
phm_store_dal_configuration_data(struct pp_hwmgr * hwmgr,const struct amd_pp_display_configuration * display_config)304 int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
305 const struct amd_pp_display_configuration *display_config)
306 {
307 int index = 0;
308 int number_of_active_display = 0;
309
310 PHM_FUNC_CHECK(hwmgr);
311
312 if (display_config == NULL)
313 return -EINVAL;
314
315 if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
316 hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
317
318 for (index = 0; index < display_config->num_path_including_non_display; index++) {
319 if (display_config->displays[index].controller_id != 0)
320 number_of_active_display++;
321 }
322
323 if (NULL != hwmgr->hwmgr_func->set_active_display_count)
324 hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
325
326 if (hwmgr->hwmgr_func->store_cc6_data == NULL)
327 return -EINVAL;
328
329 /* TODO: pass other display configuration in the future */
330
331 if (hwmgr->hwmgr_func->store_cc6_data)
332 hwmgr->hwmgr_func->store_cc6_data(hwmgr,
333 display_config->cpu_pstate_separation_time,
334 display_config->cpu_cc6_disable,
335 display_config->cpu_pstate_disable,
336 display_config->nb_pstate_switch_disable);
337
338 return 0;
339 }
340
phm_get_dal_power_level(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * info)341 int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
342 struct amd_pp_simple_clock_info *info)
343 {
344 PHM_FUNC_CHECK(hwmgr);
345
346 if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
347 return -EINVAL;
348 return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
349 }
350
phm_set_cpu_power_state(struct pp_hwmgr * hwmgr)351 int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
352 {
353 PHM_FUNC_CHECK(hwmgr);
354
355 if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
356 return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);
357
358 return 0;
359 }
360
361
/*
 * Fetch performance level @index of @state for the given @designation.
 * The backend hook is mandatory for this query.
 */
int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_performance_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state,
							designation, index,
							level);
}
374
375
376 /**
377 * phm_get_clock_info
378 *
379 * @hwmgr: the address of the powerplay hardware manager.
380 * @state: the address of the Power State structure.
381 * @pclock_info: the address of PP_ClockInfo structure where the result will be returned.
382 * @designation: PHM performance level designation
383 * Exception PP_Result_Failed if any of the paramters is NULL, otherwise the return value from the back-end.
384 */
phm_get_clock_info(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * state,struct pp_clock_info * pclock_info,PHM_PerformanceLevelDesignation designation)385 int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
386 PHM_PerformanceLevelDesignation designation)
387 {
388 int result;
389 PHM_PerformanceLevel performance_level = {0};
390
391 PHM_FUNC_CHECK(hwmgr);
392
393 PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
394 PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);
395
396 result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);
397
398 PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);
399
400
401 pclock_info->min_mem_clk = performance_level.memory_clock;
402 pclock_info->min_eng_clk = performance_level.coreClock;
403 pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
404
405
406 result = phm_get_performance_level(hwmgr, state, designation,
407 (hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);
408
409 PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);
410
411 pclock_info->max_mem_clk = performance_level.memory_clock;
412 pclock_info->max_eng_clk = performance_level.coreClock;
413 pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;
414
415 return 0;
416 }
417
phm_get_current_shallow_sleep_clocks(struct pp_hwmgr * hwmgr,const struct pp_hw_power_state * state,struct pp_clock_info * clock_info)418 int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
419 {
420 PHM_FUNC_CHECK(hwmgr);
421
422 if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
423 return -EINVAL;
424
425 return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
426
427 }
428
phm_get_clock_by_type(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)429 int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
430 {
431 PHM_FUNC_CHECK(hwmgr);
432
433 if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
434 return -EINVAL;
435
436 return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
437
438 }
439
phm_get_clock_by_type_with_latency(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)440 int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
441 enum amd_pp_clock_type type,
442 struct pp_clock_levels_with_latency *clocks)
443 {
444 PHM_FUNC_CHECK(hwmgr);
445
446 if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
447 return -EINVAL;
448
449 return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
450
451 }
452
phm_get_clock_by_type_with_voltage(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)453 int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
454 enum amd_pp_clock_type type,
455 struct pp_clock_levels_with_voltage *clocks)
456 {
457 PHM_FUNC_CHECK(hwmgr);
458
459 if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
460 return -EINVAL;
461
462 return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
463
464 }
465
phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr * hwmgr,void * clock_ranges)466 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
467 void *clock_ranges)
468 {
469 PHM_FUNC_CHECK(hwmgr);
470
471 if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
472 return -EINVAL;
473
474 return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
475 clock_ranges);
476 }
477
phm_display_clock_voltage_request(struct pp_hwmgr * hwmgr,struct pp_display_clock_request * clock)478 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
479 struct pp_display_clock_request *clock)
480 {
481 PHM_FUNC_CHECK(hwmgr);
482
483 if (!hwmgr->hwmgr_func->display_clock_voltage_request)
484 return -EINVAL;
485
486 return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
487 }
488
phm_get_max_high_clocks(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * clocks)489 int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
490 {
491 PHM_FUNC_CHECK(hwmgr);
492
493 if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
494 return -EINVAL;
495
496 return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
497 }
498
phm_disable_smc_firmware_ctf(struct pp_hwmgr * hwmgr)499 int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
500 {
501 PHM_FUNC_CHECK(hwmgr);
502
503 if (!hwmgr->not_vf)
504 return 0;
505
506 if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
507 return -EINVAL;
508
509 return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
510 }
511
/* Report the active display count to the backend; the hook is mandatory. */
int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_active_display_count == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}
521