/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drmP.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "amdgpu_smu.h"

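/*
 * Push DC's display configuration into the shared amdgpu_pm config and
 * notify powerplay (or the SW SMU where no powerplay callback exists),
 * then let the power code recompute clock limits.
 */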
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	int i;

	if (adev->pm.dpm_enabled) {
		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

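		/* pplib keeps clocks in 10 kHz units; DC supplies kHz, hence the / 10. */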
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz / 10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz / 10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz / 10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz / 10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz / 10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

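		/* pplib expects 1-based controller IDs, hence the +1 on DC's 0-based pipe index. */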
		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else
			smu_display_configuration_change(smu,
							 &adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}

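/*
 * Fallback clock tables (in kHz), used when neither powerplay nor the
 * SMU can report real DPM levels.
 */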
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

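/* Map DC clock types onto powerplay's amd_pp_clock_type values. */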
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

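/* Convert a pplib DAL power level into the corresponding DC DPM state. */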
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

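/*
 * The pp_to_dc_clock_levels*() helpers copy pplib level tables into DC's
 * fixed-size structures, clamping the level count to DM_PP_MAX_CLOCK_LEVELS
 * so an oversized source table cannot overrun the destination arrays.
 */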
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		dc_clks->num_levels = pp_clks->count;
	}

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		clk_level_info->num_levels = pp_clks->num_levels;
	}

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		clk_level_info->num_levels = pp_clks->num_levels;
	}

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
		if (smu_get_clock_by_type(&adev->smu,
					  dc_to_pp_clock_type(clk_type),
					  &pp_clks)) {
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	} else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
		if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate from 10 kHz units to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
		ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clks);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
		if (smu_get_clock_by_type_with_latency(&adev->smu,
						       dc_to_pp_clock_type(clk_type),
						       &pp_clks))
			return false;
	}

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
		ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clk_info);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
		if (smu_get_clock_by_type_with_voltage(&adev->smu,
						       dc_to_pp_clock_type(clk_type),
						       &pp_clk_info))
			return false;
	}

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

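	/* dc_to_pp_clock_type() returns 0 for clock types it cannot map. */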
	if (!pp_clock_request.clock_type)
		return false;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
			adev->powerplay.pp_handle,
			&pp_clock_request);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->display_clock_voltage_request)
		ret = smu_display_clock_voltage_request(&adev->smu,
							&pp_clock_request);
	if (ret)
		return false;
	return true;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};
	int ret = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
		ret = adev->powerplay.pp_funcs->get_current_clocks(
			adev->powerplay.pp_handle,
			&pp_clk_info);
	else if (adev->smu.funcs)
		ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
	if (ret)
		return false;

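	/* pplib reports these clocks in 10 kHz units; DC expects kHz. */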
	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

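/*
 * Translate DC watermark range sets (readers -> DMIF, writers -> MCIF)
 * into the SOC15 watermark layout and hand them to powerplay or the SW
 * SMU. DC provides the ranges in MHz; the SMU interface takes kHz.
 */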
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

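	/* Watermark instances only go up to 3 (sets A-D); anything higher is clamped to set A. */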
	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
		smu_set_watermarks_for_clock_ranges(&adev->smu,
						    &wm_with_clock_ranges);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
		pp_funcs->notify_smu_enable_pwe(pp_handle);
	else if (adev->smu.funcs)
		smu_notify_smu_enable_pwe(&adev->smu);
}

void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_active_display_count)
		return;

	pp_funcs->set_active_display_count(pp_handle, count);
}

void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
		return;

	pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
}

void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
}

void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
		return;

	pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}

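/* Wire the Raven-family (rv) pp_smu callbacks up for DC. */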
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	funcs->rv_funcs.pp_smu.dm = ctx;
	funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
	funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
	funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
	funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
	funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
	funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}