/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dcn30_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
#include "atomfirmware.h"
#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"
#include "nbio/nbio_7_4_offset.h"
#include "dpcs/dpcs_3_0_0_offset.h"
#include "dpcs/dpcs_3_0_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "dcn30_smu11_driver_if.h"

#undef FN
#define FN(reg_name, field_name) \
	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name

#define REG(reg) \
	(clk_mgr->regs->reg)

#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
					mm ## reg_name

#undef CLK_SRI
#define CLK_SRI(reg_name, block, inst)\
	.reg_name = mm ## block ## _ ## reg_name

static const struct clk_mgr_registers clk_mgr_regs = {
	CLK_REG_LIST_DCN3()
};

static const struct clk_mgr_shift clk_mgr_shift = {
	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(__SHIFT)
};

static const struct clk_mgr_mask clk_mgr_mask = {
	CLK_COMMON_MASK_SH_LIST_DCN20_BASE(_MASK)
};


/* Query SMU for all clock states for a particular clock */
static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, uint32_t clk, unsigned int *entry_0, unsigned int *num_levels)
{
	unsigned int i;
	char *entry_i = (char *)entry_0;
	uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);

	if (ret & (1 << 31))
		/* fine-grained, only min and max */
		*num_levels = 2;
	else
		/* discrete, a number of fixed states */
		/* will set num_levels to 0 on failure */
		*num_levels = ret & 0xFF;

	/* if the initial message failed, num_levels will be 0 */
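	/*
	 * entry_0 points at one field (e.g. dcfclk_mhz) of entries[0];
	 * stepping entry_i by the full entry size writes that same field
	 * in each successive clk_table entry.
	 */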
	for (i = 0; i < *num_levels; i++) {
		*((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
		entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
	}
}

static void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
	DC_FP_START();
	dcn3_fpu_build_wm_range_table(&clk_mgr->base);
	DC_FP_END();
}

void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int num_levels;

	memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
	clk_mgr_base->clks.p_state_change_support = true;
	clk_mgr_base->clks.prev_p_state_change_support = true;
	clk_mgr->smu_present = false;

	if (!clk_mgr_base->bw_params)
		return;

	if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
		clk_mgr->smu_present = true;

	if (!clk_mgr->smu_present)
		return;

	// do we fail if these fail? if so, how? do we not care to check?
	dcn30_smu_check_driver_if_version(clk_mgr);
	dcn30_smu_check_msg_header_version(clk_mgr);

	/* DCFCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
			&num_levels);
	dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, 0);

	/* DTBCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
			&num_levels);

	/* SOCCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
					&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
					&num_levels);
	// DPREFCLK ???

	/* DISPCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_DISPCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
			&num_levels);

	/* DPPCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_PIXCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
			&num_levels);

	/* PHYCLK */
	dcn3_init_single_clock(clk_mgr, PPCLK_PHYCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz,
			&num_levels);

	/* Get UCLK, update bounding box */
	clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);

	/* WM range table */
	DC_FP_START();
	dcn3_build_wm_range_table(clk_mgr);
	DC_FP_END();
}

static int dcn30_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	struct fixed31_32 pll_req;
	/* get FbMult value */
	uint32_t pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);

	/* set up a fixed-point number
	 * this works because the int part is on the right edge of the register
	 * and the frac part is on the left edge
	 */
	pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
	pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;

	/* multiply by the REFCLK frequency (kHz) to get the VCO frequency in kHz */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	return dc_fixpt_floor(pll_req);
}

static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool enter_display_off = false;
	bool dpp_clock_lowered = false;
	bool update_pstate_unsupported_clk = false;
	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
	bool force_reset = false;
	bool update_uclk = false;
	bool p_state_change_support;

	if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
		return;

	if (clk_mgr_base->clks.dispclk_khz == 0 ||
			(dc->debug.force_clock_mode & 0x1)) {
		/* This happens on resume or boot up; if the forced_clock cfg option is used, we bypass programming DISPCLK and DPPCLK, but they still need to be set for S3. */
		force_reset = true;

		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);

		/* force_clock_mode 0x1: force a clock reset even if it is the same clock, as long as it is in the Passive level. */
	}
	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

	if (display_count == 0)
		enter_display_off = true;

	if (enter_display_off == safe_to_lower)
		dcn30_smu_set_num_of_displays(clk_mgr, display_count);

	if (dc->debug.force_min_dcfclk_mhz > 0)
		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
				new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);

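	/*
	 * should_set_clock() only allows a clock to be lowered when
	 * safe_to_lower is set; increases are always applied immediately.
	 */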
	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
	}

	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
		/* We don't actually care about socclk, don't notify SMU of hard min */
		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;

	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
	p_state_change_support = new_clocks->p_state_change_support;

	// invalidate the current P-State forced min in certain dc_mode_softmax situations
	if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
		if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) !=
				(clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000))
			update_pstate_unsupported_clk = true;
	}

	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) ||
			update_pstate_unsupported_clk) {
		clk_mgr_base->clks.p_state_change_support = p_state_change_support;

		/* to disable P-State switching, set UCLK min = max */
		if (!clk_mgr_base->clks.p_state_change_support) {
			if (dc->clk_mgr->dc_mode_softmax_enabled &&
				new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
			else
				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
		}
	}

	/* Always update saved value, even if new value not set due to P-State switching unsupported */
	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
		update_uclk = true;
	}

	/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
	if (clk_mgr_base->clks.p_state_change_support &&
			(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
		if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;

		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
		update_dispclk = true;
	}

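	/*
	 * Sequence the DPP DTO and DENTIST divider updates so the effective
	 * per-pipe DPP clock never overshoots the validated rate while switching.
	 */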
	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
		if (dpp_clock_lowered) {
			/* if clock is being lowered, increase DTO before lowering refclk */
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
			dcn20_update_clocks_update_dentist(clk_mgr, context);
		} else {
			/* if clock is being raised, increase refclk before lowering DTO */
			if (update_dppclk || update_dispclk)
				dcn20_update_clocks_update_dentist(clk_mgr, context);
			/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
			 * that we do not lower dto when it is not safe to lower. We do not need to
			 * compare the current and new dppclk before calling this function.
			 */
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		}
	}

	if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		/* update dmcu for wait_loop count */
		dmcu->funcs->set_psr_wait_loop(dmcu,
				clk_mgr_base->clks.dispclk_khz / 1000 / 7);
}


static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	unsigned int i;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;

	if (!clk_mgr->smu_present)
		return;

	if (!table)
		// should log failure
		return;

	memset(table, 0, sizeof(*table));

	/* collect valid ranges, place in pmfw table */
	for (i = 0; i < WM_SET_COUNT; i++)
		if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
			table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
		}

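	/* hand the GART address of the table to PMFW, then trigger the DRAM-to-SMU copy */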
	dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
	dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
	dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	if (current_mode) {
		if (clk_mgr_base->clks.p_state_change_support)
			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
		else
			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
	} else {
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
				clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
	}
}

/* Set max memclk to highest DPM value */
static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
			clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
}

static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}

static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
}

/* Get current memclk states, update bounding box */
static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int num_levels;

	if (!clk_mgr->smu_present)
		return;

	/* Refresh memclk states */
	dcn3_init_single_clock(clk_mgr, PPCLK_UCLK,
			&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
			&num_levels);
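	/* keep at least one entry so the table is never empty if the SMU query failed */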
	clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;

	clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);

	/* Refresh bounding box */
	DC_FP_START();
	clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
			clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
	DC_FP_END();
}

static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	return clk_mgr->smu_present;
}

static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
					struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->dramclk_khz != b->dramclk_khz)
		return false;
	else if (a->p_state_change_support != b->p_state_change_support)
		return false;

	return true;
}

static void dcn3_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	if (!clk_mgr->smu_present)
		return;

	dcn30_smu_set_pme_workaround(clk_mgr);
}

/* Notify clk_mgr of a change in link rate, update phyclk frequency if necessary */
static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_link *link)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	unsigned int i, max_phyclk_req = clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz * 1000;

	if (!clk_mgr->smu_present)
		return;

	/* TODO - DP2.0 HW: calculate link 128b/132 link rate in clock manager with new formula */

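	/* link_rate is expressed in multiples of the DP link rate reference
	 * (LINK_RATE_REF_FREQ_IN_KHZ), so this yields the per-link PHYCLK
	 * requirement in kHz.
	 */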
	clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;

	for (i = 0; i < MAX_PIPES * 2; i++) {
		if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req)
			max_phyclk_req = clk_mgr->cur_phyclk_req_table[i];
	}

	if (max_phyclk_req != clk_mgr_base->clks.phyclk_khz) {
		clk_mgr_base->clks.phyclk_khz = max_phyclk_req;
		dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, khz_to_mhz_ceil(clk_mgr_base->clks.phyclk_khz));
	}
}

static struct clk_mgr_funcs dcn3_funcs = {
		.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
		.update_clocks = dcn3_update_clocks,
		.init_clocks = dcn3_init_clocks,
		.notify_wm_ranges = dcn3_notify_wm_ranges,
		.set_hard_min_memclk = dcn3_set_hard_min_memclk,
		.set_hard_max_memclk = dcn3_set_hard_max_memclk,
		.set_max_memclk = dcn3_set_max_memclk,
		.set_min_memclk = dcn3_set_min_memclk,
		.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
		.are_clock_states_equal = dcn3_are_clock_states_equal,
		.enable_pme_wa = dcn3_enable_pme_wa,
		.notify_link_rate_change = dcn30_notify_link_rate_change,
		.is_smu_present = dcn3_is_smu_present
};

static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
	dcn2_init_clocks(clk_mgr);

/* TODO: Implement the functions and remove the ifndef guard */
}

struct clk_mgr_funcs dcn3_fpga_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn2_update_clocks_fpga,
	.init_clocks = dcn3_init_clocks_fpga,
};

/* TODO for dcn30 for clk register offset */
void dcn3_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct clk_state_registers_and_bypass s = { 0 };

	clk_mgr->base.ctx = ctx;
	clk_mgr->base.funcs = &dcn3_funcs;
	clk_mgr->regs = &clk_mgr_regs;
	clk_mgr->clk_mgr_shift = &clk_mgr_shift;
	clk_mgr->clk_mgr_mask = &clk_mgr_mask;

	clk_mgr->dccg = dccg;
	clk_mgr->dfs_bypass_disp_clk = 0;

	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->dfs_ref_freq_khz = 100000;

	clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved

	/* integer part is now VCO frequency in kHz */
	clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);

	/* in case we don't get a value from the register, use default */
	if (clk_mgr->base.dentist_vco_freq_khz == 0)
		clk_mgr->base.dentist_vco_freq_khz = 3650000;
	/* Convert dprefclk units from MHz to KHz */
	/* Value already divided by 10, some resolution lost */

	/* TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
	//ASSERT(s.dprefclk != 0);
	if (s.dprefclk != 0)
		clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;

	clk_mgr->dfs_bypass_enabled = false;

	clk_mgr->smu_present = false;

	dce_clock_read_ss_info(clk_mgr);

	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);

	/* need physical address of table to give to PMFW */
	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
			&clk_mgr->wm_range_table_addr);
}

void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
	kfree(clk_mgr->base.bw_params);

	if (clk_mgr->wm_range_table)
		dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
				clk_mgr->wm_range_table);
}