1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "i915_reg.h"
28 #include "intel_de.h"
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
38 #include "intel_tc.h"
39 
40 /**
41  * DOC: Display PLLs
42  *
43  * Display PLLs used for driving outputs vary by platform. While some have
44  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45  * from a pool. In the latter scenario, it is possible that multiple pipes
46  * share a PLL if their configurations match.
47  *
48  * This file provides an abstraction over display PLLs. The function
49  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
50  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
52  * for a given CRTC and encoder configuration by calling
53  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54  * with intel_release_shared_dplls().
55  * Changes to the users are first staged in the atomic state, and then made
56  * effective by calling intel_shared_dpll_swap_state() during the atomic
57  * commit phase.
58  */
59 
60 /* platform specific hooks for managing DPLLs */
61 struct intel_shared_dpll_funcs {
62 	/*
63 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
64 	 * the pll is not already enabled.
65 	 */
66 	void (*enable)(struct drm_i915_private *i915,
67 		       struct intel_shared_dpll *pll);
68 
69 	/*
70 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
71 	 * only when it is safe to disable the pll, i.e., there are no more
72 	 * tracked users for it.
73 	 */
74 	void (*disable)(struct drm_i915_private *i915,
75 			struct intel_shared_dpll *pll);
76 
77 	/*
78 	 * Hook for reading the values currently programmed to the DPLL
79 	 * registers. This is used for initial hw state readout and state
80 	 * verification after a mode set.
81 	 */
82 	bool (*get_hw_state)(struct drm_i915_private *i915,
83 			     struct intel_shared_dpll *pll,
84 			     struct intel_dpll_hw_state *hw_state);
85 
86 	/*
87 	 * Hook for calculating the pll's output frequency based on its passed
88 	 * in state.
89 	 */
90 	int (*get_freq)(struct drm_i915_private *i915,
91 			const struct intel_shared_dpll *pll,
92 			const struct intel_dpll_hw_state *pll_state);
93 };
94 
95 struct intel_dpll_mgr {
96 	const struct dpll_info *dpll_info;
97 
98 	int (*compute_dplls)(struct intel_atomic_state *state,
99 			     struct intel_crtc *crtc,
100 			     struct intel_encoder *encoder);
101 	int (*get_dplls)(struct intel_atomic_state *state,
102 			 struct intel_crtc *crtc,
103 			 struct intel_encoder *encoder);
104 	void (*put_dplls)(struct intel_atomic_state *state,
105 			  struct intel_crtc *crtc);
106 	void (*update_active_dpll)(struct intel_atomic_state *state,
107 				   struct intel_crtc *crtc,
108 				   struct intel_encoder *encoder);
109 	void (*update_ref_clks)(struct drm_i915_private *i915);
110 	void (*dump_hw_state)(struct drm_i915_private *i915,
111 			      const struct intel_dpll_hw_state *hw_state);
112 };
113 
114 static void
115 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
116 				  struct intel_shared_dpll_state *shared_dpll)
117 {
118 	struct intel_shared_dpll *pll;
119 	int i;
120 
121 	/* Copy shared dpll state */
122 	for_each_shared_dpll(i915, pll, i)
123 		shared_dpll[pll->index] = pll->state;
124 }
125 
126 static struct intel_shared_dpll_state *
127 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
128 {
129 	struct intel_atomic_state *state = to_intel_atomic_state(s);
130 
131 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
132 
133 	if (!state->dpll_set) {
134 		state->dpll_set = true;
135 
136 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
137 						  state->shared_dpll);
138 	}
139 
140 	return state->shared_dpll;
141 }
142 
143 /**
144  * intel_get_shared_dpll_by_id - get a DPLL given its id
145  * @i915: i915 device instance
146  * @id: pll id
147  *
148  * Returns:
 * A pointer to the DPLL with @id, or NULL if no such DPLL exists.
150  */
151 struct intel_shared_dpll *
152 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
153 			    enum intel_dpll_id id)
154 {
155 	struct intel_shared_dpll *pll;
156 	int i;
157 
158 	for_each_shared_dpll(i915, pll, i) {
159 		if (pll->info->id == id)
160 			return pll;
161 	}
162 
163 	MISSING_CASE(id);
164 	return NULL;
165 }
166 
167 /* For ILK+ */
168 void assert_shared_dpll(struct drm_i915_private *i915,
169 			struct intel_shared_dpll *pll,
170 			bool state)
171 {
172 	bool cur_state;
173 	struct intel_dpll_hw_state hw_state;
174 
175 	if (drm_WARN(&i915->drm, !pll,
176 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
177 		return;
178 
179 	cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
180 	I915_STATE_WARN(i915, cur_state != state,
181 			"%s assertion failure (expected %s, current %s)\n",
182 			pll->info->name, str_on_off(state),
183 			str_on_off(cur_state));
184 }
185 
186 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
187 {
188 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
189 }
190 
191 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
192 {
193 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
194 }
195 
196 static i915_reg_t
197 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
198 			   struct intel_shared_dpll *pll)
199 {
200 	if (IS_DG1(i915))
201 		return DG1_DPLL_ENABLE(pll->info->id);
202 	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
203 		 (pll->info->id == DPLL_ID_EHL_DPLL4))
204 		return MG_PLL_ENABLE(0);
205 
206 	return ICL_DPLL_ENABLE(pll->info->id);
207 }
208 
209 static i915_reg_t
210 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
211 			struct intel_shared_dpll *pll)
212 {
213 	const enum intel_dpll_id id = pll->info->id;
214 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
215 
216 	if (IS_ALDERLAKE_P(i915))
217 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
218 
219 	return MG_PLL_ENABLE(tc_port);
220 }
221 
222 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
223 				      struct intel_shared_dpll *pll)
224 {
225 	if (pll->info->power_domain)
226 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
227 
228 	pll->info->funcs->enable(i915, pll);
229 	pll->on = true;
230 }
231 
232 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
233 				       struct intel_shared_dpll *pll)
234 {
235 	pll->info->funcs->disable(i915, pll);
236 	pll->on = false;
237 
238 	if (pll->info->power_domain)
239 		intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
240 }
241 
242 /**
243  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
244  * @crtc_state: CRTC, and its state, which has a shared DPLL
245  *
 * Enable the shared DPLL used by the CRTC in @crtc_state.
247  */
248 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
249 {
250 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
251 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
252 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
253 	unsigned int pipe_mask = BIT(crtc->pipe);
254 	unsigned int old_mask;
255 
256 	if (drm_WARN_ON(&i915->drm, pll == NULL))
257 		return;
258 
259 	mutex_lock(&i915->display.dpll.lock);
260 	old_mask = pll->active_mask;
261 
262 	if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
263 	    drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
264 		goto out;
265 
266 	pll->active_mask |= pipe_mask;
267 
268 	drm_dbg_kms(&i915->drm,
269 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
270 		    pll->info->name, pll->active_mask, pll->on,
271 		    crtc->base.base.id, crtc->base.name);
272 
273 	if (old_mask) {
274 		drm_WARN_ON(&i915->drm, !pll->on);
275 		assert_shared_dpll_enabled(i915, pll);
276 		goto out;
277 	}
278 	drm_WARN_ON(&i915->drm, pll->on);
279 
280 	drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
281 
282 	_intel_enable_shared_dpll(i915, pll);
283 
284 out:
285 	mutex_unlock(&i915->display.dpll.lock);
286 }
287 
288 /**
289  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
290  * @crtc_state: CRTC, and its state, which has a shared DPLL
291  *
 * Disable the shared DPLL used by the CRTC in @crtc_state.
293  */
294 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
295 {
296 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
297 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
298 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
299 	unsigned int pipe_mask = BIT(crtc->pipe);
300 
301 	/* PCH only available on ILK+ */
302 	if (DISPLAY_VER(i915) < 5)
303 		return;
304 
305 	if (pll == NULL)
306 		return;
307 
308 	mutex_lock(&i915->display.dpll.lock);
309 	if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
310 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
311 		     crtc->base.base.id, crtc->base.name))
312 		goto out;
313 
314 	drm_dbg_kms(&i915->drm,
315 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
316 		    pll->info->name, pll->active_mask, pll->on,
317 		    crtc->base.base.id, crtc->base.name);
318 
319 	assert_shared_dpll_enabled(i915, pll);
320 	drm_WARN_ON(&i915->drm, !pll->on);
321 
322 	pll->active_mask &= ~pipe_mask;
323 	if (pll->active_mask)
324 		goto out;
325 
326 	drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
327 
328 	_intel_disable_shared_dpll(i915, pll);
329 
330 out:
331 	mutex_unlock(&i915->display.dpll.lock);
332 }
333 
334 static unsigned long
335 intel_dpll_mask_all(struct drm_i915_private *i915)
336 {
337 	struct intel_shared_dpll *pll;
338 	unsigned long dpll_mask = 0;
339 	int i;
340 
341 	for_each_shared_dpll(i915, pll, i) {
342 		drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
343 
344 		dpll_mask |= BIT(pll->info->id);
345 	}
346 
347 	return dpll_mask;
348 }
349 
350 static struct intel_shared_dpll *
351 intel_find_shared_dpll(struct intel_atomic_state *state,
352 		       const struct intel_crtc *crtc,
353 		       const struct intel_dpll_hw_state *pll_state,
354 		       unsigned long dpll_mask)
355 {
356 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
357 	unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
358 	struct intel_shared_dpll_state *shared_dpll;
359 	struct intel_shared_dpll *unused_pll = NULL;
360 	enum intel_dpll_id id;
361 
362 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
363 
364 	drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
365 
366 	for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
367 		struct intel_shared_dpll *pll;
368 
369 		pll = intel_get_shared_dpll_by_id(i915, id);
370 		if (!pll)
371 			continue;
372 
		/*
		 * Check PLLs that already have users first; remember
		 * the first unused one as a fallback.
		 */
374 		if (shared_dpll[pll->index].pipe_mask == 0) {
375 			if (!unused_pll)
376 				unused_pll = pll;
377 			continue;
378 		}
379 
380 		if (memcmp(pll_state,
381 			   &shared_dpll[pll->index].hw_state,
382 			   sizeof(*pll_state)) == 0) {
383 			drm_dbg_kms(&i915->drm,
384 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
385 				    crtc->base.base.id, crtc->base.name,
386 				    pll->info->name,
387 				    shared_dpll[pll->index].pipe_mask,
388 				    pll->active_mask);
389 			return pll;
390 		}
391 	}
392 
393 	/* Ok no matching timings, maybe there's a free one? */
394 	if (unused_pll) {
395 		drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
396 			    crtc->base.base.id, crtc->base.name,
397 			    unused_pll->info->name);
398 		return unused_pll;
399 	}
400 
401 	return NULL;
402 }
403 
404 /**
405  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 * @crtc: CRTC on whose behalf the reference is taken
407  * @pll: DPLL for which the reference is taken
408  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
409  *
410  * Take a reference for @pll tracking the use of it by @crtc.
411  */
412 static void
413 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
414 				 const struct intel_shared_dpll *pll,
415 				 struct intel_shared_dpll_state *shared_dpll_state)
416 {
417 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
418 
419 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
420 
421 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
422 
423 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
424 		    crtc->base.base.id, crtc->base.name, pll->info->name);
425 }
426 
427 static void
428 intel_reference_shared_dpll(struct intel_atomic_state *state,
429 			    const struct intel_crtc *crtc,
430 			    const struct intel_shared_dpll *pll,
431 			    const struct intel_dpll_hw_state *pll_state)
432 {
433 	struct intel_shared_dpll_state *shared_dpll;
434 
435 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
436 
437 	if (shared_dpll[pll->index].pipe_mask == 0)
438 		shared_dpll[pll->index].hw_state = *pll_state;
439 
440 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
441 }
442 
443 /**
444  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on whose behalf the reference is dropped
446  * @pll: DPLL for which the reference is dropped
447  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
448  *
449  * Drop a reference for @pll tracking the end of use of it by @crtc.
450  */
451 void
452 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
453 				   const struct intel_shared_dpll *pll,
454 				   struct intel_shared_dpll_state *shared_dpll_state)
455 {
456 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
457 
458 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
459 
460 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
461 
462 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
463 		    crtc->base.base.id, crtc->base.name, pll->info->name);
464 }
465 
466 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
467 					  const struct intel_crtc *crtc,
468 					  const struct intel_shared_dpll *pll)
469 {
470 	struct intel_shared_dpll_state *shared_dpll;
471 
472 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
473 
474 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
475 }
476 
477 static void intel_put_dpll(struct intel_atomic_state *state,
478 			   struct intel_crtc *crtc)
479 {
480 	const struct intel_crtc_state *old_crtc_state =
481 		intel_atomic_get_old_crtc_state(state, crtc);
482 	struct intel_crtc_state *new_crtc_state =
483 		intel_atomic_get_new_crtc_state(state, crtc);
484 
485 	new_crtc_state->shared_dpll = NULL;
486 
487 	if (!old_crtc_state->shared_dpll)
488 		return;
489 
490 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
491 }
492 
493 /**
494  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
495  * @state: atomic state
496  *
497  * This is the dpll version of drm_atomic_helper_swap_state() since the
498  * helper does not handle driver-specific global state.
499  *
500  * For consistency with atomic helpers this function does a complete swap,
501  * i.e. it also puts the current state into @state, even though there is no
502  * need for that at this moment.
503  */
504 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
505 {
506 	struct drm_i915_private *i915 = to_i915(state->base.dev);
507 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
508 	struct intel_shared_dpll *pll;
509 	int i;
510 
511 	if (!state->dpll_set)
512 		return;
513 
514 	for_each_shared_dpll(i915, pll, i)
515 		swap(pll->state, shared_dpll[pll->index]);
516 }
517 
518 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
519 				      struct intel_shared_dpll *pll,
520 				      struct intel_dpll_hw_state *hw_state)
521 {
522 	const enum intel_dpll_id id = pll->info->id;
523 	intel_wakeref_t wakeref;
524 	u32 val;
525 
526 	wakeref = intel_display_power_get_if_enabled(i915,
527 						     POWER_DOMAIN_DISPLAY_CORE);
528 	if (!wakeref)
529 		return false;
530 
531 	val = intel_de_read(i915, PCH_DPLL(id));
532 	hw_state->dpll = val;
533 	hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
534 	hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
535 
536 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
537 
538 	return val & DPLL_VCO_ENABLE;
539 }
540 
541 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
542 {
543 	u32 val;
544 	bool enabled;
545 
546 	val = intel_de_read(i915, PCH_DREF_CONTROL);
547 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
548 			    DREF_SUPERSPREAD_SOURCE_MASK));
549 	I915_STATE_WARN(i915, !enabled,
550 			"PCH refclk assertion failure, should be active but is disabled\n");
551 }
552 
553 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
554 				struct intel_shared_dpll *pll)
555 {
556 	const enum intel_dpll_id id = pll->info->id;
557 
558 	/* PCH refclock must be enabled first */
559 	ibx_assert_pch_refclk_enabled(i915);
560 
561 	intel_de_write(i915, PCH_FP0(id), pll->state.hw_state.fp0);
562 	intel_de_write(i915, PCH_FP1(id), pll->state.hw_state.fp1);
563 
564 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
565 
566 	/* Wait for the clocks to stabilize. */
567 	intel_de_posting_read(i915, PCH_DPLL(id));
568 	udelay(150);
569 
	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
575 	intel_de_write(i915, PCH_DPLL(id), pll->state.hw_state.dpll);
576 	intel_de_posting_read(i915, PCH_DPLL(id));
577 	udelay(200);
578 }
579 
580 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
581 				 struct intel_shared_dpll *pll)
582 {
583 	const enum intel_dpll_id id = pll->info->id;
584 
585 	intel_de_write(i915, PCH_DPLL(id), 0);
586 	intel_de_posting_read(i915, PCH_DPLL(id));
587 	udelay(200);
588 }
589 
590 static int ibx_compute_dpll(struct intel_atomic_state *state,
591 			    struct intel_crtc *crtc,
592 			    struct intel_encoder *encoder)
593 {
594 	return 0;
595 }
596 
597 static int ibx_get_dpll(struct intel_atomic_state *state,
598 			struct intel_crtc *crtc,
599 			struct intel_encoder *encoder)
600 {
601 	struct intel_crtc_state *crtc_state =
602 		intel_atomic_get_new_crtc_state(state, crtc);
603 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
604 	struct intel_shared_dpll *pll;
605 	enum intel_dpll_id id;
606 
607 	if (HAS_PCH_IBX(i915)) {
608 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
609 		id = (enum intel_dpll_id) crtc->pipe;
610 		pll = intel_get_shared_dpll_by_id(i915, id);
611 
612 		drm_dbg_kms(&i915->drm,
613 			    "[CRTC:%d:%s] using pre-allocated %s\n",
614 			    crtc->base.base.id, crtc->base.name,
615 			    pll->info->name);
616 	} else {
617 		pll = intel_find_shared_dpll(state, crtc,
618 					     &crtc_state->dpll_hw_state,
619 					     BIT(DPLL_ID_PCH_PLL_B) |
620 					     BIT(DPLL_ID_PCH_PLL_A));
621 	}
622 
623 	if (!pll)
624 		return -EINVAL;
625 
626 	/* reference the pll */
627 	intel_reference_shared_dpll(state, crtc,
628 				    pll, &crtc_state->dpll_hw_state);
629 
630 	crtc_state->shared_dpll = pll;
631 
632 	return 0;
633 }
634 
635 static void ibx_dump_hw_state(struct drm_i915_private *i915,
636 			      const struct intel_dpll_hw_state *hw_state)
637 {
638 	drm_dbg_kms(&i915->drm,
639 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
640 		    "fp0: 0x%x, fp1: 0x%x\n",
641 		    hw_state->dpll,
642 		    hw_state->dpll_md,
643 		    hw_state->fp0,
644 		    hw_state->fp1);
645 }
646 
647 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
648 	.enable = ibx_pch_dpll_enable,
649 	.disable = ibx_pch_dpll_disable,
650 	.get_hw_state = ibx_pch_dpll_get_hw_state,
651 };
652 
653 static const struct dpll_info pch_plls[] = {
654 	{ .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
655 	{ .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
656 	{}
657 };
658 
659 static const struct intel_dpll_mgr pch_pll_mgr = {
660 	.dpll_info = pch_plls,
661 	.compute_dplls = ibx_compute_dpll,
662 	.get_dplls = ibx_get_dpll,
663 	.put_dplls = intel_put_dpll,
664 	.dump_hw_state = ibx_dump_hw_state,
665 };
666 
667 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
668 				 struct intel_shared_dpll *pll)
669 {
670 	const enum intel_dpll_id id = pll->info->id;
671 
672 	intel_de_write(i915, WRPLL_CTL(id), pll->state.hw_state.wrpll);
673 	intel_de_posting_read(i915, WRPLL_CTL(id));
674 	udelay(20);
675 }
676 
677 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
678 				struct intel_shared_dpll *pll)
679 {
680 	intel_de_write(i915, SPLL_CTL, pll->state.hw_state.spll);
681 	intel_de_posting_read(i915, SPLL_CTL);
682 	udelay(20);
683 }
684 
685 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
686 				  struct intel_shared_dpll *pll)
687 {
688 	const enum intel_dpll_id id = pll->info->id;
689 
690 	intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
691 	intel_de_posting_read(i915, WRPLL_CTL(id));
692 
693 	/*
694 	 * Try to set up the PCH reference clock once all DPLLs
695 	 * that depend on it have been shut down.
696 	 */
697 	if (i915->display.dpll.pch_ssc_use & BIT(id))
698 		intel_init_pch_refclk(i915);
699 }
700 
701 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
702 				 struct intel_shared_dpll *pll)
703 {
704 	enum intel_dpll_id id = pll->info->id;
705 
706 	intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
707 	intel_de_posting_read(i915, SPLL_CTL);
708 
709 	/*
710 	 * Try to set up the PCH reference clock once all DPLLs
711 	 * that depend on it have been shut down.
712 	 */
713 	if (i915->display.dpll.pch_ssc_use & BIT(id))
714 		intel_init_pch_refclk(i915);
715 }
716 
717 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
718 				       struct intel_shared_dpll *pll,
719 				       struct intel_dpll_hw_state *hw_state)
720 {
721 	const enum intel_dpll_id id = pll->info->id;
722 	intel_wakeref_t wakeref;
723 	u32 val;
724 
725 	wakeref = intel_display_power_get_if_enabled(i915,
726 						     POWER_DOMAIN_DISPLAY_CORE);
727 	if (!wakeref)
728 		return false;
729 
730 	val = intel_de_read(i915, WRPLL_CTL(id));
731 	hw_state->wrpll = val;
732 
733 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
734 
735 	return val & WRPLL_PLL_ENABLE;
736 }
737 
738 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
739 				      struct intel_shared_dpll *pll,
740 				      struct intel_dpll_hw_state *hw_state)
741 {
742 	intel_wakeref_t wakeref;
743 	u32 val;
744 
745 	wakeref = intel_display_power_get_if_enabled(i915,
746 						     POWER_DOMAIN_DISPLAY_CORE);
747 	if (!wakeref)
748 		return false;
749 
750 	val = intel_de_read(i915, SPLL_CTL);
751 	hw_state->spll = val;
752 
753 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
754 
755 	return val & SPLL_PLL_ENABLE;
756 }
757 
758 #define LC_FREQ 2700
759 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
760 
761 #define P_MIN 2
762 #define P_MAX 64
763 #define P_INC 2
764 
765 /* Constraints for PLL good behavior */
766 #define REF_MIN 48
767 #define REF_MAX 400
768 #define VCO_MIN 2400
769 #define VCO_MAX 4800
770 
771 struct hsw_wrpll_rnp {
772 	unsigned p, n2, r2;
773 };
774 
775 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
776 {
777 	switch (clock) {
778 	case 25175000:
779 	case 25200000:
780 	case 27000000:
781 	case 27027000:
782 	case 37762500:
783 	case 37800000:
784 	case 40500000:
785 	case 40541000:
786 	case 54000000:
787 	case 54054000:
788 	case 59341000:
789 	case 59400000:
790 	case 72000000:
791 	case 74176000:
792 	case 74250000:
793 	case 81000000:
794 	case 81081000:
795 	case 89012000:
796 	case 89100000:
797 	case 108000000:
798 	case 108108000:
799 	case 111264000:
800 	case 111375000:
801 	case 148352000:
802 	case 148500000:
803 	case 162000000:
804 	case 162162000:
805 	case 222525000:
806 	case 222750000:
807 	case 296703000:
808 	case 297000000:
809 		return 0;
810 	case 233500000:
811 	case 245250000:
812 	case 247750000:
813 	case 253250000:
814 	case 298000000:
815 		return 1500;
816 	case 169128000:
817 	case 169500000:
818 	case 179500000:
819 	case 202000000:
820 		return 2000;
821 	case 256250000:
822 	case 262500000:
823 	case 270000000:
824 	case 272500000:
825 	case 273750000:
826 	case 280750000:
827 	case 281250000:
828 	case 286000000:
829 	case 291750000:
830 		return 4000;
831 	case 267250000:
832 	case 268500000:
833 		return 5000;
834 	default:
835 		return 1000;
836 	}
837 }
838 
839 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
840 				 unsigned int r2, unsigned int n2,
841 				 unsigned int p,
842 				 struct hsw_wrpll_rnp *best)
843 {
844 	u64 a, b, c, d, diff, diff_best;
845 
846 	/* No best (r,n,p) yet */
847 	if (best->p == 0) {
848 		best->p = p;
849 		best->n2 = n2;
850 		best->r2 = r2;
851 		return;
852 	}
853 
854 	/*
855 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
856 	 * freq2k.
857 	 *
858 	 * delta = 1e6 *
859 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
860 	 *	   freq2k;
861 	 *
862 	 * and we would like delta <= budget.
863 	 *
864 	 * If the discrepancy is above the PPM-based budget, always prefer to
865 	 * improve upon the previous solution.  However, if you're within the
866 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
867 	 */
868 	a = freq2k * budget * p * r2;
869 	b = freq2k * budget * best->p * best->r2;
870 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
871 	diff_best = abs_diff(freq2k * best->p * best->r2,
872 			     LC_FREQ_2K * best->n2);
873 	c = 1000000 * diff;
874 	d = 1000000 * diff_best;
875 
876 	if (a < c && b < d) {
877 		/* If both are above the budget, pick the closer */
878 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
879 			best->p = p;
880 			best->n2 = n2;
881 			best->r2 = r2;
882 		}
883 	} else if (a >= c && b < d) {
		/* Candidate within budget, current best is not: update */
885 		best->p = p;
886 		best->n2 = n2;
887 		best->r2 = r2;
888 	} else if (a >= c && b >= d) {
889 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
890 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
891 			best->p = p;
892 			best->n2 = n2;
893 			best->r2 = r2;
894 		}
895 	}
896 	/* Otherwise a < c && b >= d, do nothing */
897 }
898 
899 static void
900 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
901 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
902 {
903 	u64 freq2k;
904 	unsigned p, n2, r2;
905 	struct hsw_wrpll_rnp best = {};
906 	unsigned budget;
907 
908 	freq2k = clock / 100;
909 
910 	budget = hsw_wrpll_get_budget_for_freq(clock);
911 
	/*
	 * Special case handling for 540 MHz pixel clock: bypass WR PLL
	 * entirely and directly pass the LC PLL to it.
	 */
914 	if (freq2k == 5400000) {
915 		*n2_out = 2;
916 		*p_out = 1;
917 		*r2_out = 2;
918 		return;
919 	}
920 
921 	/*
922 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
923 	 * the WR PLL.
924 	 *
925 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
926 	 * Injecting R2 = 2 * R gives:
927 	 *   REF_MAX * r2 > LC_FREQ * 2 and
928 	 *   REF_MIN * r2 < LC_FREQ * 2
929 	 *
930 	 * Which means the desired boundaries for r2 are:
931 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
932 	 *
933 	 */
934 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
935 	     r2 <= LC_FREQ * 2 / REF_MIN;
936 	     r2++) {
937 
938 		/*
939 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
940 		 *
941 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
942 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
943 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
944 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
945 		 *
946 		 * Which means the desired boundaries for n2 are:
947 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
948 		 */
949 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
950 		     n2 <= VCO_MAX * r2 / LC_FREQ;
951 		     n2++) {
952 
953 			for (p = P_MIN; p <= P_MAX; p += P_INC)
954 				hsw_wrpll_update_rnp(freq2k, budget,
955 						     r2, n2, p, &best);
956 		}
957 	}
958 
959 	*n2_out = best.n2;
960 	*p_out = best.p;
961 	*r2_out = best.r2;
962 }
963 
964 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
965 				  const struct intel_shared_dpll *pll,
966 				  const struct intel_dpll_hw_state *pll_state)
967 {
968 	int refclk;
969 	int n, p, r;
970 	u32 wrpll = pll_state->wrpll;
971 
972 	switch (wrpll & WRPLL_REF_MASK) {
973 	case WRPLL_REF_SPECIAL_HSW:
974 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
975 		if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
976 			refclk = i915->display.dpll.ref_clks.nssc;
977 			break;
978 		}
979 		fallthrough;
980 	case WRPLL_REF_PCH_SSC:
981 		/*
982 		 * We could calculate spread here, but our checking
983 		 * code only cares about 5% accuracy, and spread is a max of
984 		 * 0.5% downspread.
985 		 */
986 		refclk = i915->display.dpll.ref_clks.ssc;
987 		break;
988 	case WRPLL_REF_LCPLL:
989 		refclk = 2700000;
990 		break;
991 	default:
992 		MISSING_CASE(wrpll);
993 		return 0;
994 	}
995 
996 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
997 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
998 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
999 
1000 	/* Convert to KHz, p & r have a fixed point portion */
1001 	return (refclk * n / 10) / (p * r) * 2;
1002 }
1003 
1004 static int
1005 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1006 			   struct intel_crtc *crtc)
1007 {
1008 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1009 	struct intel_crtc_state *crtc_state =
1010 		intel_atomic_get_new_crtc_state(state, crtc);
1011 	unsigned int p, n2, r2;
1012 
1013 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1014 
1015 	crtc_state->dpll_hw_state.wrpll =
1016 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1017 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1018 		WRPLL_DIVIDER_POST(p);
1019 
1020 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1021 							&crtc_state->dpll_hw_state);
1022 
1023 	return 0;
1024 }
1025 
1026 static struct intel_shared_dpll *
1027 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1028 		       struct intel_crtc *crtc)
1029 {
1030 	struct intel_crtc_state *crtc_state =
1031 		intel_atomic_get_new_crtc_state(state, crtc);
1032 
1033 	return intel_find_shared_dpll(state, crtc,
1034 				      &crtc_state->dpll_hw_state,
1035 				      BIT(DPLL_ID_WRPLL2) |
1036 				      BIT(DPLL_ID_WRPLL1));
1037 }
1038 
1039 static int
1040 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1041 {
1042 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1043 	int clock = crtc_state->port_clock;
1044 
1045 	switch (clock / 2) {
1046 	case 81000:
1047 	case 135000:
1048 	case 270000:
1049 		return 0;
1050 	default:
1051 		drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1052 			    clock);
1053 		return -EINVAL;
1054 	}
1055 }
1056 
1057 static struct intel_shared_dpll *
1058 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1059 {
1060 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1061 	struct intel_shared_dpll *pll;
1062 	enum intel_dpll_id pll_id;
1063 	int clock = crtc_state->port_clock;
1064 
1065 	switch (clock / 2) {
1066 	case 81000:
1067 		pll_id = DPLL_ID_LCPLL_810;
1068 		break;
1069 	case 135000:
1070 		pll_id = DPLL_ID_LCPLL_1350;
1071 		break;
1072 	case 270000:
1073 		pll_id = DPLL_ID_LCPLL_2700;
1074 		break;
1075 	default:
1076 		MISSING_CASE(clock / 2);
1077 		return NULL;
1078 	}
1079 
1080 	pll = intel_get_shared_dpll_by_id(i915, pll_id);
1081 
1082 	if (!pll)
1083 		return NULL;
1084 
1085 	return pll;
1086 }
1087 
1088 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1089 				  const struct intel_shared_dpll *pll,
1090 				  const struct intel_dpll_hw_state *pll_state)
1091 {
1092 	int link_clock = 0;
1093 
1094 	switch (pll->info->id) {
1095 	case DPLL_ID_LCPLL_810:
1096 		link_clock = 81000;
1097 		break;
1098 	case DPLL_ID_LCPLL_1350:
1099 		link_clock = 135000;
1100 		break;
1101 	case DPLL_ID_LCPLL_2700:
1102 		link_clock = 270000;
1103 		break;
1104 	default:
1105 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1106 		break;
1107 	}
1108 
1109 	return link_clock * 2;
1110 }
1111 
1112 static int
1113 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1114 			  struct intel_crtc *crtc)
1115 {
1116 	struct intel_crtc_state *crtc_state =
1117 		intel_atomic_get_new_crtc_state(state, crtc);
1118 
1119 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1120 		return -EINVAL;
1121 
1122 	crtc_state->dpll_hw_state.spll =
1123 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1124 
1125 	return 0;
1126 }
1127 
1128 static struct intel_shared_dpll *
1129 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1130 		      struct intel_crtc *crtc)
1131 {
1132 	struct intel_crtc_state *crtc_state =
1133 		intel_atomic_get_new_crtc_state(state, crtc);
1134 
1135 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1136 				      BIT(DPLL_ID_SPLL));
1137 }
1138 
1139 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1140 				 const struct intel_shared_dpll *pll,
1141 				 const struct intel_dpll_hw_state *pll_state)
1142 {
1143 	int link_clock = 0;
1144 
1145 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1146 	case SPLL_FREQ_810MHz:
1147 		link_clock = 81000;
1148 		break;
1149 	case SPLL_FREQ_1350MHz:
1150 		link_clock = 135000;
1151 		break;
1152 	case SPLL_FREQ_2700MHz:
1153 		link_clock = 270000;
1154 		break;
1155 	default:
1156 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1157 		break;
1158 	}
1159 
1160 	return link_clock * 2;
1161 }
1162 
1163 static int hsw_compute_dpll(struct intel_atomic_state *state,
1164 			    struct intel_crtc *crtc,
1165 			    struct intel_encoder *encoder)
1166 {
1167 	struct intel_crtc_state *crtc_state =
1168 		intel_atomic_get_new_crtc_state(state, crtc);
1169 
1170 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1171 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1172 	else if (intel_crtc_has_dp_encoder(crtc_state))
1173 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1174 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1175 		return hsw_ddi_spll_compute_dpll(state, crtc);
1176 	else
1177 		return -EINVAL;
1178 }
1179 
1180 static int hsw_get_dpll(struct intel_atomic_state *state,
1181 			struct intel_crtc *crtc,
1182 			struct intel_encoder *encoder)
1183 {
1184 	struct intel_crtc_state *crtc_state =
1185 		intel_atomic_get_new_crtc_state(state, crtc);
1186 	struct intel_shared_dpll *pll = NULL;
1187 
1188 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1189 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1190 	else if (intel_crtc_has_dp_encoder(crtc_state))
1191 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1192 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1193 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1194 
1195 	if (!pll)
1196 		return -EINVAL;
1197 
1198 	intel_reference_shared_dpll(state, crtc,
1199 				    pll, &crtc_state->dpll_hw_state);
1200 
1201 	crtc_state->shared_dpll = pll;
1202 
1203 	return 0;
1204 }
1205 
1206 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1207 {
1208 	i915->display.dpll.ref_clks.ssc = 135000;
1209 	/* Non-SSC is only used on non-ULT HSW. */
1210 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1211 		i915->display.dpll.ref_clks.nssc = 24000;
1212 	else
1213 		i915->display.dpll.ref_clks.nssc = 135000;
1214 }
1215 
1216 static void hsw_dump_hw_state(struct drm_i915_private *i915,
1217 			      const struct intel_dpll_hw_state *hw_state)
1218 {
1219 	drm_dbg_kms(&i915->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1220 		    hw_state->wrpll, hw_state->spll);
1221 }
1222 
1223 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1224 	.enable = hsw_ddi_wrpll_enable,
1225 	.disable = hsw_ddi_wrpll_disable,
1226 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1227 	.get_freq = hsw_ddi_wrpll_get_freq,
1228 };
1229 
1230 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1231 	.enable = hsw_ddi_spll_enable,
1232 	.disable = hsw_ddi_spll_disable,
1233 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1234 	.get_freq = hsw_ddi_spll_get_freq,
1235 };
1236 
1237 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1238 				 struct intel_shared_dpll *pll)
1239 {
1240 }
1241 
1242 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1243 				  struct intel_shared_dpll *pll)
1244 {
1245 }
1246 
1247 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1248 				       struct intel_shared_dpll *pll,
1249 				       struct intel_dpll_hw_state *hw_state)
1250 {
1251 	return true;
1252 }
1253 
1254 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1255 	.enable = hsw_ddi_lcpll_enable,
1256 	.disable = hsw_ddi_lcpll_disable,
1257 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1258 	.get_freq = hsw_ddi_lcpll_get_freq,
1259 };
1260 
1261 static const struct dpll_info hsw_plls[] = {
1262 	{ .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1263 	{ .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1264 	{ .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1265 	{ .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1266 	  .flags = INTEL_DPLL_ALWAYS_ON, },
1267 	{ .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1268 	  .flags = INTEL_DPLL_ALWAYS_ON, },
1269 	{ .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1270 	  .flags = INTEL_DPLL_ALWAYS_ON, },
1271 	{}
1272 };
1273 
1274 static const struct intel_dpll_mgr hsw_pll_mgr = {
1275 	.dpll_info = hsw_plls,
1276 	.compute_dplls = hsw_compute_dpll,
1277 	.get_dplls = hsw_get_dpll,
1278 	.put_dplls = intel_put_dpll,
1279 	.update_ref_clks = hsw_update_dpll_ref_clks,
1280 	.dump_hw_state = hsw_dump_hw_state,
1281 };
1282 
1283 struct skl_dpll_regs {
1284 	i915_reg_t ctl, cfgcr1, cfgcr2;
1285 };
1286 
1287 /* this array is indexed by the *shared* pll id */
1288 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1289 	{
1290 		/* DPLL 0 */
1291 		.ctl = LCPLL1_CTL,
1292 		/* DPLL 0 doesn't support HDMI mode */
1293 	},
1294 	{
1295 		/* DPLL 1 */
1296 		.ctl = LCPLL2_CTL,
1297 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1298 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1299 	},
1300 	{
1301 		/* DPLL 2 */
1302 		.ctl = WRPLL_CTL(0),
1303 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1304 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1305 	},
1306 	{
1307 		/* DPLL 3 */
1308 		.ctl = WRPLL_CTL(1),
1309 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1310 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1311 	},
1312 };
1313 
1314 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1315 				    struct intel_shared_dpll *pll)
1316 {
1317 	const enum intel_dpll_id id = pll->info->id;
1318 
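	/*
	 * Each DPLL owns a 6-bit field in DPLL_CTRL1 (override, link rate,
	 * SSC and HDMI mode bits), hence the id * 6 shift below.
	 */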
1319 	intel_de_rmw(i915, DPLL_CTRL1,
1320 		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
1321 		     pll->state.hw_state.ctrl1 << (id * 6));
1322 	intel_de_posting_read(i915, DPLL_CTRL1);
1323 }
1324 
1325 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1326 			       struct intel_shared_dpll *pll)
1327 {
1328 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1329 	const enum intel_dpll_id id = pll->info->id;
1330 
1331 	skl_ddi_pll_write_ctrl1(i915, pll);
1332 
1333 	intel_de_write(i915, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1334 	intel_de_write(i915, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1335 	intel_de_posting_read(i915, regs[id].cfgcr1);
1336 	intel_de_posting_read(i915, regs[id].cfgcr2);
1337 
1338 	/* the enable bit is always bit 31 */
1339 	intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1340 
1341 	if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1342 		drm_err(&i915->drm, "DPLL %d not locked\n", id);
1343 }
1344 
1345 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1346 				 struct intel_shared_dpll *pll)
1347 {
1348 	skl_ddi_pll_write_ctrl1(i915, pll);
1349 }
1350 
1351 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1352 				struct intel_shared_dpll *pll)
1353 {
1354 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1355 	const enum intel_dpll_id id = pll->info->id;
1356 
1357 	/* the enable bit is always bit 31 */
1358 	intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1359 	intel_de_posting_read(i915, regs[id].ctl);
1360 }
1361 
1362 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1363 				  struct intel_shared_dpll *pll)
1364 {
1365 }
1366 
1367 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1368 				     struct intel_shared_dpll *pll,
1369 				     struct intel_dpll_hw_state *hw_state)
1370 {
1371 	u32 val;
1372 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1373 	const enum intel_dpll_id id = pll->info->id;
1374 	intel_wakeref_t wakeref;
1375 	bool ret;
1376 
1377 	wakeref = intel_display_power_get_if_enabled(i915,
1378 						     POWER_DOMAIN_DISPLAY_CORE);
1379 	if (!wakeref)
1380 		return false;
1381 
1382 	ret = false;
1383 
1384 	val = intel_de_read(i915, regs[id].ctl);
1385 	if (!(val & LCPLL_PLL_ENABLE))
1386 		goto out;
1387 
1388 	val = intel_de_read(i915, DPLL_CTRL1);
1389 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1390 
1391 	/* avoid reading back stale values if HDMI mode is not enabled */
1392 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1393 		hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1394 		hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1395 	}
1396 	ret = true;
1397 
1398 out:
1399 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1400 
1401 	return ret;
1402 }
1403 
1404 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1405 				       struct intel_shared_dpll *pll,
1406 				       struct intel_dpll_hw_state *hw_state)
1407 {
1408 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1409 	const enum intel_dpll_id id = pll->info->id;
1410 	intel_wakeref_t wakeref;
1411 	u32 val;
1412 	bool ret;
1413 
1414 	wakeref = intel_display_power_get_if_enabled(i915,
1415 						     POWER_DOMAIN_DISPLAY_CORE);
1416 	if (!wakeref)
1417 		return false;
1418 
1419 	ret = false;
1420 
1421 	/* DPLL0 is always enabled since it drives CDCLK */
1422 	val = intel_de_read(i915, regs[id].ctl);
1423 	if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1424 		goto out;
1425 
1426 	val = intel_de_read(i915, DPLL_CTRL1);
1427 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1428 
1429 	ret = true;
1430 
1431 out:
1432 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1433 
1434 	return ret;
1435 }
1436 
1437 struct skl_wrpll_context {
1438 	u64 min_deviation;		/* current minimal deviation */
1439 	u64 central_freq;		/* chosen central freq */
1440 	u64 dco_freq;			/* chosen dco freq */
1441 	unsigned int p;			/* chosen divider */
1442 };
1443 
/* DCO freq must be within +1%/-6% of the DCO central freq */
1445 #define SKL_DCO_MAX_PDEVIATION	100
1446 #define SKL_DCO_MAX_NDEVIATION	600
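/*
 * skl_wrpll_try_divider() computes the deviation in the same 0.01% units,
 * i.e. 1% == 100 and 6% == 600.
 */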
1447 
1448 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1449 				  u64 central_freq,
1450 				  u64 dco_freq,
1451 				  unsigned int divider)
1452 {
1453 	u64 deviation;
1454 
1455 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1456 			      central_freq);
1457 
1458 	/* positive deviation */
1459 	if (dco_freq >= central_freq) {
1460 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1461 		    deviation < ctx->min_deviation) {
1462 			ctx->min_deviation = deviation;
1463 			ctx->central_freq = central_freq;
1464 			ctx->dco_freq = dco_freq;
1465 			ctx->p = divider;
1466 		}
1467 	/* negative deviation */
1468 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1469 		   deviation < ctx->min_deviation) {
1470 		ctx->min_deviation = deviation;
1471 		ctx->central_freq = central_freq;
1472 		ctx->dco_freq = dco_freq;
1473 		ctx->p = divider;
1474 	}
1475 }
1476 
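/*
 * Worked example for skl_wrpll_get_multipliers() below (plain arithmetic,
 * not from bspec): p = 42 -> half = 21, 21 % 3 == 0, so p0 = 3, p1 = 7,
 * p2 = 2 and p0 * p1 * p2 == 42.
 */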
1477 static void skl_wrpll_get_multipliers(unsigned int p,
1478 				      unsigned int *p0 /* out */,
1479 				      unsigned int *p1 /* out */,
1480 				      unsigned int *p2 /* out */)
1481 {
1482 	/* even dividers */
1483 	if (p % 2 == 0) {
1484 		unsigned int half = p / 2;
1485 
1486 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1487 			*p0 = 2;
1488 			*p1 = 1;
1489 			*p2 = half;
1490 		} else if (half % 2 == 0) {
1491 			*p0 = 2;
1492 			*p1 = half / 2;
1493 			*p2 = 2;
1494 		} else if (half % 3 == 0) {
1495 			*p0 = 3;
1496 			*p1 = half / 3;
1497 			*p2 = 2;
1498 		} else if (half % 7 == 0) {
1499 			*p0 = 7;
1500 			*p1 = half / 7;
1501 			*p2 = 2;
1502 		}
1503 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1504 		*p0 = 3;
1505 		*p1 = 1;
1506 		*p2 = p / 3;
1507 	} else if (p == 5 || p == 7) {
1508 		*p0 = p;
1509 		*p1 = 1;
1510 		*p2 = 1;
1511 	} else if (p == 15) {
1512 		*p0 = 3;
1513 		*p1 = 1;
1514 		*p2 = 5;
1515 	} else if (p == 21) {
1516 		*p0 = 7;
1517 		*p1 = 1;
1518 		*p2 = 3;
1519 	} else if (p == 35) {
1520 		*p0 = 7;
1521 		*p1 = 1;
1522 		*p2 = 5;
1523 	}
1524 }
1525 
1526 struct skl_wrpll_params {
1527 	u32 dco_fraction;
1528 	u32 dco_integer;
1529 	u32 qdiv_ratio;
1530 	u32 qdiv_mode;
1531 	u32 kdiv;
1532 	u32 pdiv;
1533 	u32 central_freq;
1534 };
1535 
1536 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1537 				      u64 afe_clock,
1538 				      int ref_clock,
1539 				      u64 central_freq,
1540 				      u32 p0, u32 p1, u32 p2)
1541 {
1542 	u64 dco_freq;
1543 
1544 	switch (central_freq) {
1545 	case 9600000000ULL:
1546 		params->central_freq = 0;
1547 		break;
1548 	case 9000000000ULL:
1549 		params->central_freq = 1;
1550 		break;
1551 	case 8400000000ULL:
1552 		params->central_freq = 3;
1553 	}
1554 
1555 	switch (p0) {
1556 	case 1:
1557 		params->pdiv = 0;
1558 		break;
1559 	case 2:
1560 		params->pdiv = 1;
1561 		break;
1562 	case 3:
1563 		params->pdiv = 2;
1564 		break;
1565 	case 7:
1566 		params->pdiv = 4;
1567 		break;
1568 	default:
1569 		WARN(1, "Incorrect PDiv\n");
1570 	}
1571 
1572 	switch (p2) {
1573 	case 5:
1574 		params->kdiv = 0;
1575 		break;
1576 	case 2:
1577 		params->kdiv = 1;
1578 		break;
1579 	case 3:
1580 		params->kdiv = 2;
1581 		break;
1582 	case 1:
1583 		params->kdiv = 3;
1584 		break;
1585 	default:
1586 		WARN(1, "Incorrect KDiv\n");
1587 	}
1588 
1589 	params->qdiv_ratio = p1;
1590 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1591 
1592 	dco_freq = p0 * p1 * p2 * afe_clock;
1593 
1594 	/*
1595 	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
1597 	 */
1598 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1599 	params->dco_fraction =
1600 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1601 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1602 }
1603 
1604 static int
1605 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1606 			int ref_clock,
1607 			struct skl_wrpll_params *wrpll_params)
1608 {
1609 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1610 						 9000000000ULL,
1611 						 9600000000ULL };
1612 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1613 					    24, 28, 30, 32, 36, 40, 42, 44,
1614 					    48, 52, 54, 56, 60, 64, 66, 68,
1615 					    70, 72, 76, 78, 80, 84, 88, 90,
1616 					    92, 96, 98 };
1617 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1618 	static const struct {
1619 		const u8 *list;
1620 		int n_dividers;
1621 	} dividers[] = {
1622 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1623 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1624 	};
1625 	struct skl_wrpll_context ctx = {
1626 		.min_deviation = U64_MAX,
1627 	};
1628 	unsigned int dco, d, i;
1629 	unsigned int p0, p1, p2;
1630 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1631 
1632 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1633 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1634 			for (i = 0; i < dividers[d].n_dividers; i++) {
1635 				unsigned int p = dividers[d].list[i];
1636 				u64 dco_freq = p * afe_clock;
1637 
1638 				skl_wrpll_try_divider(&ctx,
1639 						      dco_central_freq[dco],
1640 						      dco_freq,
1641 						      p);
1642 				/*
1643 				 * Skip the remaining dividers if we're sure to
1644 				 * have found the definitive divider, we can't
1645 				 * improve a 0 deviation.
1646 				 */
1647 				if (ctx.min_deviation == 0)
1648 					goto skip_remaining_dividers;
1649 			}
1650 		}
1651 
1652 skip_remaining_dividers:
1653 		/*
1654 		 * If a solution is found with an even divider, prefer
1655 		 * this one.
1656 		 */
1657 		if (d == 0 && ctx.p)
1658 			break;
1659 	}
1660 
1661 	if (!ctx.p)
1662 		return -EINVAL;
1663 
1664 	/*
1665 	 * gcc incorrectly analyses that these can be used without being
1666 	 * initialized. To be fair, it's hard to guess.
1667 	 */
1668 	p0 = p1 = p2 = 0;
1669 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1670 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1671 				  ctx.central_freq, p0, p1, p2);
1672 
1673 	return 0;
1674 }
1675 
1676 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1677 				  const struct intel_shared_dpll *pll,
1678 				  const struct intel_dpll_hw_state *pll_state)
1679 {
1680 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1681 	u32 p0, p1, p2, dco_freq;
1682 
1683 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1684 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1685 
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1687 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1688 	else
1689 		p1 = 1;
1690 
1692 	switch (p0) {
1693 	case DPLL_CFGCR2_PDIV_1:
1694 		p0 = 1;
1695 		break;
1696 	case DPLL_CFGCR2_PDIV_2:
1697 		p0 = 2;
1698 		break;
1699 	case DPLL_CFGCR2_PDIV_3:
1700 		p0 = 3;
1701 		break;
1702 	case DPLL_CFGCR2_PDIV_7_INVALID:
1703 		/*
1704 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1705 		 * handling it the same way as PDIV_7.
1706 		 */
1707 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1708 		fallthrough;
1709 	case DPLL_CFGCR2_PDIV_7:
1710 		p0 = 7;
1711 		break;
1712 	default:
1713 		MISSING_CASE(p0);
1714 		return 0;
1715 	}
1716 
1717 	switch (p2) {
1718 	case DPLL_CFGCR2_KDIV_5:
1719 		p2 = 5;
1720 		break;
1721 	case DPLL_CFGCR2_KDIV_2:
1722 		p2 = 2;
1723 		break;
1724 	case DPLL_CFGCR2_KDIV_3:
1725 		p2 = 3;
1726 		break;
1727 	case DPLL_CFGCR2_KDIV_1:
1728 		p2 = 1;
1729 		break;
1730 	default:
1731 		MISSING_CASE(p2);
1732 		return 0;
1733 	}
1734 
1735 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1736 		   ref_clock;
1737 
1738 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1739 		    ref_clock / 0x8000;
1740 
1741 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1742 		return 0;
1743 
1744 	return dco_freq / (p0 * p1 * p2 * 5);
1745 }
1746 
1747 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1748 {
1749 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1750 	struct skl_wrpll_params wrpll_params = {};
1751 	u32 ctrl1, cfgcr1, cfgcr2;
1752 	int ret;
1753 
1754 	/*
1755 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1756 	 * as the DPLL id in this function.
1757 	 */
1758 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1759 
1760 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1761 
1762 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1763 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1764 	if (ret)
1765 		return ret;
1766 
1767 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1768 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1769 		wrpll_params.dco_integer;
1770 
1771 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1772 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1773 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1774 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1775 		wrpll_params.central_freq;
1776 
1777 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1778 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1779 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1780 
1781 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1782 							&crtc_state->dpll_hw_state);
1783 
1784 	return 0;
1785 }
1786 
1787 static int
1788 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1789 {
1790 	u32 ctrl1;
1791 
1792 	/*
1793 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1794 	 * as the DPLL id in this function.
1795 	 */
1796 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1797 	switch (crtc_state->port_clock / 2) {
1798 	case 81000:
1799 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1800 		break;
1801 	case 135000:
1802 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1803 		break;
1804 	case 270000:
1805 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1806 		break;
1807 		/* eDP 1.4 rates */
1808 	case 162000:
1809 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1810 		break;
1811 	case 108000:
1812 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1813 		break;
1814 	case 216000:
1815 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1816 		break;
1817 	}
1818 
1819 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1820 
1821 	return 0;
1822 }
1823 
1824 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1825 				  const struct intel_shared_dpll *pll,
1826 				  const struct intel_dpll_hw_state *pll_state)
1827 {
1828 	int link_clock = 0;
1829 
1830 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1831 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1832 	case DPLL_CTRL1_LINK_RATE_810:
1833 		link_clock = 81000;
1834 		break;
1835 	case DPLL_CTRL1_LINK_RATE_1080:
1836 		link_clock = 108000;
1837 		break;
1838 	case DPLL_CTRL1_LINK_RATE_1350:
1839 		link_clock = 135000;
1840 		break;
1841 	case DPLL_CTRL1_LINK_RATE_1620:
1842 		link_clock = 162000;
1843 		break;
1844 	case DPLL_CTRL1_LINK_RATE_2160:
1845 		link_clock = 216000;
1846 		break;
1847 	case DPLL_CTRL1_LINK_RATE_2700:
1848 		link_clock = 270000;
1849 		break;
1850 	default:
1851 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1852 		break;
1853 	}
1854 
1855 	return link_clock * 2;
1856 }
1857 
1858 static int skl_compute_dpll(struct intel_atomic_state *state,
1859 			    struct intel_crtc *crtc,
1860 			    struct intel_encoder *encoder)
1861 {
1862 	struct intel_crtc_state *crtc_state =
1863 		intel_atomic_get_new_crtc_state(state, crtc);
1864 
1865 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1866 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1867 	else if (intel_crtc_has_dp_encoder(crtc_state))
1868 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1869 	else
1870 		return -EINVAL;
1871 }
1872 
1873 static int skl_get_dpll(struct intel_atomic_state *state,
1874 			struct intel_crtc *crtc,
1875 			struct intel_encoder *encoder)
1876 {
1877 	struct intel_crtc_state *crtc_state =
1878 		intel_atomic_get_new_crtc_state(state, crtc);
1879 	struct intel_shared_dpll *pll;
1880 
1881 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1882 		pll = intel_find_shared_dpll(state, crtc,
1883 					     &crtc_state->dpll_hw_state,
1884 					     BIT(DPLL_ID_SKL_DPLL0));
1885 	else
1886 		pll = intel_find_shared_dpll(state, crtc,
1887 					     &crtc_state->dpll_hw_state,
1888 					     BIT(DPLL_ID_SKL_DPLL3) |
1889 					     BIT(DPLL_ID_SKL_DPLL2) |
1890 					     BIT(DPLL_ID_SKL_DPLL1));
1891 	if (!pll)
1892 		return -EINVAL;
1893 
1894 	intel_reference_shared_dpll(state, crtc,
1895 				    pll, &crtc_state->dpll_hw_state);
1896 
1897 	crtc_state->shared_dpll = pll;
1898 
1899 	return 0;
1900 }
1901 
1902 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1903 				const struct intel_shared_dpll *pll,
1904 				const struct intel_dpll_hw_state *pll_state)
1905 {
1906 	/*
1907 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1908 	 * the internal shift for each field
1909 	 */
1910 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1911 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1912 	else
1913 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1914 }
1915 
1916 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1917 {
1918 	/* No SSC ref */
1919 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1920 }
1921 
1922 static void skl_dump_hw_state(struct drm_i915_private *i915,
1923 			      const struct intel_dpll_hw_state *hw_state)
1924 {
1925 	drm_dbg_kms(&i915->drm, "dpll_hw_state: "
1926 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1927 		      hw_state->ctrl1,
1928 		      hw_state->cfgcr1,
1929 		      hw_state->cfgcr2);
1930 }
1931 
1932 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1933 	.enable = skl_ddi_pll_enable,
1934 	.disable = skl_ddi_pll_disable,
1935 	.get_hw_state = skl_ddi_pll_get_hw_state,
1936 	.get_freq = skl_ddi_pll_get_freq,
1937 };
1938 
1939 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1940 	.enable = skl_ddi_dpll0_enable,
1941 	.disable = skl_ddi_dpll0_disable,
1942 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1943 	.get_freq = skl_ddi_pll_get_freq,
1944 };
1945 
1946 static const struct dpll_info skl_plls[] = {
1947 	{ .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
1948 	  .flags = INTEL_DPLL_ALWAYS_ON, },
1949 	{ .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
1950 	{ .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
1951 	{ .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
1952 	{}
1953 };
1954 
1955 static const struct intel_dpll_mgr skl_pll_mgr = {
1956 	.dpll_info = skl_plls,
1957 	.compute_dplls = skl_compute_dpll,
1958 	.get_dplls = skl_get_dpll,
1959 	.put_dplls = intel_put_dpll,
1960 	.update_ref_clks = skl_update_dpll_ref_clks,
1961 	.dump_hw_state = skl_dump_hw_state,
1962 };
1963 
1964 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
1965 			       struct intel_shared_dpll *pll)
1966 {
1967 	u32 temp;
1968 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1969 	enum dpio_phy phy;
1970 	enum dpio_channel ch;
1971 
1972 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
1973 
1974 	/* Non-SSC reference */
1975 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
1976 
1977 	if (IS_GEMINILAKE(i915)) {
1978 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
1979 			     0, PORT_PLL_POWER_ENABLE);
1980 
1981 		if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
1982 				 PORT_PLL_POWER_STATE), 200))
1983 			drm_err(&i915->drm,
1984 				"Power state not set for PLL:%d\n", port);
1985 	}
1986 
1987 	/* Disable 10 bit clock */
1988 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
1989 		     PORT_PLL_10BIT_CLK_ENABLE, 0);
1990 
1991 	/* Write P1 & P2 */
1992 	intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
1993 		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);
1994 
1995 	/* Write M2 integer */
1996 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
1997 		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);
1998 
1999 	/* Write N */
2000 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2001 		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);
2002 
2003 	/* Write M2 fraction */
2004 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2005 		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);
2006 
2007 	/* Write M2 fraction enable */
2008 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2009 		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);
2010 
2011 	/* Write coeff */
2012 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2013 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
2014 	temp &= ~PORT_PLL_INT_COEFF_MASK;
2015 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
2016 	temp |= pll->state.hw_state.pll6;
2017 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2018 
2019 	/* Write calibration val */
2020 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2021 		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);
2022 
2023 	intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2024 		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);
2025 
2026 	temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2027 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2028 	temp &= ~PORT_PLL_DCO_AMP_MASK;
2029 	temp |= pll->state.hw_state.pll10;
2030 	intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2031 
2032 	/* Recalibrate with new settings */
2033 	temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2034 	temp |= PORT_PLL_RECALIBRATE;
2035 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2036 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2037 	temp |= pll->state.hw_state.ebb4;
2038 	intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2039 
2040 	/* Enable PLL */
2041 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2042 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2043 
2044 	if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2045 			200))
2046 		drm_err(&i915->drm, "PLL %d not locked\n", port);
2047 
2048 	if (IS_GEMINILAKE(i915)) {
2049 		temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN0(phy, ch));
2050 		temp |= DCC_DELAY_RANGE_2;
2051 		intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2052 	}
2053 
2054 	/*
	 * While we write to the group register to program all lanes at once, we
	 * can only read back individual lane registers, so pick lanes 0/1 for that.
2057 	 */
2058 	temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2059 	temp &= ~LANE_STAGGER_MASK;
2060 	temp &= ~LANESTAGGER_STRAP_OVRD;
2061 	temp |= pll->state.hw_state.pcsdw12;
2062 	intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2063 }
2064 
2065 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2066 				struct intel_shared_dpll *pll)
2067 {
2068 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2069 
2070 	intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2071 	intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2072 
2073 	if (IS_GEMINILAKE(i915)) {
2074 		intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2075 			     PORT_PLL_POWER_ENABLE, 0);
2076 
2077 		if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2078 				  PORT_PLL_POWER_STATE), 200))
2079 			drm_err(&i915->drm,
2080 				"Power state not reset for PLL:%d\n", port);
2081 	}
2082 }
2083 
2084 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2085 				     struct intel_shared_dpll *pll,
2086 				     struct intel_dpll_hw_state *hw_state)
2087 {
2088 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2089 	intel_wakeref_t wakeref;
2090 	enum dpio_phy phy;
2091 	enum dpio_channel ch;
2092 	u32 val;
2093 	bool ret;
2094 
2095 	bxt_port_to_phy_channel(i915, port, &phy, &ch);
2096 
2097 	wakeref = intel_display_power_get_if_enabled(i915,
2098 						     POWER_DOMAIN_DISPLAY_CORE);
2099 	if (!wakeref)
2100 		return false;
2101 
2102 	ret = false;
2103 
2104 	val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2105 	if (!(val & PORT_PLL_ENABLE))
2106 		goto out;
2107 
2108 	hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2109 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2110 
2111 	hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2112 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2113 
2114 	hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2115 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2116 
2117 	hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2118 	hw_state->pll1 &= PORT_PLL_N_MASK;
2119 
2120 	hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2121 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2122 
2123 	hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2124 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2125 
2126 	hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2127 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2128 			  PORT_PLL_INT_COEFF_MASK |
2129 			  PORT_PLL_GAIN_CTL_MASK;
2130 
2131 	hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2132 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2133 
2134 	hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2135 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2136 
2137 	hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2138 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2139 			   PORT_PLL_DCO_AMP_MASK;
2140 
2141 	/*
	 * While we write to the group register to program all lanes at once, we
	 * can only read back individual lane registers. We configure all lanes
	 * the same way, so just read out lanes 0/1 here and note if lanes 2/3 differ.
2145 	 */
2146 	hw_state->pcsdw12 = intel_de_read(i915,
2147 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2148 	if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2149 		drm_dbg(&i915->drm,
2150 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2151 			hw_state->pcsdw12,
2152 			intel_de_read(i915,
2153 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2154 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2155 
2156 	ret = true;
2157 
2158 out:
2159 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2160 
2161 	return ret;
2162 }
2163 
/* Pre-calculated values for DP link rates */
2165 static const struct dpll bxt_dp_clk_val[] = {
2166 	/* m2 is .22 binary fixed point */
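	/* e.g. m2 = 32.4 is stored as round(32.4 * 2^22) = 0x819999a */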
2167 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2168 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2169 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2170 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2171 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2172 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2173 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2174 };
2175 
2176 static int
2177 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2178 			  struct dpll *clk_div)
2179 {
2180 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2181 
2182 	/* Calculate HDMI div */
2183 	/*
2184 	 * FIXME: tie the following calculation into
2185 	 * i9xx_crtc_compute_clock
2186 	 */
2187 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2188 		return -EINVAL;
2189 
2190 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2191 
2192 	return 0;
2193 }
2194 
2195 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2196 				    struct dpll *clk_div)
2197 {
2198 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2199 	int i;
2200 
2201 	*clk_div = bxt_dp_clk_val[0];
2202 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2203 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2204 			*clk_div = bxt_dp_clk_val[i];
2205 			break;
2206 		}
2207 	}
2208 
2209 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2210 
2211 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2212 		    clk_div->dot != crtc_state->port_clock);
2213 }
2214 
2215 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2216 				     const struct dpll *clk_div)
2217 {
2218 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2219 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2220 	int clock = crtc_state->port_clock;
2221 	int vco = clk_div->vco;
2222 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2223 	u32 lanestagger;
2224 
2225 	if (vco >= 6200000 && vco <= 6700000) {
2226 		prop_coef = 4;
2227 		int_coef = 9;
2228 		gain_ctl = 3;
2229 		targ_cnt = 8;
2230 	} else if ((vco > 5400000 && vco < 6200000) ||
2231 			(vco >= 4800000 && vco < 5400000)) {
2232 		prop_coef = 5;
2233 		int_coef = 11;
2234 		gain_ctl = 3;
2235 		targ_cnt = 9;
2236 	} else if (vco == 5400000) {
2237 		prop_coef = 3;
2238 		int_coef = 8;
2239 		gain_ctl = 1;
2240 		targ_cnt = 9;
2241 	} else {
2242 		drm_err(&i915->drm, "Invalid VCO\n");
2243 		return -EINVAL;
2244 	}
2245 
2246 	if (clock > 270000)
2247 		lanestagger = 0x18;
2248 	else if (clock > 135000)
2249 		lanestagger = 0x0d;
2250 	else if (clock > 67000)
2251 		lanestagger = 0x07;
2252 	else if (clock > 33000)
2253 		lanestagger = 0x04;
2254 	else
2255 		lanestagger = 0x02;
2256 
2257 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2258 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2259 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2260 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2261 
2262 	if (clk_div->m2 & 0x3fffff)
2263 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2264 
2265 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2266 		PORT_PLL_INT_COEFF(int_coef) |
2267 		PORT_PLL_GAIN_CTL(gain_ctl);
2268 
2269 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2270 
2271 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2272 
2273 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2274 		PORT_PLL_DCO_AMP_OVR_EN_H;
2275 
2276 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2277 
2278 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2279 
2280 	return 0;
2281 }
2282 
2283 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2284 				const struct intel_shared_dpll *pll,
2285 				const struct intel_dpll_hw_state *pll_state)
2286 {
2287 	struct dpll clock;
2288 
2289 	clock.m1 = 2;
2290 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2291 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2292 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2293 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2294 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2295 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2296 
2297 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2298 }
2299 
2300 static int
2301 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2302 {
2303 	struct dpll clk_div = {};
2304 
2305 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2306 
2307 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2308 }
2309 
2310 static int
2311 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2312 {
2313 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2314 	struct dpll clk_div = {};
2315 	int ret;
2316 
2317 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2318 
2319 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2320 	if (ret)
2321 		return ret;
2322 
2323 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2324 						      &crtc_state->dpll_hw_state);
2325 
2326 	return 0;
2327 }
2328 
2329 static int bxt_compute_dpll(struct intel_atomic_state *state,
2330 			    struct intel_crtc *crtc,
2331 			    struct intel_encoder *encoder)
2332 {
2333 	struct intel_crtc_state *crtc_state =
2334 		intel_atomic_get_new_crtc_state(state, crtc);
2335 
2336 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2337 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2338 	else if (intel_crtc_has_dp_encoder(crtc_state))
2339 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2340 	else
2341 		return -EINVAL;
2342 }
2343 
2344 static int bxt_get_dpll(struct intel_atomic_state *state,
2345 			struct intel_crtc *crtc,
2346 			struct intel_encoder *encoder)
2347 {
2348 	struct intel_crtc_state *crtc_state =
2349 		intel_atomic_get_new_crtc_state(state, crtc);
2350 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2351 	struct intel_shared_dpll *pll;
2352 	enum intel_dpll_id id;
2353 
2354 	/* 1:1 mapping between ports and PLLs */
2355 	id = (enum intel_dpll_id) encoder->port;
2356 	pll = intel_get_shared_dpll_by_id(i915, id);
2357 
2358 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2359 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2360 
2361 	intel_reference_shared_dpll(state, crtc,
2362 				    pll, &crtc_state->dpll_hw_state);
2363 
2364 	crtc_state->shared_dpll = pll;
2365 
2366 	return 0;
2367 }
2368 
2369 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2370 {
2371 	i915->display.dpll.ref_clks.ssc = 100000;
2372 	i915->display.dpll.ref_clks.nssc = 100000;
2373 	/* DSI non-SSC ref 19.2MHz */
2374 }
2375 
2376 static void bxt_dump_hw_state(struct drm_i915_private *i915,
2377 			      const struct intel_dpll_hw_state *hw_state)
2378 {
	drm_dbg_kms(&i915->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2380 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2381 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2382 		    hw_state->ebb0,
2383 		    hw_state->ebb4,
2384 		    hw_state->pll0,
2385 		    hw_state->pll1,
2386 		    hw_state->pll2,
2387 		    hw_state->pll3,
2388 		    hw_state->pll6,
2389 		    hw_state->pll8,
2390 		    hw_state->pll9,
2391 		    hw_state->pll10,
2392 		    hw_state->pcsdw12);
2393 }
2394 
2395 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2396 	.enable = bxt_ddi_pll_enable,
2397 	.disable = bxt_ddi_pll_disable,
2398 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2399 	.get_freq = bxt_ddi_pll_get_freq,
2400 };
2401 
2402 static const struct dpll_info bxt_plls[] = {
2403 	{ .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2404 	{ .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2405 	{ .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2406 	{}
2407 };
2408 
2409 static const struct intel_dpll_mgr bxt_pll_mgr = {
2410 	.dpll_info = bxt_plls,
2411 	.compute_dplls = bxt_compute_dpll,
2412 	.get_dplls = bxt_get_dpll,
2413 	.put_dplls = intel_put_dpll,
2414 	.update_ref_clks = bxt_update_dpll_ref_clks,
2415 	.dump_hw_state = bxt_dump_hw_state,
2416 };
2417 
2418 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2419 				      int *qdiv, int *kdiv)
2420 {
2421 	/* even dividers */
2422 	if (bestdiv % 2 == 0) {
2423 		if (bestdiv == 2) {
2424 			*pdiv = 2;
2425 			*qdiv = 1;
2426 			*kdiv = 1;
2427 		} else if (bestdiv % 4 == 0) {
2428 			*pdiv = 2;
2429 			*qdiv = bestdiv / 4;
2430 			*kdiv = 2;
2431 		} else if (bestdiv % 6 == 0) {
2432 			*pdiv = 3;
2433 			*qdiv = bestdiv / 6;
2434 			*kdiv = 2;
2435 		} else if (bestdiv % 5 == 0) {
2436 			*pdiv = 5;
2437 			*qdiv = bestdiv / 10;
2438 			*kdiv = 2;
2439 		} else if (bestdiv % 14 == 0) {
2440 			*pdiv = 7;
2441 			*qdiv = bestdiv / 14;
2442 			*kdiv = 2;
2443 		}
2444 	} else {
2445 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2446 			*pdiv = bestdiv;
2447 			*qdiv = 1;
2448 			*kdiv = 1;
2449 		} else { /* 9, 15, 21 */
2450 			*pdiv = bestdiv / 3;
2451 			*qdiv = 1;
2452 			*kdiv = 3;
2453 		}
2454 	}
2455 }
2456 
2457 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2458 				      u32 dco_freq, u32 ref_freq,
2459 				      int pdiv, int qdiv, int kdiv)
2460 {
2461 	u32 dco;
2462 
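	/*
	 * Translate the logical kdiv/pdiv divider values into their register
	 * field encodings (cf. the ".pdiv = 0x2 /\* 3 *\/" style annotations
	 * in the icl_dp_combo_pll tables below).
	 */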
2463 	switch (kdiv) {
2464 	case 1:
2465 		params->kdiv = 1;
2466 		break;
2467 	case 2:
2468 		params->kdiv = 2;
2469 		break;
2470 	case 3:
2471 		params->kdiv = 4;
2472 		break;
2473 	default:
2474 		WARN(1, "Incorrect KDiv\n");
2475 	}
2476 
2477 	switch (pdiv) {
2478 	case 2:
2479 		params->pdiv = 1;
2480 		break;
2481 	case 3:
2482 		params->pdiv = 2;
2483 		break;
2484 	case 5:
2485 		params->pdiv = 4;
2486 		break;
2487 	case 7:
2488 		params->pdiv = 8;
2489 		break;
2490 	default:
2491 		WARN(1, "Incorrect PDiv\n");
2492 	}
2493 
2494 	WARN_ON(kdiv != 2 && qdiv != 1);
2495 
2496 	params->qdiv_ratio = qdiv;
2497 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2498 
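	/*
	 * Express DCO / ref as a 15 bit binary fraction, e.g. with a 24 MHz
	 * reference and an 8100000 kHz DCO: 8100000 / 24000 = 337.5, i.e.
	 * dco_integer = 0x151 and dco_fraction = 0x4000.
	 */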
2499 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2500 
2501 	params->dco_integer = dco >> 15;
2502 	params->dco_fraction = dco & 0x7fff;
2503 }
2504 
2505 /*
2506  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2507  * Program half of the nominal DCO divider fraction value.
2508  */
2509 static bool
2510 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2511 {
2512 	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2513 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2514 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2515 		 i915->display.dpll.ref_clks.nssc == 38400;
2516 }
2517 
2518 struct icl_combo_pll_params {
2519 	int clock;
2520 	struct skl_wrpll_params wrpll;
2521 };
2522 
2523 /*
 * These values are already adjusted: they're the bits we write to the
2525  * registers, not the logical values.
2526  */
2527 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2528 	{ 540000,
2529 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2530 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2531 	{ 270000,
2532 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2533 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2534 	{ 162000,
2535 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2536 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2537 	{ 324000,
2538 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2539 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2540 	{ 216000,
2541 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2542 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2543 	{ 432000,
2544 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2545 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2546 	{ 648000,
2547 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2548 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2549 	{ 810000,
2550 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2551 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2552 };
2553 
2555 /* Also used for 38.4 MHz values. */
2556 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2557 	{ 540000,
2558 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2559 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2560 	{ 270000,
2561 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2562 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2563 	{ 162000,
2564 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2565 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2566 	{ 324000,
2567 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2568 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2569 	{ 216000,
2570 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2571 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2572 	{ 432000,
2573 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2574 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2575 	{ 648000,
2576 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2577 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2578 	{ 810000,
2579 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2580 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2581 };
2582 
2583 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2584 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2585 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2586 };
2587 
2588 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2589 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2590 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2591 };
2592 
2593 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2594 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2595 	/* the following params are unused */
2596 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2597 };
2598 
2599 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2600 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2601 	/* the following params are unused */
2602 };
2603 
2604 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2605 				 struct skl_wrpll_params *pll_params)
2606 {
2607 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2608 	const struct icl_combo_pll_params *params =
2609 		i915->display.dpll.ref_clks.nssc == 24000 ?
2610 		icl_dp_combo_pll_24MHz_values :
2611 		icl_dp_combo_pll_19_2MHz_values;
2612 	int clock = crtc_state->port_clock;
2613 	int i;
2614 
2615 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2616 		if (clock == params[i].clock) {
2617 			*pll_params = params[i].wrpll;
2618 			return 0;
2619 		}
2620 	}
2621 
2622 	MISSING_CASE(clock);
2623 	return -EINVAL;
2624 }
2625 
2626 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2627 			    struct skl_wrpll_params *pll_params)
2628 {
2629 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2630 
2631 	if (DISPLAY_VER(i915) >= 12) {
2632 		switch (i915->display.dpll.ref_clks.nssc) {
2633 		default:
2634 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2635 			fallthrough;
2636 		case 19200:
2637 		case 38400:
2638 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2639 			break;
2640 		case 24000:
2641 			*pll_params = tgl_tbt_pll_24MHz_values;
2642 			break;
2643 		}
2644 	} else {
2645 		switch (i915->display.dpll.ref_clks.nssc) {
2646 		default:
2647 			MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2648 			fallthrough;
2649 		case 19200:
2650 		case 38400:
2651 			*pll_params = icl_tbt_pll_19_2MHz_values;
2652 			break;
2653 		case 24000:
2654 			*pll_params = icl_tbt_pll_24MHz_values;
2655 			break;
2656 		}
2657 	}
2658 
2659 	return 0;
2660 }
2661 
2662 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2663 				    const struct intel_shared_dpll *pll,
2664 				    const struct intel_dpll_hw_state *pll_state)
2665 {
2666 	/*
	 * The PLL outputs multiple frequencies at the same time; the selection
	 * is made at the DDI clock mux level.
2669 	 */
2670 	drm_WARN_ON(&i915->drm, 1);
2671 
2672 	return 0;
2673 }
2674 
2675 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2676 {
2677 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2678 
2679 	/*
	 * For ICL+, the spec states: if the reference frequency is 38.4 MHz,
	 * use 19.2 MHz because the DPLL automatically divides it by 2.
2682 	 */
2683 	if (ref_clock == 38400)
2684 		ref_clock = 19200;
2685 
2686 	return ref_clock;
2687 }
2688 
2689 static int
2690 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2691 	       struct skl_wrpll_params *wrpll_params)
2692 {
2693 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2694 	int ref_clock = icl_wrpll_ref_clock(i915);
2695 	u32 afe_clock = crtc_state->port_clock * 5;
2696 	u32 dco_min = 7998000;
2697 	u32 dco_max = 10000000;
2698 	u32 dco_mid = (dco_min + dco_max) / 2;
2699 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2700 					 18, 20, 24, 28, 30, 32,  36,  40,
2701 					 42, 44, 48, 50, 52, 54,  56,  60,
2702 					 64, 66, 68, 70, 72, 76,  78,  80,
2703 					 84, 88, 90, 92, 96, 98, 100, 102,
2704 					  3,  5,  7,  9, 15, 21 };
2705 	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* spec's initial value of 999999 MHz */
2707 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2708 
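	/*
	 * Pick the divider whose DCO lands closest to the middle of the
	 * allowed range. E.g. with a 594000 kHz port clock the AFE clock is
	 * 2970000 kHz and only divider 3 yields a DCO (8910000 kHz) inside
	 * [dco_min, dco_max], resulting in pdiv = 3, qdiv = 1, kdiv = 1.
	 */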
2709 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2710 		dco = afe_clock * dividers[d];
2711 
2712 		if (dco <= dco_max && dco >= dco_min) {
2713 			dco_centrality = abs(dco - dco_mid);
2714 
2715 			if (dco_centrality < best_dco_centrality) {
2716 				best_dco_centrality = dco_centrality;
2717 				best_div = dividers[d];
2718 				best_dco = dco;
2719 			}
2720 		}
2721 	}
2722 
2723 	if (best_div == 0)
2724 		return -EINVAL;
2725 
2726 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2727 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2728 				  pdiv, qdiv, kdiv);
2729 
2730 	return 0;
2731 }
2732 
2733 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2734 				      const struct intel_shared_dpll *pll,
2735 				      const struct intel_dpll_hw_state *pll_state)
2736 {
2737 	int ref_clock = icl_wrpll_ref_clock(i915);
2738 	u32 dco_fraction;
2739 	u32 p0, p1, p2, dco_freq;
2740 
2741 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2742 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2743 
2744 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2745 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2746 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2747 	else
2748 		p1 = 1;
2749 
2750 	switch (p0) {
2751 	case DPLL_CFGCR1_PDIV_2:
2752 		p0 = 2;
2753 		break;
2754 	case DPLL_CFGCR1_PDIV_3:
2755 		p0 = 3;
2756 		break;
2757 	case DPLL_CFGCR1_PDIV_5:
2758 		p0 = 5;
2759 		break;
2760 	case DPLL_CFGCR1_PDIV_7:
2761 		p0 = 7;
2762 		break;
2763 	}
2764 
2765 	switch (p2) {
2766 	case DPLL_CFGCR1_KDIV_1:
2767 		p2 = 1;
2768 		break;
2769 	case DPLL_CFGCR1_KDIV_2:
2770 		p2 = 2;
2771 		break;
2772 	case DPLL_CFGCR1_KDIV_3:
2773 		p2 = 3;
2774 		break;
2775 	}
2776 
2777 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2778 		   ref_clock;
2779 
2780 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2781 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2782 
2783 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2784 		dco_fraction *= 2;
2785 
2786 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2787 
2788 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2789 		return 0;
2790 
2791 	return dco_freq / (p0 * p1 * p2 * 5);
2792 }
2793 
2794 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2795 				const struct skl_wrpll_params *pll_params,
2796 				struct intel_dpll_hw_state *pll_state)
2797 {
2798 	u32 dco_fraction = pll_params->dco_fraction;
2799 
2800 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2801 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2802 
2803 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2804 			    pll_params->dco_integer;
2805 
2806 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2807 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2808 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2809 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2810 
2811 	if (DISPLAY_VER(i915) >= 12)
2812 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2813 	else
2814 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2815 
2816 	if (i915->display.vbt.override_afc_startup)
2817 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2818 }
2819 
2820 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2821 				    u32 *target_dco_khz,
2822 				    struct intel_dpll_hw_state *state,
2823 				    bool is_dkl)
2824 {
2825 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2826 	u32 dco_min_freq, dco_max_freq;
2827 	unsigned int i;
2828 	int div2;
2829 
2830 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2831 	dco_max_freq = is_dp ? 8100000 : 10000000;
2832 
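	/*
	 * The DCO frequency is 5 * port clock * div1 * div2 (cf. the division
	 * by 5 * div1 * div2 in icl_ddi_mg_pll_get_freq()); search for
	 * hsdiv/dsdiv values that keep it inside the allowed range, trying
	 * larger div1 values first.
	 */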
2833 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2834 		int div1 = div1_vals[i];
2835 
2836 		for (div2 = 10; div2 > 0; div2--) {
2837 			int dco = div1 * div2 * clock_khz * 5;
2838 			int a_divratio, tlinedrv, inputsel;
2839 			u32 hsdiv;
2840 
2841 			if (dco < dco_min_freq || dco > dco_max_freq)
2842 				continue;
2843 
2844 			if (div2 >= 2) {
2845 				/*
				 * Note: a_divratio does not match the TGL
				 * BSpec algorithm, but it matches the
				 * hardcoded values and works on HW, at least
				 * for DP alt-mode.
2849 				 */
2850 				a_divratio = is_dp ? 10 : 5;
2851 				tlinedrv = is_dkl ? 1 : 2;
2852 			} else {
2853 				a_divratio = 5;
2854 				tlinedrv = 0;
2855 			}
2856 			inputsel = is_dp ? 0 : 1;
2857 
2858 			switch (div1) {
2859 			default:
2860 				MISSING_CASE(div1);
2861 				fallthrough;
2862 			case 2:
2863 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2864 				break;
2865 			case 3:
2866 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2867 				break;
2868 			case 5:
2869 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2870 				break;
2871 			case 7:
2872 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2873 				break;
2874 			}
2875 
2876 			*target_dco_khz = dco;
2877 
2878 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2879 
2880 			state->mg_clktop2_coreclkctl1 =
2881 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2882 
2883 			state->mg_clktop2_hsclkctl =
2884 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2885 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2886 				hsdiv |
2887 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2888 
2889 			return 0;
2890 		}
2891 	}
2892 
2893 	return -EINVAL;
2894 }
2895 
2896 /*
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation; that's why it looks so different.
2899  */
2900 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2901 				 struct intel_dpll_hw_state *pll_state)
2902 {
2903 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2904 	int refclk_khz = i915->display.dpll.ref_clks.nssc;
2905 	int clock = crtc_state->port_clock;
2906 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2907 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2908 	u32 prop_coeff, int_coeff;
2909 	u32 tdc_targetcnt, feedfwgain;
2910 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2911 	u64 tmp;
2912 	bool use_ssc = false;
2913 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2914 	bool is_dkl = DISPLAY_VER(i915) >= 12;
2915 	int ret;
2916 
2917 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2918 				       pll_state, is_dkl);
2919 	if (ret)
2920 		return ret;
2921 
2922 	m1div = 2;
2923 	m2div_int = dco_khz / (refclk_khz * m1div);
2924 	if (m2div_int > 255) {
2925 		if (!is_dkl) {
2926 			m1div = 4;
2927 			m2div_int = dco_khz / (refclk_khz * m1div);
2928 		}
2929 
2930 		if (m2div_int > 255)
2931 			return -EINVAL;
2932 	}
2933 	m2div_rem = dco_khz % (refclk_khz * m1div);
2934 
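	/*
	 * Express the remainder as a .22 binary fixed point fraction of
	 * refclk_khz * m1div.
	 */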
2935 	tmp = (u64)m2div_rem * (1 << 22);
2936 	do_div(tmp, refclk_khz * m1div);
2937 	m2div_frac = tmp;
2938 
2939 	switch (refclk_khz) {
2940 	case 19200:
2941 		iref_ndiv = 1;
2942 		iref_trim = 28;
2943 		iref_pulse_w = 1;
2944 		break;
2945 	case 24000:
2946 		iref_ndiv = 1;
2947 		iref_trim = 25;
2948 		iref_pulse_w = 2;
2949 		break;
2950 	case 38400:
2951 		iref_ndiv = 2;
2952 		iref_trim = 28;
2953 		iref_pulse_w = 1;
2954 		break;
2955 	default:
2956 		MISSING_CASE(refclk_khz);
2957 		return -EINVAL;
2958 	}
2959 
2960 	/*
2961 	 * tdc_res = 0.000003
2962 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2963 	 *
2964 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2965 	 * was supposed to be a division, but we rearranged the operations of
2966 	 * the formula to avoid early divisions so we don't multiply the
2967 	 * rounding errors.
2968 	 *
2969 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2970 	 * we also rearrange to work with integers.
2971 	 *
2972 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2973 	 * last division by 10.
2974 	 */
2975 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2976 
2977 	/*
2978 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2979 	 * 32 bits. That's not a problem since we round the division down
2980 	 * anyway.
2981 	 */
2982 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2983 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2984 
2985 	if (dco_khz >= 9000000) {
2986 		prop_coeff = 5;
2987 		int_coeff = 10;
2988 	} else {
2989 		prop_coeff = 4;
2990 		int_coeff = 8;
2991 	}
2992 
2993 	if (use_ssc) {
2994 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2995 		do_div(tmp, refclk_khz * m1div * 10000);
2996 		ssc_stepsize = tmp;
2997 
2998 		tmp = mul_u32_u32(dco_khz, 1000);
2999 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3000 	} else {
3001 		ssc_stepsize = 0;
3002 		ssc_steplen = 0;
3003 	}
3004 	ssc_steplog = 4;
3005 
3006 	/* write pll_state calculations */
3007 	if (is_dkl) {
3008 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3009 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3010 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3011 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3012 		if (i915->display.vbt.override_afc_startup) {
3013 			u8 val = i915->display.vbt.override_afc_startup_val;
3014 
3015 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3016 		}
3017 
3018 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3019 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3020 
3021 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3022 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3023 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3024 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3025 
3026 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3027 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3028 
3029 		pll_state->mg_pll_tdc_coldst_bias =
3030 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3031 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3032 
3033 	} else {
3034 		pll_state->mg_pll_div0 =
3035 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3036 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3037 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3038 
3039 		pll_state->mg_pll_div1 =
3040 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3041 			MG_PLL_DIV1_DITHER_DIV_2 |
3042 			MG_PLL_DIV1_NDIVRATIO(1) |
3043 			MG_PLL_DIV1_FBPREDIV(m1div);
3044 
3045 		pll_state->mg_pll_lf =
3046 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3047 			MG_PLL_LF_AFCCNTSEL_512 |
3048 			MG_PLL_LF_GAINCTRL(1) |
3049 			MG_PLL_LF_INT_COEFF(int_coeff) |
3050 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3051 
3052 		pll_state->mg_pll_frac_lock =
3053 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3054 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3055 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3056 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3057 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3058 		if (use_ssc || m2div_rem > 0)
3059 			pll_state->mg_pll_frac_lock |=
3060 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3061 
3062 		pll_state->mg_pll_ssc =
3063 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3064 			MG_PLL_SSC_TYPE(2) |
3065 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3066 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3067 			MG_PLL_SSC_FLLEN |
3068 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3069 
3070 		pll_state->mg_pll_tdc_coldst_bias =
3071 			MG_PLL_TDC_COLDST_COLDSTART |
3072 			MG_PLL_TDC_COLDST_IREFINT_EN |
3073 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3074 			MG_PLL_TDC_TDCOVCCORR_EN |
3075 			MG_PLL_TDC_TDCSEL(3);
3076 
3077 		pll_state->mg_pll_bias =
3078 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3079 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3080 			MG_PLL_BIAS_BIAS_BONUS(10) |
3081 			MG_PLL_BIAS_BIASCAL_EN |
3082 			MG_PLL_BIAS_CTRIM(12) |
3083 			MG_PLL_BIAS_VREF_RDAC(4) |
3084 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3085 
3086 		if (refclk_khz == 38400) {
3087 			pll_state->mg_pll_tdc_coldst_bias_mask =
3088 				MG_PLL_TDC_COLDST_COLDSTART;
3089 			pll_state->mg_pll_bias_mask = 0;
3090 		} else {
3091 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3092 			pll_state->mg_pll_bias_mask = -1U;
3093 		}
3094 
3095 		pll_state->mg_pll_tdc_coldst_bias &=
3096 			pll_state->mg_pll_tdc_coldst_bias_mask;
3097 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3098 	}
3099 
3100 	return 0;
3101 }
3102 
3103 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3104 				   const struct intel_shared_dpll *pll,
3105 				   const struct intel_dpll_hw_state *pll_state)
3106 {
3107 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3108 	u64 tmp;
3109 
3110 	ref_clock = i915->display.dpll.ref_clks.nssc;
3111 
3112 	if (DISPLAY_VER(i915) >= 12) {
3113 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3114 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3115 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3116 
3117 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3118 			m2_frac = pll_state->mg_pll_bias &
3119 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3120 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3121 		} else {
3122 			m2_frac = 0;
3123 		}
3124 	} else {
3125 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3126 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3127 
3128 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3129 			m2_frac = pll_state->mg_pll_div0 &
3130 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3131 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3132 		} else {
3133 			m2_frac = 0;
3134 		}
3135 	}
3136 
3137 	switch (pll_state->mg_clktop2_hsclkctl &
3138 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3139 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3140 		div1 = 2;
3141 		break;
3142 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3143 		div1 = 3;
3144 		break;
3145 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3146 		div1 = 5;
3147 		break;
3148 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3149 		div1 = 7;
3150 		break;
3151 	default:
3152 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3153 		return 0;
3154 	}
3155 
3156 	div2 = (pll_state->mg_clktop2_hsclkctl &
3157 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3158 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3159 
	/* A div2 value of 0 is the same as 1, i.e. no division */
3161 	if (div2 == 0)
3162 		div2 = 1;
3163 
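	/*
	 * Underlying relation:
	 *   port clock = ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)
	 */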
3164 	/*
3165 	 * Adjust the original formula to delay the division by 2^22 in order to
3166 	 * minimize possible rounding errors.
3167 	 */
3168 	tmp = (u64)m1 * m2_int * ref_clock +
3169 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3170 	tmp = div_u64(tmp, 5 * div1 * div2);
3171 
3172 	return tmp;
3173 }
3174 
3175 /**
3176  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3177  * @crtc_state: state for the CRTC to select the DPLL for
3178  * @port_dpll_id: the active @port_dpll_id to select
3179  *
3180  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3181  * CRTC.
3182  */
3183 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3184 			      enum icl_port_dpll_id port_dpll_id)
3185 {
3186 	struct icl_port_dpll *port_dpll =
3187 		&crtc_state->icl_port_dplls[port_dpll_id];
3188 
3189 	crtc_state->shared_dpll = port_dpll->pll;
3190 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3191 }
3192 
3193 static void icl_update_active_dpll(struct intel_atomic_state *state,
3194 				   struct intel_crtc *crtc,
3195 				   struct intel_encoder *encoder)
3196 {
3197 	struct intel_crtc_state *crtc_state =
3198 		intel_atomic_get_new_crtc_state(state, crtc);
3199 	struct intel_digital_port *primary_port;
3200 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3201 
3202 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3203 		enc_to_mst(encoder)->primary :
3204 		enc_to_dig_port(encoder);
3205 
3206 	if (primary_port &&
3207 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3208 	     intel_tc_port_in_legacy_mode(primary_port)))
3209 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3210 
3211 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3212 }
3213 
3214 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3215 				      struct intel_crtc *crtc)
3216 {
3217 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3218 	struct intel_crtc_state *crtc_state =
3219 		intel_atomic_get_new_crtc_state(state, crtc);
3220 	struct icl_port_dpll *port_dpll =
3221 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3222 	struct skl_wrpll_params pll_params = {};
3223 	int ret;
3224 
3225 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3226 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3227 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3228 	else
3229 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3230 
3231 	if (ret)
3232 		return ret;
3233 
3234 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3235 
3236 	/* this is mainly for the fastset check */
3237 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3238 
3239 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3240 							    &port_dpll->hw_state);
3241 
3242 	return 0;
3243 }
3244 
3245 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3246 				  struct intel_crtc *crtc,
3247 				  struct intel_encoder *encoder)
3248 {
3249 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3250 	struct intel_crtc_state *crtc_state =
3251 		intel_atomic_get_new_crtc_state(state, crtc);
3252 	struct icl_port_dpll *port_dpll =
3253 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3254 	enum port port = encoder->port;
3255 	unsigned long dpll_mask;
3256 
3257 	if (IS_ALDERLAKE_S(i915)) {
3258 		dpll_mask =
3259 			BIT(DPLL_ID_DG1_DPLL3) |
3260 			BIT(DPLL_ID_DG1_DPLL2) |
3261 			BIT(DPLL_ID_ICL_DPLL1) |
3262 			BIT(DPLL_ID_ICL_DPLL0);
3263 	} else if (IS_DG1(i915)) {
3264 		if (port == PORT_D || port == PORT_E) {
3265 			dpll_mask =
3266 				BIT(DPLL_ID_DG1_DPLL2) |
3267 				BIT(DPLL_ID_DG1_DPLL3);
3268 		} else {
3269 			dpll_mask =
3270 				BIT(DPLL_ID_DG1_DPLL0) |
3271 				BIT(DPLL_ID_DG1_DPLL1);
3272 		}
3273 	} else if (IS_ROCKETLAKE(i915)) {
3274 		dpll_mask =
3275 			BIT(DPLL_ID_EHL_DPLL4) |
3276 			BIT(DPLL_ID_ICL_DPLL1) |
3277 			BIT(DPLL_ID_ICL_DPLL0);
3278 	} else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3279 		   port != PORT_A) {
3280 		dpll_mask =
3281 			BIT(DPLL_ID_EHL_DPLL4) |
3282 			BIT(DPLL_ID_ICL_DPLL1) |
3283 			BIT(DPLL_ID_ICL_DPLL0);
3284 	} else {
3285 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3286 	}
3287 
3288 	/* Eliminate DPLLs from consideration if reserved by HTI */
3289 	dpll_mask &= ~intel_hti_dpll_mask(i915);
3290 
3291 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3292 						&port_dpll->hw_state,
3293 						dpll_mask);
3294 	if (!port_dpll->pll)
3295 		return -EINVAL;
3296 
3297 	intel_reference_shared_dpll(state, crtc,
3298 				    port_dpll->pll, &port_dpll->hw_state);
3299 
3300 	icl_update_active_dpll(state, crtc, encoder);
3301 
3302 	return 0;
3303 }
3304 
3305 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3306 				    struct intel_crtc *crtc)
3307 {
3308 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3309 	struct intel_crtc_state *crtc_state =
3310 		intel_atomic_get_new_crtc_state(state, crtc);
3311 	struct icl_port_dpll *port_dpll =
3312 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3313 	struct skl_wrpll_params pll_params = {};
3314 	int ret;
3315 
3316 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3317 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3318 	if (ret)
3319 		return ret;
3320 
3321 	icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3322 
3323 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3324 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3325 	if (ret)
3326 		return ret;
3327 
3328 	/* this is mainly for the fastset check */
3329 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3330 
3331 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3332 							 &port_dpll->hw_state);
3333 
3334 	return 0;
3335 }
3336 
3337 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3338 				struct intel_crtc *crtc,
3339 				struct intel_encoder *encoder)
3340 {
3341 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3342 	struct intel_crtc_state *crtc_state =
3343 		intel_atomic_get_new_crtc_state(state, crtc);
3344 	struct icl_port_dpll *port_dpll =
3345 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3346 	enum intel_dpll_id dpll_id;
3347 	int ret;
3348 
3349 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3350 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3351 						&port_dpll->hw_state,
3352 						BIT(DPLL_ID_ICL_TBTPLL));
3353 	if (!port_dpll->pll)
3354 		return -EINVAL;
3355 	intel_reference_shared_dpll(state, crtc,
3356 				    port_dpll->pll, &port_dpll->hw_state);
3357 
3359 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3360 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(i915,
3361 							 encoder->port));
3362 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3363 						&port_dpll->hw_state,
3364 						BIT(dpll_id));
3365 	if (!port_dpll->pll) {
3366 		ret = -EINVAL;
3367 		goto err_unreference_tbt_pll;
3368 	}
3369 	intel_reference_shared_dpll(state, crtc,
3370 				    port_dpll->pll, &port_dpll->hw_state);
3371 
3372 	icl_update_active_dpll(state, crtc, encoder);
3373 
3374 	return 0;
3375 
3376 err_unreference_tbt_pll:
3377 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3378 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3379 
3380 	return ret;
3381 }
3382 
3383 static int icl_compute_dplls(struct intel_atomic_state *state,
3384 			     struct intel_crtc *crtc,
3385 			     struct intel_encoder *encoder)
3386 {
3387 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3388 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3389 
3390 	if (intel_phy_is_combo(i915, phy))
3391 		return icl_compute_combo_phy_dpll(state, crtc);
3392 	else if (intel_phy_is_tc(i915, phy))
3393 		return icl_compute_tc_phy_dplls(state, crtc);
3394 
3395 	MISSING_CASE(phy);
3396 
3397 	return 0;
3398 }
3399 
3400 static int icl_get_dplls(struct intel_atomic_state *state,
3401 			 struct intel_crtc *crtc,
3402 			 struct intel_encoder *encoder)
3403 {
3404 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3405 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3406 
3407 	if (intel_phy_is_combo(i915, phy))
3408 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3409 	else if (intel_phy_is_tc(i915, phy))
3410 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3411 
3412 	MISSING_CASE(phy);
3413 
3414 	return -EINVAL;
3415 }
3416 
3417 static void icl_put_dplls(struct intel_atomic_state *state,
3418 			  struct intel_crtc *crtc)
3419 {
3420 	const struct intel_crtc_state *old_crtc_state =
3421 		intel_atomic_get_old_crtc_state(state, crtc);
3422 	struct intel_crtc_state *new_crtc_state =
3423 		intel_atomic_get_new_crtc_state(state, crtc);
3424 	enum icl_port_dpll_id id;
3425 
3426 	new_crtc_state->shared_dpll = NULL;
3427 
3428 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3429 		const struct icl_port_dpll *old_port_dpll =
3430 			&old_crtc_state->icl_port_dplls[id];
3431 		struct icl_port_dpll *new_port_dpll =
3432 			&new_crtc_state->icl_port_dplls[id];
3433 
3434 		new_port_dpll->pll = NULL;
3435 
3436 		if (!old_port_dpll->pll)
3437 			continue;
3438 
3439 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3440 	}
3441 }
3442 
3443 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3444 				struct intel_shared_dpll *pll,
3445 				struct intel_dpll_hw_state *hw_state)
3446 {
3447 	const enum intel_dpll_id id = pll->info->id;
3448 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3449 	intel_wakeref_t wakeref;
3450 	bool ret = false;
3451 	u32 val;
3452 
3453 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3454 
3455 	wakeref = intel_display_power_get_if_enabled(i915,
3456 						     POWER_DOMAIN_DISPLAY_CORE);
3457 	if (!wakeref)
3458 		return false;
3459 
3460 	val = intel_de_read(i915, enable_reg);
3461 	if (!(val & PLL_ENABLE))
3462 		goto out;
3463 
3464 	hw_state->mg_refclkin_ctl = intel_de_read(i915,
3465 						  MG_REFCLKIN_CTL(tc_port));
3466 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3467 
3468 	hw_state->mg_clktop2_coreclkctl1 =
3469 		intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3470 	hw_state->mg_clktop2_coreclkctl1 &=
3471 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3472 
3473 	hw_state->mg_clktop2_hsclkctl =
3474 		intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3475 	hw_state->mg_clktop2_hsclkctl &=
3476 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3477 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3478 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3479 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3480 
3481 	hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3482 	hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3483 	hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3484 	hw_state->mg_pll_frac_lock = intel_de_read(i915,
3485 						   MG_PLL_FRAC_LOCK(tc_port));
3486 	hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3487 
3488 	hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3489 	hw_state->mg_pll_tdc_coldst_bias =
3490 		intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3491 
3492 	if (i915->display.dpll.ref_clks.nssc == 38400) {
3493 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3494 		hw_state->mg_pll_bias_mask = 0;
3495 	} else {
3496 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3497 		hw_state->mg_pll_bias_mask = -1U;
3498 	}
3499 
3500 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3501 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3502 
3503 	ret = true;
3504 out:
3505 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3506 	return ret;
3507 }
3508 
3509 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3510 				 struct intel_shared_dpll *pll,
3511 				 struct intel_dpll_hw_state *hw_state)
3512 {
3513 	const enum intel_dpll_id id = pll->info->id;
3514 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3515 	intel_wakeref_t wakeref;
3516 	bool ret = false;
3517 	u32 val;
3518 
3519 	wakeref = intel_display_power_get_if_enabled(i915,
3520 						     POWER_DOMAIN_DISPLAY_CORE);
3521 	if (!wakeref)
3522 		return false;
3523 
3524 	val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3525 	if (!(val & PLL_ENABLE))
3526 		goto out;
3527 
3528 	/*
3529 	 * All registers read here have the same HIP_INDEX_REG even though
3530 	 * they are on different building blocks
3531 	 */
3532 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3533 						       DKL_REFCLKIN_CTL(tc_port));
3534 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3535 
3536 	hw_state->mg_clktop2_hsclkctl =
3537 		intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3538 	hw_state->mg_clktop2_hsclkctl &=
3539 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3540 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3541 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3542 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3543 
3544 	hw_state->mg_clktop2_coreclkctl1 =
3545 		intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3546 	hw_state->mg_clktop2_coreclkctl1 &=
3547 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3548 
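	/*
	 * The AFC startup field of DIV0 is only tracked when the VBT
	 * overrides it; otherwise the HW default is left in place and
	 * excluded from the readout mask.
	 */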
3549 	hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3550 	val = DKL_PLL_DIV0_MASK;
3551 	if (i915->display.vbt.override_afc_startup)
3552 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3553 	hw_state->mg_pll_div0 &= val;
3554 
3555 	hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3556 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3557 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3558 
3559 	hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3560 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3561 				 DKL_PLL_SSC_STEP_LEN_MASK |
3562 				 DKL_PLL_SSC_STEP_NUM_MASK |
3563 				 DKL_PLL_SSC_EN);
3564 
3565 	hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3566 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3567 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3568 
3569 	hw_state->mg_pll_tdc_coldst_bias =
3570 		intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3571 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3572 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3573 
3574 	ret = true;
3575 out:
3576 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3577 	return ret;
3578 }
3579 
3580 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3581 				 struct intel_shared_dpll *pll,
3582 				 struct intel_dpll_hw_state *hw_state,
3583 				 i915_reg_t enable_reg)
3584 {
3585 	const enum intel_dpll_id id = pll->info->id;
3586 	intel_wakeref_t wakeref;
3587 	bool ret = false;
3588 	u32 val;
3589 
3590 	wakeref = intel_display_power_get_if_enabled(i915,
3591 						     POWER_DOMAIN_DISPLAY_CORE);
3592 	if (!wakeref)
3593 		return false;
3594 
3595 	val = intel_de_read(i915, enable_reg);
3596 	if (!(val & PLL_ENABLE))
3597 		goto out;
3598 
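	/*
	 * The CFGCR0/CFGCR1 offsets vary per platform; the additional DIV0
	 * readout for the VBT AFC startup override only exists on TGL and
	 * later.
	 */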
3599 	if (IS_ALDERLAKE_S(i915)) {
3600 		hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3601 		hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3602 	} else if (IS_DG1(i915)) {
3603 		hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3604 		hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3605 	} else if (IS_ROCKETLAKE(i915)) {
3606 		hw_state->cfgcr0 = intel_de_read(i915,
3607 						 RKL_DPLL_CFGCR0(id));
3608 		hw_state->cfgcr1 = intel_de_read(i915,
3609 						 RKL_DPLL_CFGCR1(id));
3610 	} else if (DISPLAY_VER(i915) >= 12) {
3611 		hw_state->cfgcr0 = intel_de_read(i915,
3612 						 TGL_DPLL_CFGCR0(id));
3613 		hw_state->cfgcr1 = intel_de_read(i915,
3614 						 TGL_DPLL_CFGCR1(id));
3615 		if (i915->display.vbt.override_afc_startup) {
3616 			hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3617 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3618 		}
3619 	} else {
3620 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3621 		    id == DPLL_ID_EHL_DPLL4) {
3622 			hw_state->cfgcr0 = intel_de_read(i915,
3623 							 ICL_DPLL_CFGCR0(4));
3624 			hw_state->cfgcr1 = intel_de_read(i915,
3625 							 ICL_DPLL_CFGCR1(4));
3626 		} else {
3627 			hw_state->cfgcr0 = intel_de_read(i915,
3628 							 ICL_DPLL_CFGCR0(id));
3629 			hw_state->cfgcr1 = intel_de_read(i915,
3630 							 ICL_DPLL_CFGCR1(id));
3631 		}
3632 	}
3633 
3634 	ret = true;
3635 out:
3636 	intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3637 	return ret;
3638 }
3639 
3640 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3641 				   struct intel_shared_dpll *pll,
3642 				   struct intel_dpll_hw_state *hw_state)
3643 {
3644 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3645 
3646 	return icl_pll_get_hw_state(i915, pll, hw_state, enable_reg);
3647 }
3648 
3649 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3650 				 struct intel_shared_dpll *pll,
3651 				 struct intel_dpll_hw_state *hw_state)
3652 {
3653 	return icl_pll_get_hw_state(i915, pll, hw_state, TBT_PLL_ENABLE);
3654 }
3655 
3656 static void icl_dpll_write(struct drm_i915_private *i915,
3657 			   struct intel_shared_dpll *pll)
3658 {
3659 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3660 	const enum intel_dpll_id id = pll->info->id;
3661 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3662 
3663 	if (IS_ALDERLAKE_S(i915)) {
3664 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3665 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3666 	} else if (IS_DG1(i915)) {
3667 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3668 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3669 	} else if (IS_ROCKETLAKE(i915)) {
3670 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3671 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3672 	} else if (DISPLAY_VER(i915) >= 12) {
3673 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3674 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3675 		div0_reg = TGL_DPLL0_DIV0(id);
3676 	} else {
3677 		if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3678 		    id == DPLL_ID_EHL_DPLL4) {
3679 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3680 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3681 		} else {
3682 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3683 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3684 		}
3685 	}
3686 
3687 	intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3688 	intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
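	/*
	 * Only the AFC startup field of DIV0 is programmed here, and only
	 * when the VBT overrides it; div0_reg is valid solely on TGL+ (see
	 * the platform selection above), hence the WARN below.
	 */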
3689 	drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3690 			 !i915_mmio_reg_valid(div0_reg));
3691 	if (i915->display.vbt.override_afc_startup &&
3692 	    i915_mmio_reg_valid(div0_reg))
3693 		intel_de_rmw(i915, div0_reg,
3694 			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3695 	intel_de_posting_read(i915, cfgcr1_reg);
3696 }
3697 
3698 static void icl_mg_pll_write(struct drm_i915_private *i915,
3699 			     struct intel_shared_dpll *pll)
3700 {
3701 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3702 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3703 
3704 	/*
3705 	 * Some of the following registers have reserved fields, so program
3706 	 * these with RMW based on a mask. The mask can be fixed or generated
3707 	 * during the calc/readout phase if the mask depends on some other HW
3708 	 * state like refclk, see icl_calc_mg_pll_state().
3709 	 */
3710 	intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3711 		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3712 
3713 	intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3714 		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3715 		     hw_state->mg_clktop2_coreclkctl1);
3716 
3717 	intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3718 		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3719 		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3720 		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3721 		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3722 		     hw_state->mg_clktop2_hsclkctl);
3723 
3724 	intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3725 	intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3726 	intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3727 	intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3728 		       hw_state->mg_pll_frac_lock);
3729 	intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3730 
3731 	intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3732 		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3733 
3734 	intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3735 		     hw_state->mg_pll_tdc_coldst_bias_mask,
3736 		     hw_state->mg_pll_tdc_coldst_bias);
3737 
3738 	intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3739 }
3740 
3741 static void dkl_pll_write(struct drm_i915_private *i915,
3742 			  struct intel_shared_dpll *pll)
3743 {
3744 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3745 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3746 	u32 val;
3747 
3748 	/*
3749 	 * All registers programmed here have the same HIP_INDEX_REG even
3750 	 * though on different building block
3751 	 */
3752 	/* All the registers are RMW */
3753 	val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3754 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3755 	val |= hw_state->mg_refclkin_ctl;
3756 	intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3757 
3758 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3759 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3760 	val |= hw_state->mg_clktop2_coreclkctl1;
3761 	intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3762 
3763 	val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3764 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3765 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3766 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3767 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3768 	val |= hw_state->mg_clktop2_hsclkctl;
3769 	intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3770 
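	/*
	 * Mirror of the readout: the AFC startup field of DIV0 is only
	 * written when the VBT overrides it, everything else covered by
	 * DKL_PLL_DIV0_MASK is always programmed.
	 */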
3771 	val = DKL_PLL_DIV0_MASK;
3772 	if (i915->display.vbt.override_afc_startup)
3773 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3774 	intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3775 			  hw_state->mg_pll_div0);
3776 
3777 	val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3778 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3779 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3780 	val |= hw_state->mg_pll_div1;
3781 	intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3782 
3783 	val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3784 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3785 		 DKL_PLL_SSC_STEP_LEN_MASK |
3786 		 DKL_PLL_SSC_STEP_NUM_MASK |
3787 		 DKL_PLL_SSC_EN);
3788 	val |= hw_state->mg_pll_ssc;
3789 	intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3790 
3791 	val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3792 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3793 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3794 	val |= hw_state->mg_pll_bias;
3795 	intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3796 
3797 	val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3798 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3799 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3800 	val |= hw_state->mg_pll_tdc_coldst_bias;
3801 	intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3802 
3803 	intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3804 }
3805 
3806 static void icl_pll_power_enable(struct drm_i915_private *i915,
3807 				 struct intel_shared_dpll *pll,
3808 				 i915_reg_t enable_reg)
3809 {
3810 	intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3811 
3812 	/*
3813 	 * The spec says we need to "wait" but it also says it should be
3814 	 * immediate.
3815 	 */
3816 	if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3817 		drm_err(&i915->drm, "PLL %d Power not enabled\n",
3818 			pll->info->id);
3819 }
3820 
3821 static void icl_pll_enable(struct drm_i915_private *i915,
3822 			   struct intel_shared_dpll *pll,
3823 			   i915_reg_t enable_reg)
3824 {
3825 	intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3826 
3827 	/* Timeout is actually 600us. */
3828 	if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3829 		drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3830 }
3831 
3832 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3833 {
3834 	u32 val;
3835 
3836 	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3837 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3838 		return;
3839 	/*
3840 	 * Wa_16011069516:adl-p[a0]
3841 	 *
3842 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3843 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3844 	 * sanity check this assumption with a double read, which presumably
3845 	 * returns the correct value even with clock gating on.
3846 	 *
3847 	 * Instead of the usual place for workarounds we apply this one here,
3848 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3849 	 */
3850 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3851 	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3852 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3853 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3854 }
3855 
3856 static void combo_pll_enable(struct drm_i915_private *i915,
3857 			     struct intel_shared_dpll *pll)
3858 {
3859 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3860 
3861 	icl_pll_power_enable(i915, pll, enable_reg);
3862 
3863 	icl_dpll_write(i915, pll);
3864 
3865 	/*
3866 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3867 	 * paths should already be setting the appropriate voltage, hence we do
3868 	 * nothing here.
3869 	 */
3870 
3871 	icl_pll_enable(i915, pll, enable_reg);
3872 
3873 	adlp_cmtg_clock_gating_wa(i915, pll);
3874 
3875 	/* DVFS post sequence would be here. See the comment above. */
3876 }
3877 
3878 static void tbt_pll_enable(struct drm_i915_private *i915,
3879 			   struct intel_shared_dpll *pll)
3880 {
3881 	icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3882 
3883 	icl_dpll_write(i915, pll);
3884 
3885 	/*
3886 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3887 	 * paths should already be setting the appropriate voltage, hence we do
3888 	 * nothing here.
3889 	 */
3890 
3891 	icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3892 
3893 	/* DVFS post sequence would be here. See the comment above. */
3894 }
3895 
3896 static void mg_pll_enable(struct drm_i915_private *i915,
3897 			  struct intel_shared_dpll *pll)
3898 {
3899 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3900 
3901 	icl_pll_power_enable(i915, pll, enable_reg);
3902 
3903 	if (DISPLAY_VER(i915) >= 12)
3904 		dkl_pll_write(i915, pll);
3905 	else
3906 		icl_mg_pll_write(i915, pll);
3907 
3908 	/*
3909 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3910 	 * paths should already be setting the appropriate voltage, hence we do
3911 	 * nothing here.
3912 	 */
3913 
3914 	icl_pll_enable(i915, pll, enable_reg);
3915 
3916 	/* DVFS post sequence would be here. See the comment above. */
3917 }
3918 
3919 static void icl_pll_disable(struct drm_i915_private *i915,
3920 			    struct intel_shared_dpll *pll,
3921 			    i915_reg_t enable_reg)
3922 {
3923 	/* The first steps are done by intel_ddi_post_disable(). */
3924 
3925 	/*
3926 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3927 	 * paths should already be setting the appropriate voltage, hence we do
3928 	 * nothing here.
3929 	 */
3930 
3931 	intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
3932 
3933 	/* Timeout is actually 1us. */
3934 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
3935 		drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
3936 
3937 	/* DVFS post sequence would be here. See the comment above. */
3938 
3939 	intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
3940 
3941 	/*
3942 	 * The spec says we need to "wait" but it also says it should be
3943 	 * immediate.
3944 	 */
3945 	if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
3946 		drm_err(&i915->drm, "PLL %d Power not disabled\n",
3947 			pll->info->id);
3948 }
3949 
3950 static void combo_pll_disable(struct drm_i915_private *i915,
3951 			      struct intel_shared_dpll *pll)
3952 {
3953 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3954 
3955 	icl_pll_disable(i915, pll, enable_reg);
3956 }
3957 
3958 static void tbt_pll_disable(struct drm_i915_private *i915,
3959 			    struct intel_shared_dpll *pll)
3960 {
3961 	icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
3962 }
3963 
3964 static void mg_pll_disable(struct drm_i915_private *i915,
3965 			   struct intel_shared_dpll *pll)
3966 {
3967 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3968 
3969 	icl_pll_disable(i915, pll, enable_reg);
3970 }
3971 
3972 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3973 {
3974 	/* No SSC ref */
3975 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3976 }
3977 
3978 static void icl_dump_hw_state(struct drm_i915_private *i915,
3979 			      const struct intel_dpll_hw_state *hw_state)
3980 {
	drm_dbg_kms(&i915->drm,
		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3988 		    hw_state->cfgcr0, hw_state->cfgcr1,
3989 		    hw_state->div0,
3990 		    hw_state->mg_refclkin_ctl,
3991 		    hw_state->mg_clktop2_coreclkctl1,
3992 		    hw_state->mg_clktop2_hsclkctl,
3993 		    hw_state->mg_pll_div0,
3994 		    hw_state->mg_pll_div1,
3995 		    hw_state->mg_pll_lf,
3996 		    hw_state->mg_pll_frac_lock,
3997 		    hw_state->mg_pll_ssc,
3998 		    hw_state->mg_pll_bias,
3999 		    hw_state->mg_pll_tdc_coldst_bias);
4000 }
4001 
4002 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4003 	.enable = combo_pll_enable,
4004 	.disable = combo_pll_disable,
4005 	.get_hw_state = combo_pll_get_hw_state,
4006 	.get_freq = icl_ddi_combo_pll_get_freq,
4007 };
4008 
4009 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4010 	.enable = tbt_pll_enable,
4011 	.disable = tbt_pll_disable,
4012 	.get_hw_state = tbt_pll_get_hw_state,
4013 	.get_freq = icl_ddi_tbt_pll_get_freq,
4014 };
4015 
4016 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4017 	.enable = mg_pll_enable,
4018 	.disable = mg_pll_disable,
4019 	.get_hw_state = mg_pll_get_hw_state,
4020 	.get_freq = icl_ddi_mg_pll_get_freq,
4021 };
4022 
4023 static const struct dpll_info icl_plls[] = {
4024 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4025 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4026 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4027 	{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4028 	{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4029 	{ .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4030 	{ .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4031 	{}
4032 };
4033 
4034 static const struct intel_dpll_mgr icl_pll_mgr = {
4035 	.dpll_info = icl_plls,
4036 	.compute_dplls = icl_compute_dplls,
4037 	.get_dplls = icl_get_dplls,
4038 	.put_dplls = icl_put_dplls,
4039 	.update_active_dpll = icl_update_active_dpll,
4040 	.update_ref_clks = icl_update_dpll_ref_clks,
4041 	.dump_hw_state = icl_dump_hw_state,
4042 };
4043 
4044 static const struct dpll_info ehl_plls[] = {
4045 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4046 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4047 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4048 	  .power_domain = POWER_DOMAIN_DC_OFF, },
4049 	{}
4050 };
4051 
4052 static const struct intel_dpll_mgr ehl_pll_mgr = {
4053 	.dpll_info = ehl_plls,
4054 	.compute_dplls = icl_compute_dplls,
4055 	.get_dplls = icl_get_dplls,
4056 	.put_dplls = icl_put_dplls,
4057 	.update_ref_clks = icl_update_dpll_ref_clks,
4058 	.dump_hw_state = icl_dump_hw_state,
4059 };
4060 
4061 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4062 	.enable = mg_pll_enable,
4063 	.disable = mg_pll_disable,
4064 	.get_hw_state = dkl_pll_get_hw_state,
4065 	.get_freq = icl_ddi_mg_pll_get_freq,
4066 };
4067 
4068 static const struct dpll_info tgl_plls[] = {
4069 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4070 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4071 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4072 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4073 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4074 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4075 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4076 	{ .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4077 	{ .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4078 	{}
4079 };
4080 
4081 static const struct intel_dpll_mgr tgl_pll_mgr = {
4082 	.dpll_info = tgl_plls,
4083 	.compute_dplls = icl_compute_dplls,
4084 	.get_dplls = icl_get_dplls,
4085 	.put_dplls = icl_put_dplls,
4086 	.update_active_dpll = icl_update_active_dpll,
4087 	.update_ref_clks = icl_update_dpll_ref_clks,
4088 	.dump_hw_state = icl_dump_hw_state,
4089 };
4090 
4091 static const struct dpll_info rkl_plls[] = {
4092 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4093 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4094 	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4095 	{}
4096 };
4097 
4098 static const struct intel_dpll_mgr rkl_pll_mgr = {
4099 	.dpll_info = rkl_plls,
4100 	.compute_dplls = icl_compute_dplls,
4101 	.get_dplls = icl_get_dplls,
4102 	.put_dplls = icl_put_dplls,
4103 	.update_ref_clks = icl_update_dpll_ref_clks,
4104 	.dump_hw_state = icl_dump_hw_state,
4105 };
4106 
4107 static const struct dpll_info dg1_plls[] = {
4108 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4109 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4110 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4111 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4112 	{}
4113 };
4114 
4115 static const struct intel_dpll_mgr dg1_pll_mgr = {
4116 	.dpll_info = dg1_plls,
4117 	.compute_dplls = icl_compute_dplls,
4118 	.get_dplls = icl_get_dplls,
4119 	.put_dplls = icl_put_dplls,
4120 	.update_ref_clks = icl_update_dpll_ref_clks,
4121 	.dump_hw_state = icl_dump_hw_state,
4122 };
4123 
4124 static const struct dpll_info adls_plls[] = {
4125 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4126 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4127 	{ .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4128 	{ .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4129 	{}
4130 };
4131 
4132 static const struct intel_dpll_mgr adls_pll_mgr = {
4133 	.dpll_info = adls_plls,
4134 	.compute_dplls = icl_compute_dplls,
4135 	.get_dplls = icl_get_dplls,
4136 	.put_dplls = icl_put_dplls,
4137 	.update_ref_clks = icl_update_dpll_ref_clks,
4138 	.dump_hw_state = icl_dump_hw_state,
4139 };
4140 
4141 static const struct dpll_info adlp_plls[] = {
4142 	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4143 	{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4144 	{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, },
4145 	{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4146 	{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4147 	{ .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4148 	{ .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4149 	{}
4150 };
4151 
4152 static const struct intel_dpll_mgr adlp_pll_mgr = {
4153 	.dpll_info = adlp_plls,
4154 	.compute_dplls = icl_compute_dplls,
4155 	.get_dplls = icl_get_dplls,
4156 	.put_dplls = icl_put_dplls,
4157 	.update_active_dpll = icl_update_active_dpll,
4158 	.update_ref_clks = icl_update_dpll_ref_clks,
4159 	.dump_hw_state = icl_dump_hw_state,
4160 };
4161 
4162 /**
4163  * intel_shared_dpll_init - Initialize shared DPLLs
4164  * @i915: i915 device
4165  *
4166  * Initialize shared DPLLs for @i915.
4167  */
4168 void intel_shared_dpll_init(struct drm_i915_private *i915)
4169 {
4170 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4171 	const struct dpll_info *dpll_info;
4172 	int i;
4173 
4174 	mutex_init(&i915->display.dpll.lock);
4175 
4176 	if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
		/* No shared DPLLs on DG2 and display 14+; port PLLs are part of the PHY */
4178 		dpll_mgr = NULL;
4179 	else if (IS_ALDERLAKE_P(i915))
4180 		dpll_mgr = &adlp_pll_mgr;
4181 	else if (IS_ALDERLAKE_S(i915))
4182 		dpll_mgr = &adls_pll_mgr;
4183 	else if (IS_DG1(i915))
4184 		dpll_mgr = &dg1_pll_mgr;
4185 	else if (IS_ROCKETLAKE(i915))
4186 		dpll_mgr = &rkl_pll_mgr;
4187 	else if (DISPLAY_VER(i915) >= 12)
4188 		dpll_mgr = &tgl_pll_mgr;
4189 	else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4190 		dpll_mgr = &ehl_pll_mgr;
4191 	else if (DISPLAY_VER(i915) >= 11)
4192 		dpll_mgr = &icl_pll_mgr;
4193 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4194 		dpll_mgr = &bxt_pll_mgr;
4195 	else if (DISPLAY_VER(i915) == 9)
4196 		dpll_mgr = &skl_pll_mgr;
4197 	else if (HAS_DDI(i915))
4198 		dpll_mgr = &hsw_pll_mgr;
4199 	else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4200 		dpll_mgr = &pch_pll_mgr;
4201 
4202 	if (!dpll_mgr)
4203 		return;
4204 
4205 	dpll_info = dpll_mgr->dpll_info;
4206 
4207 	for (i = 0; dpll_info[i].name; i++) {
4208 		if (drm_WARN_ON(&i915->drm,
4209 				i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4210 			break;
4211 
		/* the DPLL id must fit into an unsigned long bitmask even on 32 bit */
4213 		if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4214 			break;
4215 
4216 		i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4217 		i915->display.dpll.shared_dplls[i].index = i;
4218 	}
4219 
4220 	i915->display.dpll.mgr = dpll_mgr;
4221 	i915->display.dpll.num_shared_dpll = i;
4222 }
4223 
4224 /**
 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4226  * @state: atomic state
4227  * @crtc: CRTC to compute DPLLs for
4228  * @encoder: encoder
4229  *
4230  * This function computes the DPLL state for the given CRTC and encoder.
4231  *
4232  * The new configuration in the atomic commit @state is made effective by
4233  * calling intel_shared_dpll_swap_state().
4234  *
4235  * Returns:
 * 0 on success, negative error code on failure.
4237  */
4238 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4239 			       struct intel_crtc *crtc,
4240 			       struct intel_encoder *encoder)
4241 {
4242 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4243 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4244 
4245 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4246 		return -EINVAL;
4247 
4248 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4249 }
4250 
4251 /**
4252  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4253  * @state: atomic state
4254  * @crtc: CRTC to reserve DPLLs for
4255  * @encoder: encoder
4256  *
4257  * This function reserves all required DPLLs for the given CRTC and encoder
4258  * combination in the current atomic commit @state and the new @crtc atomic
4259  * state.
4260  *
4261  * The new configuration in the atomic commit @state is made effective by
4262  * calling intel_shared_dpll_swap_state().
4263  *
4264  * The reserved DPLLs should be released by calling
4265  * intel_release_shared_dplls().
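 *
 * As a rough illustration (not the exact call site in this driver), a
 * caller in the atomic check path would pair this with
 * intel_release_shared_dplls():
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret < 0)
 *		return ret;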
4266  *
4267  * Returns:
4268  * 0 if all required DPLLs were successfully reserved,
4269  * negative error code otherwise.
4270  */
4271 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4272 			       struct intel_crtc *crtc,
4273 			       struct intel_encoder *encoder)
4274 {
4275 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4276 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4277 
4278 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4279 		return -EINVAL;
4280 
4281 	return dpll_mgr->get_dplls(state, crtc, encoder);
4282 }
4283 
4284 /**
4285  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4286  * @state: atomic state
4287  * @crtc: crtc from which the DPLLs are to be released
4288  *
4289  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4290  * from the current atomic commit @state and the old @crtc atomic state.
4291  *
4292  * The new configuration in the atomic commit @state is made effective by
4293  * calling intel_shared_dpll_swap_state().
4294  */
4295 void intel_release_shared_dplls(struct intel_atomic_state *state,
4296 				struct intel_crtc *crtc)
4297 {
4298 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4299 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4300 
4301 	/*
4302 	 * FIXME: this function is called for every platform having a
4303 	 * compute_clock hook, even though the platform doesn't yet support
4304 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4305 	 * called on those.
4306 	 */
4307 	if (!dpll_mgr)
4308 		return;
4309 
4310 	dpll_mgr->put_dplls(state, crtc);
4311 }
4312 
4313 /**
4314  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4315  * @state: atomic state
4316  * @crtc: the CRTC for which to update the active DPLL
4317  * @encoder: encoder determining the type of port DPLL
4318  *
4319  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4320  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4321  * DPLL selected will be based on the current mode of the encoder's port.
4322  */
4323 void intel_update_active_dpll(struct intel_atomic_state *state,
4324 			      struct intel_crtc *crtc,
4325 			      struct intel_encoder *encoder)
4326 {
4327 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4328 	const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4329 
4330 	if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4331 		return;
4332 
4333 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4334 }
4335 
4336 /**
4337  * intel_dpll_get_freq - calculate the DPLL's output frequency
4338  * @i915: i915 device
4339  * @pll: DPLL for which to calculate the output frequency
4340  * @pll_state: DPLL state from which to calculate the output frequency
4341  *
 * Return the output frequency of @pll corresponding to the passed in @pll_state.
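 *
 * For example (illustrative only), readout code could recover the clock
 * generated by the PLL's current configuration with:
 *
 *	int clock = intel_dpll_get_freq(i915, pll, &pll->state.hw_state);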
4343  */
4344 int intel_dpll_get_freq(struct drm_i915_private *i915,
4345 			const struct intel_shared_dpll *pll,
4346 			const struct intel_dpll_hw_state *pll_state)
4347 {
4348 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4349 		return 0;
4350 
4351 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4352 }
4353 
4354 /**
 * intel_dpll_get_hw_state - read out the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
4358  * @hw_state: DPLL's hardware state
4359  *
4360  * Read out @pll's hardware state into @hw_state.
4361  */
4362 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4363 			     struct intel_shared_dpll *pll,
4364 			     struct intel_dpll_hw_state *hw_state)
4365 {
4366 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4367 }
4368 
4369 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4370 				  struct intel_shared_dpll *pll)
4371 {
4372 	struct intel_crtc *crtc;
4373 
4374 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4375 
4376 	if (pll->on && pll->info->power_domain)
4377 		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4378 
4379 	pll->state.pipe_mask = 0;
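	/*
	 * Every active CRTC that was read out as using this PLL takes a
	 * reference below; the active mask then starts out identical to
	 * the reference mask.
	 */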
4380 	for_each_intel_crtc(&i915->drm, crtc) {
4381 		struct intel_crtc_state *crtc_state =
4382 			to_intel_crtc_state(crtc->base.state);
4383 
4384 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4385 			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4386 	}
4387 	pll->active_mask = pll->state.pipe_mask;
4388 
4389 	drm_dbg_kms(&i915->drm,
4390 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4391 		    pll->info->name, pll->state.pipe_mask, pll->on);
4392 }
4393 
4394 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4395 {
4396 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4397 		i915->display.dpll.mgr->update_ref_clks(i915);
4398 }
4399 
4400 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4401 {
4402 	struct intel_shared_dpll *pll;
4403 	int i;
4404 
4405 	for_each_shared_dpll(i915, pll, i)
4406 		readout_dpll_hw_state(i915, pll);
4407 }
4408 
4409 static void sanitize_dpll_state(struct drm_i915_private *i915,
4410 				struct intel_shared_dpll *pll)
4411 {
4412 	if (!pll->on)
4413 		return;
4414 
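	/*
	 * Apply the CMTG clock gating workaround while the PLL is still
	 * enabled, since TRANS_CMTG_CHICKEN is only accessible in that
	 * state, before potentially disabling an unused PLL below.
	 */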
4415 	adlp_cmtg_clock_gating_wa(i915, pll);
4416 
4417 	if (pll->active_mask)
4418 		return;
4419 
4420 	drm_dbg_kms(&i915->drm,
4421 		    "%s enabled but not in use, disabling\n",
4422 		    pll->info->name);
4423 
4424 	_intel_disable_shared_dpll(i915, pll);
4425 }
4426 
4427 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4428 {
4429 	struct intel_shared_dpll *pll;
4430 	int i;
4431 
4432 	for_each_shared_dpll(i915, pll, i)
4433 		sanitize_dpll_state(i915, pll);
4434 }
4435 
4436 /**
4437  * intel_dpll_dump_hw_state - write hw_state to dmesg
4438  * @i915: i915 drm device
4439  * @hw_state: hw state to be written to the log
4440  *
4441  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4442  */
4443 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4444 			      const struct intel_dpll_hw_state *hw_state)
4445 {
4446 	if (i915->display.dpll.mgr) {
4447 		i915->display.dpll.mgr->dump_hw_state(i915, hw_state);
4448 	} else {
		/*
		 * Fallback for platforms that don't use the shared DPLL
		 * infrastructure.
		 */
4452 		drm_dbg_kms(&i915->drm,
4453 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4454 			    "fp0: 0x%x, fp1: 0x%x\n",
4455 			    hw_state->dpll,
4456 			    hw_state->dpll_md,
4457 			    hw_state->fp0,
4458 			    hw_state->fp1);
4459 	}
4460 }
4461 
4462 static void
4463 verify_single_dpll_state(struct drm_i915_private *i915,
4464 			 struct intel_shared_dpll *pll,
4465 			 struct intel_crtc *crtc,
4466 			 const struct intel_crtc_state *new_crtc_state)
4467 {
4468 	struct intel_dpll_hw_state dpll_hw_state;
4469 	u8 pipe_mask;
4470 	bool active;
4471 
4472 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4473 
4474 	drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
4475 
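	/*
	 * Read out the live HW state and cross-check it against the sw
	 * bookkeeping below.
	 */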
4476 	active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4477 
4478 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4479 		I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4480 				"pll in active use but not on in sw tracking\n");
4481 		I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4482 				"pll is on but not used by any active pipe\n");
4483 		I915_STATE_WARN(i915, pll->on != active,
4484 				"pll on state mismatch (expected %i, found %i)\n",
4485 				pll->on, active);
4486 	}
4487 
4488 	if (!crtc) {
4489 		I915_STATE_WARN(i915,
4490 				pll->active_mask & ~pll->state.pipe_mask,
4491 				"more active pll users than references: 0x%x vs 0x%x\n",
4492 				pll->active_mask, pll->state.pipe_mask);
4493 
4494 		return;
4495 	}
4496 
4497 	pipe_mask = BIT(crtc->pipe);
4498 
4499 	if (new_crtc_state->hw.active)
4500 		I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4501 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4502 				pipe_name(crtc->pipe), pll->active_mask);
4503 	else
4504 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4505 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4506 				pipe_name(crtc->pipe), pll->active_mask);
4507 
4508 	I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4509 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4510 			pipe_mask, pll->state.pipe_mask);
4511 
4512 	I915_STATE_WARN(i915,
4513 			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4514 					  sizeof(dpll_hw_state)),
4515 			"pll hw state mismatch\n");
4516 }
4517 
4518 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4519 				    struct intel_crtc *crtc)
4520 {
4521 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4522 	const struct intel_crtc_state *old_crtc_state =
4523 		intel_atomic_get_old_crtc_state(state, crtc);
4524 	const struct intel_crtc_state *new_crtc_state =
4525 		intel_atomic_get_new_crtc_state(state, crtc);
4526 
4527 	if (new_crtc_state->shared_dpll)
4528 		verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4529 					 crtc, new_crtc_state);
4530 
4531 	if (old_crtc_state->shared_dpll &&
4532 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4533 		u8 pipe_mask = BIT(crtc->pipe);
4534 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4535 
4536 		I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4537 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4538 				pipe_name(crtc->pipe), pll->active_mask);
4539 		I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
4540 				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4541 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4542 	}
4543 }
4544 
4545 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4546 {
4547 	struct drm_i915_private *i915 = to_i915(state->base.dev);
4548 	struct intel_shared_dpll *pll;
4549 	int i;
4550 
4551 	for_each_shared_dpll(i915, pll, i)
4552 		verify_single_dpll_state(i915, pll, NULL, NULL);
4553 }
4554