1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
/* Per-platform shared-DPLL management: available PLLs plus the hooks to
 * reserve/release them and to introspect their state. */
struct intel_dpll_mgr {
	/* table of PLLs for this platform, terminated by an empty entry
	 * (see e.g. pch_plls below) */
	const struct dpll_info *dpll_info;

	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic @state;
	 * returns false if no suitable PLL is available */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: retarget the active PLL for @crtc/@encoder */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the cached PLL reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* dump @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
63 
64 static void
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 				  struct intel_shared_dpll_state *shared_dpll)
67 {
68 	enum intel_dpll_id i;
69 
70 	/* Copy shared dpll state */
71 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
73 
74 		shared_dpll[i] = pll->state;
75 	}
76 }
77 
/*
 * Return the per-atomic-state copy of the shared DPLL bookkeeping,
 * lazily duplicating it from the device on first use.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	/* Duplicating the global PLL state requires the global modeset lock. */
	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	/* Copy once; subsequent callers within this state see the same copy. */
	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
94 
95 /**
96  * intel_get_shared_dpll_by_id - get a DPLL given its id
97  * @dev_priv: i915 device instance
98  * @id: pll id
99  *
100  * Returns:
101  * A pointer to the DPLL with @id
102  */
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 			    enum intel_dpll_id id)
106 {
107 	return &dev_priv->dpll.shared_dplls[id];
108 }
109 
/**
 * intel_get_shared_dpll_id - get the id of a DPLL
 * @dev_priv: i915 device instance
 * @pll: the DPLL
 *
 * Returns:
 * The id of @pll
 */
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll)
{
	/* A PLL's id is its index in the device's shared DPLL array. */
	long pll_idx = pll - dev_priv->dpll.shared_dplls;

	if (drm_WARN_ON(&dev_priv->drm,
			pll_idx < 0 ||
			pll_idx >= dev_priv->dpll.num_shared_dpll))
		return -1; /* out-of-range pointer: no valid id */

	return pll_idx;
}
131 
/* For ILK+ */
/*
 * Warn if the hardware enable state of @pll does not match the expected
 * @state (true == expected enabled).
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Nothing to assert about a non-existent PLL. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read back the actual hardware enable state and compare. */
	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
149 
150 static i915_reg_t
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 			   struct intel_shared_dpll *pll)
153 {
154 
155 	if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
156 		return MG_PLL_ENABLE(0);
157 
158 	return CNL_DPLL_ENABLE(pll->info->id);
159 
160 
161 }
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* The CRTC must already hold a reference on this PLL. */
	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
	/* Only prepare while no CRTC is actively using the PLL yet. */
	if (!pll->active_mask) {
		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
		drm_WARN_ON(&dev_priv->drm, pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll.lock);
}
189 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/* The PLL must be reserved for this CRTC and not yet active on it. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	/* Already running for another CRTC: just sanity-check the hw state. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First active user: actually turn the PLL on. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
235 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	/* Nothing to do for CRTCs without a shared DPLL. */
	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* The PLL must actually be active on this CRTC. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Keep the PLL running while other CRTCs still use it. */
	pll->active_mask &= ~crtc_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
279 
/*
 * Find a PLL (among the ids in @dpll_mask) for @crtc: prefer sharing an
 * already-referenced PLL whose staged hw state matches @pll_state exactly,
 * otherwise fall back to the first unreferenced PLL. Returns NULL if
 * neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Sharing requires a bit-exact hw state match. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].crtc_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
328 
/*
 * Record @crtc as a user of @pll in the atomic state, staging @pll_state
 * as the PLL's hw state if this is the first user.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* First user stages the hw state; later users must match it. */
	if (shared_dpll[id].crtc_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
		pipe_name(crtc->pipe));

	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
349 
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 					  const struct intel_crtc *crtc,
352 					  const struct intel_shared_dpll *pll)
353 {
354 	struct intel_shared_dpll_state *shared_dpll;
355 
356 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
358 }
359 
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 			   struct intel_crtc *crtc)
362 {
363 	const struct intel_crtc_state *old_crtc_state =
364 		intel_atomic_get_old_crtc_state(state, crtc);
365 	struct intel_crtc_state *new_crtc_state =
366 		intel_atomic_get_new_crtc_state(state, crtc);
367 
368 	new_crtc_state->shared_dpll = NULL;
369 
370 	if (!old_crtc_state->shared_dpll)
371 		return;
372 
373 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
374 }
375 
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
	enum intel_dpll_id i;

	/* Nothing staged if the state never duplicated the PLL bookkeeping. */
	if (!state->dpll_set)
		return;

	/* Exchange the staged per-PLL state with the device's current state. */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll =
			&dev_priv->dpll.shared_dplls[i];

		swap(pll->state, shared_dpll[i]);
	}
}
403 
/*
 * Read back the PCH DPLL control and divider registers into @hw_state.
 * Returns true if the PLL is enabled (VCO enable bit set).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display power domain is down; can't read registers. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
426 
/* Program the FP0/FP1 divider registers ahead of enabling the DPLL. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
435 
/* Warn unless the PCH reference clock is enabled (any source selected). */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this refclk control register. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	/* Enabled means at least one of the source-select fields is non-zero. */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
448 
/* Enable a PCH DPLL by programming its control register twice (see below). */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
472 
473 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
474 				 struct intel_shared_dpll *pll)
475 {
476 	const enum intel_dpll_id id = pll->info->id;
477 
478 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
479 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
480 	udelay(200);
481 }
482 
/*
 * Reserve a PCH DPLL for @crtc. On IBX the mapping is fixed per pipe;
 * on other PCHs any of the two PLLs may be shared or allocated.
 * Returns false if no PLL could be obtained.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
520 
521 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
522 			      const struct intel_dpll_hw_state *hw_state)
523 {
524 	drm_dbg_kms(&dev_priv->drm,
525 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
526 		    "fp0: 0x%x, fp1: 0x%x\n",
527 		    hw_state->dpll,
528 		    hw_state->dpll_md,
529 		    hw_state->fp0,
530 		    hw_state->fp1);
531 }
532 
/* Hardware hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
539 
/* The two PCH DPLLs; the list is terminated by the empty entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
545 
/* DPLL manager for platforms using the PCH DPLLs. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
552 
553 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
554 			       struct intel_shared_dpll *pll)
555 {
556 	const enum intel_dpll_id id = pll->info->id;
557 
558 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
559 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
560 	udelay(20);
561 }
562 
/* Enable the (single) HSW SPLL: program its control register and settle. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
570 
/* Disable a HSW WRPLL, keeping its divider configuration intact. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit. */
	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
588 
/* Disable the HSW SPLL, keeping the rest of its configuration intact. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit. */
	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
606 
/*
 * Read back a WRPLL's control register into @hw_state. Returns true if
 * the PLL is enabled.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display power domain is down; can't read registers. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
627 
/*
 * Read back the SPLL's control register into @hw_state. Returns true if
 * the PLL is enabled.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if the display power domain is down; can't read registers. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
647 
/* LCPLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ in the scaled units used by hsw_wrpll_update_rnp() */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Search range and step for the post divider P */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
/* Reference frequency range seen by the WRPLL, in MHz */
#define REF_MIN 48
#define REF_MAX 400
/* VCO operating range, in MHz */
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL dividers: p plus the doubled n (n2) and r (r2) values */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
664 
/*
 * Return the PPM error budget for the WRPLL divider search, keyed by the
 * pixel clock in Hz. Clocks in the first table must be hit exactly
 * (budget 0); clocks not listed anywhere get the default budget of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int num_clocks;
		unsigned int budget;
	} groups[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	unsigned int i, j;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		for (j = 0; j < groups[i].num_clocks; j++) {
			if (groups[i].clocks[j] == clock)
				return groups[i].budget;
		}
	}

	/* Default budget for clocks not listed above. */
	return 1000;
}
738 
/*
 * Consider the candidate dividers (r2, n2, p) for the target frequency
 * @freq2k and update *@best if the candidate is preferable.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * Comparisons are cross-multiplied to avoid division:
	 * delta <= budget  <=>  1e6 * diff <= budget * freq2k * p * r2,
	 * i.e. the candidate is within budget iff a >= c (best iff b >= d).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* Candidate is within budget, previous best is not: take it */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
798 
/*
 * Compute the WRPLL dividers for @clock: the doubled reference divider
 * (*r2_out), doubled feedback divider (*n2_out) and post divider (*p_out),
 * by exhaustive search over the valid (r2, n2, p) ranges.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	/* Target frequency in the scaled units used by hsw_wrpll_update_rnp(). */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
863 
864 static struct intel_shared_dpll *
865 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
866 		       struct intel_crtc *crtc)
867 {
868 	struct intel_crtc_state *crtc_state =
869 		intel_atomic_get_new_crtc_state(state, crtc);
870 	struct intel_shared_dpll *pll;
871 	u32 val;
872 	unsigned int p, n2, r2;
873 
874 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
875 
876 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
877 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
878 	      WRPLL_DIVIDER_POST(p);
879 
880 	crtc_state->dpll_hw_state.wrpll = val;
881 
882 	pll = intel_find_shared_dpll(state, crtc,
883 				     &crtc_state->dpll_hw_state,
884 				     BIT(DPLL_ID_WRPLL2) |
885 				     BIT(DPLL_ID_WRPLL1));
886 
887 	if (!pll)
888 		return NULL;
889 
890 	return pll;
891 }
892 
/*
 * Compute the output frequency (in kHz) of a WRPLL from its programmed
 * control value.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll->state.hw_state.wrpll;

	/* Determine the reference clock feeding the WRPLL. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000; /* fixed LCPLL reference (LC_FREQ), in kHz */
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the doubled reference (r) and feedback (n) dividers and p. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
931 
932 static struct intel_shared_dpll *
933 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
934 {
935 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
936 	struct intel_shared_dpll *pll;
937 	enum intel_dpll_id pll_id;
938 	int clock = crtc_state->port_clock;
939 
940 	switch (clock / 2) {
941 	case 81000:
942 		pll_id = DPLL_ID_LCPLL_810;
943 		break;
944 	case 135000:
945 		pll_id = DPLL_ID_LCPLL_1350;
946 		break;
947 	case 270000:
948 		pll_id = DPLL_ID_LCPLL_2700;
949 		break;
950 	default:
951 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
952 			    clock);
953 		return NULL;
954 	}
955 
956 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
957 
958 	if (!pll)
959 		return NULL;
960 
961 	return pll;
962 }
963 
964 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
965 				  const struct intel_shared_dpll *pll)
966 {
967 	int link_clock = 0;
968 
969 	switch (pll->info->id) {
970 	case DPLL_ID_LCPLL_810:
971 		link_clock = 81000;
972 		break;
973 	case DPLL_ID_LCPLL_1350:
974 		link_clock = 135000;
975 		break;
976 	case DPLL_ID_LCPLL_2700:
977 		link_clock = 270000;
978 		break;
979 	default:
980 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
981 		break;
982 	}
983 
984 	return link_clock * 2;
985 }
986 
/*
 * Stage the fixed SPLL configuration for @crtc and reserve the SPLL.
 * Only a port clock whose half is 135000 is supported here.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	/* Fixed 1350 MHz SPLL fed from the muxed SSC reference. */
	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1003 
1004 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1005 				 const struct intel_shared_dpll *pll)
1006 {
1007 	int link_clock = 0;
1008 
1009 	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
1010 	case SPLL_FREQ_810MHz:
1011 		link_clock = 81000;
1012 		break;
1013 	case SPLL_FREQ_1350MHz:
1014 		link_clock = 135000;
1015 		break;
1016 	case SPLL_FREQ_2700MHz:
1017 		link_clock = 270000;
1018 		break;
1019 	default:
1020 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1021 		break;
1022 	}
1023 
1024 	return link_clock * 2;
1025 }
1026 
/*
 * Reserve a DPLL for @crtc on HSW: HDMI uses a WRPLL, DP one of the fixed
 * LCPLLs, analog output the SPLL. Returns false if no PLL is available
 * or the output type is unsupported.
 */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* Start from a clean hw state; the helpers fill in what they use. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
	else
		return false;

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1057 
1058 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1059 {
1060 	i915->dpll.ref_clks.ssc = 135000;
1061 	/* Non-SSC is only used on non-ULT HSW. */
1062 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1063 		i915->dpll.ref_clks.nssc = 24000;
1064 	else
1065 		i915->dpll.ref_clks.nssc = 135000;
1066 }
1067 
/* Log the cached HSW PLL hardware state (WRPLL and SPLL control dwords). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1074 
/* Hooks for the two HSW WRPLLs (used for HDMI, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1081 
/* Hooks for the HSW SPLL (used for analog/CRT, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1088 
/*
 * The HSW LCPLLs are flagged INTEL_DPLL_ALWAYS_ON in hsw_plls, so there
 * is nothing to do here; this is a deliberate no-op stub.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1093 
/* Always-on LCPLLs are never disabled; deliberate no-op stub. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1098 
/*
 * The LCPLLs are always-on fixed-rate PLLs, so report "enabled" without
 * reading back any hardware state.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1105 
/* Hooks for the fixed-rate, always-on HSW LCPLLs (used for DP). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1112 
/* WRPLLs and the SPLL are enabled on demand; the LCPLLs are always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1122 
/* HSW/BDW shared-DPLL manager; PLL selection happens in hsw_get_dpll(). */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1130 
/* Per-PLL register set: control reg plus the two HDMI-mode config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1134 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 (control lives in the legacy WRPLL_CTL registers) */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1161 
/*
 * Program this PLL's 6-bit field (HDMI mode, SSC, link rate) in the
 * shared DPLL_CTRL1 register from the cached hw state.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Each PLL owns a 6-bit slice of DPLL_CTRL1 at bit position id*6. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1178 
/*
 * Enable a SKL DPLL: program its DPLL_CTRL1 slice and CFGCR1/2
 * registers, set the enable bit and wait for the lock indication.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1199 
/*
 * DPLL0 is always enabled (see skl_ddi_dpll0_get_hw_state()); only its
 * DPLL_CTRL1 link-rate field needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1205 
/* Disable a SKL DPLL by clearing the enable bit in its control register. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1217 
/* DPLL0 drives CDCLK and is never disabled here; deliberate no-op stub. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1222 
/*
 * Read back a SKL DPLL's hardware state under a display-core power
 * reference. Returns false if the power well is off or the PLL is
 * disabled; otherwise fills @hw_state and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract only this PLL's 6-bit slice of DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1259 
/*
 * Read back DPLL0's state. Unlike the other DPLLs, finding it disabled
 * is a driver bug (it drives CDCLK), hence the WARN. Only the
 * DPLL_CTRL1 slice is captured; DPLL0 has no CFGCR registers.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1292 
/* Scratch state for the WRPLL divider search in skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1299 
1300 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1301 {
1302 	memset(ctx, 0, sizeof(*ctx));
1303 
1304 	ctx->min_deviation = U64_MAX;
1305 }
1306 
1307 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1308 #define SKL_DCO_MAX_PDEVIATION	100
1309 #define SKL_DCO_MAX_NDEVIATION	600
1310 
1311 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1312 				  u64 central_freq,
1313 				  u64 dco_freq,
1314 				  unsigned int divider)
1315 {
1316 	u64 deviation;
1317 
1318 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1319 			      central_freq);
1320 
1321 	/* positive deviation */
1322 	if (dco_freq >= central_freq) {
1323 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1324 		    deviation < ctx->min_deviation) {
1325 			ctx->min_deviation = deviation;
1326 			ctx->central_freq = central_freq;
1327 			ctx->dco_freq = dco_freq;
1328 			ctx->p = divider;
1329 		}
1330 	/* negative deviation */
1331 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1332 		   deviation < ctx->min_deviation) {
1333 		ctx->min_deviation = deviation;
1334 		ctx->central_freq = central_freq;
1335 		ctx->dco_freq = dco_freq;
1336 		ctx->p = divider;
1337 	}
1338 }
1339 
/*
 * Decompose divider @p into the hardware multiplier triple
 * p = p0 * p1 * p2 (pdiv * qdiv * kdiv). Outputs are left untouched
 * when @p has no valid decomposition; callers pre-initialize them.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: factor out one 2, then split the rest */
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1388 
/* DPLL_CFGCR1/2 field values computed by skl_wrpll_params_populate(). */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional part, scaled by 0x8000 */
	u32 dco_integer;	/* integer part of dco_freq / ref_clock */
	u32 qdiv_ratio;		/* p1 */
	u32 qdiv_mode;		/* 0 = qdiv bypassed (ratio 1), 1 = active */
	u32 kdiv;		/* encoded p2 */
	u32 pdiv;		/* encoded p0 */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1398 
1399 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1400 				      u64 afe_clock,
1401 				      int ref_clock,
1402 				      u64 central_freq,
1403 				      u32 p0, u32 p1, u32 p2)
1404 {
1405 	u64 dco_freq;
1406 
1407 	switch (central_freq) {
1408 	case 9600000000ULL:
1409 		params->central_freq = 0;
1410 		break;
1411 	case 9000000000ULL:
1412 		params->central_freq = 1;
1413 		break;
1414 	case 8400000000ULL:
1415 		params->central_freq = 3;
1416 	}
1417 
1418 	switch (p0) {
1419 	case 1:
1420 		params->pdiv = 0;
1421 		break;
1422 	case 2:
1423 		params->pdiv = 1;
1424 		break;
1425 	case 3:
1426 		params->pdiv = 2;
1427 		break;
1428 	case 7:
1429 		params->pdiv = 4;
1430 		break;
1431 	default:
1432 		WARN(1, "Incorrect PDiv\n");
1433 	}
1434 
1435 	switch (p2) {
1436 	case 5:
1437 		params->kdiv = 0;
1438 		break;
1439 	case 2:
1440 		params->kdiv = 1;
1441 		break;
1442 	case 3:
1443 		params->kdiv = 2;
1444 		break;
1445 	case 1:
1446 		params->kdiv = 3;
1447 		break;
1448 	default:
1449 		WARN(1, "Incorrect KDiv\n");
1450 	}
1451 
1452 	params->qdiv_ratio = p1;
1453 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1454 
1455 	dco_freq = p0 * p1 * p2 * afe_clock;
1456 
1457 	/*
1458 	 * Intermediate values are in Hz.
1459 	 * Divide by MHz to match bsepc
1460 	 */
1461 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1462 	params->dco_fraction =
1463 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1464 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1465 }
1466 
/*
 * Find WRPLL dividers for @clock (in Hz): try every divider against
 * every DCO central frequency, keep the candidate with the smallest
 * deviation, then decompose it into the p0/p1/p2 hardware multipliers.
 * Even dividers are preferred over odd ones. Returns false if no
 * divider keeps the DCO within the allowed deviation band.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1540 
/*
 * Compute the DPLL hw state (ctrl1 + cfgcr1/2) for an HDMI output.
 * Returns false if no valid WRPLL dividers exist for the port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the calculator wants Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1578 
/*
 * Decode the cached CFGCR1/2 state back into the port clock (in kHz):
 * reconstruct the DCO frequency from the integer/fraction fields, then
 * divide by the p0*p1*p2 dividers and the 5x AFE factor.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv ratio only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	}

	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* fraction field is scaled by 0x8000 (see skl_wrpll_params_populate) */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1636 
/*
 * Compute the DPLL hw state (ctrl1 link-rate field) for a DP output.
 * An unlisted port clock leaves ctrl1 with only the override bit set.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1676 
/*
 * Decode the cached ctrl1 link-rate field back into the port clock
 * (2x the link symbol clock, in kHz), or 0 for an unknown encoding.
 */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	int link_clock = 0;

	switch ((pll->state.hw_state.ctrl1 &
		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
1710 
1711 static bool skl_get_dpll(struct intel_atomic_state *state,
1712 			 struct intel_crtc *crtc,
1713 			 struct intel_encoder *encoder)
1714 {
1715 	struct intel_crtc_state *crtc_state =
1716 		intel_atomic_get_new_crtc_state(state, crtc);
1717 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1718 	struct intel_shared_dpll *pll;
1719 	bool bret;
1720 
1721 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1722 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1723 		if (!bret) {
1724 			drm_dbg_kms(&i915->drm,
1725 				    "Could not get HDMI pll dividers.\n");
1726 			return false;
1727 		}
1728 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1729 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1730 		if (!bret) {
1731 			drm_dbg_kms(&i915->drm,
1732 				    "Could not set DP dpll HW state.\n");
1733 			return false;
1734 		}
1735 	} else {
1736 		return false;
1737 	}
1738 
1739 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1740 		pll = intel_find_shared_dpll(state, crtc,
1741 					     &crtc_state->dpll_hw_state,
1742 					     BIT(DPLL_ID_SKL_DPLL0));
1743 	else
1744 		pll = intel_find_shared_dpll(state, crtc,
1745 					     &crtc_state->dpll_hw_state,
1746 					     BIT(DPLL_ID_SKL_DPLL3) |
1747 					     BIT(DPLL_ID_SKL_DPLL2) |
1748 					     BIT(DPLL_ID_SKL_DPLL1));
1749 	if (!pll)
1750 		return false;
1751 
1752 	intel_reference_shared_dpll(state, crtc,
1753 				    pll, &crtc_state->dpll_hw_state);
1754 
1755 	crtc_state->shared_dpll = pll;
1756 
1757 	return true;
1758 }
1759 
1760 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1761 				const struct intel_shared_dpll *pll)
1762 {
1763 	/*
1764 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1765 	 * the internal shift for each field
1766 	 */
1767 	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1768 		return skl_ddi_wrpll_get_freq(i915, pll);
1769 	else
1770 		return skl_ddi_lcpll_get_freq(i915, pll);
1771 }
1772 
/* SKL's non-SSC PLL reference follows the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1778 
/* Log the cached SKL PLL hardware state (ctrl1 slice + CFGCR1/2). */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1788 
/* Hooks for SKL DPLL1-3, which are enabled/disabled on demand. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1795 
/* Hooks for always-on DPLL0; only its ctrl1 field is ever reprogrammed. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1802 
/* DPLL0 is always on (it also drives CDCLK); DPLL1-3 are on demand. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1810 
/* SKL shared-DPLL manager; PLL selection happens in skl_get_dpll(). */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1818 
/*
 * Enable a BXT/GLK port PLL: select the non-SSC reference, (GLK only)
 * power up the PLL, program the cached divider/coefficient state into
 * the PHY registers, trigger recalibration and poll for lock, then
 * apply the lane stagger configuration. Register write order follows
 * the hardware programming sequence and must not be reordered.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* second EBB_4 write applies the cached 10-bit clock selection */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1939 
/*
 * Disable a BXT/GLK port PLL; on GLK also power it down and wait for
 * the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1962 
/*
 * Read back a BXT/GLK port PLL's state under a display-core power
 * reference, masking each register down to the bits this driver
 * programs. Returns false if the power well is off or the PLL is
 * disabled; otherwise fills @hw_state and returns true.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2042 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock in kHz this entry matches */
	u32 p1;			/* written via PORT_PLL_P1() */
	u32 p2;			/* written via PORT_PLL_P2() */
	u32 m2_int;		/* integer part of M2 (bits above 22) */
	u32 m2_frac;		/* 22-bit fractional part of M2 */
	bool m2_frac_en;	/* program PORT_PLL_M2_FRAC_ENABLE */
	u32 n;			/* written via PORT_PLL_N() */

	int vco;		/* resulting VCO frequency, kHz */
};

/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2066 
/*
 * Compute bxt port PLL dividers for an HDMI output by running the
 * generic best-divider search and splitting the resulting M2 value
 * into the integer/fractional register fields. Returns false when no
 * divider combination produces the requested port clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
			crtc_state->port_clock,
			pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	/* This programming model only supports m1 == 2. */
	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* M2 is split: integer part above bit 22, 22-bit fraction below. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
2099 
2100 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2101 				    struct bxt_clk_div *clk_div)
2102 {
2103 	int clock = crtc_state->port_clock;
2104 	int i;
2105 
2106 	*clk_div = bxt_dp_clk_val[0];
2107 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2108 		if (bxt_dp_clk_val[i].clock == clock) {
2109 			*clk_div = bxt_dp_clk_val[i];
2110 			break;
2111 		}
2112 	}
2113 
2114 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2115 }
2116 
/*
 * Translate the computed clock dividers into bxt port PLL register
 * values in crtc_state->dpll_hw_state. Returns false when the VCO
 * falls outside the bands covered by the loop-filter coefficient
 * table (e.g. when fed a zeroed clk_div).
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/*
	 * Loop filter coefficients per VCO band; note the middle band
	 * deliberately excludes exactly 5.4 GHz, which has its own entry.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales down with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	/* prop_coef occupies the low bits of pll6, so no shift macro here. */
	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2186 
/*
 * Fill crtc_state->dpll_hw_state for a DP output. The divider lookup
 * cannot fail (it falls back to a table default), so only
 * bxt_ddi_set_dpll_hw_state() can reject the result.
 */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2196 
2197 static bool
2198 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2199 {
2200 	struct bxt_clk_div clk_div = {};
2201 
2202 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2203 
2204 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2205 }
2206 
/*
 * Compute the port clock (kHz) produced by a bxt PLL from its cached
 * register state, reversing the field packing done in
 * bxt_ddi_set_dpll_hw_state().
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	struct dpll clock;

	clock.m1 = 2;
	/* Reassemble M2: integer part from pll0, 22-bit fraction from pll2. */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	/* bxt port PLLs use the CHV divider equations with the non-SSC ref. */
	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2223 
/*
 * Reserve the PLL for this CRTC/encoder combination. On bxt there is a
 * fixed 1:1 mapping between ports and PLLs, so no search is needed;
 * the hw_state is computed first and the port's PLL is referenced.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2256 
/* bxt reference clocks: 100 MHz for both the SSC and non-SSC paths. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2263 
2264 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2265 			      const struct intel_dpll_hw_state *hw_state)
2266 {
2267 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2268 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2269 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2270 		    hw_state->ebb0,
2271 		    hw_state->ebb4,
2272 		    hw_state->pll0,
2273 		    hw_state->pll1,
2274 		    hw_state->pll2,
2275 		    hw_state->pll3,
2276 		    hw_state->pll6,
2277 		    hw_state->pll8,
2278 		    hw_state->pll9,
2279 		    hw_state->pll10,
2280 		    hw_state->pcsdw12);
2281 }
2282 
/* Hardware access vtable shared by all bxt port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port A/B/C; list is sentinel-terminated. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* Platform hooks wiring the bxt PLLs into the shared-DPLL framework. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2304 
/*
 * Enable a CNL DPLL following the numbered sequence in the comments:
 * power up, program CFGCR0 (and CFGCR1 for HDMI mode), enable and wait
 * for lock. The DVFS steps (5 and 8) are handled by the cdclk code and
 * the DDI clock mapping (step 9) by intel_ddi_clk_select().
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* Configure DPLL_CFGCR1 */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2372 
/*
 * Disable a CNL DPLL: mirror image of cnl_ddi_pll_enable(). The DDI
 * clock gating (step 1) happens in intel_ddi_post_disable and the DVFS
 * steps are handled by the cdclk code.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2421 
/*
 * Read the current CNL DPLL configuration into @hw_state. Returns
 * false when display power is down or the PLL is disabled. Holds a
 * DISPLAY_CORE power reference only for the duration of the readout.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2457 
/*
 * Split a total divider into the P (pdiv), Q (qdiv) and K (kdiv)
 * stages such that pdiv * qdiv * kdiv == bestdiv. Callers only pass
 * values from the divider table in __cnl_ddi_calculate_wrpll(), all of
 * which are covered by one of the branches below.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2496 
/*
 * Convert the logical divider values and DCO frequency into the
 * register encodings stored in @params. kdiv/pdiv use the hardware
 * field encodings (e.g. logical K=3 is encoded as 4); the DCO ratio is
 * stored as 15.15 fixed point relative to the reference clock.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;	/* register encoding for K=3 */
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* cnl_wrpll_get_multipliers() only yields qdiv != 1 with kdiv == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio in 15.15 fixed point: integer above bit 15, fraction below. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2544 
2545 static bool
2546 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2547 			  struct skl_wrpll_params *wrpll_params,
2548 			  int ref_clock)
2549 {
2550 	u32 afe_clock = crtc_state->port_clock * 5;
2551 	u32 dco_min = 7998000;
2552 	u32 dco_max = 10000000;
2553 	u32 dco_mid = (dco_min + dco_max) / 2;
2554 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2555 					 18, 20, 24, 28, 30, 32,  36,  40,
2556 					 42, 44, 48, 50, 52, 54,  56,  60,
2557 					 64, 66, 68, 70, 72, 76,  78,  80,
2558 					 84, 88, 90, 92, 96, 98, 100, 102,
2559 					  3,  5,  7,  9, 15, 21 };
2560 	u32 dco, best_dco = 0, dco_centrality = 0;
2561 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2562 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2563 
2564 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2565 		dco = afe_clock * dividers[d];
2566 
2567 		if ((dco <= dco_max) && (dco >= dco_min)) {
2568 			dco_centrality = abs(dco - dco_mid);
2569 
2570 			if (dco_centrality < best_dco_centrality) {
2571 				best_dco_centrality = dco_centrality;
2572 				best_div = dividers[d];
2573 				best_dco = dco;
2574 			}
2575 		}
2576 	}
2577 
2578 	if (best_div == 0)
2579 		return false;
2580 
2581 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2582 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2583 				  pdiv, qdiv, kdiv);
2584 
2585 	return true;
2586 }
2587 
/* CNL WRPLL calculation using the platform's non-SSC reference clock. */
static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 i915->dpll.ref_clks.nssc);
}
2597 
2598 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2599 {
2600 	u32 cfgcr0, cfgcr1;
2601 	struct skl_wrpll_params wrpll_params = { 0, };
2602 
2603 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2604 
2605 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2606 		return false;
2607 
2608 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2609 		wrpll_params.dco_integer;
2610 
2611 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2612 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2613 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2614 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2615 		DPLL_CFGCR1_CENTRAL_FREQ;
2616 
2617 	memset(&crtc_state->dpll_hw_state, 0,
2618 	       sizeof(crtc_state->dpll_hw_state));
2619 
2620 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2621 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2622 	return true;
2623 }
2624 
2625 /*
2626  * Display WA #22010492432: tgl
2627  * Program half of the nominal DCO divider fraction value.
2628  */
2629 static bool
2630 tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2631 {
2632 	return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
2633 }
2634 
/*
 * Decode the WRPLL port clock (kHz) from the cached CFGCR0/CFGCR1
 * state: reconstruct the DCO frequency from the 15-bit integer/
 * fraction fields and divide by the decoded P/Q/K dividers (and the
 * fixed /5 from AFE clock to port clock).
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    int ref_clock)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only takes effect when qdiv_mode is set. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;


	/* Map register encodings back to the logical divider values. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the halved fraction programmed for Display WA #22010492432. */
	if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
		dco_fraction *= 2;

	/* Fraction is in units of ref_clock / 2^15. */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2696 
/* WRPLL (HDMI mode) readout using the CNL non-SSC reference clock. */
static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
}
2702 
/*
 * Program crtc_state->dpll_hw_state for a DP output: enable SSC and
 * select the CFGCR0 link-rate encoding matching port_clock/2 (the
 * link clock in 10 kHz units x100, i.e. kHz). Unlisted rates leave
 * only the SSC bit set; there is deliberately no default case.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2747 
2748 static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2749 				  const struct intel_shared_dpll *pll)
2750 {
2751 	int link_clock = 0;
2752 
2753 	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2754 	case DPLL_CFGCR0_LINK_RATE_810:
2755 		link_clock = 81000;
2756 		break;
2757 	case DPLL_CFGCR0_LINK_RATE_1080:
2758 		link_clock = 108000;
2759 		break;
2760 	case DPLL_CFGCR0_LINK_RATE_1350:
2761 		link_clock = 135000;
2762 		break;
2763 	case DPLL_CFGCR0_LINK_RATE_1620:
2764 		link_clock = 162000;
2765 		break;
2766 	case DPLL_CFGCR0_LINK_RATE_2160:
2767 		link_clock = 216000;
2768 		break;
2769 	case DPLL_CFGCR0_LINK_RATE_2700:
2770 		link_clock = 270000;
2771 		break;
2772 	case DPLL_CFGCR0_LINK_RATE_3240:
2773 		link_clock = 324000;
2774 		break;
2775 	case DPLL_CFGCR0_LINK_RATE_4050:
2776 		link_clock = 405000;
2777 		break;
2778 	default:
2779 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2780 		break;
2781 	}
2782 
2783 	return link_clock * 2;
2784 }
2785 
2786 static bool cnl_get_dpll(struct intel_atomic_state *state,
2787 			 struct intel_crtc *crtc,
2788 			 struct intel_encoder *encoder)
2789 {
2790 	struct intel_crtc_state *crtc_state =
2791 		intel_atomic_get_new_crtc_state(state, crtc);
2792 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2793 	struct intel_shared_dpll *pll;
2794 	bool bret;
2795 
2796 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2797 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2798 		if (!bret) {
2799 			drm_dbg_kms(&i915->drm,
2800 				    "Could not get HDMI pll dividers.\n");
2801 			return false;
2802 		}
2803 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2804 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2805 		if (!bret) {
2806 			drm_dbg_kms(&i915->drm,
2807 				    "Could not set DP dpll HW state.\n");
2808 			return false;
2809 		}
2810 	} else {
2811 		drm_dbg_kms(&i915->drm,
2812 			    "Skip DPLL setup for output_types 0x%x\n",
2813 			    crtc_state->output_types);
2814 		return false;
2815 	}
2816 
2817 	pll = intel_find_shared_dpll(state, crtc,
2818 				     &crtc_state->dpll_hw_state,
2819 				     BIT(DPLL_ID_SKL_DPLL2) |
2820 				     BIT(DPLL_ID_SKL_DPLL1) |
2821 				     BIT(DPLL_ID_SKL_DPLL0));
2822 	if (!pll) {
2823 		drm_dbg_kms(&i915->drm, "No PLL selected\n");
2824 		return false;
2825 	}
2826 
2827 	intel_reference_shared_dpll(state, crtc,
2828 				    pll, &crtc_state->dpll_hw_state);
2829 
2830 	crtc_state->shared_dpll = pll;
2831 
2832 	return true;
2833 }
2834 
2835 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2836 				const struct intel_shared_dpll *pll)
2837 {
2838 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2839 		return cnl_ddi_wrpll_get_freq(i915, pll);
2840 	else
2841 		return cnl_ddi_lcpll_get_freq(i915, pll);
2842 }
2843 
/* CNL uses the cdclk reference as the PLL reference; no SSC variant. */
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2849 
/* Dump the cached CNL DPLL register state for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2858 
/* Hardware access vtable shared by the three CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};

/* Pool of three shareable DPLLs; list is sentinel-terminated. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* Platform hooks wiring the CNL PLLs into the shared-DPLL framework. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2880 
/* Pre-computed combo-PHY PLL settings for one DP link rate. */
struct icl_combo_pll_params {
	int clock;			/* port clock in kHz */
	struct skl_wrpll_params wrpll;	/* matching divider/DCO settings */
};
2885 
2886 /*
 * These values are already adjusted: they're the bits we write to the
2888  * registers, not the logical values.
2889  */
/* DP combo PLL settings per link rate for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2916 
2917 
/*
 * DP combo PLL settings per link rate for a 19.2 MHz reference clock.
 * Also used for 38.4 MHz values (the DPLL divides 38.4 down to 19.2,
 * see icl_wrpll_ref_clock()).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2945 
/* ICL Thunderbolt PLL settings for a 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL Thunderbolt PLL settings for a 19.2 MHz (or 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL Thunderbolt PLL settings for a 19.2/38.4 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL Thunderbolt PLL settings for a 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2966 
/*
 * Look up the pre-computed combo PLL parameters for the requested DP
 * link rate. Returns false (with a MISSING_CASE warning) for rates
 * not present in the tables.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/*
	 * ARRAY_SIZE of the 24 MHz table is used for both tables; this
	 * relies on the two tables having the same number of entries.
	 */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2988 
2989 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2990 			     struct skl_wrpll_params *pll_params)
2991 {
2992 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2993 
2994 	if (INTEL_GEN(dev_priv) >= 12) {
2995 		switch (dev_priv->dpll.ref_clks.nssc) {
2996 		default:
2997 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2998 			fallthrough;
2999 		case 19200:
3000 		case 38400:
3001 			*pll_params = tgl_tbt_pll_19_2MHz_values;
3002 			break;
3003 		case 24000:
3004 			*pll_params = tgl_tbt_pll_24MHz_values;
3005 			break;
3006 		}
3007 	} else {
3008 		switch (dev_priv->dpll.ref_clks.nssc) {
3009 		default:
3010 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3011 			fallthrough;
3012 		case 19200:
3013 		case 38400:
3014 			*pll_params = icl_tbt_pll_19_2MHz_values;
3015 			break;
3016 		case 24000:
3017 			*pll_params = icl_tbt_pll_24MHz_values;
3018 			break;
3019 		}
3020 	}
3021 
3022 	return true;
3023 }
3024 
/*
 * The TBT PLL has no single output frequency to report; warn if anyone
 * asks and return 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3036 
3037 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3038 {
3039 	int ref_clock = i915->dpll.ref_clks.nssc;
3040 
3041 	/*
3042 	 * For ICL+, the spec states: if reference frequency is 38.4,
3043 	 * use 19.2 because the DPLL automatically divides that by 2.
3044 	 */
3045 	if (ref_clock == 38400)
3046 		ref_clock = 19200;
3047 
3048 	return ref_clock;
3049 }
3050 
3051 static bool
3052 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3053 	       struct skl_wrpll_params *wrpll_params)
3054 {
3055 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3056 
3057 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3058 					 icl_wrpll_ref_clock(i915));
3059 }
3060 
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll)
{
	int ref_clock = icl_wrpll_ref_clock(i915);

	/* Same frequency reconstruction as CNL, with the ICL refclk rule. */
	return __cnl_ddi_wrpll_get_freq(i915, pll, ref_clock);
}
3067 
3068 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3069 				const struct skl_wrpll_params *pll_params,
3070 				struct intel_dpll_hw_state *pll_state)
3071 {
3072 	u32 dco_fraction = pll_params->dco_fraction;
3073 
3074 	memset(pll_state, 0, sizeof(*pll_state));
3075 
3076 	if (tgl_combo_pll_div_frac_wa_needed(i915))
3077 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3078 
3079 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3080 			    pll_params->dco_integer;
3081 
3082 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3083 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3084 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3085 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3086 
3087 	if (INTEL_GEN(i915) >= 12)
3088 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3089 	else
3090 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3091 }
3092 
3093 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3094 {
3095 	return id - DPLL_ID_ICL_MGPLL1;
3096 }
3097 
3098 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3099 {
3100 	return tc_port + DPLL_ID_ICL_MGPLL1;
3101 }
3102 
3103 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3104 				     u32 *target_dco_khz,
3105 				     struct intel_dpll_hw_state *state,
3106 				     bool is_dkl)
3107 {
3108 	u32 dco_min_freq, dco_max_freq;
3109 	int div1_vals[] = {7, 5, 3, 2};
3110 	unsigned int i;
3111 	int div2;
3112 
3113 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3114 	dco_max_freq = is_dp ? 8100000 : 10000000;
3115 
3116 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3117 		int div1 = div1_vals[i];
3118 
3119 		for (div2 = 10; div2 > 0; div2--) {
3120 			int dco = div1 * div2 * clock_khz * 5;
3121 			int a_divratio, tlinedrv, inputsel;
3122 			u32 hsdiv;
3123 
3124 			if (dco < dco_min_freq || dco > dco_max_freq)
3125 				continue;
3126 
3127 			if (div2 >= 2) {
3128 				/*
3129 				 * Note: a_divratio not matching TGL BSpec
3130 				 * algorithm but matching hardcoded values and
3131 				 * working on HW for DP alt-mode at least
3132 				 */
3133 				a_divratio = is_dp ? 10 : 5;
3134 				tlinedrv = is_dkl ? 1 : 2;
3135 			} else {
3136 				a_divratio = 5;
3137 				tlinedrv = 0;
3138 			}
3139 			inputsel = is_dp ? 0 : 1;
3140 
3141 			switch (div1) {
3142 			default:
3143 				MISSING_CASE(div1);
3144 				fallthrough;
3145 			case 2:
3146 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3147 				break;
3148 			case 3:
3149 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3150 				break;
3151 			case 5:
3152 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3153 				break;
3154 			case 7:
3155 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3156 				break;
3157 			}
3158 
3159 			*target_dco_khz = dco;
3160 
3161 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3162 
3163 			state->mg_clktop2_coreclkctl1 =
3164 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3165 
3166 			state->mg_clktop2_hsclkctl =
3167 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3168 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3169 				hsdiv |
3170 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3171 
3172 			return true;
3173 		}
3174 	}
3175 
3176 	return false;
3177 }
3178 
3179 /*
3180  * The specification for this function uses real numbers, so the math had to be
3181  * adapted to integer-only calculation, that's why it looks so different.
3182  */
3183 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3184 				  struct intel_dpll_hw_state *pll_state)
3185 {
3186 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3187 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3188 	int clock = crtc_state->port_clock;
3189 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3190 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3191 	u32 prop_coeff, int_coeff;
3192 	u32 tdc_targetcnt, feedfwgain;
3193 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3194 	u64 tmp;
3195 	bool use_ssc = false;
3196 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3197 	bool is_dkl = INTEL_GEN(dev_priv) >= 12;
3198 
3199 	memset(pll_state, 0, sizeof(*pll_state));
3200 
3201 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3202 				      pll_state, is_dkl)) {
3203 		drm_dbg_kms(&dev_priv->drm,
3204 			    "Failed to find divisors for clock %d\n", clock);
3205 		return false;
3206 	}
3207 
3208 	m1div = 2;
3209 	m2div_int = dco_khz / (refclk_khz * m1div);
3210 	if (m2div_int > 255) {
3211 		if (!is_dkl) {
3212 			m1div = 4;
3213 			m2div_int = dco_khz / (refclk_khz * m1div);
3214 		}
3215 
3216 		if (m2div_int > 255) {
3217 			drm_dbg_kms(&dev_priv->drm,
3218 				    "Failed to find mdiv for clock %d\n",
3219 				    clock);
3220 			return false;
3221 		}
3222 	}
3223 	m2div_rem = dco_khz % (refclk_khz * m1div);
3224 
3225 	tmp = (u64)m2div_rem * (1 << 22);
3226 	do_div(tmp, refclk_khz * m1div);
3227 	m2div_frac = tmp;
3228 
3229 	switch (refclk_khz) {
3230 	case 19200:
3231 		iref_ndiv = 1;
3232 		iref_trim = 28;
3233 		iref_pulse_w = 1;
3234 		break;
3235 	case 24000:
3236 		iref_ndiv = 1;
3237 		iref_trim = 25;
3238 		iref_pulse_w = 2;
3239 		break;
3240 	case 38400:
3241 		iref_ndiv = 2;
3242 		iref_trim = 28;
3243 		iref_pulse_w = 1;
3244 		break;
3245 	default:
3246 		MISSING_CASE(refclk_khz);
3247 		return false;
3248 	}
3249 
3250 	/*
3251 	 * tdc_res = 0.000003
3252 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3253 	 *
3254 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3255 	 * was supposed to be a division, but we rearranged the operations of
3256 	 * the formula to avoid early divisions so we don't multiply the
3257 	 * rounding errors.
3258 	 *
3259 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3260 	 * we also rearrange to work with integers.
3261 	 *
3262 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3263 	 * last division by 10.
3264 	 */
3265 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3266 
3267 	/*
3268 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3269 	 * 32 bits. That's not a problem since we round the division down
3270 	 * anyway.
3271 	 */
3272 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3273 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3274 
3275 	if (dco_khz >= 9000000) {
3276 		prop_coeff = 5;
3277 		int_coeff = 10;
3278 	} else {
3279 		prop_coeff = 4;
3280 		int_coeff = 8;
3281 	}
3282 
3283 	if (use_ssc) {
3284 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3285 		do_div(tmp, refclk_khz * m1div * 10000);
3286 		ssc_stepsize = tmp;
3287 
3288 		tmp = mul_u32_u32(dco_khz, 1000);
3289 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3290 	} else {
3291 		ssc_stepsize = 0;
3292 		ssc_steplen = 0;
3293 	}
3294 	ssc_steplog = 4;
3295 
3296 	/* write pll_state calculations */
3297 	if (is_dkl) {
3298 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3299 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3300 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3301 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3302 
3303 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3304 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3305 
3306 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3307 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3308 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3309 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3310 
3311 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3312 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3313 
3314 		pll_state->mg_pll_tdc_coldst_bias =
3315 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3316 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3317 
3318 	} else {
3319 		pll_state->mg_pll_div0 =
3320 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3321 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3322 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3323 
3324 		pll_state->mg_pll_div1 =
3325 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3326 			MG_PLL_DIV1_DITHER_DIV_2 |
3327 			MG_PLL_DIV1_NDIVRATIO(1) |
3328 			MG_PLL_DIV1_FBPREDIV(m1div);
3329 
3330 		pll_state->mg_pll_lf =
3331 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3332 			MG_PLL_LF_AFCCNTSEL_512 |
3333 			MG_PLL_LF_GAINCTRL(1) |
3334 			MG_PLL_LF_INT_COEFF(int_coeff) |
3335 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3336 
3337 		pll_state->mg_pll_frac_lock =
3338 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3339 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3340 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3341 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3342 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3343 		if (use_ssc || m2div_rem > 0)
3344 			pll_state->mg_pll_frac_lock |=
3345 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3346 
3347 		pll_state->mg_pll_ssc =
3348 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3349 			MG_PLL_SSC_TYPE(2) |
3350 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3351 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3352 			MG_PLL_SSC_FLLEN |
3353 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3354 
3355 		pll_state->mg_pll_tdc_coldst_bias =
3356 			MG_PLL_TDC_COLDST_COLDSTART |
3357 			MG_PLL_TDC_COLDST_IREFINT_EN |
3358 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3359 			MG_PLL_TDC_TDCOVCCORR_EN |
3360 			MG_PLL_TDC_TDCSEL(3);
3361 
3362 		pll_state->mg_pll_bias =
3363 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3364 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3365 			MG_PLL_BIAS_BIAS_BONUS(10) |
3366 			MG_PLL_BIAS_BIASCAL_EN |
3367 			MG_PLL_BIAS_CTRIM(12) |
3368 			MG_PLL_BIAS_VREF_RDAC(4) |
3369 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3370 
3371 		if (refclk_khz == 38400) {
3372 			pll_state->mg_pll_tdc_coldst_bias_mask =
3373 				MG_PLL_TDC_COLDST_COLDSTART;
3374 			pll_state->mg_pll_bias_mask = 0;
3375 		} else {
3376 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3377 			pll_state->mg_pll_bias_mask = -1U;
3378 		}
3379 
3380 		pll_state->mg_pll_tdc_coldst_bias &=
3381 			pll_state->mg_pll_tdc_coldst_bias_mask;
3382 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3383 	}
3384 
3385 	return true;
3386 }
3387 
/*
 * Reconstruct the port clock (in kHz) from an MG/Dekel PHY PLL's divider
 * register state, reversing the math done in icl_calc_mg_pll_state().
 * Returns 0 when the HSDIV ratio field holds an unknown value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/*
	 * The feedback dividers live in different registers/fields on
	 * Dekel (gen12+) vs the ICL MG PHY.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Post divider 1, encoded as a ratio field. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3459 
3460 /**
3461  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3462  * @crtc_state: state for the CRTC to select the DPLL for
3463  * @port_dpll_id: the active @port_dpll_id to select
3464  *
3465  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3466  * CRTC.
3467  */
3468 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3469 			      enum icl_port_dpll_id port_dpll_id)
3470 {
3471 	struct icl_port_dpll *port_dpll =
3472 		&crtc_state->icl_port_dplls[port_dpll_id];
3473 
3474 	crtc_state->shared_dpll = port_dpll->pll;
3475 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3476 }
3477 
3478 static void icl_update_active_dpll(struct intel_atomic_state *state,
3479 				   struct intel_crtc *crtc,
3480 				   struct intel_encoder *encoder)
3481 {
3482 	struct intel_crtc_state *crtc_state =
3483 		intel_atomic_get_new_crtc_state(state, crtc);
3484 	struct intel_digital_port *primary_port;
3485 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3486 
3487 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3488 		enc_to_mst(encoder)->primary :
3489 		enc_to_dig_port(encoder);
3490 
3491 	if (primary_port &&
3492 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3493 	     primary_port->tc_mode == TC_PORT_LEGACY))
3494 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3495 
3496 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3497 }
3498 
3499 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3500 {
3501 	if (!(i915->hti_state & HDPORT_ENABLED))
3502 		return 0;
3503 
3504 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3505 }
3506 
3507 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3508 				   struct intel_crtc *crtc,
3509 				   struct intel_encoder *encoder)
3510 {
3511 	struct intel_crtc_state *crtc_state =
3512 		intel_atomic_get_new_crtc_state(state, crtc);
3513 	struct skl_wrpll_params pll_params = { };
3514 	struct icl_port_dpll *port_dpll =
3515 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3516 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3517 	enum port port = encoder->port;
3518 	unsigned long dpll_mask;
3519 	int ret;
3520 
3521 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3522 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3523 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3524 	else
3525 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3526 
3527 	if (!ret) {
3528 		drm_dbg_kms(&dev_priv->drm,
3529 			    "Could not calculate combo PHY PLL state.\n");
3530 
3531 		return false;
3532 	}
3533 
3534 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3535 
3536 	if (IS_ROCKETLAKE(dev_priv)) {
3537 		dpll_mask =
3538 			BIT(DPLL_ID_EHL_DPLL4) |
3539 			BIT(DPLL_ID_ICL_DPLL1) |
3540 			BIT(DPLL_ID_ICL_DPLL0);
3541 	} else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
3542 		dpll_mask =
3543 			BIT(DPLL_ID_EHL_DPLL4) |
3544 			BIT(DPLL_ID_ICL_DPLL1) |
3545 			BIT(DPLL_ID_ICL_DPLL0);
3546 	} else {
3547 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3548 	}
3549 
3550 	/* Eliminate DPLLs from consideration if reserved by HTI */
3551 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3552 
3553 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3554 						&port_dpll->hw_state,
3555 						dpll_mask);
3556 	if (!port_dpll->pll) {
3557 		drm_dbg_kms(&dev_priv->drm,
3558 			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3559 			    encoder->base.base.id, encoder->base.name);
3560 		return false;
3561 	}
3562 
3563 	intel_reference_shared_dpll(state, crtc,
3564 				    port_dpll->pll, &port_dpll->hw_state);
3565 
3566 	icl_update_active_dpll(state, crtc, encoder);
3567 
3568 	return true;
3569 }
3570 
/*
 * Reserve the two PLLs a Type-C port may need: the TBT PLL (default) and
 * the port's MG PHY PLL. Both are referenced on success; if the MG PHY
 * part fails after the TBT PLL was referenced, that reference is dropped
 * on the error path. Returns false on any failure.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, which is the default port DPLL. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3631 
3632 static bool icl_get_dplls(struct intel_atomic_state *state,
3633 			  struct intel_crtc *crtc,
3634 			  struct intel_encoder *encoder)
3635 {
3636 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3637 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3638 
3639 	if (intel_phy_is_combo(dev_priv, phy))
3640 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3641 	else if (intel_phy_is_tc(dev_priv, phy))
3642 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3643 
3644 	MISSING_CASE(phy);
3645 
3646 	return false;
3647 }
3648 
3649 static void icl_put_dplls(struct intel_atomic_state *state,
3650 			  struct intel_crtc *crtc)
3651 {
3652 	const struct intel_crtc_state *old_crtc_state =
3653 		intel_atomic_get_old_crtc_state(state, crtc);
3654 	struct intel_crtc_state *new_crtc_state =
3655 		intel_atomic_get_new_crtc_state(state, crtc);
3656 	enum icl_port_dpll_id id;
3657 
3658 	new_crtc_state->shared_dpll = NULL;
3659 
3660 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3661 		const struct icl_port_dpll *old_port_dpll =
3662 			&old_crtc_state->icl_port_dplls[id];
3663 		struct icl_port_dpll *new_port_dpll =
3664 			&new_crtc_state->icl_port_dplls[id];
3665 
3666 		new_port_dpll->pll = NULL;
3667 
3668 		if (!old_port_dpll->pll)
3669 			continue;
3670 
3671 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3672 	}
3673 }
3674 
/*
 * Read out the current hardware state of an ICL MG PHY PLL into
 * @hw_state. Returns false if the display power well cannot be grabbed
 * or the PLL is disabled. Read values are masked down to the fields the
 * driver owns, mirroring the masks used at calc time so state
 * comparisons work.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Same refclk-dependent masking as icl_calc_mg_pll_state(): with a
	 * 38.4 MHz refclk only the COLDSTART bit is compared.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3738 
/*
 * Read out the current hardware state of a gen12+ Dekel PHY PLL into
 * @hw_state. Returns false if the display power well cannot be grabbed
 * or the PLL is disabled. Each register read is masked down to the
 * driver-owned fields, matching the masks used by dkl_pll_write().
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3812 
/*
 * Common readout for combo and TBT PLLs: check the enable bit in
 * @enable_reg, then read the platform-specific CFGCR0/CFGCR1 register
 * pair into @hw_state. Returns false if the display power well cannot
 * be grabbed or the PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* EHL DPLL4 uses the ICL register layout at index 4. */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3861 
3862 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3863 				   struct intel_shared_dpll *pll,
3864 				   struct intel_dpll_hw_state *hw_state)
3865 {
3866 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3867 
3868 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3869 }
3870 
3871 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3872 				 struct intel_shared_dpll *pll,
3873 				 struct intel_dpll_hw_state *hw_state)
3874 {
3875 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3876 }
3877 
3878 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3879 			   struct intel_shared_dpll *pll)
3880 {
3881 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3882 	const enum intel_dpll_id id = pll->info->id;
3883 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3884 
3885 	if (IS_ROCKETLAKE(dev_priv)) {
3886 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3887 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3888 	} else if (INTEL_GEN(dev_priv) >= 12) {
3889 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3890 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3891 	} else {
3892 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3893 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3894 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3895 		} else {
3896 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3897 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3898 		}
3899 	}
3900 
3901 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3902 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3903 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3904 }
3905 
/*
 * Program an ICL MG PHY PLL from the precomputed hw_state. Registers
 * with reserved fields are updated read-modify-write under a mask;
 * the remaining registers are written whole.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* DIV0/DIV1/LF/FRAC_LOCK/SSC are written whole, no RMW needed. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC_COLDST_BIAS masks come from icl_calc_mg_pll_state(). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3956 
/*
 * Program the Dekel (DKL) PHY PLL registers of a Type-C port from the
 * precomputed pll->state.hw_state. Gen12+ counterpart of icl_mg_pll_write();
 * the hw_state fields reuse the mg_* storage but are masked down to the
 * DKL-specific bit layouts here.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* PLL feedback/divider programming: integer and prop coefficients. */
	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	/* Spread-spectrum clocking configuration. */
	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read flushes the above writes to the hardware. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
4026 
4027 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4028 				 struct intel_shared_dpll *pll,
4029 				 i915_reg_t enable_reg)
4030 {
4031 	u32 val;
4032 
4033 	val = intel_de_read(dev_priv, enable_reg);
4034 	val |= PLL_POWER_ENABLE;
4035 	intel_de_write(dev_priv, enable_reg, val);
4036 
4037 	/*
4038 	 * The spec says we need to "wait" but it also says it should be
4039 	 * immediate.
4040 	 */
4041 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4042 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4043 			pll->info->id);
4044 }
4045 
4046 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4047 			   struct intel_shared_dpll *pll,
4048 			   i915_reg_t enable_reg)
4049 {
4050 	u32 val;
4051 
4052 	val = intel_de_read(dev_priv, enable_reg);
4053 	val |= PLL_ENABLE;
4054 	intel_de_write(dev_priv, enable_reg, val);
4055 
4056 	/* Timeout is actually 600us. */
4057 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4058 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4059 }
4060 
/*
 * Enable a combo PHY PLL: power up, program the CFGCR registers, then
 * enable and wait for lock. On EHL, DPLL4 additionally requires DC
 * states to be blocked for as long as it is enabled.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_ELKHARTLAKE(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4092 
/*
 * Enable the Thunderbolt PLL: power up, program, enable and wait for
 * lock, always through the fixed TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
4110 
/*
 * Enable a Type-C port PLL. Gen12+ uses the Dekel (DKL) PHY register
 * programming, earlier gens the MG PHY; the power-up/lock sequence is
 * shared either way.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (INTEL_GEN(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4134 
/*
 * Disable sequence shared by combo/TBT/Type-C PLLs: clear PLL_ENABLE,
 * wait for the PLL to unlock, then remove power and wait for the power
 * state bit to clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4171 
/*
 * Disable a combo PHY PLL and, on EHL DPLL4, drop the DC-off power
 * domain reference taken in combo_pll_enable().
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_ELKHARTLAKE(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
					pll->wakeref);
}
4184 
/* Disable the Thunderbolt PLL via its fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
4190 
/* Disable a Type-C port (MG/DKL) PLL via its per-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_disable(dev_priv, pll, enable_reg);
}
4199 
/* On ICL+ the non-SSC DPLL reference clock is the cdclk reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4205 
4206 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4207 			      const struct intel_dpll_hw_state *hw_state)
4208 {
4209 	drm_dbg_kms(&dev_priv->drm,
4210 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4211 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4212 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4213 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4214 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4215 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4216 		    hw_state->cfgcr0, hw_state->cfgcr1,
4217 		    hw_state->mg_refclkin_ctl,
4218 		    hw_state->mg_clktop2_coreclkctl1,
4219 		    hw_state->mg_clktop2_hsclkctl,
4220 		    hw_state->mg_pll_div0,
4221 		    hw_state->mg_pll_div1,
4222 		    hw_state->mg_pll_lf,
4223 		    hw_state->mg_pll_frac_lock,
4224 		    hw_state->mg_pll_ssc,
4225 		    hw_state->mg_pll_bias,
4226 		    hw_state->mg_pll_tdc_coldst_bias);
4227 }
4228 
/* Ops for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
4235 
/* Ops for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
4242 
/* Ops for ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4249 
/* Icelake: two combo DPLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
4260 
/* DPLL manager for Icelake. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4269 
/* Elkhart Lake: combo PHY DPLLs only (no Type-C ports). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4276 
/* DPLL manager for Elkhart Lake; no .update_active_dpll (no Type-C). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4284 
/*
 * Ops for Gen12+ Dekel PHY (Type-C) PLLs; enable/disable/freq are shared
 * with the MG PLLs, only hw state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4291 
/* Tigerlake: two combo DPLLs, the TBT PLL and six Type-C (DKL) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
4304 
/* DPLL manager for Tigerlake. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4313 
/* Rocket Lake: combo PHY DPLLs only (no Type-C ports). */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4320 
/* DPLL manager for Rocket Lake; no .update_active_dpll (no Type-C). */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4328 
4329 /**
4330  * intel_shared_dpll_init - Initialize shared DPLLs
4331  * @dev: drm device
4332  *
4333  * Initialize shared DPLLs for @dev.
4334  */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/*
	 * Check order matters: more specific platforms must be tested
	 * before the generation checks that would also match them
	 * (e.g. RKL before gen >= 12, EHL before gen >= 11).
	 */
	if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (INTEL_GEN(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_ELKHARTLAKE(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (INTEL_GEN(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_CANNONLAKE(dev_priv))
		dpll_mgr = &cnl_pll_mgr;
	else if (IS_GEN9_BC(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_GEN9_LP(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	/* Platform without shared DPLL support: leave the pool empty. */
	if (!dpll_mgr) {
		dev_priv->dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* The table index is expected to match each PLL's id. */
	for (i = 0; dpll_info[i].name; i++) {
		drm_WARN_ON(dev, i != dpll_info[i].id);
		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->dpll.mgr = dpll_mgr;
	dev_priv->dpll.num_shared_dpll = i;
	rw_init(&dev_priv->dpll.lock, "dplllk");

	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
4379 
4380 /**
4381  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4382  * @state: atomic state
4383  * @crtc: CRTC to reserve DPLLs for
4384  * @encoder: encoder
4385  *
4386  * This function reserves all required DPLLs for the given CRTC and encoder
4387  * combination in the current atomic commit @state and the new @crtc atomic
4388  * state.
4389  *
4390  * The new configuration in the atomic commit @state is made effective by
4391  * calling intel_shared_dpll_swap_state().
4392  *
4393  * The reserved DPLLs should be released by calling
4394  * intel_release_shared_dplls().
4395  *
4396  * Returns:
4397  * True if all required DPLLs were successfully reserved.
4398  */
bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/* Callers must only reserve on platforms with shared DPLL support. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return false;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}
4411 
4412 /**
4413  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4414  * @state: atomic state
4415  * @crtc: crtc from which the DPLLs are to be released
4416  *
4417  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4418  * from the current atomic commit @state and the old @crtc atomic state.
4419  *
4420  * The new configuration in the atomic commit @state is made effective by
4421  * calling intel_shared_dpll_swap_state().
4422  */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;

	dpll_mgr->put_dplls(state, crtc);
}
4440 
4441 /**
4442  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4443  * @state: atomic state
4444  * @crtc: the CRTC for which to update the active DPLL
4445  * @encoder: encoder determining the type of port DPLL
4446  *
4447  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4448  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4449  * DPLL selected will be based on the current mode of the encoder's port.
4450  */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/* Only meaningful on platforms with shared DPLL support. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
4463 
4464 /**
4465  * intel_dpll_get_freq - calculate the DPLL's output frequency
4466  * @i915: i915 device
4467  * @pll: DPLL for which to calculate the output frequency
4468  *
4469  * Return the output frequency corresponding to @pll's current state.
4470  */
int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll)
{
	/* Not every PLL type implements get_freq; 0 means "unknown". */
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll);
}
4479 
/*
 * Read out one PLL's state from the hardware and rebuild the software
 * tracking (on/off, which CRTCs use it) from the active CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = pll->info->funcs->get_hw_state(i915, pll,
						 &pll->state.hw_state);

	/*
	 * EHL DPLL4 blocks DC states while enabled; mirror the reference
	 * combo_pll_enable() would have taken so a later disable balances.
	 */
	if (IS_ELKHARTLAKE(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the user mask from CRTCs currently driven by this PLL. */
	pll->state.crtc_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.crtc_mask |= 1 << crtc->pipe;
	}
	pll->active_mask = pll->state.crtc_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
		    pll->info->name, pll->state.crtc_mask, pll->on);
}
4508 
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	/* Refresh the reference clocks first; frequencies depend on them. */
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4519 
4520 static void sanitize_dpll_state(struct drm_i915_private *i915,
4521 				struct intel_shared_dpll *pll)
4522 {
4523 	if (!pll->on || pll->active_mask)
4524 		return;
4525 
4526 	drm_dbg_kms(&i915->drm,
4527 		    "%s enabled but not in use, disabling\n",
4528 		    pll->info->name);
4529 
4530 	pll->info->funcs->disable(i915, pll);
4531 	pll->on = false;
4532 }
4533 
/* Disable every shared DPLL left enabled without an active user. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4541 
4542 /**
4543  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
4544  * @dev_priv: i915 drm device
4545  * @hw_state: hw state to be written to the log
4546  *
4547  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4548  */
4549 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4550 			      const struct intel_dpll_hw_state *hw_state)
4551 {
4552 	if (dev_priv->dpll.mgr) {
4553 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4554 	} else {
4555 		/* fallback for platforms that don't use the shared dpll
4556 		 * infrastructure
4557 		 */
4558 		drm_dbg_kms(&dev_priv->drm,
4559 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4560 			    "fp0: 0x%x, fp1: 0x%x\n",
4561 			    hw_state->dpll,
4562 			    hw_state->dpll_md,
4563 			    hw_state->fp0,
4564 			    hw_state->fp1);
4565 	}
4566 }
4567