xref: /dragonfly/sys/dev/drm/i915/intel_dpll_mgr.c (revision 2b57e6df)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_drv.h"
25 #include <asm/int-ll64.h>
26 
27 /**
28  * DOC: Display PLLs
29  *
30  * Display PLLs used for driving outputs vary by platform. While some have
31  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
32  * from a pool. In the latter scenario, it is possible that multiple pipes
33  * share a PLL if their configurations match.
34  *
35  * This file provides an abstraction over display PLLs. The function
36  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
37  * users of a PLL are tracked and that tracking is integrated with the atomic
38  * modeset interface. During an atomic operation, a PLL can be requested for a
39  * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
40  * a previously used PLL can be released with intel_release_shared_dpll().
41  * Changes to the users are first staged in the atomic state, and then made
42  * effective by calling intel_shared_dpll_swap_state() during the atomic
43  * commit phase.
44  */
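/*
 * Rough usage sketch (hypothetical caller shown for illustration only; the
 * real call sites live in the atomic modeset code, e.g. intel_display.c).
 * During the compute phase a CRTC that needs a PLL does roughly:
 *
 *	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
 *	if (!pll)
 *		return -EINVAL;	(no compatible or free PLL available)
 *
 * and a CRTC dropping its PLL calls intel_release_shared_dpll().  The staged
 * assignments only take effect once the commit phase has run
 * intel_shared_dpll_swap_state(state); after that,
 * intel_prepare_shared_dpll() and intel_enable_shared_dpll() operate on the
 * now-current pll->state.
 */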
45 
46 static void
47 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
48 				  struct intel_shared_dpll_state *shared_dpll)
49 {
50 	enum intel_dpll_id i;
51 
52 	/* Copy shared dpll state */
53 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
54 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
55 
56 		shared_dpll[i] = pll->state;
57 	}
58 }
59 
60 static struct intel_shared_dpll_state *
61 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
62 {
63 	struct intel_atomic_state *state = to_intel_atomic_state(s);
64 
65 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
66 
67 	if (!state->dpll_set) {
68 		state->dpll_set = true;
69 
70 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
71 						  state->shared_dpll);
72 	}
73 
74 	return state->shared_dpll;
75 }
76 
77 /**
78  * intel_get_shared_dpll_by_id - get a DPLL given its id
79  * @dev_priv: i915 device instance
80  * @id: pll id
81  *
82  * Returns:
83  * A pointer to the DPLL with @id
84  */
85 struct intel_shared_dpll *
86 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
87 			    enum intel_dpll_id id)
88 {
89 	return &dev_priv->shared_dplls[id];
90 }
91 
92 /**
93  * intel_get_shared_dpll_id - get the id of a DPLL
94  * @dev_priv: i915 device instance
95  * @pll: the DPLL
96  *
97  * Returns:
98  * The id of @pll
99  */
100 enum intel_dpll_id
101 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
102 			 struct intel_shared_dpll *pll)
103 {
104 	if (WARN_ON(pll < dev_priv->shared_dplls ||
105 		    pll >= &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
106 		return -1;
107 
108 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
109 }
110 
111 /* For ILK+ */
112 void assert_shared_dpll(struct drm_i915_private *dev_priv,
113 			struct intel_shared_dpll *pll,
114 			bool state)
115 {
116 	bool cur_state;
117 	struct intel_dpll_hw_state hw_state;
118 
119 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
120 		return;
121 
122 	cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
123 	I915_STATE_WARN(cur_state != state,
124 	     "%s assertion failure (expected %s, current %s)\n",
125 			pll->name, onoff(state), onoff(cur_state));
126 }
127 
128 /**
129  * intel_prepare_shared_dpll - call a dpll's prepare hook
130  * @crtc: CRTC which has a shared dpll
131  *
132  * This calls the PLL's prepare hook if it has one and if the PLL is not
133  * already enabled. The prepare hook is platform specific.
134  */
135 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
136 {
137 	struct drm_device *dev = crtc->base.dev;
138 	struct drm_i915_private *dev_priv = to_i915(dev);
139 	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
140 
141 	if (WARN_ON(pll == NULL))
142 		return;
143 
144 	mutex_lock(&dev_priv->dpll_lock);
145 	WARN_ON(!pll->state.crtc_mask);
146 	if (!pll->active_mask) {
147 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
148 		WARN_ON(pll->on);
149 		assert_shared_dpll_disabled(dev_priv, pll);
150 
151 		pll->funcs.prepare(dev_priv, pll);
152 	}
153 	mutex_unlock(&dev_priv->dpll_lock);
154 }
155 
156 /**
157  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
158  * @crtc: CRTC which has a shared DPLL
159  *
160  * Enable the shared DPLL used by @crtc.
161  */
162 void intel_enable_shared_dpll(struct intel_crtc *crtc)
163 {
164 	struct drm_device *dev = crtc->base.dev;
165 	struct drm_i915_private *dev_priv = to_i915(dev);
166 	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
167 	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
168 	unsigned old_mask;
169 
170 	if (WARN_ON(pll == NULL))
171 		return;
172 
173 	mutex_lock(&dev_priv->dpll_lock);
174 	old_mask = pll->active_mask;
175 
176 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
177 	    WARN_ON(pll->active_mask & crtc_mask))
178 		goto out;
179 
180 	pll->active_mask |= crtc_mask;
181 
182 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
183 		      pll->name, pll->active_mask, pll->on,
184 		      crtc->base.base.id);
185 
186 	if (old_mask) {
187 		WARN_ON(!pll->on);
188 		assert_shared_dpll_enabled(dev_priv, pll);
189 		goto out;
190 	}
191 	WARN_ON(pll->on);
192 
193 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
194 	pll->funcs.enable(dev_priv, pll);
195 	pll->on = true;
196 
197 out:
198 	mutex_unlock(&dev_priv->dpll_lock);
199 }
200 
201 /**
202  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
203  * @crtc: CRTC which has a shared DPLL
204  *
205  * Disable the shared DPLL used by @crtc.
206  */
207 void intel_disable_shared_dpll(struct intel_crtc *crtc)
208 {
209 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
210 	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
211 	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
212 
213 	/* PCH only available on ILK+ */
214 	if (INTEL_GEN(dev_priv) < 5)
215 		return;
216 
217 	if (pll == NULL)
218 		return;
219 
220 	mutex_lock(&dev_priv->dpll_lock);
221 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
222 		goto out;
223 
224 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
225 		      pll->name, pll->active_mask, pll->on,
226 		      crtc->base.base.id);
227 
228 	assert_shared_dpll_enabled(dev_priv, pll);
229 	WARN_ON(!pll->on);
230 
231 	pll->active_mask &= ~crtc_mask;
232 	if (pll->active_mask)
233 		goto out;
234 
235 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
236 	pll->funcs.disable(dev_priv, pll);
237 	pll->on = false;
238 
239 out:
240 	mutex_unlock(&dev_priv->dpll_lock);
241 }
242 
243 static struct intel_shared_dpll *
244 intel_find_shared_dpll(struct intel_crtc *crtc,
245 		       struct intel_crtc_state *crtc_state,
246 		       enum intel_dpll_id range_min,
247 		       enum intel_dpll_id range_max)
248 {
249 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
250 	struct intel_shared_dpll *pll;
251 	struct intel_shared_dpll_state *shared_dpll;
252 	enum intel_dpll_id i;
253 
254 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
255 
256 	for (i = range_min; i <= range_max; i++) {
257 		pll = &dev_priv->shared_dplls[i];
258 
259 		/* Only want to check enabled timings first */
260 		if (shared_dpll[i].crtc_mask == 0)
261 			continue;
262 
263 		if (memcmp(&crtc_state->dpll_hw_state,
264 			   &shared_dpll[i].hw_state,
265 			   sizeof(crtc_state->dpll_hw_state)) == 0) {
266 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
267 				      crtc->base.base.id, crtc->base.name, pll->name,
268 				      shared_dpll[i].crtc_mask,
269 				      pll->active_mask);
270 			return pll;
271 		}
272 	}
273 
274 	/* Ok no matching timings, maybe there's a free one? */
275 	for (i = range_min; i <= range_max; i++) {
276 		pll = &dev_priv->shared_dplls[i];
277 		if (shared_dpll[i].crtc_mask == 0) {
278 			DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
279 				      crtc->base.base.id, crtc->base.name, pll->name);
280 			return pll;
281 		}
282 	}
283 
284 	return NULL;
285 }
286 
287 static void
288 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
289 			    struct intel_crtc_state *crtc_state)
290 {
291 	struct intel_shared_dpll_state *shared_dpll;
292 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
293 	enum intel_dpll_id i = pll->id;
294 
295 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
296 
297 	if (shared_dpll[i].crtc_mask == 0)
298 		shared_dpll[i].hw_state =
299 			crtc_state->dpll_hw_state;
300 
301 	crtc_state->shared_dpll = pll;
302 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
303 			 pipe_name(crtc->pipe));
304 
305 	shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
306 }
307 
308 /**
309  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
310  * @state: atomic state
311  *
312  * This is the dpll version of drm_atomic_helper_swap_state() since the
313  * helper does not handle driver-specific global state.
314  *
315  * For consistency with atomic helpers this function does a complete swap,
316  * i.e. it also puts the current state into @state, even though there is no
317  * need for that at this moment.
318  */
319 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
320 {
321 	struct drm_i915_private *dev_priv = to_i915(state->dev);
322 	struct intel_shared_dpll_state *shared_dpll;
323 	struct intel_shared_dpll *pll;
324 	enum intel_dpll_id i;
325 
326 	if (!to_intel_atomic_state(state)->dpll_set)
327 		return;
328 
329 	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
330 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
331 		struct intel_shared_dpll_state tmp;
332 
333 		pll = &dev_priv->shared_dplls[i];
334 
335 		tmp = pll->state;
336 		pll->state = shared_dpll[i];
337 		shared_dpll[i] = tmp;
338 	}
339 }
340 
341 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
342 				      struct intel_shared_dpll *pll,
343 				      struct intel_dpll_hw_state *hw_state)
344 {
345 	uint32_t val;
346 
347 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
348 		return false;
349 
350 	val = I915_READ(PCH_DPLL(pll->id));
351 	hw_state->dpll = val;
352 	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
353 	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
354 
355 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
356 
357 	return val & DPLL_VCO_ENABLE;
358 }
359 
360 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
361 				 struct intel_shared_dpll *pll)
362 {
363 	I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
364 	I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
365 }
366 
367 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
368 {
369 	u32 val;
370 	bool enabled;
371 
372 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
373 
374 	val = I915_READ(PCH_DREF_CONTROL);
375 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
376 			    DREF_SUPERSPREAD_SOURCE_MASK));
377 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
378 }
379 
380 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
381 				struct intel_shared_dpll *pll)
382 {
383 	/* PCH refclock must be enabled first */
384 	ibx_assert_pch_refclk_enabled(dev_priv);
385 
386 	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
387 
388 	/* Wait for the clocks to stabilize. */
389 	POSTING_READ(PCH_DPLL(pll->id));
390 	udelay(150);
391 
392 	/* The pixel multiplier can only be updated once the
393 	 * DPLL is enabled and the clocks are stable.
394 	 *
395 	 * So write it again.
396 	 */
397 	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
398 	POSTING_READ(PCH_DPLL(pll->id));
399 	udelay(200);
400 }
401 
402 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
403 				 struct intel_shared_dpll *pll)
404 {
405 	struct drm_device *dev = &dev_priv->drm;
406 	struct intel_crtc *crtc;
407 
408 	/* Make sure no transcoder is still depending on us. */
409 	for_each_intel_crtc(dev, crtc) {
410 		if (crtc->config->shared_dpll == pll)
411 			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
412 	}
413 
414 	I915_WRITE(PCH_DPLL(pll->id), 0);
415 	POSTING_READ(PCH_DPLL(pll->id));
416 	udelay(200);
417 }
418 
419 static struct intel_shared_dpll *
420 ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
421 	     struct intel_encoder *encoder)
422 {
423 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
424 	struct intel_shared_dpll *pll;
425 	enum intel_dpll_id i;
426 
427 	if (HAS_PCH_IBX(dev_priv)) {
428 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
429 		i = (enum intel_dpll_id) crtc->pipe;
430 		pll = &dev_priv->shared_dplls[i];
431 
432 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
433 			      crtc->base.base.id, crtc->base.name, pll->name);
434 	} else {
435 		pll = intel_find_shared_dpll(crtc, crtc_state,
436 					     DPLL_ID_PCH_PLL_A,
437 					     DPLL_ID_PCH_PLL_B);
438 	}
439 
440 	if (!pll)
441 		return NULL;
442 
443 	/* reference the pll */
444 	intel_reference_shared_dpll(pll, crtc_state);
445 
446 	return pll;
447 }
448 
449 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
450 			      struct intel_dpll_hw_state *hw_state)
451 {
452 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
453 		      "fp0: 0x%x, fp1: 0x%x\n",
454 		      hw_state->dpll,
455 		      hw_state->dpll_md,
456 		      hw_state->fp0,
457 		      hw_state->fp1);
458 }
459 
460 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
461 	.prepare = ibx_pch_dpll_prepare,
462 	.enable = ibx_pch_dpll_enable,
463 	.disable = ibx_pch_dpll_disable,
464 	.get_hw_state = ibx_pch_dpll_get_hw_state,
465 };
466 
467 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
468 			       struct intel_shared_dpll *pll)
469 {
470 	I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
471 	POSTING_READ(WRPLL_CTL(pll->id));
472 	udelay(20);
473 }
474 
475 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
476 				struct intel_shared_dpll *pll)
477 {
478 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
479 	POSTING_READ(SPLL_CTL);
480 	udelay(20);
481 }
482 
483 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
484 				  struct intel_shared_dpll *pll)
485 {
486 	uint32_t val;
487 
488 	val = I915_READ(WRPLL_CTL(pll->id));
489 	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
490 	POSTING_READ(WRPLL_CTL(pll->id));
491 }
492 
493 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
494 				 struct intel_shared_dpll *pll)
495 {
496 	uint32_t val;
497 
498 	val = I915_READ(SPLL_CTL);
499 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
500 	POSTING_READ(SPLL_CTL);
501 }
502 
503 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
504 				       struct intel_shared_dpll *pll,
505 				       struct intel_dpll_hw_state *hw_state)
506 {
507 	uint32_t val;
508 
509 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
510 		return false;
511 
512 	val = I915_READ(WRPLL_CTL(pll->id));
513 	hw_state->wrpll = val;
514 
515 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
516 
517 	return val & WRPLL_PLL_ENABLE;
518 }
519 
520 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
521 				      struct intel_shared_dpll *pll,
522 				      struct intel_dpll_hw_state *hw_state)
523 {
524 	uint32_t val;
525 
526 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
527 		return false;
528 
529 	val = I915_READ(SPLL_CTL);
530 	hw_state->spll = val;
531 
532 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
533 
534 	return val & SPLL_PLL_ENABLE;
535 }
536 
537 #define LC_FREQ 2700
538 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
539 
540 #define P_MIN 2
541 #define P_MAX 64
542 #define P_INC 2
543 
544 /* Constraints for good PLL behavior */
545 #define REF_MIN 48
546 #define REF_MAX 400
547 #define VCO_MIN 2400
548 #define VCO_MAX 4800
549 
550 struct hsw_wrpll_rnp {
551 	unsigned p, n2, r2;
552 };
553 
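/*
 * Returns the frequency mismatch budget, in ppm, that hsw_wrpll_update_rnp()
 * may use for the given pixel clock (in Hz).  The explicitly listed clocks
 * are common TMDS rates with their own tolerance requirements; everything
 * else falls back to a 1000 ppm budget.
 */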
554 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
555 {
556 	unsigned budget;
557 
558 	switch (clock) {
559 	case 25175000:
560 	case 25200000:
561 	case 27000000:
562 	case 27027000:
563 	case 37762500:
564 	case 37800000:
565 	case 40500000:
566 	case 40541000:
567 	case 54000000:
568 	case 54054000:
569 	case 59341000:
570 	case 59400000:
571 	case 72000000:
572 	case 74176000:
573 	case 74250000:
574 	case 81000000:
575 	case 81081000:
576 	case 89012000:
577 	case 89100000:
578 	case 108000000:
579 	case 108108000:
580 	case 111264000:
581 	case 111375000:
582 	case 148352000:
583 	case 148500000:
584 	case 162000000:
585 	case 162162000:
586 	case 222525000:
587 	case 222750000:
588 	case 296703000:
589 	case 297000000:
590 		budget = 0;
591 		break;
592 	case 233500000:
593 	case 245250000:
594 	case 247750000:
595 	case 253250000:
596 	case 298000000:
597 		budget = 1500;
598 		break;
599 	case 169128000:
600 	case 169500000:
601 	case 179500000:
602 	case 202000000:
603 		budget = 2000;
604 		break;
605 	case 256250000:
606 	case 262500000:
607 	case 270000000:
608 	case 272500000:
609 	case 273750000:
610 	case 280750000:
611 	case 281250000:
612 	case 286000000:
613 	case 291750000:
614 		budget = 4000;
615 		break;
616 	case 267250000:
617 	case 268500000:
618 		budget = 5000;
619 		break;
620 	default:
621 		budget = 1000;
622 		break;
623 	}
624 
625 	return budget;
626 }
627 
628 static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
629 				 unsigned r2, unsigned n2, unsigned p,
630 				 struct hsw_wrpll_rnp *best)
631 {
632 	uint64_t a, b, c, d, diff, diff_best;
633 
634 	/* No best (r,n,p) yet */
635 	if (best->p == 0) {
636 		best->p = p;
637 		best->n2 = n2;
638 		best->r2 = r2;
639 		return;
640 	}
641 
642 	/*
643 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
644 	 * freq2k.
645 	 *
646 	 * delta = 1e6 *
647 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
648 	 *	   freq2k;
649 	 *
650 	 * and we would like delta <= budget.
651 	 *
652 	 * If the discrepancy is above the PPM-based budget, always prefer to
653 	 * improve upon the previous solution.  However, if you're within the
654 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
655 	 */
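	/*
	 * With a = freq2k * budget * p * r2 and c = 1000000 * diff below,
	 * a >= c is simply the budget test rearranged to avoid a division:
	 *   1e6 * diff / (freq2k * p * r2) <= budget  <=>  delta <= budget
	 * and likewise b >= d for the current best (r2, n2, p).
	 */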
656 	a = freq2k * budget * p * r2;
657 	b = freq2k * budget * best->p * best->r2;
658 	diff = abs_diff((u64)freq2k * p * r2, LC_FREQ_2K * n2);
659 	diff_best = abs_diff((u64)freq2k * best->p * best->r2,
660 			     LC_FREQ_2K * best->n2);
661 	c = 1000000 * diff;
662 	d = 1000000 * diff_best;
663 
664 	if (a < c && b < d) {
665 		/* If both are above the budget, pick the closer */
666 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
667 			best->p = p;
668 			best->n2 = n2;
669 			best->r2 = r2;
670 		}
671 	} else if (a >= c && b < d) {
672 		/* The candidate is within the budget but the current best is not, so update. */
673 		best->p = p;
674 		best->n2 = n2;
675 		best->r2 = r2;
676 	} else if (a >= c && b >= d) {
677 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
678 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
679 			best->p = p;
680 			best->n2 = n2;
681 			best->r2 = r2;
682 		}
683 	}
684 	/* Otherwise a < c && b >= d, do nothing */
685 }
686 
687 static void
688 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
689 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
690 {
691 	uint64_t freq2k;
692 	unsigned p, n2, r2;
693 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
694 	unsigned budget;
695 
696 	freq2k = clock / 100;
697 
698 	budget = hsw_wrpll_get_budget_for_freq(clock);
699 
700 	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
701 	 * entirely and pass the LC PLL clock straight through instead. */
702 	if (freq2k == 5400000) {
703 		*n2_out = 2;
704 		*p_out = 1;
705 		*r2_out = 2;
706 		return;
707 	}
708 
709 	/*
710 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
711 	 * the WR PLL.
712 	 *
713 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
714 	 * Injecting R2 = 2 * R gives:
715 	 *   REF_MAX * r2 > LC_FREQ * 2 and
716 	 *   REF_MIN * r2 < LC_FREQ * 2
717 	 *
718 	 * Which means the desired boundaries for r2 are:
719 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
720 	 *
721 	 */
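	/*
	 * Plugging in the constants above (LC_FREQ = 2700, REF_MIN = 48,
	 * REF_MAX = 400): r2 runs from 5400 / 400 + 1 = 14 up to
	 * 5400 / 48 = 112, and for r2 = 14 the inner loop below tries
	 * n2 = 2400 * 14 / 2700 + 1 = 13 up to 4800 * 14 / 2700 = 24.
	 */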
722 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
723 	     r2 <= LC_FREQ * 2 / REF_MIN;
724 	     r2++) {
725 
726 		/*
727 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
728 		 *
729 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
730 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
731 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
732 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
733 		 *
734 		 * Which means the desired boundaries for n2 are:
735 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
736 		 */
737 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
738 		     n2 <= VCO_MAX * r2 / LC_FREQ;
739 		     n2++) {
740 
741 			for (p = P_MIN; p <= P_MAX; p += P_INC)
742 				hsw_wrpll_update_rnp(freq2k, budget,
743 						     r2, n2, p, &best);
744 		}
745 	}
746 
747 	*n2_out = best.n2;
748 	*p_out = best.p;
749 	*r2_out = best.r2;
750 }
751 
752 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
753 						       struct intel_crtc *crtc,
754 						       struct intel_crtc_state *crtc_state)
755 {
756 	struct intel_shared_dpll *pll;
757 	uint32_t val;
758 	unsigned int p, n2, r2;
759 
760 	hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
761 
762 	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
763 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
764 	      WRPLL_DIVIDER_POST(p);
765 
766 	crtc_state->dpll_hw_state.wrpll = val;
767 
768 	pll = intel_find_shared_dpll(crtc, crtc_state,
769 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
770 
771 	if (!pll)
772 		return NULL;
773 
774 	return pll;
775 }
776 
777 static struct intel_shared_dpll *
778 hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
779 {
780 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
781 	struct intel_shared_dpll *pll;
782 	enum intel_dpll_id pll_id;
783 
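	/*
	 * clock is the DP link rate in kHz, so RBR (162000) maps via
	 * clock / 2 = 81000 to LCPLL 810, HBR (270000) to LCPLL 1350 and
	 * HBR2 (540000) to LCPLL 2700.
	 */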
784 	switch (clock / 2) {
785 	case 81000:
786 		pll_id = DPLL_ID_LCPLL_810;
787 		break;
788 	case 135000:
789 		pll_id = DPLL_ID_LCPLL_1350;
790 		break;
791 	case 270000:
792 		pll_id = DPLL_ID_LCPLL_2700;
793 		break;
794 	default:
795 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
796 		return NULL;
797 	}
798 
799 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
800 
801 	if (!pll)
802 		return NULL;
803 
804 	return pll;
805 }
806 
807 static struct intel_shared_dpll *
808 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
809 	     struct intel_encoder *encoder)
810 {
811 	struct intel_shared_dpll *pll;
812 	int clock = crtc_state->port_clock;
813 
814 	memset(&crtc_state->dpll_hw_state, 0,
815 	       sizeof(crtc_state->dpll_hw_state));
816 
817 	if (encoder->type == INTEL_OUTPUT_HDMI) {
818 		pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
819 
820 	} else if (encoder->type == INTEL_OUTPUT_DP ||
821 		   encoder->type == INTEL_OUTPUT_DP_MST ||
822 		   encoder->type == INTEL_OUTPUT_EDP) {
823 		pll = hsw_ddi_dp_get_dpll(encoder, clock);
824 
825 	} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
826 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
827 			return NULL;
828 
829 		crtc_state->dpll_hw_state.spll =
830 			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
831 
832 		pll = intel_find_shared_dpll(crtc, crtc_state,
833 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
834 	} else {
835 		return NULL;
836 	}
837 
838 	if (!pll)
839 		return NULL;
840 
841 	intel_reference_shared_dpll(pll, crtc_state);
842 
843 	return pll;
844 }
845 
846 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
847 			      struct intel_dpll_hw_state *hw_state)
848 {
849 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
850 		      hw_state->wrpll, hw_state->spll);
851 }
852 
853 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
854 	.enable = hsw_ddi_wrpll_enable,
855 	.disable = hsw_ddi_wrpll_disable,
856 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
857 };
858 
859 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
860 	.enable = hsw_ddi_spll_enable,
861 	.disable = hsw_ddi_spll_disable,
862 	.get_hw_state = hsw_ddi_spll_get_hw_state,
863 };
864 
865 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
866 				 struct intel_shared_dpll *pll)
867 {
868 }
869 
870 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
871 				  struct intel_shared_dpll *pll)
872 {
873 }
874 
875 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
876 				       struct intel_shared_dpll *pll,
877 				       struct intel_dpll_hw_state *hw_state)
878 {
879 	return true;
880 }
881 
882 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
883 	.enable = hsw_ddi_lcpll_enable,
884 	.disable = hsw_ddi_lcpll_disable,
885 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
886 };
887 
888 struct skl_dpll_regs {
889 	i915_reg_t ctl, cfgcr1, cfgcr2;
890 };
891 
892 /* this array is indexed by the *shared* pll id */
893 static const struct skl_dpll_regs skl_dpll_regs[4] = {
894 	{
895 		/* DPLL 0 */
896 		.ctl = LCPLL1_CTL,
897 		/* DPLL 0 doesn't support HDMI mode */
898 	},
899 	{
900 		/* DPLL 1 */
901 		.ctl = LCPLL2_CTL,
902 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
903 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
904 	},
905 	{
906 		/* DPLL 2 */
907 		.ctl = WRPLL_CTL(0),
908 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
909 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
910 	},
911 	{
912 		/* DPLL 3 */
913 		.ctl = WRPLL_CTL(1),
914 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
915 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
916 	},
917 };
918 
919 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
920 				    struct intel_shared_dpll *pll)
921 {
922 	uint32_t val;
923 
924 	val = I915_READ(DPLL_CTRL1);
925 
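	/* each DPLL owns a 6-bit field in DPLL_CTRL1, hence the id * 6 shift */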
926 	val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
927 		 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
928 	val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
929 
930 	I915_WRITE(DPLL_CTRL1, val);
931 	POSTING_READ(DPLL_CTRL1);
932 }
933 
934 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
935 			       struct intel_shared_dpll *pll)
936 {
937 	const struct skl_dpll_regs *regs = skl_dpll_regs;
938 
939 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
940 
941 	I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
942 	I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
943 	POSTING_READ(regs[pll->id].cfgcr1);
944 	POSTING_READ(regs[pll->id].cfgcr2);
945 
946 	/* the enable bit is always bit 31 */
947 	I915_WRITE(regs[pll->id].ctl,
948 		   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
949 
950 	if (intel_wait_for_register(dev_priv,
951 				    DPLL_STATUS,
952 				    DPLL_LOCK(pll->id),
953 				    DPLL_LOCK(pll->id),
954 				    5))
955 		DRM_ERROR("DPLL %d not locked\n", pll->id);
956 }
957 
958 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
959 				 struct intel_shared_dpll *pll)
960 {
961 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
962 }
963 
964 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
965 				struct intel_shared_dpll *pll)
966 {
967 	const struct skl_dpll_regs *regs = skl_dpll_regs;
968 
969 	/* the enable bit is always bit 31 */
970 	I915_WRITE(regs[pll->id].ctl,
971 		   I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
972 	POSTING_READ(regs[pll->id].ctl);
973 }
974 
975 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
976 				  struct intel_shared_dpll *pll)
977 {
978 }
979 
980 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
981 				     struct intel_shared_dpll *pll,
982 				     struct intel_dpll_hw_state *hw_state)
983 {
984 	uint32_t val;
985 	const struct skl_dpll_regs *regs = skl_dpll_regs;
986 	bool ret;
987 
988 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
989 		return false;
990 
991 	ret = false;
992 
993 	val = I915_READ(regs[pll->id].ctl);
994 	if (!(val & LCPLL_PLL_ENABLE))
995 		goto out;
996 
997 	val = I915_READ(DPLL_CTRL1);
998 	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
999 
1000 	/* avoid reading back stale values if HDMI mode is not enabled */
1001 	if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
1002 		hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
1003 		hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
1004 	}
1005 	ret = true;
1006 
1007 out:
1008 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1009 
1010 	return ret;
1011 }
1012 
1013 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1014 				       struct intel_shared_dpll *pll,
1015 				       struct intel_dpll_hw_state *hw_state)
1016 {
1017 	uint32_t val;
1018 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1019 	bool ret;
1020 
1021 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1022 		return false;
1023 
1024 	ret = false;
1025 
1026 	/* DPLL0 is always enabled since it drives CDCLK */
1027 	val = I915_READ(regs[pll->id].ctl);
1028 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1029 		goto out;
1030 
1031 	val = I915_READ(DPLL_CTRL1);
1032 	hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
1033 
1034 	ret = true;
1035 
1036 out:
1037 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1038 
1039 	return ret;
1040 }
1041 
1042 struct skl_wrpll_context {
1043 	uint64_t min_deviation;		/* current minimal deviation */
1044 	uint64_t central_freq;		/* chosen central freq */
1045 	uint64_t dco_freq;		/* chosen dco freq */
1046 	unsigned int p;			/* chosen divider */
1047 };
1048 
1049 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1050 {
1051 	memset(ctx, 0, sizeof(*ctx));
1052 
1053 	ctx->min_deviation = U64_MAX;
1054 }
1055 
1056 /* DCO freq must be within +1%/-6% of the DCO central freq */
1057 #define SKL_DCO_MAX_PDEVIATION	100
1058 #define SKL_DCO_MAX_NDEVIATION	600
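/*
 * For example, central_freq = 9000000000 (9 GHz) and dco_freq = 8910000000
 * (8.91 GHz) give deviation = 10000 * 90000000 / 9000000000 = 100, i.e. 1%,
 * on the negative side (dco < central) and thus well within
 * SKL_DCO_MAX_NDEVIATION.
 */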
1059 
1060 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1061 				  uint64_t central_freq,
1062 				  uint64_t dco_freq,
1063 				  unsigned int divider)
1064 {
1065 	uint64_t deviation;
1066 
1067 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1068 			      central_freq);
1069 
1070 	/* positive deviation */
1071 	if (dco_freq >= central_freq) {
1072 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1073 		    deviation < ctx->min_deviation) {
1074 			ctx->min_deviation = deviation;
1075 			ctx->central_freq = central_freq;
1076 			ctx->dco_freq = dco_freq;
1077 			ctx->p = divider;
1078 		}
1079 	/* negative deviation */
1080 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1081 		   deviation < ctx->min_deviation) {
1082 		ctx->min_deviation = deviation;
1083 		ctx->central_freq = central_freq;
1084 		ctx->dco_freq = dco_freq;
1085 		ctx->p = divider;
1086 	}
1087 }
1088 
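/*
 * Worked examples for the branches below: p = 12 is even with half = 6, so
 * the half % 2 == 0 branch gives (p0, p1, p2) = (2, 3, 2); the odd divider
 * p = 35 gives (7, 1, 5).  In both cases p0 * p1 * p2 == p.
 */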
1089 static void skl_wrpll_get_multipliers(unsigned int p,
1090 				      unsigned int *p0 /* out */,
1091 				      unsigned int *p1 /* out */,
1092 				      unsigned int *p2 /* out */)
1093 {
1094 	/* even dividers */
1095 	if (p % 2 == 0) {
1096 		unsigned int half = p / 2;
1097 
1098 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1099 			*p0 = 2;
1100 			*p1 = 1;
1101 			*p2 = half;
1102 		} else if (half % 2 == 0) {
1103 			*p0 = 2;
1104 			*p1 = half / 2;
1105 			*p2 = 2;
1106 		} else if (half % 3 == 0) {
1107 			*p0 = 3;
1108 			*p1 = half / 3;
1109 			*p2 = 2;
1110 		} else if (half % 7 == 0) {
1111 			*p0 = 7;
1112 			*p1 = half / 7;
1113 			*p2 = 2;
1114 		}
1115 	} else if (p == 3 || p == 9) {  /* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
1116 		*p0 = 3;
1117 		*p1 = 1;
1118 		*p2 = p / 3;
1119 	} else if (p == 5 || p == 7) {
1120 		*p0 = p;
1121 		*p1 = 1;
1122 		*p2 = 1;
1123 	} else if (p == 15) {
1124 		*p0 = 3;
1125 		*p1 = 1;
1126 		*p2 = 5;
1127 	} else if (p == 21) {
1128 		*p0 = 7;
1129 		*p1 = 1;
1130 		*p2 = 3;
1131 	} else if (p == 35) {
1132 		*p0 = 7;
1133 		*p1 = 1;
1134 		*p2 = 5;
1135 	}
1136 }
1137 
1138 struct skl_wrpll_params {
1139 	uint32_t        dco_fraction;
1140 	uint32_t        dco_integer;
1141 	uint32_t        qdiv_ratio;
1142 	uint32_t        qdiv_mode;
1143 	uint32_t        kdiv;
1144 	uint32_t        pdiv;
1145 	uint32_t        central_freq;
1146 };
1147 
1148 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1149 				      uint64_t afe_clock,
1150 				      uint64_t central_freq,
1151 				      uint32_t p0, uint32_t p1, uint32_t p2)
1152 {
1153 	uint64_t dco_freq;
1154 
1155 	switch (central_freq) {
1156 	case 9600000000ULL:
1157 		params->central_freq = 0;
1158 		break;
1159 	case 9000000000ULL:
1160 		params->central_freq = 1;
1161 		break;
1162 	case 8400000000ULL:
1163 		params->central_freq = 3;
1164 	}
1165 
1166 	switch (p0) {
1167 	case 1:
1168 		params->pdiv = 0;
1169 		break;
1170 	case 2:
1171 		params->pdiv = 1;
1172 		break;
1173 	case 3:
1174 		params->pdiv = 2;
1175 		break;
1176 	case 7:
1177 		params->pdiv = 4;
1178 		break;
1179 	default:
1180 		WARN(1, "Incorrect PDiv\n");
1181 	}
1182 
1183 	switch (p2) {
1184 	case 5:
1185 		params->kdiv = 0;
1186 		break;
1187 	case 2:
1188 		params->kdiv = 1;
1189 		break;
1190 	case 3:
1191 		params->kdiv = 2;
1192 		break;
1193 	case 1:
1194 		params->kdiv = 3;
1195 		break;
1196 	default:
1197 		WARN(1, "Incorrect KDiv\n");
1198 	}
1199 
1200 	params->qdiv_ratio = p1;
1201 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1202 
1203 	dco_freq = p0 * p1 * p2 * afe_clock;
1204 
1205 	/*
1206 	 * Intermediate values are in Hz.
1207 	 * Divide by MHz to match bspec
1208 	 */
1209 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1210 	params->dco_fraction =
1211 		div_u64((div_u64(dco_freq, 24) -
1212 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1213 }
1214 
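/*
 * Worked example, following the code below for a common HDMI mode:
 * clock = 148500000 Hz gives afe_clock = 742500000.  The divider search
 * settles on p = 12 (dco_freq = 8910000000, 1% below the 9 GHz central
 * frequency), which skl_wrpll_get_multipliers() splits into
 * (p0, p1, p2) = (2, 3, 2).  skl_wrpll_params_populate() then yields
 * dco_integer = 8910000000 / 24000000 = 371 and
 * dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 0x2000.
 */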
1215 static bool
1216 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1217 			struct skl_wrpll_params *wrpll_params)
1218 {
1219 	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1220 	uint64_t dco_central_freq[3] = {8400000000ULL,
1221 					9000000000ULL,
1222 					9600000000ULL};
1223 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1224 					     24, 28, 30, 32, 36, 40, 42, 44,
1225 					     48, 52, 54, 56, 60, 64, 66, 68,
1226 					     70, 72, 76, 78, 80, 84, 88, 90,
1227 					     92, 96, 98 };
1228 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1229 	static const struct {
1230 		const int *list;
1231 		int n_dividers;
1232 	} dividers[] = {
1233 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1234 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1235 	};
1236 	struct skl_wrpll_context ctx;
1237 	unsigned int dco, d, i;
1238 	unsigned int p0, p1, p2;
1239 
1240 	skl_wrpll_context_init(&ctx);
1241 
1242 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1243 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1244 			for (i = 0; i < dividers[d].n_dividers; i++) {
1245 				unsigned int p = dividers[d].list[i];
1246 				uint64_t dco_freq = p * afe_clock;
1247 
1248 				skl_wrpll_try_divider(&ctx,
1249 						      dco_central_freq[dco],
1250 						      dco_freq,
1251 						      p);
1252 				/*
1253 				 * Skip the remaining dividers once we are sure
1254 				 * to have found the definitive one: a deviation
1255 				 * of 0 cannot be improved upon.
1256 				 */
1257 				if (ctx.min_deviation == 0)
1258 					goto skip_remaining_dividers;
1259 			}
1260 		}
1261 
1262 skip_remaining_dividers:
1263 		/*
1264 		 * If a solution is found with an even divider, prefer
1265 		 * this one.
1266 		 */
1267 		if (d == 0 && ctx.p)
1268 			break;
1269 	}
1270 
1271 	if (!ctx.p) {
1272 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1273 		return false;
1274 	}
1275 
1276 	/*
1277 	 * gcc incorrectly concludes that these can be used without being
1278 	 * initialized. To be fair, it's hard to guess.
1279 	 */
1280 	p0 = p1 = p2 = 0;
1281 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1282 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1283 				  p0, p1, p2);
1284 
1285 	return true;
1286 }
1287 
1288 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
1289 				      struct intel_crtc_state *crtc_state,
1290 				      int clock)
1291 {
1292 	uint32_t ctrl1, cfgcr1, cfgcr2;
1293 	struct skl_wrpll_params wrpll_params = { 0, };
1294 
1295 	/*
1296 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1297 	 * as the DPLL id in this function.
1298 	 */
1299 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1300 
1301 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1302 
1303 	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
1304 		return false;
1305 
1306 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1307 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1308 		wrpll_params.dco_integer;
1309 
1310 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1311 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1312 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1313 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1314 		wrpll_params.central_freq;
1315 
1316 	memset(&crtc_state->dpll_hw_state, 0,
1317 	       sizeof(crtc_state->dpll_hw_state));
1318 
1319 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1320 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1321 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1322 	return true;
1323 }
1324 
1325 
1326 static bool
1327 skl_ddi_dp_set_dpll_hw_state(int clock,
1328 			     struct intel_dpll_hw_state *dpll_hw_state)
1329 {
1330 	uint32_t ctrl1;
1331 
1332 	/*
1333 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1334 	 * as the DPLL id in this function.
1335 	 */
1336 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1337 	switch (clock / 2) {
1338 	case 81000:
1339 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1340 		break;
1341 	case 135000:
1342 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1343 		break;
1344 	case 270000:
1345 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1346 		break;
1347 		/* eDP 1.4 rates */
1348 	case 162000:
1349 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1350 		break;
1351 	case 108000:
1352 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1353 		break;
1354 	case 216000:
1355 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1356 		break;
1357 	}
1358 
1359 	dpll_hw_state->ctrl1 = ctrl1;
1360 	return true;
1361 }
1362 
1363 static struct intel_shared_dpll *
1364 skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1365 	     struct intel_encoder *encoder)
1366 {
1367 	struct intel_shared_dpll *pll;
1368 	int clock = crtc_state->port_clock;
1369 	bool bret;
1370 	struct intel_dpll_hw_state dpll_hw_state;
1371 
1372 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
1373 
1374 	if (encoder->type == INTEL_OUTPUT_HDMI) {
1375 		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
1376 		if (!bret) {
1377 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1378 			return NULL;
1379 		}
1380 	} else if (encoder->type == INTEL_OUTPUT_DP ||
1381 		   encoder->type == INTEL_OUTPUT_DP_MST ||
1382 		   encoder->type == INTEL_OUTPUT_EDP) {
1383 		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
1384 		if (!bret) {
1385 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1386 			return NULL;
1387 		}
1388 		crtc_state->dpll_hw_state = dpll_hw_state;
1389 	} else {
1390 		return NULL;
1391 	}
1392 
1393 	if (encoder->type == INTEL_OUTPUT_EDP)
1394 		pll = intel_find_shared_dpll(crtc, crtc_state,
1395 					     DPLL_ID_SKL_DPLL0,
1396 					     DPLL_ID_SKL_DPLL0);
1397 	else
1398 		pll = intel_find_shared_dpll(crtc, crtc_state,
1399 					     DPLL_ID_SKL_DPLL1,
1400 					     DPLL_ID_SKL_DPLL3);
1401 	if (!pll)
1402 		return NULL;
1403 
1404 	intel_reference_shared_dpll(pll, crtc_state);
1405 
1406 	return pll;
1407 }
1408 
1409 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1410 			      struct intel_dpll_hw_state *hw_state)
1411 {
1412 	DRM_DEBUG_KMS("dpll_hw_state: "
1413 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1414 		      hw_state->ctrl1,
1415 		      hw_state->cfgcr1,
1416 		      hw_state->cfgcr2);
1417 }
1418 
1419 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1420 	.enable = skl_ddi_pll_enable,
1421 	.disable = skl_ddi_pll_disable,
1422 	.get_hw_state = skl_ddi_pll_get_hw_state,
1423 };
1424 
1425 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1426 	.enable = skl_ddi_dpll0_enable,
1427 	.disable = skl_ddi_dpll0_disable,
1428 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1429 };
1430 
1431 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1432 				struct intel_shared_dpll *pll)
1433 {
1434 	uint32_t temp;
1435 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
1436 	enum dpio_phy phy;
1437 	enum dpio_channel ch;
1438 
1439 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1440 
1441 	/* Non-SSC reference */
1442 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1443 	temp |= PORT_PLL_REF_SEL;
1444 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1445 
1446 	if (IS_GEMINILAKE(dev_priv)) {
1447 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1448 		temp |= PORT_PLL_POWER_ENABLE;
1449 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1450 
1451 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1452 				 PORT_PLL_POWER_STATE), 200))
1453 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1454 	}
1455 
1456 	/* Disable 10 bit clock */
1457 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1458 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1459 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1460 
1461 	/* Write P1 & P2 */
1462 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1463 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1464 	temp |= pll->state.hw_state.ebb0;
1465 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1466 
1467 	/* Write M2 integer */
1468 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1469 	temp &= ~PORT_PLL_M2_MASK;
1470 	temp |= pll->state.hw_state.pll0;
1471 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1472 
1473 	/* Write N */
1474 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1475 	temp &= ~PORT_PLL_N_MASK;
1476 	temp |= pll->state.hw_state.pll1;
1477 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1478 
1479 	/* Write M2 fraction */
1480 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1481 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1482 	temp |= pll->state.hw_state.pll2;
1483 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1484 
1485 	/* Write M2 fraction enable */
1486 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1487 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1488 	temp |= pll->state.hw_state.pll3;
1489 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1490 
1491 	/* Write coeff */
1492 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1493 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1494 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1495 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1496 	temp |= pll->state.hw_state.pll6;
1497 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1498 
1499 	/* Write calibration val */
1500 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1501 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1502 	temp |= pll->state.hw_state.pll8;
1503 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1504 
1505 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1506 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1507 	temp |= pll->state.hw_state.pll9;
1508 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1509 
1510 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1511 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1512 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1513 	temp |= pll->state.hw_state.pll10;
1514 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1515 
1516 	/* Recalibrate with new settings */
1517 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1518 	temp |= PORT_PLL_RECALIBRATE;
1519 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1520 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1521 	temp |= pll->state.hw_state.ebb4;
1522 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1523 
1524 	/* Enable PLL */
1525 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1526 	temp |= PORT_PLL_ENABLE;
1527 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1528 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1529 
1530 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1531 			200))
1532 		DRM_ERROR("PLL %d not locked\n", port);
1533 
1534 	if (IS_GEMINILAKE(dev_priv)) {
1535 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1536 		temp |= DCC_DELAY_RANGE_2;
1537 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1538 	}
1539 
1540 	/*
1541 	 * While we write to the group register to program all lanes at once,
1542 	 * we can only read back individual lane registers; use lanes 0/1 for that.
1543 	 */
1544 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1545 	temp &= ~LANE_STAGGER_MASK;
1546 	temp &= ~LANESTAGGER_STRAP_OVRD;
1547 	temp |= pll->state.hw_state.pcsdw12;
1548 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1549 }
1550 
1551 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1552 					struct intel_shared_dpll *pll)
1553 {
1554 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
1555 	uint32_t temp;
1556 
1557 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1558 	temp &= ~PORT_PLL_ENABLE;
1559 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1560 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1561 
1562 	if (IS_GEMINILAKE(dev_priv)) {
1563 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1564 		temp &= ~PORT_PLL_POWER_ENABLE;
1565 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1566 
1567 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1568 				PORT_PLL_POWER_STATE), 200))
1569 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1570 	}
1571 }
1572 
1573 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1574 					struct intel_shared_dpll *pll,
1575 					struct intel_dpll_hw_state *hw_state)
1576 {
1577 	enum port port = (enum port)pll->id;	/* 1:1 port->PLL mapping */
1578 	uint32_t val;
1579 	bool ret;
1580 	enum dpio_phy phy;
1581 	enum dpio_channel ch;
1582 
1583 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1584 
1585 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1586 		return false;
1587 
1588 	ret = false;
1589 
1590 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1591 	if (!(val & PORT_PLL_ENABLE))
1592 		goto out;
1593 
1594 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1595 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1596 
1597 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1598 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1599 
1600 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1601 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1602 
1603 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1604 	hw_state->pll1 &= PORT_PLL_N_MASK;
1605 
1606 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1607 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1608 
1609 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1610 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1611 
1612 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1613 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1614 			  PORT_PLL_INT_COEFF_MASK |
1615 			  PORT_PLL_GAIN_CTL_MASK;
1616 
1617 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1618 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1619 
1620 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1621 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1622 
1623 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1624 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1625 			   PORT_PLL_DCO_AMP_MASK;
1626 
1627 	/*
1628 	 * While we write to the group register to program all lanes at once, we
1629 	 * can read only lane registers. We configure all lanes the same way, so
1630 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1631 	 */
1632 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1633 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1634 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1635 				 hw_state->pcsdw12,
1636 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1637 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1638 
1639 	ret = true;
1640 
1641 out:
1642 	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1643 
1644 	return ret;
1645 }
1646 
1647 /* bxt clock parameters */
1648 struct bxt_clk_div {
1649 	int clock;
1650 	uint32_t p1;
1651 	uint32_t p2;
1652 	uint32_t m2_int;
1653 	uint32_t m2_frac;
1654 	bool m2_frac_en;
1655 	uint32_t n;
1656 
1657 	int vco;
1658 };
1659 
1660 /* pre-calculated values for DP link rates */
1661 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1662 	{162000, 4, 2, 32, 1677722, 1, 1},
1663 	{270000, 4, 1, 27,       0, 0, 1},
1664 	{540000, 2, 1, 27,       0, 0, 1},
1665 	{216000, 3, 2, 32, 1677722, 1, 1},
1666 	{243000, 4, 1, 24, 1258291, 1, 1},
1667 	{324000, 4, 1, 32, 1677722, 1, 1},
1668 	{432000, 3, 1, 32, 1677722, 1, 1}
1669 };
1670 
1671 static bool
1672 bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
1673 			  struct intel_crtc_state *crtc_state, int clock,
1674 			  struct bxt_clk_div *clk_div)
1675 {
1676 	struct dpll best_clock;
1677 
1678 	/* Calculate HDMI div */
1679 	/*
1680 	 * FIXME: tie the following calculation into
1681 	 * i9xx_crtc_compute_clock
1682 	 */
1683 	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
1684 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1685 				 clock, pipe_name(intel_crtc->pipe));
1686 		return false;
1687 	}
1688 
1689 	clk_div->p1 = best_clock.p1;
1690 	clk_div->p2 = best_clock.p2;
1691 	WARN_ON(best_clock.m1 != 2);
1692 	clk_div->n = best_clock.n;
1693 	clk_div->m2_int = best_clock.m2 >> 22;
1694 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1695 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1696 
1697 	clk_div->vco = best_clock.vco;
1698 
1699 	return true;
1700 }
1701 
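/*
 * clock is the DP link rate in kHz and the resulting VCO is in kHz as well,
 * e.g. the 270000 entry yields vco = 270000 * 10 / 2 * 4 * 1 = 5400000
 * (5.4 GHz), the value bxt_ddi_set_dpll_hw_state() special-cases.
 */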
1702 static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
1703 {
1704 	int i;
1705 
1706 	*clk_div = bxt_dp_clk_val[0];
1707 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1708 		if (bxt_dp_clk_val[i].clock == clock) {
1709 			*clk_div = bxt_dp_clk_val[i];
1710 			break;
1711 		}
1712 	}
1713 
1714 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1715 }
1716 
1717 static bool bxt_ddi_set_dpll_hw_state(int clock,
1718 			  struct bxt_clk_div *clk_div,
1719 			  struct intel_dpll_hw_state *dpll_hw_state)
1720 {
1721 	int vco = clk_div->vco;
1722 	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
1723 	uint32_t lanestagger;
1724 
1725 	if (vco >= 6200000 && vco <= 6700000) {
1726 		prop_coef = 4;
1727 		int_coef = 9;
1728 		gain_ctl = 3;
1729 		targ_cnt = 8;
1730 	} else if ((vco > 5400000 && vco < 6200000) ||
1731 			(vco >= 4800000 && vco < 5400000)) {
1732 		prop_coef = 5;
1733 		int_coef = 11;
1734 		gain_ctl = 3;
1735 		targ_cnt = 9;
1736 	} else if (vco == 5400000) {
1737 		prop_coef = 3;
1738 		int_coef = 8;
1739 		gain_ctl = 1;
1740 		targ_cnt = 9;
1741 	} else {
1742 		DRM_ERROR("Invalid VCO\n");
1743 		return false;
1744 	}
1745 
1746 	if (clock > 270000)
1747 		lanestagger = 0x18;
1748 	else if (clock > 135000)
1749 		lanestagger = 0x0d;
1750 	else if (clock > 67000)
1751 		lanestagger = 0x07;
1752 	else if (clock > 33000)
1753 		lanestagger = 0x04;
1754 	else
1755 		lanestagger = 0x02;
1756 
1757 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1758 	dpll_hw_state->pll0 = clk_div->m2_int;
1759 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1760 	dpll_hw_state->pll2 = clk_div->m2_frac;
1761 
1762 	if (clk_div->m2_frac_en)
1763 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1764 
1765 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1766 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1767 
1768 	dpll_hw_state->pll8 = targ_cnt;
1769 
1770 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1771 
1772 	dpll_hw_state->pll10 =
1773 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1774 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1775 
1776 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1777 
1778 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1779 
1780 	return true;
1781 }
1782 
1783 static bool
1784 bxt_ddi_dp_set_dpll_hw_state(int clock,
1785 			     struct intel_dpll_hw_state *dpll_hw_state)
1786 {
1787 	struct bxt_clk_div clk_div = {0};
1788 
1789 	bxt_ddi_dp_pll_dividers(clock, &clk_div);
1790 
1791 	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1792 }
1793 
1794 static bool
1795 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
1796 			       struct intel_crtc_state *crtc_state, int clock,
1797 			       struct intel_dpll_hw_state *dpll_hw_state)
1798 {
1799 	struct bxt_clk_div clk_div = { };
1800 
1801 	bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
1802 
1803 	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1804 }
1805 
1806 static struct intel_shared_dpll *
1807 bxt_get_dpll(struct intel_crtc *crtc,
1808 		struct intel_crtc_state *crtc_state,
1809 		struct intel_encoder *encoder)
1810 {
1811 	struct intel_dpll_hw_state dpll_hw_state = { };
1812 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1813 	struct intel_digital_port *intel_dig_port;
1814 	struct intel_shared_dpll *pll;
1815 	int i, clock = crtc_state->port_clock;
1816 
1817 	if (encoder->type == INTEL_OUTPUT_HDMI &&
1818 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
1819 					    &dpll_hw_state))
1820 		return NULL;
1821 
1822 	if ((encoder->type == INTEL_OUTPUT_DP ||
1823 	     encoder->type == INTEL_OUTPUT_EDP ||
1824 	     encoder->type == INTEL_OUTPUT_DP_MST) &&
1825 	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
1826 		return NULL;
1827 
1828 	memset(&crtc_state->dpll_hw_state, 0,
1829 	       sizeof(crtc_state->dpll_hw_state));
1830 
1831 	crtc_state->dpll_hw_state = dpll_hw_state;
1832 
1833 	if (encoder->type == INTEL_OUTPUT_DP_MST) {
1834 		struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
1835 
1836 		intel_dig_port = intel_mst->primary;
1837 	} else
1838 		intel_dig_port = enc_to_dig_port(&encoder->base);
1839 
1840 	/* 1:1 mapping between ports and PLLs */
1841 	i = (enum intel_dpll_id) intel_dig_port->port;
1842 	pll = intel_get_shared_dpll_by_id(dev_priv, i);
1843 
1844 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1845 		      crtc->base.base.id, crtc->base.name, pll->name);
1846 
1847 	intel_reference_shared_dpll(pll, crtc_state);
1848 
1849 	return pll;
1850 }
1851 
1852 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1853 			      struct intel_dpll_hw_state *hw_state)
1854 {
1855 	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1856 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1857 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1858 		      hw_state->ebb0,
1859 		      hw_state->ebb4,
1860 		      hw_state->pll0,
1861 		      hw_state->pll1,
1862 		      hw_state->pll2,
1863 		      hw_state->pll3,
1864 		      hw_state->pll6,
1865 		      hw_state->pll8,
1866 		      hw_state->pll9,
1867 		      hw_state->pll10,
1868 		      hw_state->pcsdw12);
1869 }
1870 
1871 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1872 	.enable = bxt_ddi_pll_enable,
1873 	.disable = bxt_ddi_pll_disable,
1874 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1875 };
1876 
1877 static void intel_ddi_pll_init(struct drm_device *dev)
1878 {
1879 	struct drm_i915_private *dev_priv = to_i915(dev);
1880 
1881 	if (INTEL_GEN(dev_priv) < 9) {
1882 		uint32_t val = I915_READ(LCPLL_CTL);
1883 
1884 		/*
1885 		 * LCPLL should already have been enabled by the BIOS. For now,
1886 		 * just check its state and log errors if something looks wrong;
1887 		 * don't try to enable it here.
1888 		 */
1889 
1890 		if (val & LCPLL_CD_SOURCE_FCLK)
1891 			DRM_ERROR("CDCLK source is not LCPLL\n");
1892 
1893 		if (val & LCPLL_PLL_DISABLE)
1894 			DRM_ERROR("LCPLL is disabled\n");
1895 	}
1896 }
1897 
1898 struct dpll_info {
1899 	const char *name;				/* human-readable PLL name */
1900 	const int id;					/* enum intel_dpll_id; -1 terminates a table */
1901 	const struct intel_shared_dpll_funcs *funcs;	/* enable/disable/get_hw_state hooks */
1902 	uint32_t flags;					/* e.g. INTEL_DPLL_ALWAYS_ON */
1903 };
1904 
1905 struct intel_dpll_mgr {
1906 	const struct dpll_info *dpll_info;	/* table terminated by a negative id */
1907 
1908 	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
1909 					      struct intel_crtc_state *crtc_state,
1910 					      struct intel_encoder *encoder);
1911 
1912 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1913 			      struct intel_dpll_hw_state *hw_state);
1914 };
1915 
1916 static const struct dpll_info pch_plls[] = {
1917 	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
1918 	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
1919 	{ NULL, -1, NULL, 0 },
1920 };
1921 
1922 static const struct intel_dpll_mgr pch_pll_mgr = {
1923 	.dpll_info = pch_plls,
1924 	.get_dpll = ibx_get_dpll,
1925 	.dump_hw_state = ibx_dump_hw_state,
1926 };
1927 
1928 static const struct dpll_info hsw_plls[] = {
1929 	{ "WRPLL 1",    DPLL_ID_WRPLL1,     &hsw_ddi_wrpll_funcs, 0 },
1930 	{ "WRPLL 2",    DPLL_ID_WRPLL2,     &hsw_ddi_wrpll_funcs, 0 },
1931 	{ "SPLL",       DPLL_ID_SPLL,       &hsw_ddi_spll_funcs,  0 },
1932 	{ "LCPLL 810",  DPLL_ID_LCPLL_810,  &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1933 	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1934 	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1935 	{ NULL, -1, NULL, },
1936 };
1937 
1938 static const struct intel_dpll_mgr hsw_pll_mgr = {
1939 	.dpll_info = hsw_plls,
1940 	.get_dpll = hsw_get_dpll,
1941 	.dump_hw_state = hsw_dump_hw_state,
1942 };
1943 
1944 static const struct dpll_info skl_plls[] = {
1945 	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
1946 	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs,   0 },
1947 	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs,   0 },
1948 	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs,   0 },
1949 	{ NULL, -1, NULL, },
1950 };
1951 
1952 static const struct intel_dpll_mgr skl_pll_mgr = {
1953 	.dpll_info = skl_plls,
1954 	.get_dpll = skl_get_dpll,
1955 	.dump_hw_state = skl_dump_hw_state,
1956 };
1957 
1958 static const struct dpll_info bxt_plls[] = {
1959 	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
1960 	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
1961 	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
1962 	{ NULL, -1, NULL, },
1963 };
1964 
1965 static const struct intel_dpll_mgr bxt_pll_mgr = {
1966 	.dpll_info = bxt_plls,
1967 	.get_dpll = bxt_get_dpll,
1968 	.dump_hw_state = bxt_dump_hw_state,
1969 };
1970 
1971 /**
1972  * intel_shared_dpll_init - Initialize shared DPLLs
1973  * @dev: drm device
1974  *
1975  * Initialize shared DPLLs for @dev.
1976  */
1977 void intel_shared_dpll_init(struct drm_device *dev)
1978 {
1979 	struct drm_i915_private *dev_priv = to_i915(dev);
1980 	const struct intel_dpll_mgr *dpll_mgr = NULL;
1981 	const struct dpll_info *dpll_info;
1982 	int i;
1983 
1984 	if (IS_GEN9_BC(dev_priv))
1985 		dpll_mgr = &skl_pll_mgr;
1986 	else if (IS_GEN9_LP(dev_priv))
1987 		dpll_mgr = &bxt_pll_mgr;
1988 	else if (HAS_DDI(dev_priv))
1989 		dpll_mgr = &hsw_pll_mgr;
1990 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
1991 		dpll_mgr = &pch_pll_mgr;
1992 
1993 	if (!dpll_mgr) {
1994 		dev_priv->num_shared_dpll = 0;
1995 		return;
1996 	}
1997 
1998 	dpll_info = dpll_mgr->dpll_info;
1999 
2000 	for (i = 0; dpll_info[i].id >= 0; i++) {
2001 		WARN_ON(i != dpll_info[i].id);
2002 
2003 		dev_priv->shared_dplls[i].id = dpll_info[i].id;
2004 		dev_priv->shared_dplls[i].name = dpll_info[i].name;
2005 		dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
2006 		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
2007 	}
2008 
2009 	dev_priv->dpll_mgr = dpll_mgr;
2010 	dev_priv->num_shared_dpll = i;
2011 	lockinit(&dev_priv->dpll_lock, "dpll_lock", 0, LK_CANRECURSE);
2012 
2013 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
2014 
2015 	/* FIXME: Move this to a more suitable place */
2016 	if (HAS_DDI(dev_priv))
2017 		intel_ddi_pll_init(dev);
2018 }
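
/*
 * Illustrative sketch (not compiled): once intel_shared_dpll_init() has
 * populated dev_priv->shared_dplls, each PLL can be looked up by id and its
 * hardware state queried through the per-platform hooks copied from the
 * dpll_info table. The helper name below is hypothetical.
 */
#if 0
static void example_log_pll_states(struct drm_i915_private *dev_priv)
{
	enum intel_dpll_id id;

	for (id = 0; id < dev_priv->num_shared_dpll; id++) {
		struct intel_shared_dpll *pll =
			intel_get_shared_dpll_by_id(dev_priv, id);
		struct intel_dpll_hw_state hw_state;

		/* get_hw_state() reports whether the PLL is currently enabled. */
		if (pll->funcs.get_hw_state(dev_priv, pll, &hw_state))
			DRM_DEBUG_KMS("%s is enabled\n", pll->name);
	}
}
#endif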
2019 
2020 /**
2021  * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
2022  * @crtc: CRTC
2023  * @crtc_state: atomic state for @crtc
2024  * @encoder: encoder
2025  *
2026  * Find an appropriate DPLL for the given CRTC and encoder combination. A
2027  * reference from the @crtc to the returned pll is registered in the atomic
2028  * state. That configuration is made effective by calling
2029  * intel_shared_dpll_swap_state(). The reference should be released by calling
2030  * intel_release_shared_dpll().
2031  *
2032  * Returns:
2033  * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
2034  */
2035 struct intel_shared_dpll *
2036 intel_get_shared_dpll(struct intel_crtc *crtc,
2037 		      struct intel_crtc_state *crtc_state,
2038 		      struct intel_encoder *encoder)
2039 {
2040 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2041 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
2042 
2043 	if (WARN_ON(!dpll_mgr))
2044 		return NULL;
2045 
2046 	return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
2047 }
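
/*
 * Illustrative sketch (not compiled): the typical call sequence from a
 * platform's CRTC state computation, following the contract documented
 * above. Error handling and the surrounding atomic plumbing are omitted,
 * and the function name is hypothetical.
 */
#if 0
static int example_compute_crtc_pll(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;

	/* Drop any PLL reference staged for this CRTC by an earlier pass. */
	if (crtc_state->shared_dpll)
		intel_release_shared_dpll(crtc_state->shared_dpll, crtc,
					  crtc_state->base.state);

	/* Stage a reference to a suitable PLL in the atomic state. */
	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
	if (!pll)
		return -EINVAL;

	crtc_state->shared_dpll = pll;

	/*
	 * The staged references only take effect once
	 * intel_shared_dpll_swap_state() runs during the commit phase.
	 */
	return 0;
}
#endif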
2048 
2049 /**
2050  * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
2051  * @dpll: DPLL currently in use by @crtc
2052  * @crtc: CRTC releasing its reference to @dpll
2053  * @state: atomic state in which the release is staged
2054  *
2055  * This function drops the reference from @crtc to @dpll in the atomic
2056  * @state. The new configuration is made effective by calling
2057  * intel_shared_dpll_swap_state().
2058  */
2059 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
2060 			       struct intel_crtc *crtc,
2061 			       struct drm_atomic_state *state)
2062 {
2063 	struct intel_shared_dpll_state *shared_dpll_state;
2064 
2065 	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
2066 	shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
2067 }
2068 
2069 /**
2070  * intel_dpll_dump_hw_state - write hw_state to dmesg
2071  * @dev_priv: i915 drm device
2072  * @hw_state: hw state to be written to the log
2073  *
2074  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
2075  */
2076 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
2077 			      struct intel_dpll_hw_state *hw_state)
2078 {
2079 	if (dev_priv->dpll_mgr) {
2080 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
2081 	} else {
2082 		/* fallback for platforms that don't use the shared dpll
2083 		 * infrastructure
2084 		 */
2085 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
2086 			      "fp0: 0x%x, fp1: 0x%x\n",
2087 			      hw_state->dpll,
2088 			      hw_state->dpll_md,
2089 			      hw_state->fp0,
2090 			      hw_state->fp1);
2091 	}
2092 }
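
/*
 * Illustrative sketch (not compiled): a state checker comparing a computed
 * PLL state against what was read back from the hardware would typically
 * log both with intel_dpll_dump_hw_state() on a mismatch. The helper name
 * is hypothetical.
 */
#if 0
static void example_report_pll_mismatch(struct drm_i915_private *dev_priv,
					struct intel_dpll_hw_state *expected,
					struct intel_dpll_hw_state *found)
{
	DRM_DEBUG_KMS("PLL hw_state mismatch, expected then found:\n");
	intel_dpll_dump_hw_state(dev_priv, expected);
	intel_dpll_dump_hw_state(dev_priv, found);
}
#endif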
2093