1 /*
2 * Copyright © 2006-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26
27 #include "bxt_dpio_phy_regs.h"
28 #include "i915_reg.h"
29 #include "intel_de.h"
30 #include "intel_display_types.h"
31 #include "intel_dkl_phy.h"
32 #include "intel_dkl_phy_regs.h"
33 #include "intel_dpio_phy.h"
34 #include "intel_dpll.h"
35 #include "intel_dpll_mgr.h"
36 #include "intel_hti.h"
37 #include "intel_mg_phy_regs.h"
38 #include "intel_pch_refclk.h"
39 #include "intel_tc.h"
40
41 /**
42 * DOC: Display PLLs
43 *
44 * Display PLLs used for driving outputs vary by platform. While some have
45 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
46 * from a pool. In the latter scenario, it is possible that multiple pipes
47 * share a PLL if their configurations match.
48 *
49 * This file provides an abstraction over display PLLs. The function
50 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
51 * users of a PLL are tracked and that tracking is integrated with the atomic
52 * modeset interface. During an atomic operation, required PLLs can be reserved
53 * for a given CRTC and encoder configuration by calling
54 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
55 * with intel_release_shared_dplls().
56 * Changes to the users are first staged in the atomic state, and then made
57 * effective by calling intel_shared_dpll_swap_state() during the atomic
58 * commit phase.
59 */
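/*
 * A rough sketch of that flow, assuming the usual atomic check/commit split
 * (function names as declared in intel_dpll_mgr.h):
 *
 *   check phase:   intel_reserve_shared_dplls(state, crtc, encoder);
 *   commit phase:  intel_shared_dpll_swap_state(state);
 *                  intel_enable_shared_dpll(crtc_state);
 *   teardown:      intel_release_shared_dplls(state, crtc);
 */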
60
61 /* platform specific hooks for managing DPLLs */
62 struct intel_shared_dpll_funcs {
63 /*
64 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
65 * the pll is not already enabled.
66 */
67 void (*enable)(struct drm_i915_private *i915,
68 struct intel_shared_dpll *pll,
69 const struct intel_dpll_hw_state *dpll_hw_state);
70
71 /*
72 * Hook for disabling the pll, called from intel_disable_shared_dpll()
73 * only when it is safe to disable the pll, i.e., there are no more
74 * tracked users for it.
75 */
76 void (*disable)(struct drm_i915_private *i915,
77 struct intel_shared_dpll *pll);
78
79 /*
80 * Hook for reading the values currently programmed to the DPLL
81 * registers. This is used for initial hw state readout and state
82 * verification after a mode set.
83 */
84 bool (*get_hw_state)(struct drm_i915_private *i915,
85 struct intel_shared_dpll *pll,
86 struct intel_dpll_hw_state *dpll_hw_state);
87
88 /*
89 * Hook for calculating the pll's output frequency based on its passed
90 * in state.
91 */
92 int (*get_freq)(struct drm_i915_private *i915,
93 const struct intel_shared_dpll *pll,
94 const struct intel_dpll_hw_state *dpll_hw_state);
95 };
96
97 struct intel_dpll_mgr {
98 const struct dpll_info *dpll_info;
99
100 int (*compute_dplls)(struct intel_atomic_state *state,
101 struct intel_crtc *crtc,
102 struct intel_encoder *encoder);
103 int (*get_dplls)(struct intel_atomic_state *state,
104 struct intel_crtc *crtc,
105 struct intel_encoder *encoder);
106 void (*put_dplls)(struct intel_atomic_state *state,
107 struct intel_crtc *crtc);
108 void (*update_active_dpll)(struct intel_atomic_state *state,
109 struct intel_crtc *crtc,
110 struct intel_encoder *encoder);
111 void (*update_ref_clks)(struct drm_i915_private *i915);
112 void (*dump_hw_state)(struct drm_printer *p,
113 const struct intel_dpll_hw_state *dpll_hw_state);
114 bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
115 const struct intel_dpll_hw_state *b);
116 };
117
118 static void
119 intel_atomic_duplicate_dpll_state(struct drm_i915_private *i915,
120 struct intel_shared_dpll_state *shared_dpll)
121 {
122 struct intel_shared_dpll *pll;
123 int i;
124
125 /* Copy shared dpll state */
126 for_each_shared_dpll(i915, pll, i)
127 shared_dpll[pll->index] = pll->state;
128 }
129
130 static struct intel_shared_dpll_state *
131 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
132 {
133 struct intel_atomic_state *state = to_intel_atomic_state(s);
134
135 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
136
137 if (!state->dpll_set) {
138 state->dpll_set = true;
139
140 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
141 state->shared_dpll);
142 }
143
144 return state->shared_dpll;
145 }
146
147 /**
148 * intel_get_shared_dpll_by_id - get a DPLL given its id
149 * @i915: i915 device instance
150 * @id: pll id
151 *
152 * Returns:
153 * A pointer to the DPLL with @id, or NULL if it is not found.
154 */
155 struct intel_shared_dpll *
156 intel_get_shared_dpll_by_id(struct drm_i915_private *i915,
157 enum intel_dpll_id id)
158 {
159 struct intel_shared_dpll *pll;
160 int i;
161
162 for_each_shared_dpll(i915, pll, i) {
163 if (pll->info->id == id)
164 return pll;
165 }
166
167 MISSING_CASE(id);
168 return NULL;
169 }
170
171 /* For ILK+ */
172 void assert_shared_dpll(struct drm_i915_private *i915,
173 struct intel_shared_dpll *pll,
174 bool state)
175 {
176 bool cur_state;
177 struct intel_dpll_hw_state hw_state;
178
179 if (drm_WARN(&i915->drm, !pll,
180 "asserting DPLL %s with no DPLL\n", str_on_off(state)))
181 return;
182
183 cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
184 I915_STATE_WARN(i915, cur_state != state,
185 "%s assertion failure (expected %s, current %s)\n",
186 pll->info->name, str_on_off(state),
187 str_on_off(cur_state));
188 }
189
190 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
191 {
192 return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
193 }
194
195 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
196 {
197 return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
198 }
199
200 static i915_reg_t
201 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
202 struct intel_shared_dpll *pll)
203 {
204 if (IS_DG1(i915))
205 return DG1_DPLL_ENABLE(pll->info->id);
206 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
207 (pll->info->id == DPLL_ID_EHL_DPLL4))
208 return MG_PLL_ENABLE(0);
209
210 return ICL_DPLL_ENABLE(pll->info->id);
211 }
212
213 static i915_reg_t
214 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
215 struct intel_shared_dpll *pll)
216 {
217 const enum intel_dpll_id id = pll->info->id;
218 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
219
220 if (IS_ALDERLAKE_P(i915))
221 return ADLP_PORTTC_PLL_ENABLE(tc_port);
222
223 return MG_PLL_ENABLE(tc_port);
224 }
225
226 static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
227 struct intel_shared_dpll *pll)
228 {
229 if (pll->info->power_domain)
230 pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
231
232 pll->info->funcs->enable(i915, pll, &pll->state.hw_state);
233 pll->on = true;
234 }
235
236 static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
237 struct intel_shared_dpll *pll)
238 {
239 pll->info->funcs->disable(i915, pll);
240 pll->on = false;
241
242 if (pll->info->power_domain)
243 intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
244 }
245
246 /**
247 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
248 * @crtc_state: CRTC, and its state, which has a shared DPLL
249 *
250 * Enable the shared DPLL used by @crtc_state.
251 */
252 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
253 {
254 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
255 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
256 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
257 unsigned int pipe_mask = BIT(crtc->pipe);
258 unsigned int old_mask;
259
260 if (drm_WARN_ON(&i915->drm, pll == NULL))
261 return;
262
263 mutex_lock(&i915->display.dpll.lock);
264 old_mask = pll->active_mask;
265
266 if (drm_WARN_ON(&i915->drm, !(pll->state.pipe_mask & pipe_mask)) ||
267 drm_WARN_ON(&i915->drm, pll->active_mask & pipe_mask))
268 goto out;
269
270 pll->active_mask |= pipe_mask;
271
272 drm_dbg_kms(&i915->drm,
273 "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
274 pll->info->name, pll->active_mask, pll->on,
275 crtc->base.base.id, crtc->base.name);
276
277 if (old_mask) {
278 drm_WARN_ON(&i915->drm, !pll->on);
279 assert_shared_dpll_enabled(i915, pll);
280 goto out;
281 }
282 drm_WARN_ON(&i915->drm, pll->on);
283
284 drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
285
286 _intel_enable_shared_dpll(i915, pll);
287
288 out:
289 mutex_unlock(&i915->display.dpll.lock);
290 }
291
292 /**
293 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
294 * @crtc_state: CRTC, and its state, which has a shared DPLL
295 *
296 * Disable the shared DPLL used by @crtc_state.
297 */
298 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
299 {
300 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
301 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
302 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
303 unsigned int pipe_mask = BIT(crtc->pipe);
304
305 /* PCH only available on ILK+ */
306 if (DISPLAY_VER(i915) < 5)
307 return;
308
309 if (pll == NULL)
310 return;
311
312 mutex_lock(&i915->display.dpll.lock);
313 if (drm_WARN(&i915->drm, !(pll->active_mask & pipe_mask),
314 "%s not used by [CRTC:%d:%s]\n", pll->info->name,
315 crtc->base.base.id, crtc->base.name))
316 goto out;
317
318 drm_dbg_kms(&i915->drm,
319 "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
320 pll->info->name, pll->active_mask, pll->on,
321 crtc->base.base.id, crtc->base.name);
322
323 assert_shared_dpll_enabled(i915, pll);
324 drm_WARN_ON(&i915->drm, !pll->on);
325
326 pll->active_mask &= ~pipe_mask;
327 if (pll->active_mask)
328 goto out;
329
330 drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
331
332 _intel_disable_shared_dpll(i915, pll);
333
334 out:
335 mutex_unlock(&i915->display.dpll.lock);
336 }
337
338 static unsigned long
339 intel_dpll_mask_all(struct drm_i915_private *i915)
340 {
341 struct intel_shared_dpll *pll;
342 unsigned long dpll_mask = 0;
343 int i;
344
345 for_each_shared_dpll(i915, pll, i) {
346 drm_WARN_ON(&i915->drm, dpll_mask & BIT(pll->info->id));
347
348 dpll_mask |= BIT(pll->info->id);
349 }
350
351 return dpll_mask;
352 }
353
354 static struct intel_shared_dpll *
355 intel_find_shared_dpll(struct intel_atomic_state *state,
356 const struct intel_crtc *crtc,
357 const struct intel_dpll_hw_state *dpll_hw_state,
358 unsigned long dpll_mask)
359 {
360 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
361 unsigned long dpll_mask_all = intel_dpll_mask_all(i915);
362 struct intel_shared_dpll_state *shared_dpll;
363 struct intel_shared_dpll *unused_pll = NULL;
364 enum intel_dpll_id id;
365
366 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
367
368 drm_WARN_ON(&i915->drm, dpll_mask & ~dpll_mask_all);
369
370 for_each_set_bit(id, &dpll_mask, fls(dpll_mask_all)) {
371 struct intel_shared_dpll *pll;
372
373 pll = intel_get_shared_dpll_by_id(i915, id);
374 if (!pll)
375 continue;
376
377 /* Only compare against PLLs already in use; remember the first unused one as a fallback */
378 if (shared_dpll[pll->index].pipe_mask == 0) {
379 if (!unused_pll)
380 unused_pll = pll;
381 continue;
382 }
383
384 if (memcmp(dpll_hw_state,
385 &shared_dpll[pll->index].hw_state,
386 sizeof(*dpll_hw_state)) == 0) {
387 drm_dbg_kms(&i915->drm,
388 "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
389 crtc->base.base.id, crtc->base.name,
390 pll->info->name,
391 shared_dpll[pll->index].pipe_mask,
392 pll->active_mask);
393 return pll;
394 }
395 }
396
397 /* Ok no matching timings, maybe there's a free one? */
398 if (unused_pll) {
399 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] allocated %s\n",
400 crtc->base.base.id, crtc->base.name,
401 unused_pll->info->name);
402 return unused_pll;
403 }
404
405 return NULL;
406 }
407
408 /**
409 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
410 * @crtc: CRTC on which behalf the reference is taken
411 * @pll: DPLL for which the reference is taken
412 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
413 *
414 * Take a reference for @pll tracking the use of it by @crtc.
415 */
416 static void
417 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
418 const struct intel_shared_dpll *pll,
419 struct intel_shared_dpll_state *shared_dpll_state)
420 {
421 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
422
423 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
424
425 shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
426
427 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
428 crtc->base.base.id, crtc->base.name, pll->info->name);
429 }
430
431 static void
432 intel_reference_shared_dpll(struct intel_atomic_state *state,
433 const struct intel_crtc *crtc,
434 const struct intel_shared_dpll *pll,
435 const struct intel_dpll_hw_state *dpll_hw_state)
436 {
437 struct intel_shared_dpll_state *shared_dpll;
438
439 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
440
441 if (shared_dpll[pll->index].pipe_mask == 0)
442 shared_dpll[pll->index].hw_state = *dpll_hw_state;
443
444 intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
445 }
446
447 /**
448 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
449 * @crtc: CRTC on which behalf the reference is dropped
450 * @pll: DPLL for which the reference is dropped
451 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
452 *
453 * Drop a reference for @pll tracking the end of use of it by @crtc.
454 */
455 void
456 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
457 const struct intel_shared_dpll *pll,
458 struct intel_shared_dpll_state *shared_dpll_state)
459 {
460 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
461
462 drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
463
464 shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
465
466 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
467 crtc->base.base.id, crtc->base.name, pll->info->name);
468 }
469
470 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
471 const struct intel_crtc *crtc,
472 const struct intel_shared_dpll *pll)
473 {
474 struct intel_shared_dpll_state *shared_dpll;
475
476 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
477
478 intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[pll->index]);
479 }
480
481 static void intel_put_dpll(struct intel_atomic_state *state,
482 struct intel_crtc *crtc)
483 {
484 const struct intel_crtc_state *old_crtc_state =
485 intel_atomic_get_old_crtc_state(state, crtc);
486 struct intel_crtc_state *new_crtc_state =
487 intel_atomic_get_new_crtc_state(state, crtc);
488
489 new_crtc_state->shared_dpll = NULL;
490
491 if (!old_crtc_state->shared_dpll)
492 return;
493
494 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
495 }
496
497 /**
498 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
499 * @state: atomic state
500 *
501 * This is the dpll version of drm_atomic_helper_swap_state() since the
502 * helper does not handle driver-specific global state.
503 *
504 * For consistency with atomic helpers this function does a complete swap,
505 * i.e. it also puts the current state into @state, even though there is no
506 * need for that at this moment.
507 */
508 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
509 {
510 struct drm_i915_private *i915 = to_i915(state->base.dev);
511 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
512 struct intel_shared_dpll *pll;
513 int i;
514
515 if (!state->dpll_set)
516 return;
517
518 for_each_shared_dpll(i915, pll, i)
519 swap(pll->state, shared_dpll[pll->index]);
520 }
521
522 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
523 struct intel_shared_dpll *pll,
524 struct intel_dpll_hw_state *dpll_hw_state)
525 {
526 struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
527 const enum intel_dpll_id id = pll->info->id;
528 intel_wakeref_t wakeref;
529 u32 val;
530
531 wakeref = intel_display_power_get_if_enabled(i915,
532 POWER_DOMAIN_DISPLAY_CORE);
533 if (!wakeref)
534 return false;
535
536 val = intel_de_read(i915, PCH_DPLL(id));
537 hw_state->dpll = val;
538 hw_state->fp0 = intel_de_read(i915, PCH_FP0(id));
539 hw_state->fp1 = intel_de_read(i915, PCH_FP1(id));
540
541 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
542
543 return val & DPLL_VCO_ENABLE;
544 }
545
546 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
547 {
548 u32 val;
549 bool enabled;
550
551 val = intel_de_read(i915, PCH_DREF_CONTROL);
552 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
553 DREF_SUPERSPREAD_SOURCE_MASK));
554 I915_STATE_WARN(i915, !enabled,
555 "PCH refclk assertion failure, should be active but is disabled\n");
556 }
557
558 static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
559 struct intel_shared_dpll *pll,
560 const struct intel_dpll_hw_state *dpll_hw_state)
561 {
562 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
563 const enum intel_dpll_id id = pll->info->id;
564
565 /* PCH refclock must be enabled first */
566 ibx_assert_pch_refclk_enabled(i915);
567
568 intel_de_write(i915, PCH_FP0(id), hw_state->fp0);
569 intel_de_write(i915, PCH_FP1(id), hw_state->fp1);
570
571 intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
572
573 /* Wait for the clocks to stabilize. */
574 intel_de_posting_read(i915, PCH_DPLL(id));
575 udelay(150);
576
577 /* The pixel multiplier can only be updated once the
578 * DPLL is enabled and the clocks are stable.
579 *
580 * So write it again.
581 */
582 intel_de_write(i915, PCH_DPLL(id), hw_state->dpll);
583 intel_de_posting_read(i915, PCH_DPLL(id));
584 udelay(200);
585 }
586
587 static void ibx_pch_dpll_disable(struct drm_i915_private *i915,
588 struct intel_shared_dpll *pll)
589 {
590 const enum intel_dpll_id id = pll->info->id;
591
592 intel_de_write(i915, PCH_DPLL(id), 0);
593 intel_de_posting_read(i915, PCH_DPLL(id));
594 udelay(200);
595 }
596
597 static int ibx_compute_dpll(struct intel_atomic_state *state,
598 struct intel_crtc *crtc,
599 struct intel_encoder *encoder)
600 {
601 return 0;
602 }
603
604 static int ibx_get_dpll(struct intel_atomic_state *state,
605 struct intel_crtc *crtc,
606 struct intel_encoder *encoder)
607 {
608 struct intel_crtc_state *crtc_state =
609 intel_atomic_get_new_crtc_state(state, crtc);
610 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
611 struct intel_shared_dpll *pll;
612 enum intel_dpll_id id;
613
614 if (HAS_PCH_IBX(i915)) {
615 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
616 id = (enum intel_dpll_id) crtc->pipe;
617 pll = intel_get_shared_dpll_by_id(i915, id);
618
619 drm_dbg_kms(&i915->drm,
620 "[CRTC:%d:%s] using pre-allocated %s\n",
621 crtc->base.base.id, crtc->base.name,
622 pll->info->name);
623 } else {
624 pll = intel_find_shared_dpll(state, crtc,
625 &crtc_state->dpll_hw_state,
626 BIT(DPLL_ID_PCH_PLL_B) |
627 BIT(DPLL_ID_PCH_PLL_A));
628 }
629
630 if (!pll)
631 return -EINVAL;
632
633 /* reference the pll */
634 intel_reference_shared_dpll(state, crtc,
635 pll, &crtc_state->dpll_hw_state);
636
637 crtc_state->shared_dpll = pll;
638
639 return 0;
640 }
641
642 static void ibx_dump_hw_state(struct drm_printer *p,
643 const struct intel_dpll_hw_state *dpll_hw_state)
644 {
645 const struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
646
647 drm_printf(p, "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
648 "fp0: 0x%x, fp1: 0x%x\n",
649 hw_state->dpll,
650 hw_state->dpll_md,
651 hw_state->fp0,
652 hw_state->fp1);
653 }
654
655 static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *_a,
656 const struct intel_dpll_hw_state *_b)
657 {
658 const struct i9xx_dpll_hw_state *a = &_a->i9xx;
659 const struct i9xx_dpll_hw_state *b = &_b->i9xx;
660
661 return a->dpll == b->dpll &&
662 a->dpll_md == b->dpll_md &&
663 a->fp0 == b->fp0 &&
664 a->fp1 == b->fp1;
665 }
666
667 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
668 .enable = ibx_pch_dpll_enable,
669 .disable = ibx_pch_dpll_disable,
670 .get_hw_state = ibx_pch_dpll_get_hw_state,
671 };
672
673 static const struct dpll_info pch_plls[] = {
674 { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
675 { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
676 {}
677 };
678
679 static const struct intel_dpll_mgr pch_pll_mgr = {
680 .dpll_info = pch_plls,
681 .compute_dplls = ibx_compute_dpll,
682 .get_dplls = ibx_get_dpll,
683 .put_dplls = intel_put_dpll,
684 .dump_hw_state = ibx_dump_hw_state,
685 .compare_hw_state = ibx_compare_hw_state,
686 };
687
688 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
689 struct intel_shared_dpll *pll,
690 const struct intel_dpll_hw_state *dpll_hw_state)
691 {
692 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
693 const enum intel_dpll_id id = pll->info->id;
694
695 intel_de_write(i915, WRPLL_CTL(id), hw_state->wrpll);
696 intel_de_posting_read(i915, WRPLL_CTL(id));
697 udelay(20);
698 }
699
700 static void hsw_ddi_spll_enable(struct drm_i915_private *i915,
701 struct intel_shared_dpll *pll,
702 const struct intel_dpll_hw_state *dpll_hw_state)
703 {
704 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
705
706 intel_de_write(i915, SPLL_CTL, hw_state->spll);
707 intel_de_posting_read(i915, SPLL_CTL);
708 udelay(20);
709 }
710
711 static void hsw_ddi_wrpll_disable(struct drm_i915_private *i915,
712 struct intel_shared_dpll *pll)
713 {
714 const enum intel_dpll_id id = pll->info->id;
715
716 intel_de_rmw(i915, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
717 intel_de_posting_read(i915, WRPLL_CTL(id));
718
719 /*
720 * Try to set up the PCH reference clock once all DPLLs
721 * that depend on it have been shut down.
722 */
723 if (i915->display.dpll.pch_ssc_use & BIT(id))
724 intel_init_pch_refclk(i915);
725 }
726
727 static void hsw_ddi_spll_disable(struct drm_i915_private *i915,
728 struct intel_shared_dpll *pll)
729 {
730 enum intel_dpll_id id = pll->info->id;
731
732 intel_de_rmw(i915, SPLL_CTL, SPLL_PLL_ENABLE, 0);
733 intel_de_posting_read(i915, SPLL_CTL);
734
735 /*
736 * Try to set up the PCH reference clock once all DPLLs
737 * that depend on it have been shut down.
738 */
739 if (i915->display.dpll.pch_ssc_use & BIT(id))
740 intel_init_pch_refclk(i915);
741 }
742
743 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *i915,
744 struct intel_shared_dpll *pll,
745 struct intel_dpll_hw_state *dpll_hw_state)
746 {
747 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
748 const enum intel_dpll_id id = pll->info->id;
749 intel_wakeref_t wakeref;
750 u32 val;
751
752 wakeref = intel_display_power_get_if_enabled(i915,
753 POWER_DOMAIN_DISPLAY_CORE);
754 if (!wakeref)
755 return false;
756
757 val = intel_de_read(i915, WRPLL_CTL(id));
758 hw_state->wrpll = val;
759
760 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
761
762 return val & WRPLL_PLL_ENABLE;
763 }
764
765 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *i915,
766 struct intel_shared_dpll *pll,
767 struct intel_dpll_hw_state *dpll_hw_state)
768 {
769 struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
770 intel_wakeref_t wakeref;
771 u32 val;
772
773 wakeref = intel_display_power_get_if_enabled(i915,
774 POWER_DOMAIN_DISPLAY_CORE);
775 if (!wakeref)
776 return false;
777
778 val = intel_de_read(i915, SPLL_CTL);
779 hw_state->spll = val;
780
781 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
782
783 return val & SPLL_PLL_ENABLE;
784 }
785
786 #define LC_FREQ 2700
787 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
788
789 #define P_MIN 2
790 #define P_MAX 64
791 #define P_INC 2
792
793 /* Constraints for PLL good behavior */
794 #define REF_MIN 48
795 #define REF_MAX 400
796 #define VCO_MIN 2400
797 #define VCO_MAX 4800
798
799 struct hsw_wrpll_rnp {
800 unsigned p, n2, r2;
801 };
802
803 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
804 {
805 switch (clock) {
806 case 25175000:
807 case 25200000:
808 case 27000000:
809 case 27027000:
810 case 37762500:
811 case 37800000:
812 case 40500000:
813 case 40541000:
814 case 54000000:
815 case 54054000:
816 case 59341000:
817 case 59400000:
818 case 72000000:
819 case 74176000:
820 case 74250000:
821 case 81000000:
822 case 81081000:
823 case 89012000:
824 case 89100000:
825 case 108000000:
826 case 108108000:
827 case 111264000:
828 case 111375000:
829 case 148352000:
830 case 148500000:
831 case 162000000:
832 case 162162000:
833 case 222525000:
834 case 222750000:
835 case 296703000:
836 case 297000000:
837 return 0;
838 case 233500000:
839 case 245250000:
840 case 247750000:
841 case 253250000:
842 case 298000000:
843 return 1500;
844 case 169128000:
845 case 169500000:
846 case 179500000:
847 case 202000000:
848 return 2000;
849 case 256250000:
850 case 262500000:
851 case 270000000:
852 case 272500000:
853 case 273750000:
854 case 280750000:
855 case 281250000:
856 case 286000000:
857 case 291750000:
858 return 4000;
859 case 267250000:
860 case 268500000:
861 return 5000;
862 default:
863 return 1000;
864 }
865 }
866
867 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
868 unsigned int r2, unsigned int n2,
869 unsigned int p,
870 struct hsw_wrpll_rnp *best)
871 {
872 u64 a, b, c, d, diff, diff_best;
873
874 /* No best (r,n,p) yet */
875 if (best->p == 0) {
876 best->p = p;
877 best->n2 = n2;
878 best->r2 = r2;
879 return;
880 }
881
882 /*
883 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
884 * freq2k.
885 *
886 * delta = 1e6 *
887 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
888 * freq2k;
889 *
890 * and we would like delta <= budget.
891 *
892 * If the discrepancy is above the PPM-based budget, always prefer to
893 * improve upon the previous solution. However, if you're within the
894 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
895 */
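/*
 * Multiplying both sides of delta <= budget by freq2k * p * r2 gives the
 * equivalent, division-free test
 *
 *   1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2
 *
 * which is what the a/c (candidate) and b/d (current best) comparisons
 * below evaluate using 64-bit integer arithmetic only.
 */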
896 a = freq2k * budget * p * r2;
897 b = freq2k * budget * best->p * best->r2;
898 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
899 diff_best = abs_diff(freq2k * best->p * best->r2,
900 LC_FREQ_2K * best->n2);
901 c = 1000000 * diff;
902 d = 1000000 * diff_best;
903
904 if (a < c && b < d) {
905 /* If both are above the budget, pick the closer */
906 if (best->p * best->r2 * diff < p * r2 * diff_best) {
907 best->p = p;
908 best->n2 = n2;
909 best->r2 = r2;
910 }
911 } else if (a >= c && b < d) {
912 /* The new candidate is within the budget but the current best is not: take it */
913 best->p = p;
914 best->n2 = n2;
915 best->r2 = r2;
916 } else if (a >= c && b >= d) {
917 /* Both are within the budget, so pick the higher n2/(r2*r2) */
918 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
919 best->p = p;
920 best->n2 = n2;
921 best->r2 = r2;
922 }
923 }
924 /* Otherwise a < c && b >= d, do nothing */
925 }
926
927 static void
928 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
929 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
930 {
931 u64 freq2k;
932 unsigned p, n2, r2;
933 struct hsw_wrpll_rnp best = {};
934 unsigned budget;
935
936 freq2k = clock / 100;
937
938 budget = hsw_wrpll_get_budget_for_freq(clock);
939
940 /* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
941 * entirely and pass the LC PLL frequency straight through. */
942 if (freq2k == 5400000) {
943 *n2_out = 2;
944 *p_out = 1;
945 *r2_out = 2;
946 return;
947 }
948
949 /*
950 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
951 * the WR PLL.
952 *
953 * We want R so that REF_MIN <= Ref <= REF_MAX.
954 * Injecting R2 = 2 * R gives:
955 * REF_MAX * r2 > LC_FREQ * 2 and
956 * REF_MIN * r2 < LC_FREQ * 2
957 *
958 * Which means the desired boundaries for r2 are:
959 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
960 *
961 */
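/*
 * Numerically, with LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48 this
 * means r2 runs from 2 * 2700 / 400 + 1 = 14 up to 2 * 2700 / 48 = 112.
 */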
962 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
963 r2 <= LC_FREQ * 2 / REF_MIN;
964 r2++) {
965
966 /*
967 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
968 *
969 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
970 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
971 * VCO_MAX * r2 > n2 * LC_FREQ and
972 * VCO_MIN * r2 < n2 * LC_FREQ)
973 *
974 * Which means the desired boundaries for n2 are:
975 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
976 */
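/*
 * For the smallest r2 = 14 this gives n2 from 2400 * 14 / 2700 + 1 = 13
 * up to 4800 * 14 / 2700 = 24, with the range widening as r2 grows.
 */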
977 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
978 n2 <= VCO_MAX * r2 / LC_FREQ;
979 n2++) {
980
981 for (p = P_MIN; p <= P_MAX; p += P_INC)
982 hsw_wrpll_update_rnp(freq2k, budget,
983 r2, n2, p, &best);
984 }
985 }
986
987 *n2_out = best.n2;
988 *p_out = best.p;
989 *r2_out = best.r2;
990 }
991
992 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *i915,
993 const struct intel_shared_dpll *pll,
994 const struct intel_dpll_hw_state *dpll_hw_state)
995 {
996 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
997 int refclk;
998 int n, p, r;
999 u32 wrpll = hw_state->wrpll;
1000
1001 switch (wrpll & WRPLL_REF_MASK) {
1002 case WRPLL_REF_SPECIAL_HSW:
1003 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
1004 if (IS_HASWELL(i915) && !IS_HASWELL_ULT(i915)) {
1005 refclk = i915->display.dpll.ref_clks.nssc;
1006 break;
1007 }
1008 fallthrough;
1009 case WRPLL_REF_PCH_SSC:
1010 /*
1011 * We could calculate spread here, but our checking
1012 * code only cares about 5% accuracy, and spread is a max of
1013 * 0.5% downspread.
1014 */
1015 refclk = i915->display.dpll.ref_clks.ssc;
1016 break;
1017 case WRPLL_REF_LCPLL:
1018 refclk = 2700000;
1019 break;
1020 default:
1021 MISSING_CASE(wrpll);
1022 return 0;
1023 }
1024
1025 r = wrpll & WRPLL_DIVIDER_REF_MASK;
1026 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
1027 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
1028
1029 /* Convert to KHz, p & r have a fixed point portion */
1030 return (refclk * n / 10) / (p * r) * 2;
1031 }
1032
1033 static int
1034 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
1035 struct intel_crtc *crtc)
1036 {
1037 struct drm_i915_private *i915 = to_i915(state->base.dev);
1038 struct intel_crtc_state *crtc_state =
1039 intel_atomic_get_new_crtc_state(state, crtc);
1040 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1041 unsigned int p, n2, r2;
1042
1043 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
1044
1045 hw_state->wrpll =
1046 WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
1047 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
1048 WRPLL_DIVIDER_POST(p);
1049
1050 crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
1051 &crtc_state->dpll_hw_state);
1052
1053 return 0;
1054 }
1055
1056 static struct intel_shared_dpll *
1057 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
1058 struct intel_crtc *crtc)
1059 {
1060 struct intel_crtc_state *crtc_state =
1061 intel_atomic_get_new_crtc_state(state, crtc);
1062
1063 return intel_find_shared_dpll(state, crtc,
1064 &crtc_state->dpll_hw_state,
1065 BIT(DPLL_ID_WRPLL2) |
1066 BIT(DPLL_ID_WRPLL1));
1067 }
1068
1069 static int
1070 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
1071 {
1072 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1073 int clock = crtc_state->port_clock;
1074
1075 switch (clock / 2) {
1076 case 81000:
1077 case 135000:
1078 case 270000:
1079 return 0;
1080 default:
1081 drm_dbg_kms(&i915->drm, "Invalid clock for DP: %d\n",
1082 clock);
1083 return -EINVAL;
1084 }
1085 }
1086
1087 static struct intel_shared_dpll *
1088 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1089 {
1090 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1091 struct intel_shared_dpll *pll;
1092 enum intel_dpll_id pll_id;
1093 int clock = crtc_state->port_clock;
1094
1095 switch (clock / 2) {
1096 case 81000:
1097 pll_id = DPLL_ID_LCPLL_810;
1098 break;
1099 case 135000:
1100 pll_id = DPLL_ID_LCPLL_1350;
1101 break;
1102 case 270000:
1103 pll_id = DPLL_ID_LCPLL_2700;
1104 break;
1105 default:
1106 MISSING_CASE(clock / 2);
1107 return NULL;
1108 }
1109
1110 pll = intel_get_shared_dpll_by_id(i915, pll_id);
1111
1112 if (!pll)
1113 return NULL;
1114
1115 return pll;
1116 }
1117
1118 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1119 const struct intel_shared_dpll *pll,
1120 const struct intel_dpll_hw_state *dpll_hw_state)
1121 {
1122 int link_clock = 0;
1123
1124 switch (pll->info->id) {
1125 case DPLL_ID_LCPLL_810:
1126 link_clock = 81000;
1127 break;
1128 case DPLL_ID_LCPLL_1350:
1129 link_clock = 135000;
1130 break;
1131 case DPLL_ID_LCPLL_2700:
1132 link_clock = 270000;
1133 break;
1134 default:
1135 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1136 break;
1137 }
1138
1139 return link_clock * 2;
1140 }
1141
1142 static int
1143 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1144 struct intel_crtc *crtc)
1145 {
1146 struct intel_crtc_state *crtc_state =
1147 intel_atomic_get_new_crtc_state(state, crtc);
1148 struct hsw_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.hsw;
1149
1150 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1151 return -EINVAL;
1152
1153 hw_state->spll =
1154 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1155
1156 return 0;
1157 }
1158
1159 static struct intel_shared_dpll *
1160 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1161 struct intel_crtc *crtc)
1162 {
1163 struct intel_crtc_state *crtc_state =
1164 intel_atomic_get_new_crtc_state(state, crtc);
1165
1166 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1167 BIT(DPLL_ID_SPLL));
1168 }
1169
1170 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1171 const struct intel_shared_dpll *pll,
1172 const struct intel_dpll_hw_state *dpll_hw_state)
1173 {
1174 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1175 int link_clock = 0;
1176
1177 switch (hw_state->spll & SPLL_FREQ_MASK) {
1178 case SPLL_FREQ_810MHz:
1179 link_clock = 81000;
1180 break;
1181 case SPLL_FREQ_1350MHz:
1182 link_clock = 135000;
1183 break;
1184 case SPLL_FREQ_2700MHz:
1185 link_clock = 270000;
1186 break;
1187 default:
1188 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1189 break;
1190 }
1191
1192 return link_clock * 2;
1193 }
1194
1195 static int hsw_compute_dpll(struct intel_atomic_state *state,
1196 struct intel_crtc *crtc,
1197 struct intel_encoder *encoder)
1198 {
1199 struct intel_crtc_state *crtc_state =
1200 intel_atomic_get_new_crtc_state(state, crtc);
1201
1202 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1203 return hsw_ddi_wrpll_compute_dpll(state, crtc);
1204 else if (intel_crtc_has_dp_encoder(crtc_state))
1205 return hsw_ddi_lcpll_compute_dpll(crtc_state);
1206 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1207 return hsw_ddi_spll_compute_dpll(state, crtc);
1208 else
1209 return -EINVAL;
1210 }
1211
1212 static int hsw_get_dpll(struct intel_atomic_state *state,
1213 struct intel_crtc *crtc,
1214 struct intel_encoder *encoder)
1215 {
1216 struct intel_crtc_state *crtc_state =
1217 intel_atomic_get_new_crtc_state(state, crtc);
1218 struct intel_shared_dpll *pll = NULL;
1219
1220 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1221 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1222 else if (intel_crtc_has_dp_encoder(crtc_state))
1223 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1224 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1225 pll = hsw_ddi_spll_get_dpll(state, crtc);
1226
1227 if (!pll)
1228 return -EINVAL;
1229
1230 intel_reference_shared_dpll(state, crtc,
1231 pll, &crtc_state->dpll_hw_state);
1232
1233 crtc_state->shared_dpll = pll;
1234
1235 return 0;
1236 }
1237
1238 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1239 {
1240 i915->display.dpll.ref_clks.ssc = 135000;
1241 /* Non-SSC is only used on non-ULT HSW. */
1242 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1243 i915->display.dpll.ref_clks.nssc = 24000;
1244 else
1245 i915->display.dpll.ref_clks.nssc = 135000;
1246 }
1247
1248 static void hsw_dump_hw_state(struct drm_printer *p,
1249 const struct intel_dpll_hw_state *dpll_hw_state)
1250 {
1251 const struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
1252
1253 drm_printf(p, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1254 hw_state->wrpll, hw_state->spll);
1255 }
1256
1257 static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *_a,
1258 const struct intel_dpll_hw_state *_b)
1259 {
1260 const struct hsw_dpll_hw_state *a = &_a->hsw;
1261 const struct hsw_dpll_hw_state *b = &_b->hsw;
1262
1263 return a->wrpll == b->wrpll &&
1264 a->spll == b->spll;
1265 }
1266
1267 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1268 .enable = hsw_ddi_wrpll_enable,
1269 .disable = hsw_ddi_wrpll_disable,
1270 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1271 .get_freq = hsw_ddi_wrpll_get_freq,
1272 };
1273
1274 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1275 .enable = hsw_ddi_spll_enable,
1276 .disable = hsw_ddi_spll_disable,
1277 .get_hw_state = hsw_ddi_spll_get_hw_state,
1278 .get_freq = hsw_ddi_spll_get_freq,
1279 };
1280
1281 static void hsw_ddi_lcpll_enable(struct drm_i915_private *i915,
1282 struct intel_shared_dpll *pll,
1283 const struct intel_dpll_hw_state *hw_state)
1284 {
1285 }
1286
1287 static void hsw_ddi_lcpll_disable(struct drm_i915_private *i915,
1288 struct intel_shared_dpll *pll)
1289 {
1290 }
1291
1292 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *i915,
1293 struct intel_shared_dpll *pll,
1294 struct intel_dpll_hw_state *dpll_hw_state)
1295 {
1296 return true;
1297 }
1298
1299 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1300 .enable = hsw_ddi_lcpll_enable,
1301 .disable = hsw_ddi_lcpll_disable,
1302 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1303 .get_freq = hsw_ddi_lcpll_get_freq,
1304 };
1305
1306 static const struct dpll_info hsw_plls[] = {
1307 { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
1308 { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
1309 { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
1310 { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
1311 .always_on = true, },
1312 { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
1313 .always_on = true, },
1314 { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
1315 .always_on = true, },
1316 {}
1317 };
1318
1319 static const struct intel_dpll_mgr hsw_pll_mgr = {
1320 .dpll_info = hsw_plls,
1321 .compute_dplls = hsw_compute_dpll,
1322 .get_dplls = hsw_get_dpll,
1323 .put_dplls = intel_put_dpll,
1324 .update_ref_clks = hsw_update_dpll_ref_clks,
1325 .dump_hw_state = hsw_dump_hw_state,
1326 .compare_hw_state = hsw_compare_hw_state,
1327 };
1328
1329 struct skl_dpll_regs {
1330 i915_reg_t ctl, cfgcr1, cfgcr2;
1331 };
1332
1333 /* this array is indexed by the *shared* pll id */
1334 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1335 {
1336 /* DPLL 0 */
1337 .ctl = LCPLL1_CTL,
1338 /* DPLL 0 doesn't support HDMI mode */
1339 },
1340 {
1341 /* DPLL 1 */
1342 .ctl = LCPLL2_CTL,
1343 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1344 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1345 },
1346 {
1347 /* DPLL 2 */
1348 .ctl = WRPLL_CTL(0),
1349 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1350 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1351 },
1352 {
1353 /* DPLL 3 */
1354 .ctl = WRPLL_CTL(1),
1355 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1356 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1357 },
1358 };
1359
1360 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *i915,
1361 struct intel_shared_dpll *pll,
1362 const struct skl_dpll_hw_state *hw_state)
1363 {
1364 const enum intel_dpll_id id = pll->info->id;
1365
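/* Each DPLL owns a 6-bit control field in DPLL_CTRL1, hence the id * 6 shift. */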
1366 intel_de_rmw(i915, DPLL_CTRL1,
1367 DPLL_CTRL1_HDMI_MODE(id) |
1368 DPLL_CTRL1_SSC(id) |
1369 DPLL_CTRL1_LINK_RATE_MASK(id),
1370 hw_state->ctrl1 << (id * 6));
1371 intel_de_posting_read(i915, DPLL_CTRL1);
1372 }
1373
1374 static void skl_ddi_pll_enable(struct drm_i915_private *i915,
1375 struct intel_shared_dpll *pll,
1376 const struct intel_dpll_hw_state *dpll_hw_state)
1377 {
1378 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1379 const struct skl_dpll_regs *regs = skl_dpll_regs;
1380 const enum intel_dpll_id id = pll->info->id;
1381
1382 skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1383
1384 intel_de_write(i915, regs[id].cfgcr1, hw_state->cfgcr1);
1385 intel_de_write(i915, regs[id].cfgcr2, hw_state->cfgcr2);
1386 intel_de_posting_read(i915, regs[id].cfgcr1);
1387 intel_de_posting_read(i915, regs[id].cfgcr2);
1388
1389 /* the enable bit is always bit 31 */
1390 intel_de_rmw(i915, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
1391
1392 if (intel_de_wait_for_set(i915, DPLL_STATUS, DPLL_LOCK(id), 5))
1393 drm_err(&i915->drm, "DPLL %d not locked\n", id);
1394 }
1395
1396 static void skl_ddi_dpll0_enable(struct drm_i915_private *i915,
1397 struct intel_shared_dpll *pll,
1398 const struct intel_dpll_hw_state *dpll_hw_state)
1399 {
1400 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1401
1402 skl_ddi_pll_write_ctrl1(i915, pll, hw_state);
1403 }
1404
1405 static void skl_ddi_pll_disable(struct drm_i915_private *i915,
1406 struct intel_shared_dpll *pll)
1407 {
1408 const struct skl_dpll_regs *regs = skl_dpll_regs;
1409 const enum intel_dpll_id id = pll->info->id;
1410
1411 /* the enable bit is always bit 31 */
1412 intel_de_rmw(i915, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
1413 intel_de_posting_read(i915, regs[id].ctl);
1414 }
1415
1416 static void skl_ddi_dpll0_disable(struct drm_i915_private *i915,
1417 struct intel_shared_dpll *pll)
1418 {
1419 }
1420
1421 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *i915,
1422 struct intel_shared_dpll *pll,
1423 struct intel_dpll_hw_state *dpll_hw_state)
1424 {
1425 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1426 const struct skl_dpll_regs *regs = skl_dpll_regs;
1427 const enum intel_dpll_id id = pll->info->id;
1428 intel_wakeref_t wakeref;
1429 bool ret;
1430 u32 val;
1431
1432 wakeref = intel_display_power_get_if_enabled(i915,
1433 POWER_DOMAIN_DISPLAY_CORE);
1434 if (!wakeref)
1435 return false;
1436
1437 ret = false;
1438
1439 val = intel_de_read(i915, regs[id].ctl);
1440 if (!(val & LCPLL_PLL_ENABLE))
1441 goto out;
1442
1443 val = intel_de_read(i915, DPLL_CTRL1);
1444 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1445
1446 /* avoid reading back stale values if HDMI mode is not enabled */
1447 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1448 hw_state->cfgcr1 = intel_de_read(i915, regs[id].cfgcr1);
1449 hw_state->cfgcr2 = intel_de_read(i915, regs[id].cfgcr2);
1450 }
1451 ret = true;
1452
1453 out:
1454 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1455
1456 return ret;
1457 }
1458
1459 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *i915,
1460 struct intel_shared_dpll *pll,
1461 struct intel_dpll_hw_state *dpll_hw_state)
1462 {
1463 struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1464 const struct skl_dpll_regs *regs = skl_dpll_regs;
1465 const enum intel_dpll_id id = pll->info->id;
1466 intel_wakeref_t wakeref;
1467 u32 val;
1468 bool ret;
1469
1470 wakeref = intel_display_power_get_if_enabled(i915,
1471 POWER_DOMAIN_DISPLAY_CORE);
1472 if (!wakeref)
1473 return false;
1474
1475 ret = false;
1476
1477 /* DPLL0 is always enabled since it drives CDCLK */
1478 val = intel_de_read(i915, regs[id].ctl);
1479 if (drm_WARN_ON(&i915->drm, !(val & LCPLL_PLL_ENABLE)))
1480 goto out;
1481
1482 val = intel_de_read(i915, DPLL_CTRL1);
1483 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1484
1485 ret = true;
1486
1487 out:
1488 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1489
1490 return ret;
1491 }
1492
1493 struct skl_wrpll_context {
1494 u64 min_deviation; /* current minimal deviation */
1495 u64 central_freq; /* chosen central freq */
1496 u64 dco_freq; /* chosen dco freq */
1497 unsigned int p; /* chosen divider */
1498 };
1499
1500 /* DCO freq must be within +1%/-6% of the DCO central freq */
1501 #define SKL_DCO_MAX_PDEVIATION 100
1502 #define SKL_DCO_MAX_NDEVIATION 600
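/*
 * The deviation computed below is in units of 0.01%, so the limits above
 * correspond to +1% (100) and -6% (600). For example (hypothetical numbers),
 * a 9072 MHz DCO against a 9000 MHz central frequency gives a deviation of
 * 10000 * 72 / 9000 = 80, i.e. within the +1% limit.
 */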
1503
1504 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1505 u64 central_freq,
1506 u64 dco_freq,
1507 unsigned int divider)
1508 {
1509 u64 deviation;
1510
1511 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1512 central_freq);
1513
1514 /* positive deviation */
1515 if (dco_freq >= central_freq) {
1516 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1517 deviation < ctx->min_deviation) {
1518 ctx->min_deviation = deviation;
1519 ctx->central_freq = central_freq;
1520 ctx->dco_freq = dco_freq;
1521 ctx->p = divider;
1522 }
1523 /* negative deviation */
1524 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1525 deviation < ctx->min_deviation) {
1526 ctx->min_deviation = deviation;
1527 ctx->central_freq = central_freq;
1528 ctx->dco_freq = dco_freq;
1529 ctx->p = divider;
1530 }
1531 }
1532
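/*
 * Split the overall divider p into the three hardware divider factors,
 * p = p0 * p1 * p2 (pdiv, qdiv ratio and kdiv respectively). For example,
 * p = 42 takes the even path: half = 21, 21 % 3 == 0, so p0 = 3, p1 = 7,
 * p2 = 2 (and 3 * 7 * 2 = 42).
 */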
1533 static void skl_wrpll_get_multipliers(unsigned int p,
1534 unsigned int *p0 /* out */,
1535 unsigned int *p1 /* out */,
1536 unsigned int *p2 /* out */)
1537 {
1538 /* even dividers */
1539 if (p % 2 == 0) {
1540 unsigned int half = p / 2;
1541
1542 if (half == 1 || half == 2 || half == 3 || half == 5) {
1543 *p0 = 2;
1544 *p1 = 1;
1545 *p2 = half;
1546 } else if (half % 2 == 0) {
1547 *p0 = 2;
1548 *p1 = half / 2;
1549 *p2 = 2;
1550 } else if (half % 3 == 0) {
1551 *p0 = 3;
1552 *p1 = half / 3;
1553 *p2 = 2;
1554 } else if (half % 7 == 0) {
1555 *p0 = 7;
1556 *p1 = half / 7;
1557 *p2 = 2;
1558 }
1559 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1560 *p0 = 3;
1561 *p1 = 1;
1562 *p2 = p / 3;
1563 } else if (p == 5 || p == 7) {
1564 *p0 = p;
1565 *p1 = 1;
1566 *p2 = 1;
1567 } else if (p == 15) {
1568 *p0 = 3;
1569 *p1 = 1;
1570 *p2 = 5;
1571 } else if (p == 21) {
1572 *p0 = 7;
1573 *p1 = 1;
1574 *p2 = 3;
1575 } else if (p == 35) {
1576 *p0 = 7;
1577 *p1 = 1;
1578 *p2 = 5;
1579 }
1580 }
1581
1582 struct skl_wrpll_params {
1583 u32 dco_fraction;
1584 u32 dco_integer;
1585 u32 qdiv_ratio;
1586 u32 qdiv_mode;
1587 u32 kdiv;
1588 u32 pdiv;
1589 u32 central_freq;
1590 };
1591
1592 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1593 u64 afe_clock,
1594 int ref_clock,
1595 u64 central_freq,
1596 u32 p0, u32 p1, u32 p2)
1597 {
1598 u64 dco_freq;
1599
1600 switch (central_freq) {
1601 case 9600000000ULL:
1602 params->central_freq = 0;
1603 break;
1604 case 9000000000ULL:
1605 params->central_freq = 1;
1606 break;
1607 case 8400000000ULL:
1608 params->central_freq = 3;
1609 }
1610
1611 switch (p0) {
1612 case 1:
1613 params->pdiv = 0;
1614 break;
1615 case 2:
1616 params->pdiv = 1;
1617 break;
1618 case 3:
1619 params->pdiv = 2;
1620 break;
1621 case 7:
1622 params->pdiv = 4;
1623 break;
1624 default:
1625 WARN(1, "Incorrect PDiv\n");
1626 }
1627
1628 switch (p2) {
1629 case 5:
1630 params->kdiv = 0;
1631 break;
1632 case 2:
1633 params->kdiv = 1;
1634 break;
1635 case 3:
1636 params->kdiv = 2;
1637 break;
1638 case 1:
1639 params->kdiv = 3;
1640 break;
1641 default:
1642 WARN(1, "Incorrect KDiv\n");
1643 }
1644
1645 params->qdiv_ratio = p1;
1646 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1647
1648 dco_freq = p0 * p1 * p2 * afe_clock;
1649
1650 /*
1651 * Intermediate values are in Hz.
1652 * Divide by MHz to match bspec
1653 */
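/*
 * For example, with ref_clock = 24000 (kHz) and dco_freq = 9000000000 (Hz)
 * this yields dco_integer = 375 and dco_fraction = 0; the fraction is the
 * leftover MHz portion scaled by 2^15 (0x8000).
 */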
1654 params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1655 params->dco_fraction =
1656 div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1657 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1658 }
1659
1660 static int
1661 skl_ddi_calculate_wrpll(int clock,
1662 int ref_clock,
1663 struct skl_wrpll_params *wrpll_params)
1664 {
1665 static const u64 dco_central_freq[3] = { 8400000000ULL,
1666 9000000000ULL,
1667 9600000000ULL };
1668 static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1669 24, 28, 30, 32, 36, 40, 42, 44,
1670 48, 52, 54, 56, 60, 64, 66, 68,
1671 70, 72, 76, 78, 80, 84, 88, 90,
1672 92, 96, 98 };
1673 static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1674 static const struct {
1675 const u8 *list;
1676 int n_dividers;
1677 } dividers[] = {
1678 { even_dividers, ARRAY_SIZE(even_dividers) },
1679 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1680 };
1681 struct skl_wrpll_context ctx = {
1682 .min_deviation = U64_MAX,
1683 };
1684 unsigned int dco, d, i;
1685 unsigned int p0, p1, p2;
1686 u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
1687
1688 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1689 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1690 for (i = 0; i < dividers[d].n_dividers; i++) {
1691 unsigned int p = dividers[d].list[i];
1692 u64 dco_freq = p * afe_clock;
1693
1694 skl_wrpll_try_divider(&ctx,
1695 dco_central_freq[dco],
1696 dco_freq,
1697 p);
1698 /*
1699 * Skip the remaining dividers once we have
1700 * found the definitive divider; a deviation
1701 * of 0 cannot be improved upon.
1702 */
1703 if (ctx.min_deviation == 0)
1704 goto skip_remaining_dividers;
1705 }
1706 }
1707
1708 skip_remaining_dividers:
1709 /*
1710 * If a solution is found with an even divider, prefer
1711 * this one.
1712 */
1713 if (d == 0 && ctx.p)
1714 break;
1715 }
1716
1717 if (!ctx.p)
1718 return -EINVAL;
1719
1720 /*
1721 * gcc incorrectly analyses that these can be used without being
1722 * initialized. To be fair, it's hard to guess.
1723 */
1724 p0 = p1 = p2 = 0;
1725 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1726 skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1727 ctx.central_freq, p0, p1, p2);
1728
1729 return 0;
1730 }
1731
1732 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1733 const struct intel_shared_dpll *pll,
1734 const struct intel_dpll_hw_state *dpll_hw_state)
1735 {
1736 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1737 int ref_clock = i915->display.dpll.ref_clks.nssc;
1738 u32 p0, p1, p2, dco_freq;
1739
1740 p0 = hw_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1741 p2 = hw_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1742
1743 if (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1744 p1 = (hw_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1745 else
1746 p1 = 1;
1747
1748
1749 switch (p0) {
1750 case DPLL_CFGCR2_PDIV_1:
1751 p0 = 1;
1752 break;
1753 case DPLL_CFGCR2_PDIV_2:
1754 p0 = 2;
1755 break;
1756 case DPLL_CFGCR2_PDIV_3:
1757 p0 = 3;
1758 break;
1759 case DPLL_CFGCR2_PDIV_7_INVALID:
1760 /*
1761 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1762 		 * so handle it the same way as PDIV_7.
1763 */
1764 drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1765 fallthrough;
1766 case DPLL_CFGCR2_PDIV_7:
1767 p0 = 7;
1768 break;
1769 default:
1770 MISSING_CASE(p0);
1771 return 0;
1772 }
1773
1774 switch (p2) {
1775 case DPLL_CFGCR2_KDIV_5:
1776 p2 = 5;
1777 break;
1778 case DPLL_CFGCR2_KDIV_2:
1779 p2 = 2;
1780 break;
1781 case DPLL_CFGCR2_KDIV_3:
1782 p2 = 3;
1783 break;
1784 case DPLL_CFGCR2_KDIV_1:
1785 p2 = 1;
1786 break;
1787 default:
1788 MISSING_CASE(p2);
1789 return 0;
1790 }
1791
1792 dco_freq = (hw_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1793 ref_clock;
1794
1795 dco_freq += ((hw_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1796 ref_clock / 0x8000;
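	/*
	 * Illustrative numbers only: dco_integer == 337 and a DCO fraction of
	 * 0x4000 with a 24000 kHz reference give
	 * dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000 = 8100000 kHz;
	 * a total p0 * p1 * p2 of 6 then yields a port clock of
	 * 8100000 / (6 * 5) = 270000 kHz.
	 */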
1797
1798 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1799 return 0;
1800
1801 return dco_freq / (p0 * p1 * p2 * 5);
1802 }
1803
1804 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1805 {
1806 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1807 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1808 struct skl_wrpll_params wrpll_params = {};
1809 int ret;
1810
1811 ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
1812 i915->display.dpll.ref_clks.nssc, &wrpll_params);
1813 if (ret)
1814 return ret;
1815
1816 /*
1817 * See comment in intel_dpll_hw_state to understand why we always use 0
1818 * as the DPLL id in this function.
1819 */
1820 hw_state->ctrl1 =
1821 DPLL_CTRL1_OVERRIDE(0) |
1822 DPLL_CTRL1_HDMI_MODE(0);
1823
1824 hw_state->cfgcr1 =
1825 DPLL_CFGCR1_FREQ_ENABLE |
1826 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1827 wrpll_params.dco_integer;
1828
1829 hw_state->cfgcr2 =
1830 DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1831 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1832 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1833 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1834 wrpll_params.central_freq;
1835
1836 crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1837 &crtc_state->dpll_hw_state);
1838
1839 return 0;
1840 }
1841
1842 static int
1843 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1844 {
1845 struct skl_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.skl;
1846 u32 ctrl1;
1847
1848 /*
1849 * See comment in intel_dpll_hw_state to understand why we always use 0
1850 * as the DPLL id in this function.
1851 */
1852 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
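	/*
	 * The link rate settings refer to the PLL frequency, which appears to
	 * be half of the DP bit rate (data is clocked on both edges), hence
	 * the division of port_clock by 2 below: e.g. HBR2 at 5.4 GT/s
	 * (port_clock == 540000) maps to the 2700 MHz setting.
	 */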
1853 switch (crtc_state->port_clock / 2) {
1854 case 81000:
1855 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1856 break;
1857 case 135000:
1858 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1859 break;
1860 case 270000:
1861 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1862 break;
1863 /* eDP 1.4 rates */
1864 case 162000:
1865 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1866 break;
1867 case 108000:
1868 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1869 break;
1870 case 216000:
1871 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1872 break;
1873 }
1874
1875 hw_state->ctrl1 = ctrl1;
1876
1877 return 0;
1878 }
1879
1880 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1881 const struct intel_shared_dpll *pll,
1882 const struct intel_dpll_hw_state *dpll_hw_state)
1883 {
1884 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1885 int link_clock = 0;
1886
1887 switch ((hw_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1888 DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1889 case DPLL_CTRL1_LINK_RATE_810:
1890 link_clock = 81000;
1891 break;
1892 case DPLL_CTRL1_LINK_RATE_1080:
1893 link_clock = 108000;
1894 break;
1895 case DPLL_CTRL1_LINK_RATE_1350:
1896 link_clock = 135000;
1897 break;
1898 case DPLL_CTRL1_LINK_RATE_1620:
1899 link_clock = 162000;
1900 break;
1901 case DPLL_CTRL1_LINK_RATE_2160:
1902 link_clock = 216000;
1903 break;
1904 case DPLL_CTRL1_LINK_RATE_2700:
1905 link_clock = 270000;
1906 break;
1907 default:
1908 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1909 break;
1910 }
1911
1912 return link_clock * 2;
1913 }
1914
1915 static int skl_compute_dpll(struct intel_atomic_state *state,
1916 struct intel_crtc *crtc,
1917 struct intel_encoder *encoder)
1918 {
1919 struct intel_crtc_state *crtc_state =
1920 intel_atomic_get_new_crtc_state(state, crtc);
1921
1922 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1923 return skl_ddi_hdmi_pll_dividers(crtc_state);
1924 else if (intel_crtc_has_dp_encoder(crtc_state))
1925 return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1926 else
1927 return -EINVAL;
1928 }
1929
1930 static int skl_get_dpll(struct intel_atomic_state *state,
1931 struct intel_crtc *crtc,
1932 struct intel_encoder *encoder)
1933 {
1934 struct intel_crtc_state *crtc_state =
1935 intel_atomic_get_new_crtc_state(state, crtc);
1936 struct intel_shared_dpll *pll;
1937
1938 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1939 pll = intel_find_shared_dpll(state, crtc,
1940 &crtc_state->dpll_hw_state,
1941 BIT(DPLL_ID_SKL_DPLL0));
1942 else
1943 pll = intel_find_shared_dpll(state, crtc,
1944 &crtc_state->dpll_hw_state,
1945 BIT(DPLL_ID_SKL_DPLL3) |
1946 BIT(DPLL_ID_SKL_DPLL2) |
1947 BIT(DPLL_ID_SKL_DPLL1));
1948 if (!pll)
1949 return -EINVAL;
1950
1951 intel_reference_shared_dpll(state, crtc,
1952 pll, &crtc_state->dpll_hw_state);
1953
1954 crtc_state->shared_dpll = pll;
1955
1956 return 0;
1957 }
1958
1959 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1960 const struct intel_shared_dpll *pll,
1961 const struct intel_dpll_hw_state *dpll_hw_state)
1962 {
1963 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1964
1965 /*
1966 	 * The ctrl1 register is already shifted for each pll; just use 0 to get
1967 	 * the internal shift for each field.
1968 */
1969 if (hw_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1970 return skl_ddi_wrpll_get_freq(i915, pll, dpll_hw_state);
1971 else
1972 return skl_ddi_lcpll_get_freq(i915, pll, dpll_hw_state);
1973 }
1974
1975 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1976 {
1977 /* No SSC ref */
1978 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1979 }
1980
1981 static void skl_dump_hw_state(struct drm_printer *p,
1982 const struct intel_dpll_hw_state *dpll_hw_state)
1983 {
1984 const struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
1985
1986 drm_printf(p, "dpll_hw_state: ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1987 hw_state->ctrl1, hw_state->cfgcr1, hw_state->cfgcr2);
1988 }
1989
1990 static bool skl_compare_hw_state(const struct intel_dpll_hw_state *_a,
1991 const struct intel_dpll_hw_state *_b)
1992 {
1993 const struct skl_dpll_hw_state *a = &_a->skl;
1994 const struct skl_dpll_hw_state *b = &_b->skl;
1995
1996 return a->ctrl1 == b->ctrl1 &&
1997 a->cfgcr1 == b->cfgcr1 &&
1998 a->cfgcr2 == b->cfgcr2;
1999 }
2000
2001 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
2002 .enable = skl_ddi_pll_enable,
2003 .disable = skl_ddi_pll_disable,
2004 .get_hw_state = skl_ddi_pll_get_hw_state,
2005 .get_freq = skl_ddi_pll_get_freq,
2006 };
2007
2008 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
2009 .enable = skl_ddi_dpll0_enable,
2010 .disable = skl_ddi_dpll0_disable,
2011 .get_hw_state = skl_ddi_dpll0_get_hw_state,
2012 .get_freq = skl_ddi_pll_get_freq,
2013 };
2014
2015 static const struct dpll_info skl_plls[] = {
2016 { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
2017 .always_on = true, },
2018 { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2019 { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2020 { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
2021 {}
2022 };
2023
2024 static const struct intel_dpll_mgr skl_pll_mgr = {
2025 .dpll_info = skl_plls,
2026 .compute_dplls = skl_compute_dpll,
2027 .get_dplls = skl_get_dpll,
2028 .put_dplls = intel_put_dpll,
2029 .update_ref_clks = skl_update_dpll_ref_clks,
2030 .dump_hw_state = skl_dump_hw_state,
2031 .compare_hw_state = skl_compare_hw_state,
2032 };
2033
2034 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
2035 struct intel_shared_dpll *pll,
2036 const struct intel_dpll_hw_state *dpll_hw_state)
2037 {
2038 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2039 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2040 enum dpio_phy phy;
2041 enum dpio_channel ch;
2042 u32 temp;
2043
2044 bxt_port_to_phy_channel(i915, port, &phy, &ch);
2045
2046 /* Non-SSC reference */
2047 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
2048
2049 if (IS_GEMINILAKE(i915)) {
2050 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2051 0, PORT_PLL_POWER_ENABLE);
2052
2053 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2054 PORT_PLL_POWER_STATE), 200))
2055 drm_err(&i915->drm,
2056 "Power state not set for PLL:%d\n", port);
2057 }
2058
2059 /* Disable 10 bit clock */
2060 intel_de_rmw(i915, BXT_PORT_PLL_EBB_4(phy, ch),
2061 PORT_PLL_10BIT_CLK_ENABLE, 0);
2062
2063 /* Write P1 & P2 */
2064 intel_de_rmw(i915, BXT_PORT_PLL_EBB_0(phy, ch),
2065 PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, hw_state->ebb0);
2066
2067 /* Write M2 integer */
2068 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 0),
2069 PORT_PLL_M2_INT_MASK, hw_state->pll0);
2070
2071 /* Write N */
2072 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 1),
2073 PORT_PLL_N_MASK, hw_state->pll1);
2074
2075 /* Write M2 fraction */
2076 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 2),
2077 PORT_PLL_M2_FRAC_MASK, hw_state->pll2);
2078
2079 /* Write M2 fraction enable */
2080 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 3),
2081 PORT_PLL_M2_FRAC_ENABLE, hw_state->pll3);
2082
2083 /* Write coeff */
2084 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2085 temp &= ~PORT_PLL_PROP_COEFF_MASK;
2086 temp &= ~PORT_PLL_INT_COEFF_MASK;
2087 temp &= ~PORT_PLL_GAIN_CTL_MASK;
2088 temp |= hw_state->pll6;
2089 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 6), temp);
2090
2091 /* Write calibration val */
2092 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 8),
2093 PORT_PLL_TARGET_CNT_MASK, hw_state->pll8);
2094
2095 intel_de_rmw(i915, BXT_PORT_PLL(phy, ch, 9),
2096 PORT_PLL_LOCK_THRESHOLD_MASK, hw_state->pll9);
2097
2098 temp = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2099 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
2100 temp &= ~PORT_PLL_DCO_AMP_MASK;
2101 temp |= hw_state->pll10;
2102 intel_de_write(i915, BXT_PORT_PLL(phy, ch, 10), temp);
2103
2104 /* Recalibrate with new settings */
2105 temp = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2106 temp |= PORT_PLL_RECALIBRATE;
2107 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2108 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
2109 temp |= hw_state->ebb4;
2110 intel_de_write(i915, BXT_PORT_PLL_EBB_4(phy, ch), temp);
2111
2112 /* Enable PLL */
2113 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
2114 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2115
2116 if (wait_for_us((intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
2117 200))
2118 drm_err(&i915->drm, "PLL %d not locked\n", port);
2119
2120 if (IS_GEMINILAKE(i915)) {
2121 temp = intel_de_read(i915, BXT_PORT_TX_DW5_LN(phy, ch, 0));
2122 temp |= DCC_DELAY_RANGE_2;
2123 intel_de_write(i915, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2124 }
2125
2126 /*
2127 	 * While we write to the group register to program all lanes at once,
2128 	 * we can only read individual lane registers, so we pick lanes 0/1 for that.
2129 */
2130 temp = intel_de_read(i915, BXT_PORT_PCS_DW12_LN01(phy, ch));
2131 temp &= ~LANE_STAGGER_MASK;
2132 temp &= ~LANESTAGGER_STRAP_OVRD;
2133 temp |= hw_state->pcsdw12;
2134 intel_de_write(i915, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2135 }
2136
2137 static void bxt_ddi_pll_disable(struct drm_i915_private *i915,
2138 struct intel_shared_dpll *pll)
2139 {
2140 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2141
2142 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
2143 intel_de_posting_read(i915, BXT_PORT_PLL_ENABLE(port));
2144
2145 if (IS_GEMINILAKE(i915)) {
2146 intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port),
2147 PORT_PLL_POWER_ENABLE, 0);
2148
2149 if (wait_for_us(!(intel_de_read(i915, BXT_PORT_PLL_ENABLE(port)) &
2150 PORT_PLL_POWER_STATE), 200))
2151 drm_err(&i915->drm,
2152 "Power state not reset for PLL:%d\n", port);
2153 }
2154 }
2155
2156 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
2157 struct intel_shared_dpll *pll,
2158 struct intel_dpll_hw_state *dpll_hw_state)
2159 {
2160 struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2161 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2162 intel_wakeref_t wakeref;
2163 enum dpio_phy phy;
2164 enum dpio_channel ch;
2165 u32 val;
2166 bool ret;
2167
2168 bxt_port_to_phy_channel(i915, port, &phy, &ch);
2169
2170 wakeref = intel_display_power_get_if_enabled(i915,
2171 POWER_DOMAIN_DISPLAY_CORE);
2172 if (!wakeref)
2173 return false;
2174
2175 ret = false;
2176
2177 val = intel_de_read(i915, BXT_PORT_PLL_ENABLE(port));
2178 if (!(val & PORT_PLL_ENABLE))
2179 goto out;
2180
2181 hw_state->ebb0 = intel_de_read(i915, BXT_PORT_PLL_EBB_0(phy, ch));
2182 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2183
2184 hw_state->ebb4 = intel_de_read(i915, BXT_PORT_PLL_EBB_4(phy, ch));
2185 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2186
2187 hw_state->pll0 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 0));
2188 hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2189
2190 hw_state->pll1 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 1));
2191 hw_state->pll1 &= PORT_PLL_N_MASK;
2192
2193 hw_state->pll2 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 2));
2194 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2195
2196 hw_state->pll3 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 3));
2197 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2198
2199 hw_state->pll6 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 6));
2200 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2201 PORT_PLL_INT_COEFF_MASK |
2202 PORT_PLL_GAIN_CTL_MASK;
2203
2204 hw_state->pll8 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 8));
2205 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2206
2207 hw_state->pll9 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 9));
2208 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2209
2210 hw_state->pll10 = intel_de_read(i915, BXT_PORT_PLL(phy, ch, 10));
2211 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2212 PORT_PLL_DCO_AMP_MASK;
2213
2214 /*
2215 	 * While we write to the group register to program all lanes at once,
2216 	 * we can only read lane registers. We configure all lanes the same way, so
2217 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
2218 */
2219 hw_state->pcsdw12 = intel_de_read(i915,
2220 BXT_PORT_PCS_DW12_LN01(phy, ch));
2221 if (intel_de_read(i915, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2222 drm_dbg(&i915->drm,
2223 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2224 hw_state->pcsdw12,
2225 intel_de_read(i915,
2226 BXT_PORT_PCS_DW12_LN23(phy, ch)));
2227 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2228
2229 ret = true;
2230
2231 out:
2232 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2233
2234 return ret;
2235 }
2236
2237 /* pre-calculated values for DP link rates */
2238 static const struct dpll bxt_dp_clk_val[] = {
2239 /* m2 is .22 binary fixed point */
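	/* e.g. 0x6c00000 / 2^22 = 27.0 exactly and 0x819999a / 2^22 ~= 32.4 */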
2240 { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2241 { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2242 { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2243 { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2244 { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2245 { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2246 { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2247 };
2248
2249 static int
2250 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2251 struct dpll *clk_div)
2252 {
2253 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2254
2255 /* Calculate HDMI div */
2256 /*
2257 * FIXME: tie the following calculation into
2258 * i9xx_crtc_compute_clock
2259 */
2260 if (!bxt_find_best_dpll(crtc_state, clk_div))
2261 return -EINVAL;
2262
2263 drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2264
2265 return 0;
2266 }
2267
2268 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2269 struct dpll *clk_div)
2270 {
2271 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2272 int i;
2273
2274 *clk_div = bxt_dp_clk_val[0];
2275 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2276 if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2277 *clk_div = bxt_dp_clk_val[i];
2278 break;
2279 }
2280 }
2281
2282 chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2283
2284 drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2285 clk_div->dot != crtc_state->port_clock);
2286 }
2287
2288 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2289 const struct dpll *clk_div)
2290 {
2291 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2292 struct bxt_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.bxt;
2293 int clock = crtc_state->port_clock;
2294 int vco = clk_div->vco;
2295 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2296 u32 lanestagger;
2297
2298 if (vco >= 6200000 && vco <= 6700000) {
2299 prop_coef = 4;
2300 int_coef = 9;
2301 gain_ctl = 3;
2302 targ_cnt = 8;
2303 } else if ((vco > 5400000 && vco < 6200000) ||
2304 (vco >= 4800000 && vco < 5400000)) {
2305 prop_coef = 5;
2306 int_coef = 11;
2307 gain_ctl = 3;
2308 targ_cnt = 9;
2309 } else if (vco == 5400000) {
2310 prop_coef = 3;
2311 int_coef = 8;
2312 gain_ctl = 1;
2313 targ_cnt = 9;
2314 } else {
2315 drm_err(&i915->drm, "Invalid VCO\n");
2316 return -EINVAL;
2317 }
2318
2319 if (clock > 270000)
2320 lanestagger = 0x18;
2321 else if (clock > 135000)
2322 lanestagger = 0x0d;
2323 else if (clock > 67000)
2324 lanestagger = 0x07;
2325 else if (clock > 33000)
2326 lanestagger = 0x04;
2327 else
2328 lanestagger = 0x02;
2329
2330 hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
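	/*
	 * The .22 fixed point m2 is split below into its integer and
	 * fractional parts: e.g. m2 == 0x819999a (32.4) gives an integer
	 * part of 0x20 (32) and a fractional part of 0x19999a.
	 */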
2331 hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2332 hw_state->pll1 = PORT_PLL_N(clk_div->n);
2333 hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2334
2335 if (clk_div->m2 & 0x3fffff)
2336 hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2337
2338 hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2339 PORT_PLL_INT_COEFF(int_coef) |
2340 PORT_PLL_GAIN_CTL(gain_ctl);
2341
2342 hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2343
2344 hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2345
2346 hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2347 PORT_PLL_DCO_AMP_OVR_EN_H;
2348
2349 hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2350
2351 hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2352
2353 return 0;
2354 }
2355
2356 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2357 const struct intel_shared_dpll *pll,
2358 const struct intel_dpll_hw_state *dpll_hw_state)
2359 {
2360 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2361 struct dpll clock;
2362
2363 clock.m1 = 2;
2364 clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, hw_state->pll0) << 22;
2365 if (hw_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2366 clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
2367 hw_state->pll2);
2368 clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, hw_state->pll1);
2369 clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, hw_state->ebb0);
2370 clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, hw_state->ebb0);
2371
2372 return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2373 }
2374
2375 static int
2376 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2377 {
2378 struct dpll clk_div = {};
2379
2380 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2381
2382 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2383 }
2384
2385 static int
2386 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2387 {
2388 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2389 struct dpll clk_div = {};
2390 int ret;
2391
2392 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2393
2394 ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2395 if (ret)
2396 return ret;
2397
2398 crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2399 &crtc_state->dpll_hw_state);
2400
2401 return 0;
2402 }
2403
2404 static int bxt_compute_dpll(struct intel_atomic_state *state,
2405 struct intel_crtc *crtc,
2406 struct intel_encoder *encoder)
2407 {
2408 struct intel_crtc_state *crtc_state =
2409 intel_atomic_get_new_crtc_state(state, crtc);
2410
2411 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2412 return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2413 else if (intel_crtc_has_dp_encoder(crtc_state))
2414 return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2415 else
2416 return -EINVAL;
2417 }
2418
2419 static int bxt_get_dpll(struct intel_atomic_state *state,
2420 struct intel_crtc *crtc,
2421 struct intel_encoder *encoder)
2422 {
2423 struct intel_crtc_state *crtc_state =
2424 intel_atomic_get_new_crtc_state(state, crtc);
2425 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2426 struct intel_shared_dpll *pll;
2427 enum intel_dpll_id id;
2428
2429 /* 1:1 mapping between ports and PLLs */
2430 id = (enum intel_dpll_id) encoder->port;
2431 pll = intel_get_shared_dpll_by_id(i915, id);
2432
2433 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2434 crtc->base.base.id, crtc->base.name, pll->info->name);
2435
2436 intel_reference_shared_dpll(state, crtc,
2437 pll, &crtc_state->dpll_hw_state);
2438
2439 crtc_state->shared_dpll = pll;
2440
2441 return 0;
2442 }
2443
2444 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2445 {
2446 i915->display.dpll.ref_clks.ssc = 100000;
2447 i915->display.dpll.ref_clks.nssc = 100000;
2448 /* DSI non-SSC ref 19.2MHz */
2449 }
2450
2451 static void bxt_dump_hw_state(struct drm_printer *p,
2452 const struct intel_dpll_hw_state *dpll_hw_state)
2453 {
2454 const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
2455
2456 	drm_printf(p, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2457 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2458 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2459 hw_state->ebb0, hw_state->ebb4,
2460 hw_state->pll0, hw_state->pll1, hw_state->pll2, hw_state->pll3,
2461 hw_state->pll6, hw_state->pll8, hw_state->pll9, hw_state->pll10,
2462 hw_state->pcsdw12);
2463 }
2464
2465 static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *_a,
2466 const struct intel_dpll_hw_state *_b)
2467 {
2468 const struct bxt_dpll_hw_state *a = &_a->bxt;
2469 const struct bxt_dpll_hw_state *b = &_b->bxt;
2470
2471 return a->ebb0 == b->ebb0 &&
2472 a->ebb4 == b->ebb4 &&
2473 a->pll0 == b->pll0 &&
2474 a->pll1 == b->pll1 &&
2475 a->pll2 == b->pll2 &&
2476 a->pll3 == b->pll3 &&
2477 a->pll6 == b->pll6 &&
2478 a->pll8 == b->pll8 &&
		a->pll9 == b->pll9 &&
2479 		a->pll10 == b->pll10 &&
2480 a->pcsdw12 == b->pcsdw12;
2481 }
2482
2483 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2484 .enable = bxt_ddi_pll_enable,
2485 .disable = bxt_ddi_pll_disable,
2486 .get_hw_state = bxt_ddi_pll_get_hw_state,
2487 .get_freq = bxt_ddi_pll_get_freq,
2488 };
2489
2490 static const struct dpll_info bxt_plls[] = {
2491 { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
2492 { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
2493 { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
2494 {}
2495 };
2496
2497 static const struct intel_dpll_mgr bxt_pll_mgr = {
2498 .dpll_info = bxt_plls,
2499 .compute_dplls = bxt_compute_dpll,
2500 .get_dplls = bxt_get_dpll,
2501 .put_dplls = intel_put_dpll,
2502 .update_ref_clks = bxt_update_dpll_ref_clks,
2503 .dump_hw_state = bxt_dump_hw_state,
2504 .compare_hw_state = bxt_compare_hw_state,
2505 };
2506
2507 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2508 int *qdiv, int *kdiv)
2509 {
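	/*
	 * Decompose the total divider into pdiv * qdiv * kdiv. For example
	 * (illustrative value): bestdiv == 20 is even and divisible by 4,
	 * so pdiv = 2, qdiv = 20 / 4 = 5, kdiv = 2 (2 * 5 * 2 == 20).
	 */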
2510 /* even dividers */
2511 if (bestdiv % 2 == 0) {
2512 if (bestdiv == 2) {
2513 *pdiv = 2;
2514 *qdiv = 1;
2515 *kdiv = 1;
2516 } else if (bestdiv % 4 == 0) {
2517 *pdiv = 2;
2518 *qdiv = bestdiv / 4;
2519 *kdiv = 2;
2520 } else if (bestdiv % 6 == 0) {
2521 *pdiv = 3;
2522 *qdiv = bestdiv / 6;
2523 *kdiv = 2;
2524 } else if (bestdiv % 5 == 0) {
2525 *pdiv = 5;
2526 *qdiv = bestdiv / 10;
2527 *kdiv = 2;
2528 } else if (bestdiv % 14 == 0) {
2529 *pdiv = 7;
2530 *qdiv = bestdiv / 14;
2531 *kdiv = 2;
2532 }
2533 } else {
2534 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2535 *pdiv = bestdiv;
2536 *qdiv = 1;
2537 *kdiv = 1;
2538 } else { /* 9, 15, 21 */
2539 *pdiv = bestdiv / 3;
2540 *qdiv = 1;
2541 *kdiv = 3;
2542 }
2543 }
2544 }
2545
2546 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2547 u32 dco_freq, u32 ref_freq,
2548 int pdiv, int qdiv, int kdiv)
2549 {
2550 u32 dco;
2551
2552 switch (kdiv) {
2553 case 1:
2554 params->kdiv = 1;
2555 break;
2556 case 2:
2557 params->kdiv = 2;
2558 break;
2559 case 3:
2560 params->kdiv = 4;
2561 break;
2562 default:
2563 WARN(1, "Incorrect KDiv\n");
2564 }
2565
2566 switch (pdiv) {
2567 case 2:
2568 params->pdiv = 1;
2569 break;
2570 case 3:
2571 params->pdiv = 2;
2572 break;
2573 case 5:
2574 params->pdiv = 4;
2575 break;
2576 case 7:
2577 params->pdiv = 8;
2578 break;
2579 default:
2580 WARN(1, "Incorrect PDiv\n");
2581 }
2582
2583 WARN_ON(kdiv != 2 && qdiv != 1);
2584
2585 params->qdiv_ratio = qdiv;
2586 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2587
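	/*
	 * The DCO multiplier is stored as 15-bit fixed point. Illustrative
	 * example: dco_freq == 8100000 kHz with a 24000 kHz reference gives
	 * dco == 337.5 * 2^15, i.e. dco_integer = 337 (0x151) and
	 * dco_fraction = 0x4000.
	 */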
2588 dco = div_u64((u64)dco_freq << 15, ref_freq);
2589
2590 params->dco_integer = dco >> 15;
2591 params->dco_fraction = dco & 0x7fff;
2592 }
2593
2594 /*
2595 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2596 * Program half of the nominal DCO divider fraction value.
2597 */
2598 static bool
2599 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2600 {
2601 return ((IS_ELKHARTLAKE(i915) &&
2602 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2603 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2604 i915->display.dpll.ref_clks.nssc == 38400;
2605 }
2606
2607 struct icl_combo_pll_params {
2608 int clock;
2609 struct skl_wrpll_params wrpll;
2610 };
2611
2612 /*
2613  * These values are already adjusted: they're the bits we write to the
2614 * registers, not the logical values.
2615 */
2616 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2617 { 540000,
2618 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2619 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2620 { 270000,
2621 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2622 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2623 { 162000,
2624 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2625 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2626 { 324000,
2627 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2628 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2629 { 216000,
2630 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2631 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2632 { 432000,
2633 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2634 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2635 { 648000,
2636 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2637 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2638 { 810000,
2639 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2640 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2641 };
2642
2643
2644 /* Also used for 38.4 MHz values. */
2645 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2646 { 540000,
2647 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2648 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2649 { 270000,
2650 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2651 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2652 { 162000,
2653 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2654 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2655 { 324000,
2656 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2657 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2658 { 216000,
2659 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2660 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2661 { 432000,
2662 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2663 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2664 { 648000,
2665 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2666 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2667 { 810000,
2668 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2669 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2670 };
2671
2672 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2673 .dco_integer = 0x151, .dco_fraction = 0x4000,
2674 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2675 };
2676
2677 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2678 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2679 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2680 };
2681
2682 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2683 .dco_integer = 0x54, .dco_fraction = 0x3000,
2684 /* the following params are unused */
2685 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2686 };
2687
2688 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2689 .dco_integer = 0x43, .dco_fraction = 0x4000,
2690 /* the following params are unused */
2691 };
2692
2693 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2694 struct skl_wrpll_params *pll_params)
2695 {
2696 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2697 const struct icl_combo_pll_params *params =
2698 i915->display.dpll.ref_clks.nssc == 24000 ?
2699 icl_dp_combo_pll_24MHz_values :
2700 icl_dp_combo_pll_19_2MHz_values;
2701 int clock = crtc_state->port_clock;
2702 int i;
2703
2704 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2705 if (clock == params[i].clock) {
2706 *pll_params = params[i].wrpll;
2707 return 0;
2708 }
2709 }
2710
2711 MISSING_CASE(clock);
2712 return -EINVAL;
2713 }
2714
2715 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2716 struct skl_wrpll_params *pll_params)
2717 {
2718 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2719
2720 if (DISPLAY_VER(i915) >= 12) {
2721 switch (i915->display.dpll.ref_clks.nssc) {
2722 default:
2723 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2724 fallthrough;
2725 case 19200:
2726 case 38400:
2727 *pll_params = tgl_tbt_pll_19_2MHz_values;
2728 break;
2729 case 24000:
2730 *pll_params = tgl_tbt_pll_24MHz_values;
2731 break;
2732 }
2733 } else {
2734 switch (i915->display.dpll.ref_clks.nssc) {
2735 default:
2736 MISSING_CASE(i915->display.dpll.ref_clks.nssc);
2737 fallthrough;
2738 case 19200:
2739 case 38400:
2740 *pll_params = icl_tbt_pll_19_2MHz_values;
2741 break;
2742 case 24000:
2743 *pll_params = icl_tbt_pll_24MHz_values;
2744 break;
2745 }
2746 }
2747
2748 return 0;
2749 }
2750
2751 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2752 const struct intel_shared_dpll *pll,
2753 const struct intel_dpll_hw_state *dpll_hw_state)
2754 {
2755 /*
2756 * The PLL outputs multiple frequencies at the same time, selection is
2757 * made at DDI clock mux level.
2758 */
2759 drm_WARN_ON(&i915->drm, 1);
2760
2761 return 0;
2762 }
2763
2764 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2765 {
2766 int ref_clock = i915->display.dpll.ref_clks.nssc;
2767
2768 /*
2769 * For ICL+, the spec states: if reference frequency is 38.4,
2770 * use 19.2 because the DPLL automatically divides that by 2.
2771 */
2772 if (ref_clock == 38400)
2773 ref_clock = 19200;
2774
2775 return ref_clock;
2776 }
2777
2778 static int
2779 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2780 struct skl_wrpll_params *wrpll_params)
2781 {
2782 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2783 int ref_clock = icl_wrpll_ref_clock(i915);
2784 u32 afe_clock = crtc_state->port_clock * 5;
2785 u32 dco_min = 7998000;
2786 u32 dco_max = 10000000;
2787 u32 dco_mid = (dco_min + dco_max) / 2;
2788 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2789 18, 20, 24, 28, 30, 32, 36, 40,
2790 42, 44, 48, 50, 52, 54, 56, 60,
2791 64, 66, 68, 70, 72, 76, 78, 80,
2792 84, 88, 90, 92, 96, 98, 100, 102,
2793 3, 5, 7, 9, 15, 21 };
2794 u32 dco, best_dco = 0, dco_centrality = 0;
2795 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2796 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2797
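	/*
	 * Pick the divider that puts the DCO closest to the middle of its
	 * 7998-10000 MHz range. Illustrative example: a 594000 kHz HDMI
	 * clock gives afe_clock = 2970000 kHz, and only a divider of 3
	 * lands in range (dco = 8910000 kHz), so that is chosen.
	 */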
2798 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2799 dco = afe_clock * dividers[d];
2800
2801 if (dco <= dco_max && dco >= dco_min) {
2802 dco_centrality = abs(dco - dco_mid);
2803
2804 if (dco_centrality < best_dco_centrality) {
2805 best_dco_centrality = dco_centrality;
2806 best_div = dividers[d];
2807 best_dco = dco;
2808 }
2809 }
2810 }
2811
2812 if (best_div == 0)
2813 return -EINVAL;
2814
2815 icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2816 icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2817 pdiv, qdiv, kdiv);
2818
2819 return 0;
2820 }
2821
2822 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2823 const struct intel_shared_dpll *pll,
2824 const struct intel_dpll_hw_state *dpll_hw_state)
2825 {
2826 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2827 int ref_clock = icl_wrpll_ref_clock(i915);
2828 u32 dco_fraction;
2829 u32 p0, p1, p2, dco_freq;
2830
2831 p0 = hw_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2832 p2 = hw_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2833
2834 if (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2835 p1 = (hw_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2836 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2837 else
2838 p1 = 1;
2839
2840 switch (p0) {
2841 case DPLL_CFGCR1_PDIV_2:
2842 p0 = 2;
2843 break;
2844 case DPLL_CFGCR1_PDIV_3:
2845 p0 = 3;
2846 break;
2847 case DPLL_CFGCR1_PDIV_5:
2848 p0 = 5;
2849 break;
2850 case DPLL_CFGCR1_PDIV_7:
2851 p0 = 7;
2852 break;
2853 }
2854
2855 switch (p2) {
2856 case DPLL_CFGCR1_KDIV_1:
2857 p2 = 1;
2858 break;
2859 case DPLL_CFGCR1_KDIV_2:
2860 p2 = 2;
2861 break;
2862 case DPLL_CFGCR1_KDIV_3:
2863 p2 = 3;
2864 break;
2865 }
2866
2867 dco_freq = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2868 ref_clock;
2869
2870 dco_fraction = (hw_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2871 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2872
2873 if (ehl_combo_pll_div_frac_wa_needed(i915))
2874 dco_fraction *= 2;
2875
2876 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2877
2878 if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2879 return 0;
2880
2881 return dco_freq / (p0 * p1 * p2 * 5);
2882 }
2883
2884 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2885 const struct skl_wrpll_params *pll_params,
2886 struct intel_dpll_hw_state *dpll_hw_state)
2887 {
2888 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2889 u32 dco_fraction = pll_params->dco_fraction;
2890
2891 if (ehl_combo_pll_div_frac_wa_needed(i915))
2892 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2893
2894 hw_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2895 pll_params->dco_integer;
2896
2897 hw_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2898 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2899 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2900 DPLL_CFGCR1_PDIV(pll_params->pdiv);
2901
2902 if (DISPLAY_VER(i915) >= 12)
2903 hw_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2904 else
2905 hw_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2906
2907 if (i915->display.vbt.override_afc_startup)
2908 hw_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2909 }
2910
2911 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2912 u32 *target_dco_khz,
2913 struct icl_dpll_hw_state *hw_state,
2914 bool is_dkl)
2915 {
2916 static const u8 div1_vals[] = { 7, 5, 3, 2 };
2917 u32 dco_min_freq, dco_max_freq;
2918 unsigned int i;
2919 int div2;
2920
2921 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2922 dco_max_freq = is_dp ? 8100000 : 10000000;
2923
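	/*
	 * DCO = div1 * div2 * clock_khz * 5. Illustrative DP example: for
	 * HBR (clock_khz == 270000) the DCO must hit exactly 8100000 kHz,
	 * which the loop below finds with div1 == 3 and div2 == 2
	 * (3 * 2 * 270000 * 5 == 8100000).
	 */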
2924 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2925 int div1 = div1_vals[i];
2926
2927 for (div2 = 10; div2 > 0; div2--) {
2928 int dco = div1 * div2 * clock_khz * 5;
2929 int a_divratio, tlinedrv, inputsel;
2930 u32 hsdiv;
2931
2932 if (dco < dco_min_freq || dco > dco_max_freq)
2933 continue;
2934
2935 if (div2 >= 2) {
2936 /*
2937 				 * Note: a_divratio does not match the TGL BSpec
2938 				 * algorithm, but it matches the hardcoded values and
2939 				 * works on HW, at least for DP alt-mode
2940 */
2941 a_divratio = is_dp ? 10 : 5;
2942 tlinedrv = is_dkl ? 1 : 2;
2943 } else {
2944 a_divratio = 5;
2945 tlinedrv = 0;
2946 }
2947 inputsel = is_dp ? 0 : 1;
2948
2949 switch (div1) {
2950 default:
2951 MISSING_CASE(div1);
2952 fallthrough;
2953 case 2:
2954 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2955 break;
2956 case 3:
2957 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2958 break;
2959 case 5:
2960 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2961 break;
2962 case 7:
2963 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2964 break;
2965 }
2966
2967 *target_dco_khz = dco;
2968
2969 hw_state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2970
2971 hw_state->mg_clktop2_coreclkctl1 =
2972 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2973
2974 hw_state->mg_clktop2_hsclkctl =
2975 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2976 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2977 hsdiv |
2978 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2979
2980 return 0;
2981 }
2982 }
2983
2984 return -EINVAL;
2985 }
2986
2987 /*
2988 * The specification for this function uses real numbers, so the math had to be
2989 * adapted to integer-only calculation, that's why it looks so different.
2990 */
2991 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2992 struct intel_dpll_hw_state *dpll_hw_state)
2993 {
2994 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2995 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
2996 int refclk_khz = i915->display.dpll.ref_clks.nssc;
2997 int clock = crtc_state->port_clock;
2998 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2999 u32 iref_ndiv, iref_trim, iref_pulse_w;
3000 u32 prop_coeff, int_coeff;
3001 u32 tdc_targetcnt, feedfwgain;
3002 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3003 u64 tmp;
3004 bool use_ssc = false;
3005 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3006 bool is_dkl = DISPLAY_VER(i915) >= 12;
3007 int ret;
3008
3009 ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3010 hw_state, is_dkl);
3011 if (ret)
3012 return ret;
3013
3014 m1div = 2;
3015 m2div_int = dco_khz / (refclk_khz * m1div);
3016 if (m2div_int > 255) {
3017 if (!is_dkl) {
3018 m1div = 4;
3019 m2div_int = dco_khz / (refclk_khz * m1div);
3020 }
3021
3022 if (m2div_int > 255)
3023 return -EINVAL;
3024 }
3025 m2div_rem = dco_khz % (refclk_khz * m1div);
3026
3027 tmp = (u64)m2div_rem * (1 << 22);
3028 do_div(tmp, refclk_khz * m1div);
3029 m2div_frac = tmp;
3030
3031 switch (refclk_khz) {
3032 case 19200:
3033 iref_ndiv = 1;
3034 iref_trim = 28;
3035 iref_pulse_w = 1;
3036 break;
3037 case 24000:
3038 iref_ndiv = 1;
3039 iref_trim = 25;
3040 iref_pulse_w = 2;
3041 break;
3042 case 38400:
3043 iref_ndiv = 2;
3044 iref_trim = 28;
3045 iref_pulse_w = 1;
3046 break;
3047 default:
3048 MISSING_CASE(refclk_khz);
3049 return -EINVAL;
3050 }
3051
3052 /*
3053 * tdc_res = 0.000003
3054 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3055 *
3056 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3057 * was supposed to be a division, but we rearranged the operations of
3058 * the formula to avoid early divisions so we don't multiply the
3059 * rounding errors.
3060 *
3061 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3062 * we also rearrange to work with integers.
3063 *
3064 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3065 * last division by 10.
3066 */
3067 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
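	/* For example, refclk_khz == 24000 evaluates to tdc_targetcnt == 63. */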
3068
3069 /*
3070 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3071 * 32 bits. That's not a problem since we round the division down
3072 * anyway.
3073 */
3074 feedfwgain = (use_ssc || m2div_rem > 0) ?
3075 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3076
3077 if (dco_khz >= 9000000) {
3078 prop_coeff = 5;
3079 int_coeff = 10;
3080 } else {
3081 prop_coeff = 4;
3082 int_coeff = 8;
3083 }
3084
3085 if (use_ssc) {
3086 tmp = mul_u32_u32(dco_khz, 47 * 32);
3087 do_div(tmp, refclk_khz * m1div * 10000);
3088 ssc_stepsize = tmp;
3089
3090 tmp = mul_u32_u32(dco_khz, 1000);
3091 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3092 } else {
3093 ssc_stepsize = 0;
3094 ssc_steplen = 0;
3095 }
3096 ssc_steplog = 4;
3097
3098 /* write pll_state calculations */
3099 if (is_dkl) {
3100 hw_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3101 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3102 DKL_PLL_DIV0_FBPREDIV(m1div) |
3103 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3104 if (i915->display.vbt.override_afc_startup) {
3105 u8 val = i915->display.vbt.override_afc_startup_val;
3106
3107 hw_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
3108 }
3109
3110 hw_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3111 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3112
3113 hw_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3114 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3115 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3116 (use_ssc ? DKL_PLL_SSC_EN : 0);
3117
3118 hw_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3119 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3120
3121 hw_state->mg_pll_tdc_coldst_bias =
3122 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3123 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3124
3125 } else {
3126 hw_state->mg_pll_div0 =
3127 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3128 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3129 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3130
3131 hw_state->mg_pll_div1 =
3132 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3133 MG_PLL_DIV1_DITHER_DIV_2 |
3134 MG_PLL_DIV1_NDIVRATIO(1) |
3135 MG_PLL_DIV1_FBPREDIV(m1div);
3136
3137 hw_state->mg_pll_lf =
3138 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3139 MG_PLL_LF_AFCCNTSEL_512 |
3140 MG_PLL_LF_GAINCTRL(1) |
3141 MG_PLL_LF_INT_COEFF(int_coeff) |
3142 MG_PLL_LF_PROP_COEFF(prop_coeff);
3143
3144 hw_state->mg_pll_frac_lock =
3145 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3146 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3147 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3148 MG_PLL_FRAC_LOCK_DCODITHEREN |
3149 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3150 if (use_ssc || m2div_rem > 0)
3151 hw_state->mg_pll_frac_lock |=
3152 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3153
3154 hw_state->mg_pll_ssc =
3155 (use_ssc ? MG_PLL_SSC_EN : 0) |
3156 MG_PLL_SSC_TYPE(2) |
3157 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3158 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3159 MG_PLL_SSC_FLLEN |
3160 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3161
3162 hw_state->mg_pll_tdc_coldst_bias =
3163 MG_PLL_TDC_COLDST_COLDSTART |
3164 MG_PLL_TDC_COLDST_IREFINT_EN |
3165 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3166 MG_PLL_TDC_TDCOVCCORR_EN |
3167 MG_PLL_TDC_TDCSEL(3);
3168
3169 hw_state->mg_pll_bias =
3170 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3171 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3172 MG_PLL_BIAS_BIAS_BONUS(10) |
3173 MG_PLL_BIAS_BIASCAL_EN |
3174 MG_PLL_BIAS_CTRIM(12) |
3175 MG_PLL_BIAS_VREF_RDAC(4) |
3176 MG_PLL_BIAS_IREFTRIM(iref_trim);
3177
3178 if (refclk_khz == 38400) {
3179 hw_state->mg_pll_tdc_coldst_bias_mask =
3180 MG_PLL_TDC_COLDST_COLDSTART;
3181 hw_state->mg_pll_bias_mask = 0;
3182 } else {
3183 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3184 hw_state->mg_pll_bias_mask = -1U;
3185 }
3186
3187 hw_state->mg_pll_tdc_coldst_bias &=
3188 hw_state->mg_pll_tdc_coldst_bias_mask;
3189 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3190 }
3191
3192 return 0;
3193 }
3194
3195 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *i915,
3196 const struct intel_shared_dpll *pll,
3197 const struct intel_dpll_hw_state *dpll_hw_state)
3198 {
3199 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3200 u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3201 u64 tmp;
3202
3203 ref_clock = i915->display.dpll.ref_clks.nssc;
3204
3205 if (DISPLAY_VER(i915) >= 12) {
3206 m1 = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3207 m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3208 m2_int = hw_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3209
3210 if (hw_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3211 m2_frac = hw_state->mg_pll_bias &
3212 DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3213 m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3214 } else {
3215 m2_frac = 0;
3216 }
3217 } else {
3218 m1 = hw_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3219 m2_int = hw_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3220
3221 if (hw_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3222 m2_frac = hw_state->mg_pll_div0 &
3223 MG_PLL_DIV0_FBDIV_FRAC_MASK;
3224 m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3225 } else {
3226 m2_frac = 0;
3227 }
3228 }
3229
3230 switch (hw_state->mg_clktop2_hsclkctl &
3231 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3232 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3233 div1 = 2;
3234 break;
3235 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3236 div1 = 3;
3237 break;
3238 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3239 div1 = 5;
3240 break;
3241 case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3242 div1 = 7;
3243 break;
3244 default:
3245 MISSING_CASE(hw_state->mg_clktop2_hsclkctl);
3246 return 0;
3247 }
3248
3249 div2 = (hw_state->mg_clktop2_hsclkctl &
3250 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3251 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3252
3253 	/* A div2 value of 0 is the same as 1, i.e. no divider */
3254 if (div2 == 0)
3255 div2 = 1;
3256
3257 /*
3258 * Adjust the original formula to delay the division by 2^22 in order to
3259 * minimize possible rounding errors.
3260 */
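	/*
	 * Equivalently, the formula being computed is:
	 *   freq = m1 * (m2_int + m2_frac / 2^22) * ref_clock / (5 * div1 * div2)
	 */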
3261 tmp = (u64)m1 * m2_int * ref_clock +
3262 (((u64)m1 * m2_frac * ref_clock) >> 22);
3263 tmp = div_u64(tmp, 5 * div1 * div2);
3264
3265 return tmp;
3266 }
3267
3268 /**
3269 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3270 * @crtc_state: state for the CRTC to select the DPLL for
3271 * @port_dpll_id: the active @port_dpll_id to select
3272 *
3273 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3274 * CRTC.
3275 */
3276 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3277 enum icl_port_dpll_id port_dpll_id)
3278 {
3279 struct icl_port_dpll *port_dpll =
3280 &crtc_state->icl_port_dplls[port_dpll_id];
3281
3282 crtc_state->shared_dpll = port_dpll->pll;
3283 crtc_state->dpll_hw_state = port_dpll->hw_state;
3284 }
3285
3286 static void icl_update_active_dpll(struct intel_atomic_state *state,
3287 struct intel_crtc *crtc,
3288 struct intel_encoder *encoder)
3289 {
3290 struct intel_crtc_state *crtc_state =
3291 intel_atomic_get_new_crtc_state(state, crtc);
3292 struct intel_digital_port *primary_port;
3293 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3294
3295 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3296 enc_to_mst(encoder)->primary :
3297 enc_to_dig_port(encoder);
3298
3299 if (primary_port &&
3300 (intel_tc_port_in_dp_alt_mode(primary_port) ||
3301 intel_tc_port_in_legacy_mode(primary_port)))
3302 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3303
3304 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3305 }
3306
3307 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3308 struct intel_crtc *crtc)
3309 {
3310 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3311 struct intel_crtc_state *crtc_state =
3312 intel_atomic_get_new_crtc_state(state, crtc);
3313 struct icl_port_dpll *port_dpll =
3314 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3315 struct skl_wrpll_params pll_params = {};
3316 int ret;
3317
3318 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3319 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3320 ret = icl_calc_wrpll(crtc_state, &pll_params);
3321 else
3322 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3323
3324 if (ret)
3325 return ret;
3326
3327 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3328
3329 /* this is mainly for the fastset check */
3330 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3331
3332 crtc_state->port_clock = icl_ddi_combo_pll_get_freq(i915, NULL,
3333 &port_dpll->hw_state);
3334
3335 return 0;
3336 }
3337
3338 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3339 struct intel_crtc *crtc,
3340 struct intel_encoder *encoder)
3341 {
3342 struct intel_display *display = to_intel_display(crtc);
3343 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3344 struct intel_crtc_state *crtc_state =
3345 intel_atomic_get_new_crtc_state(state, crtc);
3346 struct icl_port_dpll *port_dpll =
3347 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3348 enum port port = encoder->port;
3349 unsigned long dpll_mask;
3350
3351 if (IS_ALDERLAKE_S(i915)) {
3352 dpll_mask =
3353 BIT(DPLL_ID_DG1_DPLL3) |
3354 BIT(DPLL_ID_DG1_DPLL2) |
3355 BIT(DPLL_ID_ICL_DPLL1) |
3356 BIT(DPLL_ID_ICL_DPLL0);
3357 } else if (IS_DG1(i915)) {
3358 if (port == PORT_D || port == PORT_E) {
3359 dpll_mask =
3360 BIT(DPLL_ID_DG1_DPLL2) |
3361 BIT(DPLL_ID_DG1_DPLL3);
3362 } else {
3363 dpll_mask =
3364 BIT(DPLL_ID_DG1_DPLL0) |
3365 BIT(DPLL_ID_DG1_DPLL1);
3366 }
3367 } else if (IS_ROCKETLAKE(i915)) {
3368 dpll_mask =
3369 BIT(DPLL_ID_EHL_DPLL4) |
3370 BIT(DPLL_ID_ICL_DPLL1) |
3371 BIT(DPLL_ID_ICL_DPLL0);
3372 } else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3373 port != PORT_A) {
3374 dpll_mask =
3375 BIT(DPLL_ID_EHL_DPLL4) |
3376 BIT(DPLL_ID_ICL_DPLL1) |
3377 BIT(DPLL_ID_ICL_DPLL0);
3378 } else {
3379 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3380 }
3381
3382 /* Eliminate DPLLs from consideration if reserved by HTI */
3383 dpll_mask &= ~intel_hti_dpll_mask(display);
3384
3385 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3386 &port_dpll->hw_state,
3387 dpll_mask);
3388 if (!port_dpll->pll)
3389 return -EINVAL;
3390
3391 intel_reference_shared_dpll(state, crtc,
3392 port_dpll->pll, &port_dpll->hw_state);
3393
3394 icl_update_active_dpll(state, crtc, encoder);
3395
3396 return 0;
3397 }
3398
3399 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3400 struct intel_crtc *crtc)
3401 {
3402 struct drm_i915_private *i915 = to_i915(state->base.dev);
3403 struct intel_crtc_state *crtc_state =
3404 intel_atomic_get_new_crtc_state(state, crtc);
3405 const struct intel_crtc_state *old_crtc_state =
3406 intel_atomic_get_old_crtc_state(state, crtc);
3407 struct icl_port_dpll *port_dpll =
3408 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3409 struct skl_wrpll_params pll_params = {};
3410 int ret;
3411
3412 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3413 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3414 if (ret)
3415 return ret;
3416
3417 icl_calc_dpll_state(i915, &pll_params, &port_dpll->hw_state);
3418
3419 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3420 ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3421 if (ret)
3422 return ret;
3423
3424 /* this is mainly for the fastset check */
3425 if (old_crtc_state->shared_dpll &&
3426 old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
3427 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3428 else
3429 icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3430
3431 crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
3432 &port_dpll->hw_state);
3433
3434 return 0;
3435 }
3436
3437 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3438 struct intel_crtc *crtc,
3439 struct intel_encoder *encoder)
3440 {
3441 struct intel_crtc_state *crtc_state =
3442 intel_atomic_get_new_crtc_state(state, crtc);
3443 struct icl_port_dpll *port_dpll =
3444 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3445 enum intel_dpll_id dpll_id;
3446 int ret;
3447
3448 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3449 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3450 &port_dpll->hw_state,
3451 BIT(DPLL_ID_ICL_TBTPLL));
3452 if (!port_dpll->pll)
3453 return -EINVAL;
3454 intel_reference_shared_dpll(state, crtc,
3455 port_dpll->pll, &port_dpll->hw_state);
3456
3457
3458 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3459 dpll_id = icl_tc_port_to_pll_id(intel_encoder_to_tc(encoder));
3460 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3461 &port_dpll->hw_state,
3462 BIT(dpll_id));
3463 if (!port_dpll->pll) {
3464 ret = -EINVAL;
3465 goto err_unreference_tbt_pll;
3466 }
3467 intel_reference_shared_dpll(state, crtc,
3468 port_dpll->pll, &port_dpll->hw_state);
3469
3470 icl_update_active_dpll(state, crtc, encoder);
3471
3472 return 0;
3473
3474 err_unreference_tbt_pll:
3475 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3476 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3477
3478 return ret;
3479 }
3480
3481 static int icl_compute_dplls(struct intel_atomic_state *state,
3482 struct intel_crtc *crtc,
3483 struct intel_encoder *encoder)
3484 {
3485 if (intel_encoder_is_combo(encoder))
3486 return icl_compute_combo_phy_dpll(state, crtc);
3487 else if (intel_encoder_is_tc(encoder))
3488 return icl_compute_tc_phy_dplls(state, crtc);
3489
3490 MISSING_CASE(encoder->port);
3491
3492 return 0;
3493 }
3494
3495 static int icl_get_dplls(struct intel_atomic_state *state,
3496 struct intel_crtc *crtc,
3497 struct intel_encoder *encoder)
3498 {
3499 if (intel_encoder_is_combo(encoder))
3500 return icl_get_combo_phy_dpll(state, crtc, encoder);
3501 else if (intel_encoder_is_tc(encoder))
3502 return icl_get_tc_phy_dplls(state, crtc, encoder);
3503
3504 MISSING_CASE(encoder->port);
3505
3506 return -EINVAL;
3507 }
3508
3509 static void icl_put_dplls(struct intel_atomic_state *state,
3510 struct intel_crtc *crtc)
3511 {
3512 const struct intel_crtc_state *old_crtc_state =
3513 intel_atomic_get_old_crtc_state(state, crtc);
3514 struct intel_crtc_state *new_crtc_state =
3515 intel_atomic_get_new_crtc_state(state, crtc);
3516 enum icl_port_dpll_id id;
3517
3518 new_crtc_state->shared_dpll = NULL;
3519
3520 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3521 const struct icl_port_dpll *old_port_dpll =
3522 &old_crtc_state->icl_port_dplls[id];
3523 struct icl_port_dpll *new_port_dpll =
3524 &new_crtc_state->icl_port_dplls[id];
3525
3526 new_port_dpll->pll = NULL;
3527
3528 if (!old_port_dpll->pll)
3529 continue;
3530
3531 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3532 }
3533 }
3534
3535 static bool mg_pll_get_hw_state(struct drm_i915_private *i915,
3536 struct intel_shared_dpll *pll,
3537 struct intel_dpll_hw_state *dpll_hw_state)
3538 {
3539 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3540 const enum intel_dpll_id id = pll->info->id;
3541 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3542 intel_wakeref_t wakeref;
3543 bool ret = false;
3544 u32 val;
3545
3546 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
3547
3548 wakeref = intel_display_power_get_if_enabled(i915,
3549 POWER_DOMAIN_DISPLAY_CORE);
3550 if (!wakeref)
3551 return false;
3552
3553 val = intel_de_read(i915, enable_reg);
3554 if (!(val & PLL_ENABLE))
3555 goto out;
3556
3557 hw_state->mg_refclkin_ctl = intel_de_read(i915,
3558 MG_REFCLKIN_CTL(tc_port));
3559 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3560
3561 hw_state->mg_clktop2_coreclkctl1 =
3562 intel_de_read(i915, MG_CLKTOP2_CORECLKCTL1(tc_port));
3563 hw_state->mg_clktop2_coreclkctl1 &=
3564 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3565
3566 hw_state->mg_clktop2_hsclkctl =
3567 intel_de_read(i915, MG_CLKTOP2_HSCLKCTL(tc_port));
3568 hw_state->mg_clktop2_hsclkctl &=
3569 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3570 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3571 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3572 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3573
3574 hw_state->mg_pll_div0 = intel_de_read(i915, MG_PLL_DIV0(tc_port));
3575 hw_state->mg_pll_div1 = intel_de_read(i915, MG_PLL_DIV1(tc_port));
3576 hw_state->mg_pll_lf = intel_de_read(i915, MG_PLL_LF(tc_port));
3577 hw_state->mg_pll_frac_lock = intel_de_read(i915,
3578 MG_PLL_FRAC_LOCK(tc_port));
3579 hw_state->mg_pll_ssc = intel_de_read(i915, MG_PLL_SSC(tc_port));
3580
3581 hw_state->mg_pll_bias = intel_de_read(i915, MG_PLL_BIAS(tc_port));
3582 hw_state->mg_pll_tdc_coldst_bias =
3583 intel_de_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3584
3585 if (i915->display.dpll.ref_clks.nssc == 38400) {
3586 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3587 hw_state->mg_pll_bias_mask = 0;
3588 } else {
3589 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3590 hw_state->mg_pll_bias_mask = -1U;
3591 }
3592
3593 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3594 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3595
3596 ret = true;
3597 out:
3598 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3599 return ret;
3600 }
3601
3602 static bool dkl_pll_get_hw_state(struct drm_i915_private *i915,
3603 struct intel_shared_dpll *pll,
3604 struct intel_dpll_hw_state *dpll_hw_state)
3605 {
3606 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3607 const enum intel_dpll_id id = pll->info->id;
3608 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3609 intel_wakeref_t wakeref;
3610 bool ret = false;
3611 u32 val;
3612
3613 wakeref = intel_display_power_get_if_enabled(i915,
3614 POWER_DOMAIN_DISPLAY_CORE);
3615 if (!wakeref)
3616 return false;
3617
3618 val = intel_de_read(i915, intel_tc_pll_enable_reg(i915, pll));
3619 if (!(val & PLL_ENABLE))
3620 goto out;
3621
3622 /*
3623 * All registers read here have the same HIP_INDEX_REG even though
3624 * they are on different building blocks
3625 */
3626 hw_state->mg_refclkin_ctl = intel_dkl_phy_read(i915,
3627 DKL_REFCLKIN_CTL(tc_port));
3628 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3629
3630 hw_state->mg_clktop2_hsclkctl =
3631 intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3632 hw_state->mg_clktop2_hsclkctl &=
3633 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3634 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3635 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3636 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3637
3638 hw_state->mg_clktop2_coreclkctl1 =
3639 intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3640 hw_state->mg_clktop2_coreclkctl1 &=
3641 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3642
3643 hw_state->mg_pll_div0 = intel_dkl_phy_read(i915, DKL_PLL_DIV0(tc_port));
3644 val = DKL_PLL_DIV0_MASK;
3645 if (i915->display.vbt.override_afc_startup)
3646 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3647 hw_state->mg_pll_div0 &= val;
3648
3649 hw_state->mg_pll_div1 = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3650 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3651 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3652
3653 hw_state->mg_pll_ssc = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3654 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3655 DKL_PLL_SSC_STEP_LEN_MASK |
3656 DKL_PLL_SSC_STEP_NUM_MASK |
3657 DKL_PLL_SSC_EN);
3658
3659 hw_state->mg_pll_bias = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3660 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3661 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3662
3663 hw_state->mg_pll_tdc_coldst_bias =
3664 intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3665 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3666 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3667
3668 ret = true;
3669 out:
3670 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3671 return ret;
3672 }
3673
3674 static bool icl_pll_get_hw_state(struct drm_i915_private *i915,
3675 struct intel_shared_dpll *pll,
3676 struct intel_dpll_hw_state *dpll_hw_state,
3677 i915_reg_t enable_reg)
3678 {
3679 struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3680 const enum intel_dpll_id id = pll->info->id;
3681 intel_wakeref_t wakeref;
3682 bool ret = false;
3683 u32 val;
3684
3685 wakeref = intel_display_power_get_if_enabled(i915,
3686 POWER_DOMAIN_DISPLAY_CORE);
3687 if (!wakeref)
3688 return false;
3689
3690 val = intel_de_read(i915, enable_reg);
3691 if (!(val & PLL_ENABLE))
3692 goto out;
3693
3694 if (IS_ALDERLAKE_S(i915)) {
3695 hw_state->cfgcr0 = intel_de_read(i915, ADLS_DPLL_CFGCR0(id));
3696 hw_state->cfgcr1 = intel_de_read(i915, ADLS_DPLL_CFGCR1(id));
3697 } else if (IS_DG1(i915)) {
3698 hw_state->cfgcr0 = intel_de_read(i915, DG1_DPLL_CFGCR0(id));
3699 hw_state->cfgcr1 = intel_de_read(i915, DG1_DPLL_CFGCR1(id));
3700 } else if (IS_ROCKETLAKE(i915)) {
3701 hw_state->cfgcr0 = intel_de_read(i915,
3702 RKL_DPLL_CFGCR0(id));
3703 hw_state->cfgcr1 = intel_de_read(i915,
3704 RKL_DPLL_CFGCR1(id));
3705 } else if (DISPLAY_VER(i915) >= 12) {
3706 hw_state->cfgcr0 = intel_de_read(i915,
3707 TGL_DPLL_CFGCR0(id));
3708 hw_state->cfgcr1 = intel_de_read(i915,
3709 TGL_DPLL_CFGCR1(id));
3710 if (i915->display.vbt.override_afc_startup) {
3711 hw_state->div0 = intel_de_read(i915, TGL_DPLL0_DIV0(id));
3712 hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3713 }
3714 } else {
3715 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3716 id == DPLL_ID_EHL_DPLL4) {
3717 hw_state->cfgcr0 = intel_de_read(i915,
3718 ICL_DPLL_CFGCR0(4));
3719 hw_state->cfgcr1 = intel_de_read(i915,
3720 ICL_DPLL_CFGCR1(4));
3721 } else {
3722 hw_state->cfgcr0 = intel_de_read(i915,
3723 ICL_DPLL_CFGCR0(id));
3724 hw_state->cfgcr1 = intel_de_read(i915,
3725 ICL_DPLL_CFGCR1(id));
3726 }
3727 }
3728
3729 ret = true;
3730 out:
3731 intel_display_power_put(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3732 return ret;
3733 }
3734
3735 static bool combo_pll_get_hw_state(struct drm_i915_private *i915,
3736 struct intel_shared_dpll *pll,
3737 struct intel_dpll_hw_state *dpll_hw_state)
3738 {
3739 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3740
3741 return icl_pll_get_hw_state(i915, pll, dpll_hw_state, enable_reg);
3742 }
3743
3744 static bool tbt_pll_get_hw_state(struct drm_i915_private *i915,
3745 struct intel_shared_dpll *pll,
3746 struct intel_dpll_hw_state *dpll_hw_state)
3747 {
3748 return icl_pll_get_hw_state(i915, pll, dpll_hw_state, TBT_PLL_ENABLE);
3749 }
3750
3751 static void icl_dpll_write(struct drm_i915_private *i915,
3752 struct intel_shared_dpll *pll,
3753 const struct icl_dpll_hw_state *hw_state)
3754 {
3755 const enum intel_dpll_id id = pll->info->id;
3756 i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3757
3758 if (IS_ALDERLAKE_S(i915)) {
3759 cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3760 cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3761 } else if (IS_DG1(i915)) {
3762 cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3763 cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3764 } else if (IS_ROCKETLAKE(i915)) {
3765 cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3766 cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3767 } else if (DISPLAY_VER(i915) >= 12) {
3768 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3769 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3770 div0_reg = TGL_DPLL0_DIV0(id);
3771 } else {
3772 if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
3773 id == DPLL_ID_EHL_DPLL4) {
3774 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3775 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3776 } else {
3777 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3778 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3779 }
3780 }
3781
3782 intel_de_write(i915, cfgcr0_reg, hw_state->cfgcr0);
3783 intel_de_write(i915, cfgcr1_reg, hw_state->cfgcr1);
3784 drm_WARN_ON_ONCE(&i915->drm, i915->display.vbt.override_afc_startup &&
3785 !i915_mmio_reg_valid(div0_reg));
3786 if (i915->display.vbt.override_afc_startup &&
3787 i915_mmio_reg_valid(div0_reg))
3788 intel_de_rmw(i915, div0_reg,
3789 TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
3790 intel_de_posting_read(i915, cfgcr1_reg);
3791 }
3792
3793 static void icl_mg_pll_write(struct drm_i915_private *i915,
3794 struct intel_shared_dpll *pll,
3795 const struct icl_dpll_hw_state *hw_state)
3796 {
3797 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3798
3799 /*
3800 * Some of the following registers have reserved fields, so program
3801 * these with RMW based on a mask. The mask can be fixed or generated
3802 * during the calc/readout phase if the mask depends on some other HW
3803 * state like refclk, see icl_calc_mg_pll_state().
3804 */
3805 intel_de_rmw(i915, MG_REFCLKIN_CTL(tc_port),
3806 MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);
3807
3808 intel_de_rmw(i915, MG_CLKTOP2_CORECLKCTL1(tc_port),
3809 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
3810 hw_state->mg_clktop2_coreclkctl1);
3811
3812 intel_de_rmw(i915, MG_CLKTOP2_HSCLKCTL(tc_port),
3813 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3814 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3815 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3816 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
3817 hw_state->mg_clktop2_hsclkctl);
3818
3819 intel_de_write(i915, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3820 intel_de_write(i915, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3821 intel_de_write(i915, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3822 intel_de_write(i915, MG_PLL_FRAC_LOCK(tc_port),
3823 hw_state->mg_pll_frac_lock);
3824 intel_de_write(i915, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3825
3826 intel_de_rmw(i915, MG_PLL_BIAS(tc_port),
3827 hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);
3828
3829 intel_de_rmw(i915, MG_PLL_TDC_COLDST_BIAS(tc_port),
3830 hw_state->mg_pll_tdc_coldst_bias_mask,
3831 hw_state->mg_pll_tdc_coldst_bias);
3832
3833 intel_de_posting_read(i915, MG_PLL_TDC_COLDST_BIAS(tc_port));
3834 }
3835
3836 static void dkl_pll_write(struct drm_i915_private *i915,
3837 struct intel_shared_dpll *pll,
3838 const struct icl_dpll_hw_state *hw_state)
3839 {
3840 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3841 u32 val;
3842
3843 /*
3844 * All registers programmed here have the same HIP_INDEX_REG even
3845 * though they are on different building blocks
3846 */
3847 /* All the registers are RMW */
3848 val = intel_dkl_phy_read(i915, DKL_REFCLKIN_CTL(tc_port));
3849 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3850 val |= hw_state->mg_refclkin_ctl;
3851 intel_dkl_phy_write(i915, DKL_REFCLKIN_CTL(tc_port), val);
3852
3853 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3854 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3855 val |= hw_state->mg_clktop2_coreclkctl1;
3856 intel_dkl_phy_write(i915, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3857
3858 val = intel_dkl_phy_read(i915, DKL_CLKTOP2_HSCLKCTL(tc_port));
3859 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3860 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3861 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3862 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3863 val |= hw_state->mg_clktop2_hsclkctl;
3864 intel_dkl_phy_write(i915, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3865
3866 val = DKL_PLL_DIV0_MASK;
3867 if (i915->display.vbt.override_afc_startup)
3868 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3869 intel_dkl_phy_rmw(i915, DKL_PLL_DIV0(tc_port), val,
3870 hw_state->mg_pll_div0);
3871
3872 val = intel_dkl_phy_read(i915, DKL_PLL_DIV1(tc_port));
3873 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3874 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3875 val |= hw_state->mg_pll_div1;
3876 intel_dkl_phy_write(i915, DKL_PLL_DIV1(tc_port), val);
3877
3878 val = intel_dkl_phy_read(i915, DKL_PLL_SSC(tc_port));
3879 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3880 DKL_PLL_SSC_STEP_LEN_MASK |
3881 DKL_PLL_SSC_STEP_NUM_MASK |
3882 DKL_PLL_SSC_EN);
3883 val |= hw_state->mg_pll_ssc;
3884 intel_dkl_phy_write(i915, DKL_PLL_SSC(tc_port), val);
3885
3886 val = intel_dkl_phy_read(i915, DKL_PLL_BIAS(tc_port));
3887 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3888 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3889 val |= hw_state->mg_pll_bias;
3890 intel_dkl_phy_write(i915, DKL_PLL_BIAS(tc_port), val);
3891
3892 val = intel_dkl_phy_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3893 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3894 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3895 val |= hw_state->mg_pll_tdc_coldst_bias;
3896 intel_dkl_phy_write(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3897
3898 intel_dkl_phy_posting_read(i915, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3899 }
3900
3901 static void icl_pll_power_enable(struct drm_i915_private *i915,
3902 struct intel_shared_dpll *pll,
3903 i915_reg_t enable_reg)
3904 {
3905 intel_de_rmw(i915, enable_reg, 0, PLL_POWER_ENABLE);
3906
3907 /*
3908 * The spec says we need to "wait" but it also says it should be
3909 * immediate.
3910 */
3911 if (intel_de_wait_for_set(i915, enable_reg, PLL_POWER_STATE, 1))
3912 drm_err(&i915->drm, "PLL %d Power not enabled\n",
3913 pll->info->id);
3914 }
3915
3916 static void icl_pll_enable(struct drm_i915_private *i915,
3917 struct intel_shared_dpll *pll,
3918 i915_reg_t enable_reg)
3919 {
3920 intel_de_rmw(i915, enable_reg, 0, PLL_ENABLE);
3921
3922 /* Timeout is actually 600us. */
3923 if (intel_de_wait_for_set(i915, enable_reg, PLL_LOCK, 1))
3924 drm_err(&i915->drm, "PLL %d not locked\n", pll->info->id);
3925 }
3926
3927 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3928 {
3929 u32 val;
3930
3931 if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
3932 pll->info->id != DPLL_ID_ICL_DPLL0)
3933 return;
3934 /*
3935 * Wa_16011069516:adl-p[a0]
3936 *
3937 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3938 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3939 * sanity check this assumption with a double read, which presumably
3940 * returns the correct value even with clock gating on.
3941 *
3942 * Instead of the usual place for workarounds we apply this one here,
3943 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3944 */
3945 val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3946 val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
3947 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3948 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3949 }
3950
3951 static void combo_pll_enable(struct drm_i915_private *i915,
3952 struct intel_shared_dpll *pll,
3953 const struct intel_dpll_hw_state *dpll_hw_state)
3954 {
3955 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3956 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
3957
3958 icl_pll_power_enable(i915, pll, enable_reg);
3959
3960 icl_dpll_write(i915, pll, hw_state);
3961
3962 /*
3963 * DVFS pre sequence would be here, but in our driver the cdclk code
3964 * paths should already be setting the appropriate voltage, hence we do
3965 * nothing here.
3966 */
3967
3968 icl_pll_enable(i915, pll, enable_reg);
3969
3970 adlp_cmtg_clock_gating_wa(i915, pll);
3971
3972 /* DVFS post sequence would be here. See the comment above. */
3973 }
3974
3975 static void tbt_pll_enable(struct drm_i915_private *i915,
3976 struct intel_shared_dpll *pll,
3977 const struct intel_dpll_hw_state *dpll_hw_state)
3978 {
3979 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
3980
3981 icl_pll_power_enable(i915, pll, TBT_PLL_ENABLE);
3982
3983 icl_dpll_write(i915, pll, hw_state);
3984
3985 /*
3986 * DVFS pre sequence would be here, but in our driver the cdclk code
3987 * paths should already be setting the appropriate voltage, hence we do
3988 * nothing here.
3989 */
3990
3991 icl_pll_enable(i915, pll, TBT_PLL_ENABLE);
3992
3993 /* DVFS post sequence would be here. See the comment above. */
3994 }
3995
3996 static void mg_pll_enable(struct drm_i915_private *i915,
3997 struct intel_shared_dpll *pll,
3998 const struct intel_dpll_hw_state *dpll_hw_state)
3999 {
4000 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4001 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4002
4003 icl_pll_power_enable(i915, pll, enable_reg);
4004
4005 if (DISPLAY_VER(i915) >= 12)
4006 dkl_pll_write(i915, pll, hw_state);
4007 else
4008 icl_mg_pll_write(i915, pll, hw_state);
4009
4010 /*
4011 * DVFS pre sequence would be here, but in our driver the cdclk code
4012 * paths should already be setting the appropriate voltage, hence we do
4013 * nothing here.
4014 */
4015
4016 icl_pll_enable(i915, pll, enable_reg);
4017
4018 /* DVFS post sequence would be here. See the comment above. */
4019 }
4020
4021 static void icl_pll_disable(struct drm_i915_private *i915,
4022 struct intel_shared_dpll *pll,
4023 i915_reg_t enable_reg)
4024 {
4025 /* The first steps are done by intel_ddi_post_disable(). */
4026
4027 /*
4028 * DVFS pre sequence would be here, but in our driver the cdclk code
4029 * paths should already be setting the appropriate voltage, hence we do
4030 * nothing here.
4031 */
4032
4033 intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
4034
4035 /* Timeout is actually 1us. */
4036 if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 1))
4037 drm_err(&i915->drm, "PLL %d locked\n", pll->info->id);
4038
4039 /* DVFS post sequence would be here. See the comment above. */
4040
4041 intel_de_rmw(i915, enable_reg, PLL_POWER_ENABLE, 0);
4042
4043 /*
4044 * The spec says we need to "wait" but it also says it should be
4045 * immediate.
4046 */
4047 if (intel_de_wait_for_clear(i915, enable_reg, PLL_POWER_STATE, 1))
4048 drm_err(&i915->drm, "PLL %d Power not disabled\n",
4049 pll->info->id);
4050 }
4051
4052 static void combo_pll_disable(struct drm_i915_private *i915,
4053 struct intel_shared_dpll *pll)
4054 {
4055 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
4056
4057 icl_pll_disable(i915, pll, enable_reg);
4058 }
4059
4060 static void tbt_pll_disable(struct drm_i915_private *i915,
4061 struct intel_shared_dpll *pll)
4062 {
4063 icl_pll_disable(i915, pll, TBT_PLL_ENABLE);
4064 }
4065
4066 static void mg_pll_disable(struct drm_i915_private *i915,
4067 struct intel_shared_dpll *pll)
4068 {
4069 i915_reg_t enable_reg = intel_tc_pll_enable_reg(i915, pll);
4070
4071 icl_pll_disable(i915, pll, enable_reg);
4072 }
4073
4074 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
4075 {
4076 /* No SSC ref */
4077 i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
4078 }
4079
4080 static void icl_dump_hw_state(struct drm_printer *p,
4081 const struct intel_dpll_hw_state *dpll_hw_state)
4082 {
4083 const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
4084
4085 drm_printf(p, "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4086 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4087 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4088 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4089 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4090 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4091 hw_state->cfgcr0, hw_state->cfgcr1, hw_state->div0,
4092 hw_state->mg_refclkin_ctl,
4093 hw_state->mg_clktop2_coreclkctl1,
4094 hw_state->mg_clktop2_hsclkctl,
4095 hw_state->mg_pll_div0,
4096 hw_state->mg_pll_div1,
4097 hw_state->mg_pll_lf,
4098 hw_state->mg_pll_frac_lock,
4099 hw_state->mg_pll_ssc,
4100 hw_state->mg_pll_bias,
4101 hw_state->mg_pll_tdc_coldst_bias);
4102 }
4103
4104 static bool icl_compare_hw_state(const struct intel_dpll_hw_state *_a,
4105 const struct intel_dpll_hw_state *_b)
4106 {
4107 const struct icl_dpll_hw_state *a = &_a->icl;
4108 const struct icl_dpll_hw_state *b = &_b->icl;
4109
4110 /* FIXME split combo vs. mg more thoroughly */
4111 return a->cfgcr0 == b->cfgcr0 &&
4112 a->cfgcr1 == b->cfgcr1 &&
4113 a->div0 == b->div0 &&
4114 a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
4115 a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
4116 a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
4117 a->mg_pll_div0 == b->mg_pll_div0 &&
4118 a->mg_pll_div1 == b->mg_pll_div1 &&
4119 a->mg_pll_lf == b->mg_pll_lf &&
4120 a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
4121 a->mg_pll_ssc == b->mg_pll_ssc &&
4122 a->mg_pll_bias == b->mg_pll_bias &&
4123 a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
4124 }
4125
4126 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4127 .enable = combo_pll_enable,
4128 .disable = combo_pll_disable,
4129 .get_hw_state = combo_pll_get_hw_state,
4130 .get_freq = icl_ddi_combo_pll_get_freq,
4131 };
4132
4133 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4134 .enable = tbt_pll_enable,
4135 .disable = tbt_pll_disable,
4136 .get_hw_state = tbt_pll_get_hw_state,
4137 .get_freq = icl_ddi_tbt_pll_get_freq,
4138 };
4139
4140 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4141 .enable = mg_pll_enable,
4142 .disable = mg_pll_disable,
4143 .get_hw_state = mg_pll_get_hw_state,
4144 .get_freq = icl_ddi_mg_pll_get_freq,
4145 };
4146
4147 static const struct dpll_info icl_plls[] = {
4148 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4149 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4150 { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4151 .is_alt_port_dpll = true, },
4152 { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4153 { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4154 { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4155 { .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4156 {}
4157 };
4158
4159 static const struct intel_dpll_mgr icl_pll_mgr = {
4160 .dpll_info = icl_plls,
4161 .compute_dplls = icl_compute_dplls,
4162 .get_dplls = icl_get_dplls,
4163 .put_dplls = icl_put_dplls,
4164 .update_active_dpll = icl_update_active_dpll,
4165 .update_ref_clks = icl_update_dpll_ref_clks,
4166 .dump_hw_state = icl_dump_hw_state,
4167 .compare_hw_state = icl_compare_hw_state,
4168 };
4169
4170 static const struct dpll_info ehl_plls[] = {
4171 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4172 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4173 { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
4174 .power_domain = POWER_DOMAIN_DC_OFF, },
4175 {}
4176 };
4177
4178 static const struct intel_dpll_mgr ehl_pll_mgr = {
4179 .dpll_info = ehl_plls,
4180 .compute_dplls = icl_compute_dplls,
4181 .get_dplls = icl_get_dplls,
4182 .put_dplls = icl_put_dplls,
4183 .update_ref_clks = icl_update_dpll_ref_clks,
4184 .dump_hw_state = icl_dump_hw_state,
4185 .compare_hw_state = icl_compare_hw_state,
4186 };
4187
4188 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4189 .enable = mg_pll_enable,
4190 .disable = mg_pll_disable,
4191 .get_hw_state = dkl_pll_get_hw_state,
4192 .get_freq = icl_ddi_mg_pll_get_freq,
4193 };
4194
4195 static const struct dpll_info tgl_plls[] = {
4196 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4197 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4198 { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4199 .is_alt_port_dpll = true, },
4200 { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4201 { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4202 { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4203 { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4204 { .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
4205 { .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
4206 {}
4207 };
4208
4209 static const struct intel_dpll_mgr tgl_pll_mgr = {
4210 .dpll_info = tgl_plls,
4211 .compute_dplls = icl_compute_dplls,
4212 .get_dplls = icl_get_dplls,
4213 .put_dplls = icl_put_dplls,
4214 .update_active_dpll = icl_update_active_dpll,
4215 .update_ref_clks = icl_update_dpll_ref_clks,
4216 .dump_hw_state = icl_dump_hw_state,
4217 .compare_hw_state = icl_compare_hw_state,
4218 };
4219
4220 static const struct dpll_info rkl_plls[] = {
4221 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4222 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4223 { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
4224 {}
4225 };
4226
4227 static const struct intel_dpll_mgr rkl_pll_mgr = {
4228 .dpll_info = rkl_plls,
4229 .compute_dplls = icl_compute_dplls,
4230 .get_dplls = icl_get_dplls,
4231 .put_dplls = icl_put_dplls,
4232 .update_ref_clks = icl_update_dpll_ref_clks,
4233 .dump_hw_state = icl_dump_hw_state,
4234 .compare_hw_state = icl_compare_hw_state,
4235 };
4236
4237 static const struct dpll_info dg1_plls[] = {
4238 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
4239 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
4240 { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4241 { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4242 {}
4243 };
4244
4245 static const struct intel_dpll_mgr dg1_pll_mgr = {
4246 .dpll_info = dg1_plls,
4247 .compute_dplls = icl_compute_dplls,
4248 .get_dplls = icl_get_dplls,
4249 .put_dplls = icl_put_dplls,
4250 .update_ref_clks = icl_update_dpll_ref_clks,
4251 .dump_hw_state = icl_dump_hw_state,
4252 .compare_hw_state = icl_compare_hw_state,
4253 };
4254
4255 static const struct dpll_info adls_plls[] = {
4256 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4257 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4258 { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
4259 { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
4260 {}
4261 };
4262
4263 static const struct intel_dpll_mgr adls_pll_mgr = {
4264 .dpll_info = adls_plls,
4265 .compute_dplls = icl_compute_dplls,
4266 .get_dplls = icl_get_dplls,
4267 .put_dplls = icl_put_dplls,
4268 .update_ref_clks = icl_update_dpll_ref_clks,
4269 .dump_hw_state = icl_dump_hw_state,
4270 .compare_hw_state = icl_compare_hw_state,
4271 };
4272
4273 static const struct dpll_info adlp_plls[] = {
4274 { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
4275 { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
4276 { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
4277 .is_alt_port_dpll = true, },
4278 { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
4279 { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
4280 { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
4281 { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
4282 {}
4283 };
4284
4285 static const struct intel_dpll_mgr adlp_pll_mgr = {
4286 .dpll_info = adlp_plls,
4287 .compute_dplls = icl_compute_dplls,
4288 .get_dplls = icl_get_dplls,
4289 .put_dplls = icl_put_dplls,
4290 .update_active_dpll = icl_update_active_dpll,
4291 .update_ref_clks = icl_update_dpll_ref_clks,
4292 .dump_hw_state = icl_dump_hw_state,
4293 .compare_hw_state = icl_compare_hw_state,
4294 };
4295
4296 /**
4297 * intel_shared_dpll_init - Initialize shared DPLLs
4298 * @i915: i915 device
4299 *
4300 * Initialize shared DPLLs for @i915.
4301 */
4302 void intel_shared_dpll_init(struct drm_i915_private *i915)
4303 {
4304 const struct intel_dpll_mgr *dpll_mgr = NULL;
4305 const struct dpll_info *dpll_info;
4306 int i;
4307
4308 mutex_init(&i915->display.dpll.lock);
4309
4310 if (DISPLAY_VER(i915) >= 14 || IS_DG2(i915))
4311 /* No shared DPLLs on DG2; port PLLs are part of the PHY */
4312 dpll_mgr = NULL;
4313 else if (IS_ALDERLAKE_P(i915))
4314 dpll_mgr = &adlp_pll_mgr;
4315 else if (IS_ALDERLAKE_S(i915))
4316 dpll_mgr = &adls_pll_mgr;
4317 else if (IS_DG1(i915))
4318 dpll_mgr = &dg1_pll_mgr;
4319 else if (IS_ROCKETLAKE(i915))
4320 dpll_mgr = &rkl_pll_mgr;
4321 else if (DISPLAY_VER(i915) >= 12)
4322 dpll_mgr = &tgl_pll_mgr;
4323 else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915))
4324 dpll_mgr = &ehl_pll_mgr;
4325 else if (DISPLAY_VER(i915) >= 11)
4326 dpll_mgr = &icl_pll_mgr;
4327 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
4328 dpll_mgr = &bxt_pll_mgr;
4329 else if (DISPLAY_VER(i915) == 9)
4330 dpll_mgr = &skl_pll_mgr;
4331 else if (HAS_DDI(i915))
4332 dpll_mgr = &hsw_pll_mgr;
4333 else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
4334 dpll_mgr = &pch_pll_mgr;
4335
4336 if (!dpll_mgr)
4337 return;
4338
4339 dpll_info = dpll_mgr->dpll_info;
4340
4341 for (i = 0; dpll_info[i].name; i++) {
4342 if (drm_WARN_ON(&i915->drm,
4343 i >= ARRAY_SIZE(i915->display.dpll.shared_dplls)))
4344 break;
4345
4346 /* must fit into unsigned long bitmask on 32bit */
4347 if (drm_WARN_ON(&i915->drm, dpll_info[i].id >= 32))
4348 break;
4349
4350 i915->display.dpll.shared_dplls[i].info = &dpll_info[i];
4351 i915->display.dpll.shared_dplls[i].index = i;
4352 }
4353
4354 i915->display.dpll.mgr = dpll_mgr;
4355 i915->display.dpll.num_shared_dpll = i;
4356 }
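/*
 * Illustrative sketch (not part of the driver): once initialization is
 * done, the per-platform PLL table can be walked with the existing
 * for_each_shared_dpll() iterator, e.g.
 *
 *	struct intel_shared_dpll *pll;
 *	int i;
 *
 *	for_each_shared_dpll(i915, pll, i)
 *		drm_dbg_kms(&i915->drm, "registered %s\n", pll->info->name);
 *
 * The debug message is hypothetical; only the iterator and the
 * pll->info->name field are taken from this file.
 */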
4357
4358 /**
4359 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4360 * @state: atomic state
4361 * @crtc: CRTC to compute DPLLs for
4362 * @encoder: encoder
4363 *
4364 * This function computes the DPLL state for the given CRTC and encoder.
4365 *
4366 * The new configuration in the atomic commit @state is made effective by
4367 * calling intel_shared_dpll_swap_state().
4368 *
4369 * Returns:
4370 * 0 on success, negative error code on failure.
4371 */
4372 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4373 struct intel_crtc *crtc,
4374 struct intel_encoder *encoder)
4375 {
4376 struct drm_i915_private *i915 = to_i915(state->base.dev);
4377 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4378
4379 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4380 return -EINVAL;
4381
4382 return dpll_mgr->compute_dplls(state, crtc, encoder);
4383 }
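/*
 * Illustrative sketch (assumption, not a verbatim call site): during the
 * atomic check phase a CRTC compute path would typically do
 *
 *	ret = intel_compute_shared_dplls(state, crtc, encoder);
 *	if (ret)
 *		return ret;
 *
 * before reserving any PLL, so that the per-port PLL hw state and
 * crtc_state->port_clock are filled in for the fastset checks.
 */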
4384
4385 /**
4386 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4387 * @state: atomic state
4388 * @crtc: CRTC to reserve DPLLs for
4389 * @encoder: encoder
4390 *
4391 * This function reserves all required DPLLs for the given CRTC and encoder
4392 * combination in the current atomic commit @state and the new @crtc atomic
4393 * state.
4394 *
4395 * The new configuration in the atomic commit @state is made effective by
4396 * calling intel_shared_dpll_swap_state().
4397 *
4398 * The reserved DPLLs should be released by calling
4399 * intel_release_shared_dplls().
4400 *
4401 * Returns:
4402 * 0 if all required DPLLs were successfully reserved,
4403 * negative error code otherwise.
4404 */
4405 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4406 struct intel_crtc *crtc,
4407 struct intel_encoder *encoder)
4408 {
4409 struct drm_i915_private *i915 = to_i915(state->base.dev);
4410 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4411
4412 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4413 return -EINVAL;
4414
4415 return dpll_mgr->get_dplls(state, crtc, encoder);
4416 }
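/*
 * Illustrative sketch (assumption, not a verbatim call site): reservation
 * failures are simply propagated, e.g.
 *
 *	ret = intel_reserve_shared_dplls(state, crtc, encoder);
 *	if (ret) {
 *		drm_dbg_kms(&i915->drm, "no usable DPLL for [CRTC:%d]\n",
 *			    crtc->base.base.id);
 *		return ret;
 *	}
 *
 * The debug message is hypothetical; the error itself is the -EINVAL
 * returned by the platform ->get_dplls() hook when no suitable PLL is
 * free.
 */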
4417
4418 /**
4419 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4420 * @state: atomic state
4421 * @crtc: crtc from which the DPLLs are to be released
4422 *
4423 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4424 * from the current atomic commit @state and the old @crtc atomic state.
4425 *
4426 * The new configuration in the atomic commit @state is made effective by
4427 * calling intel_shared_dpll_swap_state().
4428 */
4429 void intel_release_shared_dplls(struct intel_atomic_state *state,
4430 struct intel_crtc *crtc)
4431 {
4432 struct drm_i915_private *i915 = to_i915(state->base.dev);
4433 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4434
4435 /*
4436 * FIXME: this function is called for every platform having a
4437 * compute_clock hook, even though the platform doesn't yet support
4438 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4439 * called on those.
4440 */
4441 if (!dpll_mgr)
4442 return;
4443
4444 dpll_mgr->put_dplls(state, crtc);
4445 }
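/*
 * Illustrative sketch (assumption): a CRTC that is being disabled in the
 * atomic state releases whatever it had reserved, e.g.
 *
 *	if (!new_crtc_state->hw.active)
 *		intel_release_shared_dplls(state, crtc);
 *
 * The ->put_dplls() hooks drop references taken from the old CRTC state
 * and clear the pointers in the new one, so calling this for a CRTC that
 * reserved nothing is effectively a no-op.
 */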
4446
4447 /**
4448 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4449 * @state: atomic state
4450 * @crtc: the CRTC for which to update the active DPLL
4451 * @encoder: encoder determining the type of port DPLL
4452 *
4453 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4454 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4455 * DPLL selected will be based on the current mode of the encoder's port.
4456 */
4457 void intel_update_active_dpll(struct intel_atomic_state *state,
4458 struct intel_crtc *crtc,
4459 struct intel_encoder *encoder)
4460 {
4461 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4462 const struct intel_dpll_mgr *dpll_mgr = i915->display.dpll.mgr;
4463
4464 if (drm_WARN_ON(&i915->drm, !dpll_mgr))
4465 return;
4466
4467 dpll_mgr->update_active_dpll(state, crtc, encoder);
4468 }
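/*
 * Illustrative sketch (assumption): for a TC port icl_get_tc_phy_dplls()
 * reserves both the TBT PLL and the MG/DKL PLL, and this helper selects
 * which of the two crtc_state->icl_port_dplls[] entries is made active,
 * e.g.
 *
 *	intel_update_active_dpll(state, crtc, encoder);
 *
 * once the port mode (TBT-alt vs. DP-alt/legacy) is known, mirroring the
 * icl_update_active_dpll() logic above.
 */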
4469
4470 /**
4471 * intel_dpll_get_freq - calculate the DPLL's output frequency
4472 * @i915: i915 device
4473 * @pll: DPLL for which to calculate the output frequency
4474 * @dpll_hw_state: DPLL state from which to calculate the output frequency
4475 *
4476 * Return the output frequency corresponding to @pll's passed in @dpll_hw_state.
4477 */
4478 int intel_dpll_get_freq(struct drm_i915_private *i915,
4479 const struct intel_shared_dpll *pll,
4480 const struct intel_dpll_hw_state *dpll_hw_state)
4481 {
4482 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4483 return 0;
4484
4485 return pll->info->funcs->get_freq(i915, pll, dpll_hw_state);
4486 }
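/*
 * Illustrative sketch (assumption): this helper pairs naturally with the
 * readout helper below, e.g.
 *
 *	struct intel_dpll_hw_state hw_state = {};
 *	int port_clock;
 *
 *	if (intel_dpll_get_hw_state(i915, pll, &hw_state))
 *		port_clock = intel_dpll_get_freq(i915, pll, &hw_state);
 *
 * mirroring how the compute paths above derive crtc_state->port_clock
 * from a freshly computed hw state.
 */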
4487
4488 /**
4489 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4490 * @i915: i915 device
4491 * @pll: DPLL for which to read out the hardware state
4492 * @dpll_hw_state: DPLL's hardware state
4493 *
4494 * Read out @pll's hardware state into @dpll_hw_state.
4495 */
4496 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4497 struct intel_shared_dpll *pll,
4498 struct intel_dpll_hw_state *dpll_hw_state)
4499 {
4500 return pll->info->funcs->get_hw_state(i915, pll, dpll_hw_state);
4501 }
4502
4503 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4504 struct intel_shared_dpll *pll)
4505 {
4506 struct intel_crtc *crtc;
4507
4508 pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4509
4510 if (pll->on && pll->info->power_domain)
4511 pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
4512
4513 pll->state.pipe_mask = 0;
4514 for_each_intel_crtc(&i915->drm, crtc) {
4515 struct intel_crtc_state *crtc_state =
4516 to_intel_crtc_state(crtc->base.state);
4517
4518 if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4519 intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4520 }
4521 pll->active_mask = pll->state.pipe_mask;
4522
4523 drm_dbg_kms(&i915->drm,
4524 "%s hw state readout: pipe_mask 0x%x, on %i\n",
4525 pll->info->name, pll->state.pipe_mask, pll->on);
4526 }
4527
4528 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4529 {
4530 if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4531 i915->display.dpll.mgr->update_ref_clks(i915);
4532 }
4533
4534 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4535 {
4536 struct intel_shared_dpll *pll;
4537 int i;
4538
4539 for_each_shared_dpll(i915, pll, i)
4540 readout_dpll_hw_state(i915, pll);
4541 }
4542
4543 static void sanitize_dpll_state(struct drm_i915_private *i915,
4544 struct intel_shared_dpll *pll)
4545 {
4546 if (!pll->on)
4547 return;
4548
4549 adlp_cmtg_clock_gating_wa(i915, pll);
4550
4551 if (pll->active_mask)
4552 return;
4553
4554 drm_dbg_kms(&i915->drm,
4555 "%s enabled but not in use, disabling\n",
4556 pll->info->name);
4557
4558 _intel_disable_shared_dpll(i915, pll);
4559 }
4560
4561 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4562 {
4563 struct intel_shared_dpll *pll;
4564 int i;
4565
4566 for_each_shared_dpll(i915, pll, i)
4567 sanitize_dpll_state(i915, pll);
4568 }
4569
4570 /**
4571 * intel_dpll_dump_hw_state - dump hw_state
4572 * @i915: i915 drm device
4573 * @p: where to print the state to
4574 * @dpll_hw_state: hw state to be dumped
4575 *
4576 * Dump out the relevant values in @dpll_hw_state.
4577 */
4578 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
4579 struct drm_printer *p,
4580 const struct intel_dpll_hw_state *dpll_hw_state)
4581 {
4582 if (i915->display.dpll.mgr) {
4583 i915->display.dpll.mgr->dump_hw_state(p, dpll_hw_state);
4584 } else {
4585 /* fallback for platforms that don't use the shared dpll
4586 * infrastructure
4587 */
4588 ibx_dump_hw_state(p, dpll_hw_state);
4589 }
4590 }
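/*
 * Illustrative sketch (assumption): any drm_printer works here, for
 * instance a seq_file backed one in a debugfs dumper:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	intel_dpll_dump_hw_state(i915, &p, &pll->state.hw_state);
 *
 * drm_seq_file_printer() is the generic DRM helper; whether the i915
 * debugfs code uses exactly this printer is an assumption here.
 */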
4591
4592 /**
4593 * intel_dpll_compare_hw_state - compare the two states
4594 * @i915: i915 drm device
4595 * @a: first DPLL hw state
4596 * @b: second DPLL hw state
4597 *
4598 * Compare DPLL hw states @a and @b.
4599 *
4600 * Returns: true if the states are equal, false if they differ
4601 */
4602 bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
4603 const struct intel_dpll_hw_state *a,
4604 const struct intel_dpll_hw_state *b)
4605 {
4606 if (i915->display.dpll.mgr) {
4607 return i915->display.dpll.mgr->compare_hw_state(a, b);
4608 } else {
4609 /* fallback for platforms that don't use the shared dpll
4610 * infrastructure
4611 */
4612 return ibx_compare_hw_state(a, b);
4613 }
4614 }
4615
4616 static void
4617 verify_single_dpll_state(struct drm_i915_private *i915,
4618 struct intel_shared_dpll *pll,
4619 struct intel_crtc *crtc,
4620 const struct intel_crtc_state *new_crtc_state)
4621 {
4622 struct intel_dpll_hw_state dpll_hw_state = {};
4623 u8 pipe_mask;
4624 bool active;
4625
4626 active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
4627
4628 if (!pll->info->always_on) {
4629 I915_STATE_WARN(i915, !pll->on && pll->active_mask,
4630 "%s: pll in active use but not on in sw tracking\n",
4631 pll->info->name);
4632 I915_STATE_WARN(i915, pll->on && !pll->active_mask,
4633 "%s: pll is on but not used by any active pipe\n",
4634 pll->info->name);
4635 I915_STATE_WARN(i915, pll->on != active,
4636 "%s: pll on state mismatch (expected %i, found %i)\n",
4637 pll->info->name, pll->on, active);
4638 }
4639
4640 if (!crtc) {
4641 I915_STATE_WARN(i915,
4642 pll->active_mask & ~pll->state.pipe_mask,
4643 "%s: more active pll users than references: 0x%x vs 0x%x\n",
4644 pll->info->name, pll->active_mask, pll->state.pipe_mask);
4645
4646 return;
4647 }
4648
4649 pipe_mask = BIT(crtc->pipe);
4650
4651 if (new_crtc_state->hw.active)
4652 I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
4653 "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4654 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4655 else
4656 I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4657 "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4658 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4659
4660 I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
4661 "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4662 pll->info->name, pipe_mask, pll->state.pipe_mask);
4663
4664 I915_STATE_WARN(i915,
4665 pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4666 sizeof(dpll_hw_state)),
4667 "%s: pll hw state mismatch\n",
4668 pll->info->name);
4669 }
4670
4671 static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
4672 const struct intel_shared_dpll *new_pll)
4673 {
4674 return old_pll && new_pll && old_pll != new_pll &&
4675 (old_pll->info->is_alt_port_dpll || new_pll->info->is_alt_port_dpll);
4676 }
4677
4678 void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
4679 struct intel_crtc *crtc)
4680 {
4681 struct drm_i915_private *i915 = to_i915(state->base.dev);
4682 const struct intel_crtc_state *old_crtc_state =
4683 intel_atomic_get_old_crtc_state(state, crtc);
4684 const struct intel_crtc_state *new_crtc_state =
4685 intel_atomic_get_new_crtc_state(state, crtc);
4686
4687 if (new_crtc_state->shared_dpll)
4688 verify_single_dpll_state(i915, new_crtc_state->shared_dpll,
4689 crtc, new_crtc_state);
4690
4691 if (old_crtc_state->shared_dpll &&
4692 old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4693 u8 pipe_mask = BIT(crtc->pipe);
4694 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4695
4696 I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
4697 "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4698 pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
4699
4700 /* TC ports have both MG/TC and TBT PLL referenced simultaneously */
4701 I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
4702 new_crtc_state->shared_dpll) &&
4703 pll->state.pipe_mask & pipe_mask,
4704 "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4705 pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
4706 }
4707 }
4708
4709 void intel_shared_dpll_verify_disabled(struct intel_atomic_state *state)
4710 {
4711 struct drm_i915_private *i915 = to_i915(state->base.dev);
4712 struct intel_shared_dpll *pll;
4713 int i;
4714
4715 for_each_shared_dpll(i915, pll, i)
4716 verify_single_dpll_state(i915, pll, NULL, NULL);
4717 }
4718