/*
 * Copyright © 2006-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "intel_drv.h"

/**
 * DOC: Display PLLs
 *
 * Display PLLs used for driving outputs vary by platform. While some have
 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
 * from a pool. In the latter scenario, it is possible that multiple pipes
 * share a PLL if their configurations match.
 *
 * This file provides an abstraction over display PLLs. The function
 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface.
During an atomic operation, a PLL can be requested for a 38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and 39 * a previously used PLL can be released with intel_release_shared_dpll(). 40 * Changes to the users are first staged in the atomic state, and then made 41 * effective by calling intel_shared_dpll_swap_state() during the atomic 42 * commit phase. 43 */ 44 45 static void 46 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, 47 struct intel_shared_dpll_state *shared_dpll) 48 { 49 enum intel_dpll_id i; 50 51 /* Copy shared dpll state */ 52 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 53 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 54 55 shared_dpll[i] = pll->state; 56 } 57 } 58 59 static struct intel_shared_dpll_state * 60 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) 61 { 62 struct intel_atomic_state *state = to_intel_atomic_state(s); 63 64 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); 65 66 if (!state->dpll_set) { 67 state->dpll_set = true; 68 69 intel_atomic_duplicate_dpll_state(to_i915(s->dev), 70 state->shared_dpll); 71 } 72 73 return state->shared_dpll; 74 } 75 76 /** 77 * intel_get_shared_dpll_by_id - get a DPLL given its id 78 * @dev_priv: i915 device instance 79 * @id: pll id 80 * 81 * Returns: 82 * A pointer to the DPLL with @id 83 */ 84 struct intel_shared_dpll * 85 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, 86 enum intel_dpll_id id) 87 { 88 return &dev_priv->shared_dplls[id]; 89 } 90 91 /** 92 * intel_get_shared_dpll_id - get the id of a DPLL 93 * @dev_priv: i915 device instance 94 * @pll: the DPLL 95 * 96 * Returns: 97 * The id of @pll 98 */ 99 enum intel_dpll_id 100 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, 101 struct intel_shared_dpll *pll) 102 { 103 if (WARN_ON(pll < dev_priv->shared_dplls|| 104 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) 105 return -1; 106 107 return (enum 
intel_dpll_id) (pll - dev_priv->shared_dplls); 108 } 109 110 /* For ILK+ */ 111 void assert_shared_dpll(struct drm_i915_private *dev_priv, 112 struct intel_shared_dpll *pll, 113 bool state) 114 { 115 bool cur_state; 116 struct intel_dpll_hw_state hw_state; 117 118 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) 119 return; 120 121 cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state); 122 I915_STATE_WARN(cur_state != state, 123 "%s assertion failure (expected %s, current %s)\n", 124 pll->name, onoff(state), onoff(cur_state)); 125 } 126 127 /** 128 * intel_prepare_shared_dpll - call a dpll's prepare hook 129 * @crtc: CRTC which has a shared dpll 130 * 131 * This calls the PLL's prepare hook if it has one and if the PLL is not 132 * already enabled. The prepare hook is platform specific. 133 */ 134 void intel_prepare_shared_dpll(struct intel_crtc *crtc) 135 { 136 struct drm_device *dev = crtc->base.dev; 137 struct drm_i915_private *dev_priv = to_i915(dev); 138 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 139 140 if (WARN_ON(pll == NULL)) 141 return; 142 143 mutex_lock(&dev_priv->dpll_lock); 144 WARN_ON(!pll->state.crtc_mask); 145 if (!pll->active_mask) { 146 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 147 WARN_ON(pll->on); 148 assert_shared_dpll_disabled(dev_priv, pll); 149 150 pll->funcs.prepare(dev_priv, pll); 151 } 152 mutex_unlock(&dev_priv->dpll_lock); 153 } 154 155 /** 156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL 157 * @crtc: CRTC which has a shared DPLL 158 * 159 * Enable the shared DPLL used by @crtc. 
 */
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
	unsigned old_mask;

	if (WARN_ON(pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;

	/* The CRTC must already be a registered user and not yet active. */
	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
	    WARN_ON(pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
		      pll->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	if (old_mask) {
		/* Another CRTC already uses this PLL: it must be on. */
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	WARN_ON(pll->on);

	/* First user: actually turn the hardware on. */
	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->funcs.enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}

/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc: CRTC which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc->config->shared_dpll;
	unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll_lock);
	if (WARN_ON(!(pll->active_mask & crtc_mask)))
		goto out;

	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
		      pll->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);

	/* Only turn the hardware off once the last user is gone. */
	pll->active_mask &= ~crtc_mask;
	if (pll->active_mask)
		goto out;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->funcs.disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}

/*
 * Find a PLL in [range_min, range_max] for @crtc: first try to share an
 * already-used PLL whose staged hw state matches crtc_state exactly, then
 * fall back to any unused PLL in the range. Returns NULL if none fits.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_crtc *crtc,
		       struct intel_crtc_state *crtc_state,
		       enum intel_dpll_id range_min,
		       enum intel_dpll_id range_max)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);

	/* Only want to check enabled timings first */
	for (i = range_min; i <= range_max; i++) {
		pll = &dev_priv->shared_dplls[i];

		if (shared_dpll[i].crtc_mask == 0)
			continue;

		if (memcmp(&crtc_state->dpll_hw_state,
			   &shared_dpll[i].hw_state,
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				      crtc->base.base.id, crtc->base.name, pll->name,
				      shared_dpll[i].crtc_mask,
				      pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	for (i = range_min; i <= range_max; i++) {
		pll = &dev_priv->shared_dplls[i];
		if (shared_dpll[i].crtc_mask == 0) {
			DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
				      crtc->base.base.id, crtc->base.name, pll->name);
			return pll;
		}
	}

	return NULL;
}

/*
 * Stage @crtc_state's CRTC as a user of @pll in the atomic state: record the
 * desired hw state (if @pll had no users yet) and set the CRTC's bit in the
 * staged crtc_mask.
 */
static void
intel_reference_shared_dpll(struct intel_shared_dpll *pll,
			    struct intel_crtc_state *crtc_state)
{
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	enum intel_dpll_id i = pll->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);

	/* First user defines the hw state every sharer must match. */
	if (shared_dpll[i].crtc_mask == 0)
		shared_dpll[i].hw_state =
			crtc_state->dpll_hw_state;

	crtc_state->shared_dpll = pll;
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
			 pipe_name(crtc->pipe));

	shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
}

/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (!to_intel_atomic_state(state)->dpll_set)
		return;

	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll_state tmp;

		pll = &dev_priv->shared_dplls[i];

		/* Full swap: staged state becomes current and vice versa. */
		tmp = pll->state;
		pll->state = shared_dpll[i];
		shared_dpll[i] = tmp;
	}
}

/* Read back a PCH DPLL's registers; true iff the PLL is enabled. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & DPLL_VCO_ENABLE;
}

/* Program the FP0/FP1 dividers before the PLL itself is enabled. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
}

/* WARN unless some PCH reference clock source is enabled (IBX/CPT only). */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->config->shared_dpll == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Pick a PCH DPLL for @crtc: fixed pipe mapping on IBX, pooled on CPT+. */
static struct intel_shared_dpll *
ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];

		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
			      crtc->base.base.id, crtc->base.name, pll->name);
	} else {
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_PCH_PLL_A,
					     DPLL_ID_PCH_PLL_B);
	}

	if (!pll)
		return NULL;

	/* reference the pll */
	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

/* Dump a PCH DPLL hw state to the kernel log for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		      "fp0: 0x%x, fp1: 0x%x\n",
		      hw_state->dpll,
		      hw_state->dpll_md,
		      hw_state->fp0,
		      hw_state->fp1);
}

static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* Program and enable a HSW WRPLL from the staged hw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
	POSTING_READ(WRPLL_CTL(pll->id));
	udelay(20);
}

/* Program and enable the HSW SPLL from the staged hw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
	POSTING_READ(SPLL_CTL);
	udelay(20);
}

static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	uint32_t val;

	val = I915_READ(WRPLL_CTL(pll->id));
	I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
	POSTING_READ(WRPLL_CTL(pll->id));
}

static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	uint32_t val;

	val = I915_READ(SPLL_CTL);
	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	POSTING_READ(SPLL_CTL);
}
/* Read back a HSW WRPLL's control register; true iff the PLL is enabled. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(WRPLL_CTL(pll->id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & WRPLL_PLL_ENABLE;
}

/* Read back the HSW SPLL control register; true iff the PLL is enabled. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & SPLL_PLL_ENABLE;
}

/* LC PLL reference frequency in MHz, and the same scaled to 2 kHz units. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider search range (even values only, via P_INC). */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* A candidate (p, n2, r2) divider triple for the WRPLL. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};

/*
 * Return the allowed frequency deviation budget (in ppm) for @clock (Hz).
 * Well-known video clocks get tighter or looser budgets than the default.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	unsigned budget;

	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		budget = 0;
		break;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		budget = 1500;
		break;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		budget = 2000;
		break;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		budget = 4000;
		break;
	case 267250000:
	case 268500000:
		budget = 5000;
		break;
	default:
		budget = 1000;
		break;
	}

	return budget;
}

/*
 * Consider the divider triple (r2, n2, p) for target frequency @freq2k
 * (in 2 kHz units) and update @best if it is an improvement.
 */
static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
				 unsigned r2, unsigned n2, unsigned p,
				 struct hsw_wrpll_rnp *best)
{
	uint64_t a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff((u64)freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff((u64)freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}

/*
 * Exhaustively search (r2, n2, p) divider triples for @clock and return the
 * best one found within the REF/VCO constraints, via the out parameters.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	uint64_t freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}

/* Compute a WRPLL configuration for an HDMI clock and find a free/matching
 * WRPLL for it; NULL if none is available. */
static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
						       struct intel_crtc *crtc,
						       struct intel_crtc_state *crtc_state)
{
	struct intel_shared_dpll *pll;
	uint32_t val;
	unsigned int p, n2, r2;

	hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);

	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
	      WRPLL_DIVIDER_POST(p);

	crtc_state->dpll_hw_state.wrpll = val;

	pll = intel_find_shared_dpll(crtc, crtc_state,
				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);

	if (!pll)
		return NULL;

	return pll;
}

/* Map a DP link clock to the corresponding fixed LCPLL; NULL on bad clock. */
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id pll_id;

	switch (clock / 2) {
	case 81000:
		pll_id = DPLL_ID_LCPLL_810;
		break;
	case 135000:
		pll_id = DPLL_ID_LCPLL_1350;
		break;
	case 270000:
		pll_id = DPLL_ID_LCPLL_2700;
		break;
	default:
		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
		return NULL;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);

	if (!pll)
		return NULL;

	return pll;
}

/* Pick a DPLL for @crtc on HSW/BDW depending on the encoder type. */
static struct intel_shared_dpll *
hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;
	int clock = crtc_state->port_clock;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (encoder->type == INTEL_OUTPUT_HDMI) {
		pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);

	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_DP_MST ||
		   encoder->type == INTEL_OUTPUT_EDP) {
		pll = hsw_ddi_dp_get_dpll(encoder, clock);

	} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
		/* CRT is driven by the SPLL at a fixed 1350 MHz. */
		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
			return NULL;

		crtc_state->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;

		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_SPLL, DPLL_ID_SPLL);
	} else {
		return NULL;
	}

	if (!pll)
		return NULL;

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

/* Dump a HSW DPLL hw state to the kernel log for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		      hw_state->wrpll, hw_state->spll);
}

static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
};

static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
};

/* The fixed-frequency LCPLLs are always on: the hooks are no-ops. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
880 881 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { 882 .enable = hsw_ddi_lcpll_enable, 883 .disable = hsw_ddi_lcpll_disable, 884 .get_hw_state = hsw_ddi_lcpll_get_hw_state, 885 }; 886 887 struct skl_dpll_regs { 888 i915_reg_t ctl, cfgcr1, cfgcr2; 889 }; 890 891 /* this array is indexed by the *shared* pll id */ 892 static const struct skl_dpll_regs skl_dpll_regs[4] = { 893 { 894 /* DPLL 0 */ 895 .ctl = LCPLL1_CTL, 896 /* DPLL 0 doesn't support HDMI mode */ 897 }, 898 { 899 /* DPLL 1 */ 900 .ctl = LCPLL2_CTL, 901 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), 902 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), 903 }, 904 { 905 /* DPLL 2 */ 906 .ctl = WRPLL_CTL(0), 907 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), 908 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), 909 }, 910 { 911 /* DPLL 3 */ 912 .ctl = WRPLL_CTL(1), 913 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), 914 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), 915 }, 916 }; 917 918 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, 919 struct intel_shared_dpll *pll) 920 { 921 uint32_t val; 922 923 val = I915_READ(DPLL_CTRL1); 924 925 val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | 926 DPLL_CTRL1_LINK_RATE_MASK(pll->id)); 927 val |= pll->state.hw_state.ctrl1 << (pll->id * 6); 928 929 I915_WRITE(DPLL_CTRL1, val); 930 POSTING_READ(DPLL_CTRL1); 931 } 932 933 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, 934 struct intel_shared_dpll *pll) 935 { 936 const struct skl_dpll_regs *regs = skl_dpll_regs; 937 938 skl_ddi_pll_write_ctrl1(dev_priv, pll); 939 940 I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1); 941 I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2); 942 POSTING_READ(regs[pll->id].cfgcr1); 943 POSTING_READ(regs[pll->id].cfgcr2); 944 945 /* the enable bit is always bit 31 */ 946 I915_WRITE(regs[pll->id].ctl, 947 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); 948 949 if (intel_wait_for_register(dev_priv, 950 DPLL_STATUS, 951 DPLL_LOCK(pll->id), 952 DPLL_LOCK(pll->id), 953 5)) 954 
DRM_ERROR("DPLL %d not locked\n", pll->id); 955 } 956 957 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, 958 struct intel_shared_dpll *pll) 959 { 960 skl_ddi_pll_write_ctrl1(dev_priv, pll); 961 } 962 963 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, 964 struct intel_shared_dpll *pll) 965 { 966 const struct skl_dpll_regs *regs = skl_dpll_regs; 967 968 /* the enable bit is always bit 31 */ 969 I915_WRITE(regs[pll->id].ctl, 970 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); 971 POSTING_READ(regs[pll->id].ctl); 972 } 973 974 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, 975 struct intel_shared_dpll *pll) 976 { 977 } 978 979 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 980 struct intel_shared_dpll *pll, 981 struct intel_dpll_hw_state *hw_state) 982 { 983 uint32_t val; 984 const struct skl_dpll_regs *regs = skl_dpll_regs; 985 bool ret; 986 987 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 988 return false; 989 990 ret = false; 991 992 val = I915_READ(regs[pll->id].ctl); 993 if (!(val & LCPLL_PLL_ENABLE)) 994 goto out; 995 996 val = I915_READ(DPLL_CTRL1); 997 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; 998 999 /* avoid reading back stale values if HDMI mode is not enabled */ 1000 if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) { 1001 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 1002 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 1003 } 1004 ret = true; 1005 1006 out: 1007 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1008 1009 return ret; 1010 } 1011 1012 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, 1013 struct intel_shared_dpll *pll, 1014 struct intel_dpll_hw_state *hw_state) 1015 { 1016 uint32_t val; 1017 const struct skl_dpll_regs *regs = skl_dpll_regs; 1018 bool ret; 1019 1020 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 1021 return false; 1022 1023 ret = false; 1024 1025 /* 
DPLL0 is always enabled since it drives CDCLK */ 1026 val = I915_READ(regs[pll->id].ctl); 1027 if (WARN_ON(!(val & LCPLL_PLL_ENABLE))) 1028 goto out; 1029 1030 val = I915_READ(DPLL_CTRL1); 1031 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; 1032 1033 ret = true; 1034 1035 out: 1036 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1037 1038 return ret; 1039 } 1040 1041 struct skl_wrpll_context { 1042 uint64_t min_deviation; /* current minimal deviation */ 1043 uint64_t central_freq; /* chosen central freq */ 1044 uint64_t dco_freq; /* chosen dco freq */ 1045 unsigned int p; /* chosen divider */ 1046 }; 1047 1048 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) 1049 { 1050 memset(ctx, 0, sizeof(*ctx)); 1051 1052 ctx->min_deviation = U64_MAX; 1053 } 1054 1055 /* DCO freq must be within +1%/-6% of the DCO central freq */ 1056 #define SKL_DCO_MAX_PDEVIATION 100 1057 #define SKL_DCO_MAX_NDEVIATION 600 1058 1059 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, 1060 uint64_t central_freq, 1061 uint64_t dco_freq, 1062 unsigned int divider) 1063 { 1064 uint64_t deviation; 1065 1066 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), 1067 central_freq); 1068 1069 /* positive deviation */ 1070 if (dco_freq >= central_freq) { 1071 if (deviation < SKL_DCO_MAX_PDEVIATION && 1072 deviation < ctx->min_deviation) { 1073 ctx->min_deviation = deviation; 1074 ctx->central_freq = central_freq; 1075 ctx->dco_freq = dco_freq; 1076 ctx->p = divider; 1077 } 1078 /* negative deviation */ 1079 } else if (deviation < SKL_DCO_MAX_NDEVIATION && 1080 deviation < ctx->min_deviation) { 1081 ctx->min_deviation = deviation; 1082 ctx->central_freq = central_freq; 1083 ctx->dco_freq = dco_freq; 1084 ctx->p = divider; 1085 } 1086 } 1087 1088 static void skl_wrpll_get_multipliers(unsigned int p, 1089 unsigned int *p0 /* out */, 1090 unsigned int *p1 /* out */, 1091 unsigned int *p2 /* out */) 1092 { 1093 /* even dividers */ 1094 if (p % 2 == 0) { 1095 
		unsigned int half = p / 2;

		/* halves of 2, 4, 6, 10 map directly onto P2 */
		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
	} else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
	} else if (p == 5 || p == 7) {
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
	} else if (p == 15) {
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
	} else if (p == 21) {
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
	} else if (p == 35) {
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
	}
}

/* Register-field encodings describing one WRPLL configuration. */
struct skl_wrpll_params {
	uint32_t dco_fraction;
	uint32_t dco_integer;
	uint32_t qdiv_ratio;
	uint32_t qdiv_mode;
	uint32_t kdiv;
	uint32_t pdiv;
	uint32_t central_freq;
};

/*
 * Translate the chosen P0/P1/P2 dividers and DCO/central frequencies
 * into the encoded register fields of @params.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      uint64_t afe_clock,
				      uint64_t central_freq,
				      uint32_t p0, uint32_t p1, uint32_t p2)
{
	uint64_t dco_freq;

	/* encode the central frequency selection */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* encode P0 (PDiv) */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* encode P2 (KDiv) */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
1197 } 1198 1199 params->qdiv_ratio = p1; 1200 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1; 1201 1202 dco_freq = p0 * p1 * p2 * afe_clock; 1203 1204 /* 1205 * Intermediate values are in Hz. 1206 * Divide by MHz to match bsepc 1207 */ 1208 params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); 1209 params->dco_fraction = 1210 div_u64((div_u64(dco_freq, 24) - 1211 params->dco_integer * MHz(1)) * 0x8000, MHz(1)); 1212 } 1213 1214 static bool 1215 skl_ddi_calculate_wrpll(int clock /* in Hz */, 1216 struct skl_wrpll_params *wrpll_params) 1217 { 1218 uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ 1219 uint64_t dco_central_freq[3] = {8400000000ULL, 1220 9000000000ULL, 1221 9600000000ULL}; 1222 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 1223 24, 28, 30, 32, 36, 40, 42, 44, 1224 48, 52, 54, 56, 60, 64, 66, 68, 1225 70, 72, 76, 78, 80, 84, 88, 90, 1226 92, 96, 98 }; 1227 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 }; 1228 static const struct { 1229 const int *list; 1230 int n_dividers; 1231 } dividers[] = { 1232 { even_dividers, ARRAY_SIZE(even_dividers) }, 1233 { odd_dividers, ARRAY_SIZE(odd_dividers) }, 1234 }; 1235 struct skl_wrpll_context ctx; 1236 unsigned int dco, d, i; 1237 unsigned int p0, p1, p2; 1238 1239 skl_wrpll_context_init(&ctx); 1240 1241 for (d = 0; d < ARRAY_SIZE(dividers); d++) { 1242 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { 1243 for (i = 0; i < dividers[d].n_dividers; i++) { 1244 unsigned int p = dividers[d].list[i]; 1245 uint64_t dco_freq = p * afe_clock; 1246 1247 skl_wrpll_try_divider(&ctx, 1248 dco_central_freq[dco], 1249 dco_freq, 1250 p); 1251 /* 1252 * Skip the remaining dividers if we're sure to 1253 * have found the definitive divider, we can't 1254 * improve a 0 deviation. 
1255 */ 1256 if (ctx.min_deviation == 0) 1257 goto skip_remaining_dividers; 1258 } 1259 } 1260 1261 skip_remaining_dividers: 1262 /* 1263 * If a solution is found with an even divider, prefer 1264 * this one. 1265 */ 1266 if (d == 0 && ctx.p) 1267 break; 1268 } 1269 1270 if (!ctx.p) { 1271 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock); 1272 return false; 1273 } 1274 1275 /* 1276 * gcc incorrectly analyses that these can be used without being 1277 * initialized. To be fair, it's hard to guess. 1278 */ 1279 p0 = p1 = p2 = 0; 1280 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); 1281 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq, 1282 p0, p1, p2); 1283 1284 return true; 1285 } 1286 1287 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, 1288 struct intel_crtc_state *crtc_state, 1289 int clock) 1290 { 1291 uint32_t ctrl1, cfgcr1, cfgcr2; 1292 struct skl_wrpll_params wrpll_params = { 0, }; 1293 1294 /* 1295 * See comment in intel_dpll_hw_state to understand why we always use 0 1296 * as the DPLL id in this function. 
1297 */ 1298 ctrl1 = DPLL_CTRL1_OVERRIDE(0); 1299 1300 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); 1301 1302 if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) 1303 return false; 1304 1305 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | 1306 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | 1307 wrpll_params.dco_integer; 1308 1309 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | 1310 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | 1311 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | 1312 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | 1313 wrpll_params.central_freq; 1314 1315 memset(&crtc_state->dpll_hw_state, 0, 1316 sizeof(crtc_state->dpll_hw_state)); 1317 1318 crtc_state->dpll_hw_state.ctrl1 = ctrl1; 1319 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; 1320 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; 1321 return true; 1322 } 1323 1324 static bool 1325 skl_ddi_dp_set_dpll_hw_state(int clock, 1326 struct intel_dpll_hw_state *dpll_hw_state) 1327 { 1328 uint32_t ctrl1; 1329 1330 /* 1331 * See comment in intel_dpll_hw_state to understand why we always use 0 1332 * as the DPLL id in this function. 
1333 */ 1334 ctrl1 = DPLL_CTRL1_OVERRIDE(0); 1335 switch (clock / 2) { 1336 case 81000: 1337 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); 1338 break; 1339 case 135000: 1340 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); 1341 break; 1342 case 270000: 1343 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); 1344 break; 1345 /* eDP 1.4 rates */ 1346 case 162000: 1347 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); 1348 break; 1349 case 108000: 1350 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); 1351 break; 1352 case 216000: 1353 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0); 1354 break; 1355 } 1356 1357 dpll_hw_state->ctrl1 = ctrl1; 1358 return true; 1359 } 1360 1361 static struct intel_shared_dpll * 1362 skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 1363 struct intel_encoder *encoder) 1364 { 1365 struct intel_shared_dpll *pll; 1366 int clock = crtc_state->port_clock; 1367 bool bret; 1368 struct intel_dpll_hw_state dpll_hw_state; 1369 1370 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 1371 1372 if (encoder->type == INTEL_OUTPUT_HDMI) { 1373 bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); 1374 if (!bret) { 1375 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); 1376 return NULL; 1377 } 1378 } else if (encoder->type == INTEL_OUTPUT_DP || 1379 encoder->type == INTEL_OUTPUT_DP_MST || 1380 encoder->type == INTEL_OUTPUT_EDP) { 1381 bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); 1382 if (!bret) { 1383 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); 1384 return NULL; 1385 } 1386 crtc_state->dpll_hw_state = dpll_hw_state; 1387 } else { 1388 return NULL; 1389 } 1390 1391 if (encoder->type == INTEL_OUTPUT_EDP) 1392 pll = intel_find_shared_dpll(crtc, crtc_state, 1393 DPLL_ID_SKL_DPLL0, 1394 DPLL_ID_SKL_DPLL0); 1395 else 1396 pll = intel_find_shared_dpll(crtc, crtc_state, 1397 DPLL_ID_SKL_DPLL1, 1398 DPLL_ID_SKL_DPLL3); 1399 if (!pll) 1400 
		return NULL;

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}

static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};

static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};

/*
 * Program and enable @pll. The divider values come from
 * pll->state.hw_state; each write below updates one field of the PHY PLL
 * registers, then the PLL is recalibrated and enabled.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	uint32_t temp;
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK additionally needs explicit PLL power-up before programming */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}

static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t temp;

	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	/* GLK also powers the PLL down explicitly (mirrors the enable path) */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not reset for PLL:%d\n", port);
	}
}

/*
 * Read back the hardware state of @pll into @hw_state, masking each
 * register down to the fields this driver programs.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t val;
	bool ret;
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}

/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock in kHz this entry applies to */
	uint32_t p1;
	uint32_t p2;
	uint32_t m2_int;
	uint32_t m2_frac;
	bool m2_frac_en;
	uint32_t n;

	int vco;
};

/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27, 0, 0, 1},
	{540000, 2, 1, 27, 0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};

/*
 * Compute bxt PLL dividers for an HDMI output at @clock (in kHz) and
 * store them in @clk_div. Returns false if no divider combination fits.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
			  struct intel_crtc_state *crtc_state, int clock,
			  struct bxt_clk_div *clk_div)
{
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 clock, pipe_name(intel_crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* best_clock.m2 carries the fractional part in its low 22 bits */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;
	return true;
}

/*
 * Look up the pre-computed divider table entry for a DP link @clock
 * (in kHz); falls back to the first entry (162000) if no exact match.
 */
static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
{
	int i;

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (bxt_dp_clk_val[i].clock == clock) {
			*clk_div = bxt_dp_clk_val[i];
			break;
		}
	}

	/* VCO = clock * 10 / 2 * p1 * p2 (in kHz) */
	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}

/*
 * Fill @dpll_hw_state with register values for the given dividers. The
 * loop-filter coefficients are chosen from the resulting VCO frequency
 * and the lane staggering value from the link clock.
 */
static bool bxt_ddi_set_dpll_hw_state(int clock,
				      struct bxt_clk_div *clk_div,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	int vco = clk_div->vco;
	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
	uint32_t lanestagger;

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT) 1772 | PORT_PLL_DCO_AMP_OVR_EN_H; 1773 1774 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE; 1775 1776 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; 1777 1778 return true; 1779 } 1780 1781 static bool 1782 bxt_ddi_dp_set_dpll_hw_state(int clock, 1783 struct intel_dpll_hw_state *dpll_hw_state) 1784 { 1785 struct bxt_clk_div clk_div = {0}; 1786 1787 bxt_ddi_dp_pll_dividers(clock, &clk_div); 1788 1789 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); 1790 } 1791 1792 static bool 1793 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc, 1794 struct intel_crtc_state *crtc_state, int clock, 1795 struct intel_dpll_hw_state *dpll_hw_state) 1796 { 1797 struct bxt_clk_div clk_div = { }; 1798 1799 bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div); 1800 1801 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); 1802 } 1803 1804 static struct intel_shared_dpll * 1805 bxt_get_dpll(struct intel_crtc *crtc, 1806 struct intel_crtc_state *crtc_state, 1807 struct intel_encoder *encoder) 1808 { 1809 struct intel_dpll_hw_state dpll_hw_state = { }; 1810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1811 struct intel_digital_port *intel_dig_port; 1812 struct intel_shared_dpll *pll; 1813 int i, clock = crtc_state->port_clock; 1814 1815 if (encoder->type == INTEL_OUTPUT_HDMI && 1816 !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock, 1817 &dpll_hw_state)) 1818 return NULL; 1819 1820 if ((encoder->type == INTEL_OUTPUT_DP || 1821 encoder->type == INTEL_OUTPUT_EDP || 1822 encoder->type == INTEL_OUTPUT_DP_MST) && 1823 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) 1824 return NULL; 1825 1826 memset(&crtc_state->dpll_hw_state, 0, 1827 sizeof(crtc_state->dpll_hw_state)); 1828 1829 crtc_state->dpll_hw_state = dpll_hw_state; 1830 1831 if (encoder->type == INTEL_OUTPUT_DP_MST) { 1832 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); 1833 
		intel_dig_port = intel_mst->primary;
	} else
		intel_dig_port = enc_to_dig_port(&encoder->base);

	/* 1:1 mapping between ports and PLLs */
	i = (enum intel_dpll_id) intel_dig_port->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, i);

	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->name);

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
		      hw_state->ebb0,
		      hw_state->ebb4,
		      hw_state->pll0,
		      hw_state->pll1,
		      hw_state->pll2,
		      hw_state->pll3,
		      hw_state->pll6,
		      hw_state->pll8,
		      hw_state->pll9,
		      hw_state->pll10,
		      hw_state->pcsdw12);
}

static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};

/* Sanity-check the BIOS-programmed LCPLL state on pre-gen9 DDI platforms. */
static void intel_ddi_pll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) < 9) {
		uint32_t val = I915_READ(LCPLL_CTL);

		/*
		 * The LCPLL register should be turned on by the BIOS. For now
		 * let's just check its state and print errors in case
		 * something is wrong. Don't even try to turn it on.
		 */

		if (val & LCPLL_CD_SOURCE_FCLK)
			DRM_ERROR("CDCLK source is not LCPLL\n");

		if (val & LCPLL_PLL_DISABLE)
			DRM_ERROR("LCPLL is disabled\n");
	}
}

/* Static description of one shared DPLL: name, id, vfuncs and flags. */
struct dpll_info {
	const char *name;
	const int id;
	const struct intel_shared_dpll_funcs *funcs;
	uint32_t flags;		/* e.g. INTEL_DPLL_ALWAYS_ON */
};

/* Per-platform DPLL management: PLL list plus selection/dump hooks. */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;

	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
					      struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);

	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state);
};

static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
	{ NULL, -1, NULL, 0 },
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dpll = ibx_get_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};

static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
	{ "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
	{ "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
	{ "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dpll = hsw_get_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};

static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 2",
		    DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dpll = skl_get_dpll,
	.dump_hw_state = skl_dump_hw_state,
};

static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dpll = bxt_get_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};

/*
 * Enable @pll following the numbered hardware sequence: power up,
 * program CFGCR0 (and CFGCR1 for HDMI), enable and wait for lock.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	uint32_t val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val |= PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_POWER_STATE,
				    PLL_POWER_STATE,
				    5))
		DRM_ERROR("PLL %d Power not enabled\n", pll->id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val);

	/* 4. Read back to ensure writes completed */
	POSTING_READ(CNL_DPLL_CFGCR0(pll->id));

	/* 3. Configure DPLL_CFGCR1 */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
		/* 4. Read back to ensure writes completed */
		POSTING_READ(CNL_DPLL_CFGCR1(pll->id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * FIXME: (DVFS) is used to adjust the display voltage to match the
	 * display clock frequencies
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val |= PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_LOCK,
				    PLL_LOCK,
				    5))
		DRM_ERROR("PLL %d not locked\n", pll->id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * FIXME: (DVFS) is used to adjust the display voltage to match the
	 * display clock frequencies
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}

/*
 * Disable @pll following the numbered hardware sequence: disable the
 * PLL, wait for unlock, then remove PLL power.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	uint32_t val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * FIXME: (DVFS) is used to adjust the display voltage to match the
	 * display clock frequencies
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val &= ~PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 4.
	   Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_LOCK,
				    0,
				    5))
		DRM_ERROR("PLL %d locked\n", pll->id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * FIXME: (DVFS) is used to adjust the display voltage to match the
	 * display clock frequencies
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(pll->id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(pll->id),
				    PLL_POWER_STATE,
				    0,
				    5))
		DRM_ERROR("PLL %d Power not disabled\n", pll->id);
}

/*
 * Read back the current hardware state of @pll into @hw_state.
 * Returns true and fills @hw_state only if the PLL is enabled.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(CNL_DPLL_ENABLE(pll->id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = I915_READ(CNL_DPLL_CFGCR0(pll->id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}

/* Split the overall divider @bestdiv into the hardware P/Q/K factors. */
static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
				      unsigned int *pdiv,
				      unsigned int *qdiv,
				      unsigned int *kdiv)
{
	/* even dividers */
	if (bestdiv % 2 == 0) {
		if (bestdiv == 2) {
			*pdiv = 2;
			*qdiv = 1;
			*kdiv = 1;
		} else if (bestdiv % 4 == 0) {
			*pdiv = 2;
			*qdiv = bestdiv / 4;
			*kdiv = 2;
		} else if (bestdiv % 6 == 0) {
			*pdiv = 3;
			*qdiv = bestdiv / 6;
			*kdiv = 2;
		} else if (bestdiv % 5 == 0) {
			*pdiv = 5;
			*qdiv = bestdiv / 10;
			*kdiv = 2;
		} else if (bestdiv % 14 == 0) {
			*pdiv = 7;
			*qdiv = bestdiv / 14;
			*kdiv = 2;
		}
	} else {
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
	}
}

/*
 * Encode the chosen dividers and DCO frequency into the register fields
 * of @params. @dco_freq and @ref_freq are in kHz.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
				      uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
				      uint32_t kdiv)
{
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* the Q divider is only active when K divides by 2 */
	if (kdiv != 2)
		qdiv = 1;

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ?
0 : 1; 2210 2211 params->dco_integer = div_u64(dco_freq, ref_freq); 2212 params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) - 2213 ((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000); 2214 } 2215 2216 static bool 2217 cnl_ddi_calculate_wrpll(int clock /* in Hz */, 2218 struct drm_i915_private *dev_priv, 2219 struct skl_wrpll_params *wrpll_params) 2220 { 2221 uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */ 2222 unsigned int dco_min = 7998 * KHz(1); 2223 unsigned int dco_max = 10000 * KHz(1); 2224 unsigned int dco_mid = (dco_min + dco_max) / 2; 2225 2226 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16, 2227 18, 20, 24, 28, 30, 32, 36, 40, 2228 42, 44, 48, 50, 52, 54, 56, 60, 2229 64, 66, 68, 70, 72, 76, 78, 80, 2230 84, 88, 90, 92, 96, 98, 100, 102, 2231 3, 5, 7, 9, 15, 21 }; 2232 unsigned int d, dco; 2233 unsigned int dco_centrality = 0; 2234 unsigned int best_dco_centrality = 999999; 2235 unsigned int best_div = 0; 2236 unsigned int best_dco = 0; 2237 unsigned int pdiv = 0, qdiv = 0, kdiv = 0; 2238 2239 for (d = 0; d < ARRAY_SIZE(dividers); d++) { 2240 dco = afe_clock * dividers[d]; 2241 2242 if ((dco <= dco_max) && (dco >= dco_min)) { 2243 dco_centrality = abs(dco - dco_mid); 2244 2245 if (dco_centrality < best_dco_centrality) { 2246 best_dco_centrality = dco_centrality; 2247 best_div = dividers[d]; 2248 best_dco = dco; 2249 } 2250 } 2251 } 2252 2253 if (best_div == 0) 2254 return false; 2255 2256 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); 2257 2258 cnl_wrpll_params_populate(wrpll_params, best_dco, 2259 dev_priv->cdclk.hw.ref, pdiv, qdiv, kdiv); 2260 2261 return true; 2262 } 2263 2264 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, 2265 struct intel_crtc_state *crtc_state, 2266 int clock) 2267 { 2268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2269 uint32_t cfgcr0, cfgcr1; 2270 struct skl_wrpll_params wrpll_params = { 0, }; 2271 2272 cfgcr0 = 
DPLL_CFGCR0_HDMI_MODE; 2273 2274 if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params)) 2275 return false; 2276 2277 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) | 2278 wrpll_params.dco_integer; 2279 2280 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) | 2281 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) | 2282 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) | 2283 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) | 2284 wrpll_params.central_freq | 2285 DPLL_CFGCR1_CENTRAL_FREQ; 2286 2287 memset(&crtc_state->dpll_hw_state, 0, 2288 sizeof(crtc_state->dpll_hw_state)); 2289 2290 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; 2291 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; 2292 return true; 2293 } 2294 2295 static bool 2296 cnl_ddi_dp_set_dpll_hw_state(int clock, 2297 struct intel_dpll_hw_state *dpll_hw_state) 2298 { 2299 uint32_t cfgcr0; 2300 2301 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE; 2302 2303 switch (clock / 2) { 2304 case 81000: 2305 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810; 2306 break; 2307 case 135000: 2308 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350; 2309 break; 2310 case 270000: 2311 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700; 2312 break; 2313 /* eDP 1.4 rates */ 2314 case 162000: 2315 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620; 2316 break; 2317 case 108000: 2318 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080; 2319 break; 2320 case 216000: 2321 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160; 2322 break; 2323 case 324000: 2324 /* Some SKUs may require elevated I/O voltage to support this */ 2325 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240; 2326 break; 2327 case 405000: 2328 /* Some SKUs may require elevated I/O voltage to support this */ 2329 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050; 2330 break; 2331 } 2332 2333 dpll_hw_state->cfgcr0 = cfgcr0; 2334 return true; 2335 } 2336 2337 static struct intel_shared_dpll * 2338 cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 2339 struct intel_encoder *encoder) 2340 { 2341 struct intel_shared_dpll *pll; 2342 int clock = crtc_state->port_clock; 
2343 bool bret; 2344 struct intel_dpll_hw_state dpll_hw_state; 2345 2346 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 2347 2348 if (encoder->type == INTEL_OUTPUT_HDMI) { 2349 bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); 2350 if (!bret) { 2351 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); 2352 return NULL; 2353 } 2354 } else if (encoder->type == INTEL_OUTPUT_DP || 2355 encoder->type == INTEL_OUTPUT_DP_MST || 2356 encoder->type == INTEL_OUTPUT_EDP) { 2357 bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); 2358 if (!bret) { 2359 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); 2360 return NULL; 2361 } 2362 crtc_state->dpll_hw_state = dpll_hw_state; 2363 } else { 2364 DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n", 2365 encoder->type); 2366 return NULL; 2367 } 2368 2369 pll = intel_find_shared_dpll(crtc, crtc_state, 2370 DPLL_ID_SKL_DPLL0, 2371 DPLL_ID_SKL_DPLL2); 2372 if (!pll) { 2373 DRM_DEBUG_KMS("No PLL selected\n"); 2374 return NULL; 2375 } 2376 2377 intel_reference_shared_dpll(pll, crtc_state); 2378 2379 return pll; 2380 } 2381 2382 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv, 2383 struct intel_dpll_hw_state *hw_state) 2384 { 2385 DRM_DEBUG_KMS("dpll_hw_state: " 2386 "cfgcr0: 0x%x, cfgcr1: 0x%x\n", 2387 hw_state->cfgcr0, 2388 hw_state->cfgcr1); 2389 } 2390 2391 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = { 2392 .enable = cnl_ddi_pll_enable, 2393 .disable = cnl_ddi_pll_disable, 2394 .get_hw_state = cnl_ddi_pll_get_hw_state, 2395 }; 2396 2397 static const struct dpll_info cnl_plls[] = { 2398 { "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 }, 2399 { "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 }, 2400 { "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 }, 2401 { NULL, -1, NULL, }, 2402 }; 2403 2404 static const struct intel_dpll_mgr cnl_pll_mgr = { 2405 .dpll_info = cnl_plls, 2406 .get_dpll = cnl_get_dpll, 2407 .dump_hw_state = cnl_dump_hw_state, 2408 }; 2409 2410 /** 2411 * 
intel_shared_dpll_init - Initialize shared DPLLs 2412 * @dev: drm device 2413 * 2414 * Initialize shared DPLLs for @dev. 2415 */ 2416 void intel_shared_dpll_init(struct drm_device *dev) 2417 { 2418 struct drm_i915_private *dev_priv = to_i915(dev); 2419 const struct intel_dpll_mgr *dpll_mgr = NULL; 2420 const struct dpll_info *dpll_info; 2421 int i; 2422 2423 if (IS_CANNONLAKE(dev_priv)) 2424 dpll_mgr = &cnl_pll_mgr; 2425 else if (IS_GEN9_BC(dev_priv)) 2426 dpll_mgr = &skl_pll_mgr; 2427 else if (IS_GEN9_LP(dev_priv)) 2428 dpll_mgr = &bxt_pll_mgr; 2429 else if (HAS_DDI(dev_priv)) 2430 dpll_mgr = &hsw_pll_mgr; 2431 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 2432 dpll_mgr = &pch_pll_mgr; 2433 2434 if (!dpll_mgr) { 2435 dev_priv->num_shared_dpll = 0; 2436 return; 2437 } 2438 2439 dpll_info = dpll_mgr->dpll_info; 2440 2441 for (i = 0; dpll_info[i].id >= 0; i++) { 2442 WARN_ON(i != dpll_info[i].id); 2443 2444 dev_priv->shared_dplls[i].id = dpll_info[i].id; 2445 dev_priv->shared_dplls[i].name = dpll_info[i].name; 2446 dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs; 2447 dev_priv->shared_dplls[i].flags = dpll_info[i].flags; 2448 } 2449 2450 dev_priv->dpll_mgr = dpll_mgr; 2451 dev_priv->num_shared_dpll = i; 2452 lockinit(&dev_priv->dpll_lock, "dpll_lock", 0, LK_CANRECURSE); 2453 2454 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 2455 2456 /* FIXME: Move this to a more suitable place */ 2457 if (HAS_DDI(dev_priv)) 2458 intel_ddi_pll_init(dev); 2459 } 2460 2461 /** 2462 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination 2463 * @crtc: CRTC 2464 * @crtc_state: atomic state for @crtc 2465 * @encoder: encoder 2466 * 2467 * Find an appropriate DPLL for the given CRTC and encoder combination. A 2468 * reference from the @crtc to the returned pll is registered in the atomic 2469 * state. That configuration is made effective by calling 2470 * intel_shared_dpll_swap_state(). 
The reference should be released by calling 2471 * intel_release_shared_dpll(). 2472 * 2473 * Returns: 2474 * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state. 2475 */ 2476 struct intel_shared_dpll * 2477 intel_get_shared_dpll(struct intel_crtc *crtc, 2478 struct intel_crtc_state *crtc_state, 2479 struct intel_encoder *encoder) 2480 { 2481 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2482 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; 2483 2484 if (WARN_ON(!dpll_mgr)) 2485 return NULL; 2486 2487 return dpll_mgr->get_dpll(crtc, crtc_state, encoder); 2488 } 2489 2490 /** 2491 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state 2492 * @dpll: dpll in use by @crtc 2493 * @crtc: crtc 2494 * @state: atomic state 2495 * 2496 * This function releases the reference from @crtc to @dpll from the 2497 * atomic @state. The new configuration is made effective by calling 2498 * intel_shared_dpll_swap_state(). 2499 */ 2500 void intel_release_shared_dpll(struct intel_shared_dpll *dpll, 2501 struct intel_crtc *crtc, 2502 struct drm_atomic_state *state) 2503 { 2504 struct intel_shared_dpll_state *shared_dpll_state; 2505 2506 shared_dpll_state = intel_atomic_get_shared_dpll_state(state); 2507 shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe); 2508 } 2509 2510 /** 2511 * intel_shared_dpll_dump_hw_state - write hw_state to dmesg 2512 * @dev_priv: i915 drm device 2513 * @hw_state: hw state to be written to the log 2514 * 2515 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS. 
2516 */ 2517 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, 2518 struct intel_dpll_hw_state *hw_state) 2519 { 2520 if (dev_priv->dpll_mgr) { 2521 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); 2522 } else { 2523 /* fallback for platforms that don't use the shared dpll 2524 * infrastructure 2525 */ 2526 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 2527 "fp0: 0x%x, fp1: 0x%x\n", 2528 hw_state->dpll, 2529 hw_state->dpll_md, 2530 hw_state->fp0, 2531 hw_state->fp1); 2532 } 2533 } 2534