1 /* 2 * Copyright © 2006-2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include "intel_drv.h" 25 #include <asm/int-ll64.h> 26 27 struct intel_shared_dpll * 28 skl_find_link_pll(struct drm_i915_private *dev_priv, int clock) 29 { 30 struct intel_shared_dpll *pll = NULL; 31 struct intel_dpll_hw_state dpll_hw_state; 32 enum intel_dpll_id i; 33 bool found = false; 34 35 if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) 36 return pll; 37 38 for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) { 39 pll = &dev_priv->shared_dplls[i]; 40 41 /* Only want to check enabled timings first */ 42 if (pll->config.crtc_mask == 0) 43 continue; 44 45 if (memcmp(&dpll_hw_state, &pll->config.hw_state, 46 sizeof(pll->config.hw_state)) == 0) { 47 found = true; 48 break; 49 } 50 } 51 52 /* Ok no matching timings, maybe there's a free one? 
*/ 53 for (i = DPLL_ID_SKL_DPLL1; 54 ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) { 55 pll = &dev_priv->shared_dplls[i]; 56 if (pll->config.crtc_mask == 0) { 57 pll->config.hw_state = dpll_hw_state; 58 break; 59 } 60 } 61 62 return pll; 63 } 64 65 struct intel_shared_dpll * 66 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, 67 enum intel_dpll_id id) 68 { 69 return &dev_priv->shared_dplls[id]; 70 } 71 72 enum intel_dpll_id 73 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, 74 struct intel_shared_dpll *pll) 75 { 76 if (WARN_ON(pll < dev_priv->shared_dplls|| 77 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) 78 return -1; 79 80 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); 81 } 82 83 void 84 intel_shared_dpll_config_get(struct intel_shared_dpll_config *config, 85 struct intel_shared_dpll *pll, 86 struct intel_crtc *crtc) 87 { 88 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 89 enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); 90 91 config[id].crtc_mask |= 1 << crtc->pipe; 92 } 93 94 void 95 intel_shared_dpll_config_put(struct intel_shared_dpll_config *config, 96 struct intel_shared_dpll *pll, 97 struct intel_crtc *crtc) 98 { 99 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 100 enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); 101 102 config[id].crtc_mask &= ~(1 << crtc->pipe); 103 } 104 105 /* For ILK+ */ 106 void assert_shared_dpll(struct drm_i915_private *dev_priv, 107 struct intel_shared_dpll *pll, 108 bool state) 109 { 110 bool cur_state; 111 struct intel_dpll_hw_state hw_state; 112 113 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) 114 return; 115 116 cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state); 117 I915_STATE_WARN(cur_state != state, 118 "%s assertion failure (expected %s, current %s)\n", 119 pll->name, onoff(state), onoff(cur_state)); 120 } 121 122 void intel_prepare_shared_dpll(struct intel_crtc *crtc) 123 
{ 124 struct drm_device *dev = crtc->base.dev; 125 struct drm_i915_private *dev_priv = to_i915(dev); 126 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 127 128 if (WARN_ON(pll == NULL)) 129 return; 130 131 mutex_lock(&dev_priv->dpll_lock); 132 WARN_ON(!pll->config.crtc_mask); 133 if (!pll->active_mask) { 134 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 135 WARN_ON(pll->on); 136 assert_shared_dpll_disabled(dev_priv, pll); 137 138 pll->funcs.mode_set(dev_priv, pll); 139 } 140 mutex_unlock(&dev_priv->dpll_lock); 141 } 142 143 /** 144 * intel_enable_shared_dpll - enable PCH PLL 145 * @dev_priv: i915 private structure 146 * @pipe: pipe PLL to enable 147 * 148 * The PCH PLL needs to be enabled before the PCH transcoder, since it 149 * drives the transcoder clock. 150 */ 151 void intel_enable_shared_dpll(struct intel_crtc *crtc) 152 { 153 struct drm_device *dev = crtc->base.dev; 154 struct drm_i915_private *dev_priv = to_i915(dev); 155 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 156 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 157 unsigned old_mask; 158 159 if (WARN_ON(pll == NULL)) 160 return; 161 162 mutex_lock(&dev_priv->dpll_lock); 163 old_mask = pll->active_mask; 164 165 if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) || 166 WARN_ON(pll->active_mask & crtc_mask)) 167 goto out; 168 169 pll->active_mask |= crtc_mask; 170 171 DRM_DEBUG_KMS("enable %s (active %x, on? 
%d) for crtc %d\n", 172 pll->name, pll->active_mask, pll->on, 173 crtc->base.base.id); 174 175 if (old_mask) { 176 WARN_ON(!pll->on); 177 assert_shared_dpll_enabled(dev_priv, pll); 178 goto out; 179 } 180 WARN_ON(pll->on); 181 182 DRM_DEBUG_KMS("enabling %s\n", pll->name); 183 pll->funcs.enable(dev_priv, pll); 184 pll->on = true; 185 186 out: 187 mutex_unlock(&dev_priv->dpll_lock); 188 } 189 190 void intel_disable_shared_dpll(struct intel_crtc *crtc) 191 { 192 struct drm_device *dev = crtc->base.dev; 193 struct drm_i915_private *dev_priv = to_i915(dev); 194 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 195 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 196 197 /* PCH only available on ILK+ */ 198 if (INTEL_INFO(dev)->gen < 5) 199 return; 200 201 if (pll == NULL) 202 return; 203 204 mutex_lock(&dev_priv->dpll_lock); 205 if (WARN_ON(!(pll->active_mask & crtc_mask))) 206 goto out; 207 208 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n", 209 pll->name, pll->active_mask, pll->on, 210 crtc->base.base.id); 211 212 assert_shared_dpll_enabled(dev_priv, pll); 213 WARN_ON(!pll->on); 214 215 pll->active_mask &= ~crtc_mask; 216 if (pll->active_mask) 217 goto out; 218 219 DRM_DEBUG_KMS("disabling %s\n", pll->name); 220 pll->funcs.disable(dev_priv, pll); 221 pll->on = false; 222 223 out: 224 mutex_unlock(&dev_priv->dpll_lock); 225 } 226 227 static struct intel_shared_dpll * 228 intel_find_shared_dpll(struct intel_crtc *crtc, 229 struct intel_crtc_state *crtc_state, 230 enum intel_dpll_id range_min, 231 enum intel_dpll_id range_max) 232 { 233 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 234 struct intel_shared_dpll *pll; 235 struct intel_shared_dpll_config *shared_dpll; 236 enum intel_dpll_id i; 237 238 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 239 240 for (i = range_min; i <= range_max; i++) { 241 pll = &dev_priv->shared_dplls[i]; 242 243 /* Only want to check enabled timings first */ 244 if 
(shared_dpll[i].crtc_mask == 0) 245 continue; 246 247 if (memcmp(&crtc_state->dpll_hw_state, 248 &shared_dpll[i].hw_state, 249 sizeof(crtc_state->dpll_hw_state)) == 0) { 250 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", 251 crtc->base.base.id, crtc->base.name, pll->name, 252 shared_dpll[i].crtc_mask, 253 pll->active_mask); 254 return pll; 255 } 256 } 257 258 /* Ok no matching timings, maybe there's a free one? */ 259 for (i = range_min; i <= range_max; i++) { 260 pll = &dev_priv->shared_dplls[i]; 261 if (shared_dpll[i].crtc_mask == 0) { 262 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", 263 crtc->base.base.id, crtc->base.name, pll->name); 264 return pll; 265 } 266 } 267 268 return NULL; 269 } 270 271 static void 272 intel_reference_shared_dpll(struct intel_shared_dpll *pll, 273 struct intel_crtc_state *crtc_state) 274 { 275 struct intel_shared_dpll_config *shared_dpll; 276 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 277 enum intel_dpll_id i = pll->id; 278 279 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 280 281 if (shared_dpll[i].crtc_mask == 0) 282 shared_dpll[i].hw_state = 283 crtc_state->dpll_hw_state; 284 285 crtc_state->shared_dpll = pll; 286 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 287 pipe_name(crtc->pipe)); 288 289 intel_shared_dpll_config_get(shared_dpll, pll, crtc); 290 } 291 292 void intel_shared_dpll_commit(struct drm_atomic_state *state) 293 { 294 struct drm_i915_private *dev_priv = to_i915(state->dev); 295 struct intel_shared_dpll_config *shared_dpll; 296 struct intel_shared_dpll *pll; 297 enum intel_dpll_id i; 298 299 if (!to_intel_atomic_state(state)->dpll_set) 300 return; 301 302 shared_dpll = to_intel_atomic_state(state)->shared_dpll; 303 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 304 pll = &dev_priv->shared_dplls[i]; 305 pll->config = shared_dpll[i]; 306 } 307 } 308 309 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 310 
struct intel_shared_dpll *pll, 311 struct intel_dpll_hw_state *hw_state) 312 { 313 uint32_t val; 314 315 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 316 return false; 317 318 val = I915_READ(PCH_DPLL(pll->id)); 319 hw_state->dpll = val; 320 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 321 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 322 323 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 324 325 return val & DPLL_VCO_ENABLE; 326 } 327 328 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, 329 struct intel_shared_dpll *pll) 330 { 331 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); 332 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); 333 } 334 335 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 336 { 337 u32 val; 338 bool enabled; 339 340 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 341 342 val = I915_READ(PCH_DREF_CONTROL); 343 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 344 DREF_SUPERSPREAD_SOURCE_MASK)); 345 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 346 } 347 348 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 349 struct intel_shared_dpll *pll) 350 { 351 /* PCH refclock must be enabled first */ 352 ibx_assert_pch_refclk_enabled(dev_priv); 353 354 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 355 356 /* Wait for the clocks to stabilize. */ 357 POSTING_READ(PCH_DPLL(pll->id)); 358 udelay(150); 359 360 /* The pixel multiplier can only be updated once the 361 * DPLL is enabled and the clocks are stable. 362 * 363 * So write it again. 
364 */ 365 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 366 POSTING_READ(PCH_DPLL(pll->id)); 367 udelay(200); 368 } 369 370 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 371 struct intel_shared_dpll *pll) 372 { 373 struct drm_device *dev = &dev_priv->drm; 374 struct intel_crtc *crtc; 375 376 /* Make sure no transcoder isn't still depending on us. */ 377 for_each_intel_crtc(dev, crtc) { 378 if (crtc->config->shared_dpll == pll) 379 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 380 } 381 382 I915_WRITE(PCH_DPLL(pll->id), 0); 383 POSTING_READ(PCH_DPLL(pll->id)); 384 udelay(200); 385 } 386 387 static struct intel_shared_dpll * 388 ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 389 struct intel_encoder *encoder) 390 { 391 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 392 struct intel_shared_dpll *pll; 393 enum intel_dpll_id i; 394 395 if (HAS_PCH_IBX(dev_priv)) { 396 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
*/ 397 i = (enum intel_dpll_id) crtc->pipe; 398 pll = &dev_priv->shared_dplls[i]; 399 400 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", 401 crtc->base.base.id, crtc->base.name, pll->name); 402 } else { 403 pll = intel_find_shared_dpll(crtc, crtc_state, 404 DPLL_ID_PCH_PLL_A, 405 DPLL_ID_PCH_PLL_B); 406 } 407 408 if (!pll) 409 return NULL; 410 411 /* reference the pll */ 412 intel_reference_shared_dpll(pll, crtc_state); 413 414 return pll; 415 } 416 417 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { 418 .mode_set = ibx_pch_dpll_mode_set, 419 .enable = ibx_pch_dpll_enable, 420 .disable = ibx_pch_dpll_disable, 421 .get_hw_state = ibx_pch_dpll_get_hw_state, 422 }; 423 424 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, 425 struct intel_shared_dpll *pll) 426 { 427 I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); 428 POSTING_READ(WRPLL_CTL(pll->id)); 429 udelay(20); 430 } 431 432 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, 433 struct intel_shared_dpll *pll) 434 { 435 I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); 436 POSTING_READ(SPLL_CTL); 437 udelay(20); 438 } 439 440 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, 441 struct intel_shared_dpll *pll) 442 { 443 uint32_t val; 444 445 val = I915_READ(WRPLL_CTL(pll->id)); 446 I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); 447 POSTING_READ(WRPLL_CTL(pll->id)); 448 } 449 450 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, 451 struct intel_shared_dpll *pll) 452 { 453 uint32_t val; 454 455 val = I915_READ(SPLL_CTL); 456 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); 457 POSTING_READ(SPLL_CTL); 458 } 459 460 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, 461 struct intel_shared_dpll *pll, 462 struct intel_dpll_hw_state *hw_state) 463 { 464 uint32_t val; 465 466 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 467 return false; 468 469 val = 
I915_READ(WRPLL_CTL(pll->id)); 470 hw_state->wrpll = val; 471 472 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 473 474 return val & WRPLL_PLL_ENABLE; 475 } 476 477 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, 478 struct intel_shared_dpll *pll, 479 struct intel_dpll_hw_state *hw_state) 480 { 481 uint32_t val; 482 483 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 484 return false; 485 486 val = I915_READ(SPLL_CTL); 487 hw_state->spll = val; 488 489 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 490 491 return val & SPLL_PLL_ENABLE; 492 } 493 494 #define LC_FREQ 2700 495 #define LC_FREQ_2K U64_C(LC_FREQ * 2000) 496 497 #define P_MIN 2 498 #define P_MAX 64 499 #define P_INC 2 500 501 /* Constraints for PLL good behavior */ 502 #define REF_MIN 48 503 #define REF_MAX 400 504 #define VCO_MIN 2400 505 #define VCO_MAX 4800 506 507 struct hsw_wrpll_rnp { 508 unsigned p, n2, r2; 509 }; 510 511 static unsigned hsw_wrpll_get_budget_for_freq(int clock) 512 { 513 unsigned budget; 514 515 switch (clock) { 516 case 25175000: 517 case 25200000: 518 case 27000000: 519 case 27027000: 520 case 37762500: 521 case 37800000: 522 case 40500000: 523 case 40541000: 524 case 54000000: 525 case 54054000: 526 case 59341000: 527 case 59400000: 528 case 72000000: 529 case 74176000: 530 case 74250000: 531 case 81000000: 532 case 81081000: 533 case 89012000: 534 case 89100000: 535 case 108000000: 536 case 108108000: 537 case 111264000: 538 case 111375000: 539 case 148352000: 540 case 148500000: 541 case 162000000: 542 case 162162000: 543 case 222525000: 544 case 222750000: 545 case 296703000: 546 case 297000000: 547 budget = 0; 548 break; 549 case 233500000: 550 case 245250000: 551 case 247750000: 552 case 253250000: 553 case 298000000: 554 budget = 1500; 555 break; 556 case 169128000: 557 case 169500000: 558 case 179500000: 559 case 202000000: 560 budget = 2000; 561 break; 562 case 256250000: 563 case 262500000: 564 case 
270000000: 565 case 272500000: 566 case 273750000: 567 case 280750000: 568 case 281250000: 569 case 286000000: 570 case 291750000: 571 budget = 4000; 572 break; 573 case 267250000: 574 case 268500000: 575 budget = 5000; 576 break; 577 default: 578 budget = 1000; 579 break; 580 } 581 582 return budget; 583 } 584 585 static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, 586 unsigned r2, unsigned n2, unsigned p, 587 struct hsw_wrpll_rnp *best) 588 { 589 uint64_t a, b, c, d, diff, diff_best; 590 591 /* No best (r,n,p) yet */ 592 if (best->p == 0) { 593 best->p = p; 594 best->n2 = n2; 595 best->r2 = r2; 596 return; 597 } 598 599 /* 600 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to 601 * freq2k. 602 * 603 * delta = 1e6 * 604 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) / 605 * freq2k; 606 * 607 * and we would like delta <= budget. 608 * 609 * If the discrepancy is above the PPM-based budget, always prefer to 610 * improve upon the previous solution. However, if you're within the 611 * budget, try to maximize Ref * VCO, that is N / (P * R^2). 612 */ 613 a = freq2k * budget * p * r2; 614 b = freq2k * budget * best->p * best->r2; 615 diff = abs_diff((u64)freq2k * p * r2, LC_FREQ_2K * n2); 616 diff_best = abs_diff((u64)freq2k * best->p * best->r2, 617 LC_FREQ_2K * best->n2); 618 c = 1000000 * diff; 619 d = 1000000 * diff_best; 620 621 if (a < c && b < d) { 622 /* If both are above the budget, pick the closer */ 623 if (best->p * best->r2 * diff < p * r2 * diff_best) { 624 best->p = p; 625 best->n2 = n2; 626 best->r2 = r2; 627 } 628 } else if (a >= c && b < d) { 629 /* If A is below the threshold but B is above it? Update. 
*/ 630 best->p = p; 631 best->n2 = n2; 632 best->r2 = r2; 633 } else if (a >= c && b >= d) { 634 /* Both are below the limit, so pick the higher n2/(r2*r2) */ 635 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) { 636 best->p = p; 637 best->n2 = n2; 638 best->r2 = r2; 639 } 640 } 641 /* Otherwise a < c && b >= d, do nothing */ 642 } 643 644 static void 645 hsw_ddi_calculate_wrpll(int clock /* in Hz */, 646 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) 647 { 648 uint64_t freq2k; 649 unsigned p, n2, r2; 650 struct hsw_wrpll_rnp best = { 0, 0, 0 }; 651 unsigned budget; 652 653 freq2k = clock / 100; 654 655 budget = hsw_wrpll_get_budget_for_freq(clock); 656 657 /* Special case handling for 540 pixel clock: bypass WR PLL entirely 658 * and directly pass the LC PLL to it. */ 659 if (freq2k == 5400000) { 660 *n2_out = 2; 661 *p_out = 1; 662 *r2_out = 2; 663 return; 664 } 665 666 /* 667 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by 668 * the WR PLL. 669 * 670 * We want R so that REF_MIN <= Ref <= REF_MAX. 671 * Injecting R2 = 2 * R gives: 672 * REF_MAX * r2 > LC_FREQ * 2 and 673 * REF_MIN * r2 < LC_FREQ * 2 674 * 675 * Which means the desired boundaries for r2 are: 676 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN 677 * 678 */ 679 for (r2 = LC_FREQ * 2 / REF_MAX + 1; 680 r2 <= LC_FREQ * 2 / REF_MIN; 681 r2++) { 682 683 /* 684 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R 685 * 686 * Once again we want VCO_MIN <= VCO <= VCO_MAX. 
687 * Injecting R2 = 2 * R and N2 = 2 * N, we get: 688 * VCO_MAX * r2 > n2 * LC_FREQ and 689 * VCO_MIN * r2 < n2 * LC_FREQ) 690 * 691 * Which means the desired boundaries for n2 are: 692 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ 693 */ 694 for (n2 = VCO_MIN * r2 / LC_FREQ + 1; 695 n2 <= VCO_MAX * r2 / LC_FREQ; 696 n2++) { 697 698 for (p = P_MIN; p <= P_MAX; p += P_INC) 699 hsw_wrpll_update_rnp(freq2k, budget, 700 r2, n2, p, &best); 701 } 702 } 703 704 *n2_out = best.n2; 705 *p_out = best.p; 706 *r2_out = best.r2; 707 } 708 709 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, 710 struct intel_crtc *crtc, 711 struct intel_crtc_state *crtc_state) 712 { 713 struct intel_shared_dpll *pll; 714 uint32_t val; 715 unsigned int p, n2, r2; 716 717 hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); 718 719 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | 720 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | 721 WRPLL_DIVIDER_POST(p); 722 723 crtc_state->dpll_hw_state.wrpll = val; 724 725 pll = intel_find_shared_dpll(crtc, crtc_state, 726 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); 727 728 if (!pll) 729 return NULL; 730 731 return pll; 732 } 733 734 struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, 735 int clock) 736 { 737 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 738 struct intel_shared_dpll *pll; 739 enum intel_dpll_id pll_id; 740 741 switch (clock / 2) { 742 case 81000: 743 pll_id = DPLL_ID_LCPLL_810; 744 break; 745 case 135000: 746 pll_id = DPLL_ID_LCPLL_1350; 747 break; 748 case 270000: 749 pll_id = DPLL_ID_LCPLL_2700; 750 break; 751 default: 752 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock); 753 return NULL; 754 } 755 756 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id); 757 758 if (!pll) 759 return NULL; 760 761 return pll; 762 } 763 764 static struct intel_shared_dpll * 765 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, 766 struct intel_encoder *encoder) 767 { 
768 struct intel_shared_dpll *pll; 769 int clock = crtc_state->port_clock; 770 771 memset(&crtc_state->dpll_hw_state, 0, 772 sizeof(crtc_state->dpll_hw_state)); 773 774 if (encoder->type == INTEL_OUTPUT_HDMI) { 775 pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state); 776 777 } else if (encoder->type == INTEL_OUTPUT_DP || 778 encoder->type == INTEL_OUTPUT_DP_MST || 779 encoder->type == INTEL_OUTPUT_EDP) { 780 pll = hsw_ddi_dp_get_dpll(encoder, clock); 781 782 } else if (encoder->type == INTEL_OUTPUT_ANALOG) { 783 if (WARN_ON(crtc_state->port_clock / 2 != 135000)) 784 return NULL; 785 786 crtc_state->dpll_hw_state.spll = 787 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; 788 789 pll = intel_find_shared_dpll(crtc, crtc_state, 790 DPLL_ID_SPLL, DPLL_ID_SPLL); 791 } else { 792 return NULL; 793 } 794 795 if (!pll) 796 return NULL; 797 798 intel_reference_shared_dpll(pll, crtc_state); 799 800 return pll; 801 } 802 803 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { 804 .enable = hsw_ddi_wrpll_enable, 805 .disable = hsw_ddi_wrpll_disable, 806 .get_hw_state = hsw_ddi_wrpll_get_hw_state, 807 }; 808 809 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { 810 .enable = hsw_ddi_spll_enable, 811 .disable = hsw_ddi_spll_disable, 812 .get_hw_state = hsw_ddi_spll_get_hw_state, 813 }; 814 815 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, 816 struct intel_shared_dpll *pll) 817 { 818 } 819 820 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv, 821 struct intel_shared_dpll *pll) 822 { 823 } 824 825 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv, 826 struct intel_shared_dpll *pll, 827 struct intel_dpll_hw_state *hw_state) 828 { 829 return true; 830 } 831 832 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { 833 .enable = hsw_ddi_lcpll_enable, 834 .disable = hsw_ddi_lcpll_disable, 835 .get_hw_state = hsw_ddi_lcpll_get_hw_state, 836 }; 837 838 struct 
skl_dpll_regs { 839 i915_reg_t ctl, cfgcr1, cfgcr2; 840 }; 841 842 /* this array is indexed by the *shared* pll id */ 843 static const struct skl_dpll_regs skl_dpll_regs[4] = { 844 { 845 /* DPLL 0 */ 846 .ctl = LCPLL1_CTL, 847 /* DPLL 0 doesn't support HDMI mode */ 848 }, 849 { 850 /* DPLL 1 */ 851 .ctl = LCPLL2_CTL, 852 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), 853 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), 854 }, 855 { 856 /* DPLL 2 */ 857 .ctl = WRPLL_CTL(0), 858 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), 859 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), 860 }, 861 { 862 /* DPLL 3 */ 863 .ctl = WRPLL_CTL(1), 864 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), 865 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), 866 }, 867 }; 868 869 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, 870 struct intel_shared_dpll *pll) 871 { 872 uint32_t val; 873 874 val = I915_READ(DPLL_CTRL1); 875 876 val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | 877 DPLL_CTRL1_LINK_RATE_MASK(pll->id)); 878 val |= pll->config.hw_state.ctrl1 << (pll->id * 6); 879 880 I915_WRITE(DPLL_CTRL1, val); 881 POSTING_READ(DPLL_CTRL1); 882 } 883 884 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, 885 struct intel_shared_dpll *pll) 886 { 887 const struct skl_dpll_regs *regs = skl_dpll_regs; 888 889 skl_ddi_pll_write_ctrl1(dev_priv, pll); 890 891 I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1); 892 I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2); 893 POSTING_READ(regs[pll->id].cfgcr1); 894 POSTING_READ(regs[pll->id].cfgcr2); 895 896 /* the enable bit is always bit 31 */ 897 I915_WRITE(regs[pll->id].ctl, 898 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); 899 900 if (intel_wait_for_register(dev_priv, 901 DPLL_STATUS, 902 DPLL_LOCK(pll->id), 903 DPLL_LOCK(pll->id), 904 5)) 905 DRM_ERROR("DPLL %d not locked\n", pll->id); 906 } 907 908 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, 909 struct intel_shared_dpll *pll) 910 { 911 skl_ddi_pll_write_ctrl1(dev_priv, pll); 912 } 
913 914 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, 915 struct intel_shared_dpll *pll) 916 { 917 const struct skl_dpll_regs *regs = skl_dpll_regs; 918 919 /* the enable bit is always bit 31 */ 920 I915_WRITE(regs[pll->id].ctl, 921 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); 922 POSTING_READ(regs[pll->id].ctl); 923 } 924 925 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, 926 struct intel_shared_dpll *pll) 927 { 928 } 929 930 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 931 struct intel_shared_dpll *pll, 932 struct intel_dpll_hw_state *hw_state) 933 { 934 uint32_t val; 935 const struct skl_dpll_regs *regs = skl_dpll_regs; 936 bool ret; 937 938 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 939 return false; 940 941 ret = false; 942 943 val = I915_READ(regs[pll->id].ctl); 944 if (!(val & LCPLL_PLL_ENABLE)) 945 goto out; 946 947 val = I915_READ(DPLL_CTRL1); 948 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; 949 950 /* avoid reading back stale values if HDMI mode is not enabled */ 951 if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) { 952 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 953 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 954 } 955 ret = true; 956 957 out: 958 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 959 960 return ret; 961 } 962 963 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, 964 struct intel_shared_dpll *pll, 965 struct intel_dpll_hw_state *hw_state) 966 { 967 uint32_t val; 968 const struct skl_dpll_regs *regs = skl_dpll_regs; 969 bool ret; 970 971 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) 972 return false; 973 974 ret = false; 975 976 /* DPLL0 is always enabled since it drives CDCLK */ 977 val = I915_READ(regs[pll->id].ctl); 978 if (WARN_ON(!(val & LCPLL_PLL_ENABLE))) 979 goto out; 980 981 val = I915_READ(DPLL_CTRL1); 982 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; 983 984 
ret = true; 985 986 out: 987 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 988 989 return ret; 990 } 991 992 struct skl_wrpll_context { 993 uint64_t min_deviation; /* current minimal deviation */ 994 uint64_t central_freq; /* chosen central freq */ 995 uint64_t dco_freq; /* chosen dco freq */ 996 unsigned int p; /* chosen divider */ 997 }; 998 999 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) 1000 { 1001 memset(ctx, 0, sizeof(*ctx)); 1002 1003 ctx->min_deviation = U64_MAX; 1004 } 1005 1006 /* DCO freq must be within +1%/-6% of the DCO central freq */ 1007 #define SKL_DCO_MAX_PDEVIATION 100 1008 #define SKL_DCO_MAX_NDEVIATION 600 1009 1010 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, 1011 uint64_t central_freq, 1012 uint64_t dco_freq, 1013 unsigned int divider) 1014 { 1015 uint64_t deviation; 1016 1017 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), 1018 central_freq); 1019 1020 /* positive deviation */ 1021 if (dco_freq >= central_freq) { 1022 if (deviation < SKL_DCO_MAX_PDEVIATION && 1023 deviation < ctx->min_deviation) { 1024 ctx->min_deviation = deviation; 1025 ctx->central_freq = central_freq; 1026 ctx->dco_freq = dco_freq; 1027 ctx->p = divider; 1028 } 1029 /* negative deviation */ 1030 } else if (deviation < SKL_DCO_MAX_NDEVIATION && 1031 deviation < ctx->min_deviation) { 1032 ctx->min_deviation = deviation; 1033 ctx->central_freq = central_freq; 1034 ctx->dco_freq = dco_freq; 1035 ctx->p = divider; 1036 } 1037 } 1038 1039 static void skl_wrpll_get_multipliers(unsigned int p, 1040 unsigned int *p0 /* out */, 1041 unsigned int *p1 /* out */, 1042 unsigned int *p2 /* out */) 1043 { 1044 /* even dividers */ 1045 if (p % 2 == 0) { 1046 unsigned int half = p / 2; 1047 1048 if (half == 1 || half == 2 || half == 3 || half == 5) { 1049 *p0 = 2; 1050 *p1 = 1; 1051 *p2 = half; 1052 } else if (half % 2 == 0) { 1053 *p0 = 2; 1054 *p1 = half / 2; 1055 *p2 = 2; 1056 } else if (half % 3 == 0) { 1057 *p0 = 3; 
1058 *p1 = half / 3; 1059 *p2 = 2; 1060 } else if (half % 7 == 0) { 1061 *p0 = 7; 1062 *p1 = half / 7; 1063 *p2 = 2; 1064 } 1065 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */ 1066 *p0 = 3; 1067 *p1 = 1; 1068 *p2 = p / 3; 1069 } else if (p == 5 || p == 7) { 1070 *p0 = p; 1071 *p1 = 1; 1072 *p2 = 1; 1073 } else if (p == 15) { 1074 *p0 = 3; 1075 *p1 = 1; 1076 *p2 = 5; 1077 } else if (p == 21) { 1078 *p0 = 7; 1079 *p1 = 1; 1080 *p2 = 3; 1081 } else if (p == 35) { 1082 *p0 = 7; 1083 *p1 = 1; 1084 *p2 = 5; 1085 } 1086 } 1087 1088 struct skl_wrpll_params { 1089 uint32_t dco_fraction; 1090 uint32_t dco_integer; 1091 uint32_t qdiv_ratio; 1092 uint32_t qdiv_mode; 1093 uint32_t kdiv; 1094 uint32_t pdiv; 1095 uint32_t central_freq; 1096 }; 1097 1098 static void skl_wrpll_params_populate(struct skl_wrpll_params *params, 1099 uint64_t afe_clock, 1100 uint64_t central_freq, 1101 uint32_t p0, uint32_t p1, uint32_t p2) 1102 { 1103 uint64_t dco_freq; 1104 1105 switch (central_freq) { 1106 case 9600000000ULL: 1107 params->central_freq = 0; 1108 break; 1109 case 9000000000ULL: 1110 params->central_freq = 1; 1111 break; 1112 case 8400000000ULL: 1113 params->central_freq = 3; 1114 } 1115 1116 switch (p0) { 1117 case 1: 1118 params->pdiv = 0; 1119 break; 1120 case 2: 1121 params->pdiv = 1; 1122 break; 1123 case 3: 1124 params->pdiv = 2; 1125 break; 1126 case 7: 1127 params->pdiv = 4; 1128 break; 1129 default: 1130 WARN(1, "Incorrect PDiv\n"); 1131 } 1132 1133 switch (p2) { 1134 case 5: 1135 params->kdiv = 0; 1136 break; 1137 case 2: 1138 params->kdiv = 1; 1139 break; 1140 case 3: 1141 params->kdiv = 2; 1142 break; 1143 case 1: 1144 params->kdiv = 3; 1145 break; 1146 default: 1147 WARN(1, "Incorrect KDiv\n"); 1148 } 1149 1150 params->qdiv_ratio = p1; 1151 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1; 1152 1153 dco_freq = p0 * p1 * p2 * afe_clock; 1154 1155 /* 1156 * Intermediate values are in Hz. 
1157 * Divide by MHz to match bsepc 1158 */ 1159 params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); 1160 params->dco_fraction = 1161 div_u64((div_u64(dco_freq, 24) - 1162 params->dco_integer * MHz(1)) * 0x8000, MHz(1)); 1163 } 1164 1165 static bool 1166 skl_ddi_calculate_wrpll(int clock /* in Hz */, 1167 struct skl_wrpll_params *wrpll_params) 1168 { 1169 uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ 1170 uint64_t dco_central_freq[3] = {8400000000ULL, 1171 9000000000ULL, 1172 9600000000ULL}; 1173 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 1174 24, 28, 30, 32, 36, 40, 42, 44, 1175 48, 52, 54, 56, 60, 64, 66, 68, 1176 70, 72, 76, 78, 80, 84, 88, 90, 1177 92, 96, 98 }; 1178 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 }; 1179 static const struct { 1180 const int *list; 1181 int n_dividers; 1182 } dividers[] = { 1183 { even_dividers, ARRAY_SIZE(even_dividers) }, 1184 { odd_dividers, ARRAY_SIZE(odd_dividers) }, 1185 }; 1186 struct skl_wrpll_context ctx; 1187 unsigned int dco, d, i; 1188 unsigned int p0, p1, p2; 1189 1190 skl_wrpll_context_init(&ctx); 1191 1192 for (d = 0; d < ARRAY_SIZE(dividers); d++) { 1193 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { 1194 for (i = 0; i < dividers[d].n_dividers; i++) { 1195 unsigned int p = dividers[d].list[i]; 1196 uint64_t dco_freq = p * afe_clock; 1197 1198 skl_wrpll_try_divider(&ctx, 1199 dco_central_freq[dco], 1200 dco_freq, 1201 p); 1202 /* 1203 * Skip the remaining dividers if we're sure to 1204 * have found the definitive divider, we can't 1205 * improve a 0 deviation. 1206 */ 1207 if (ctx.min_deviation == 0) 1208 goto skip_remaining_dividers; 1209 } 1210 } 1211 1212 skip_remaining_dividers: 1213 /* 1214 * If a solution is found with an even divider, prefer 1215 * this one. 
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
				  p0, p1, p2);

	return true;
}

/*
 * Compute the SKL DPLL register values (ctrl1/cfgcr1/cfgcr2) for an HDMI
 * pixel clock and store them in crtc_state->dpll_hw_state.
 *
 * @clock: pixel clock in kHz (converted to Hz before the WRPLL search).
 *
 * Returns false if no valid WRPLL configuration exists for @clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      int clock)
{
	uint32_t ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	/* Zero the whole hw state so stale fields can't leak through. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}


/*
 * Fill in the ctrl1 value for a DP/eDP link clock.
 *
 * @clock: port clock in kHz; the switch is keyed on the half-rate value.
 *         An unrecognized rate leaves only the OVERRIDE bit set (the switch
 *         has no default) and still returns true.
 *
 * Always returns true; the bool return mirrors the HDMI path's signature.
 */
bool skl_ddi_dp_set_dpll_hw_state(int clock,
				  struct intel_dpll_hw_state *dpll_hw_state)
{
	uint32_t ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
	/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	dpll_hw_state->ctrl1 = ctrl1;
	return true;
}

/*
 * SKL implementation of the shared-DPLL get_dpll hook: compute the hw state
 * for the encoder type, then pick a shared DPLL. eDP is pinned to DPLL0
 * (also the CDCLK source); everything else searches DPLL1..DPLL3.
 *
 * Returns the chosen PLL with a reference taken, or NULL on failure or for
 * unsupported encoder types.
 */
static struct intel_shared_dpll *
skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;
	int clock = crtc_state->port_clock;
	bool bret;
	struct intel_dpll_hw_state dpll_hw_state;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	if (encoder->type == INTEL_OUTPUT_HDMI) {
		/* HDMI path writes crtc_state->dpll_hw_state itself. */
		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return NULL;
		}
	} else if (encoder->type == INTEL_OUTPUT_DP ||
		   encoder->type == INTEL_OUTPUT_DP_MST ||
		   encoder->type == INTEL_OUTPUT_EDP) {
		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return NULL;
		}
		crtc_state->dpll_hw_state = dpll_hw_state;
	} else {
		return NULL;
	}

	if (encoder->type == INTEL_OUTPUT_EDP)
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_SKL_DPLL0,
					     DPLL_ID_SKL_DPLL0);
	else
		pll = intel_find_shared_dpll(crtc, crtc_state,
					     DPLL_ID_SKL_DPLL1,
					     DPLL_ID_SKL_DPLL3);
	if (!pll)
		return NULL;

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

/* Hooks for the sharable SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};

/* Hooks for DPLL0, which is managed separately (it also drives CDCLK). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};

/*
 * Program and enable a Broxton port PLL from pll->config.hw_state.
 *
 * Each register is updated read-modify-write so that only the fields owned
 * by the corresponding hw_state word are touched; the statement order
 * follows the required hardware programming sequence, so do not reorder.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	uint32_t temp;
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */

	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);

	/* Write P1 & P2 */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->config.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);

	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(port, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->config.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(port, 0), temp);

	/* Write N */
	temp = I915_READ(BXT_PORT_PLL(port, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->config.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(port, 1), temp);

	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(port, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->config.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(port, 2), temp);

	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(port, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->config.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(port, 3), temp);

	/* Write coeff */
	temp = I915_READ(BXT_PORT_PLL(port, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->config.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(port, 6), temp);

	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(port, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->config.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(port, 8), temp);

	/* Lock threshold */
	temp = I915_READ(BXT_PORT_PLL(port, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->config.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(port, 9), temp);

	/* DCO amplitude */
	temp = I915_READ(BXT_PORT_PLL(port, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->config.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(port, 10), temp);

	/*
	 * Recalibrate with new settings. Note the second write reuses the
	 * RECALIBRATE value of temp (read before the first write) and then
	 * applies the stored ebb4 clock-enable bit on top of it.
	 */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->config.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);

	/* Enable PLL */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	/* Wait up to 200us for the PLL to report lock. */
	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->config.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
}

/* Disable a Broxton port PLL by clearing its enable bit. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t temp;

	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
}

/*
 * Read back a Broxton port PLL's state into @hw_state.
 *
 * Returns false when the PLLS power well is off or the PLL is disabled;
 * otherwise fills @hw_state (masked to the fields the driver owns) and
 * returns true. Takes/releases the POWER_DOMAIN_PLLS reference around
 * the register reads.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
	uint32_t val;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return ret;
}

/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock in kHz this entry applies to */
	uint32_t p1;
	uint32_t p2;
	uint32_t m2_int;	/* integer part of M2 (M2 >> 22) */
	uint32_t m2_frac;	/* 22-bit fractional part of M2 */
	bool m2_frac_en;	/* true iff m2_frac != 0 */
	uint32_t n;

	int vco;		/* derived VCO frequency, filled in later */
};

/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27, 0, 0, 1},
	{540000, 2, 1, 27, 0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};

/*
 * Compute PLL dividers for a Broxton HDMI pixel clock via the generic
 * bxt_find_best_dpll() search, splitting M2 into its integer (bits 22+)
 * and 22-bit fractional parts for the register layout.
 *
 * Returns false if no divider combination reaches @clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
			  struct intel_crtc_state *crtc_state, int clock,
			  struct bxt_clk_div *clk_div)
{
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 clock, pipe_name(intel_crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* Split M2 into the 10-bit-ish integer and 22-bit fractional fields. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}

/*
 * Look up the pre-calculated divider entry for a DP link @clock (kHz).
 * Falls back to the first table entry (162000) when the rate is not in
 * the table, then derives the VCO from the chosen dividers.
 */
static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
{
	int i;

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (bxt_dp_clk_val[i].clock == clock) {
			*clk_div = bxt_dp_clk_val[i];
			break;
		}
	}

	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}

/*
 * Translate @clk_div plus VCO-dependent loop-filter coefficients and a
 * clock-dependent lane stagger value into the Broxton PORT_PLL register
 * words of @dpll_hw_state.
 *
 * Returns false if the VCO falls outside the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(int clock,
				      struct bxt_clk_div *clk_div,
				      struct intel_dpll_hw_state *dpll_hw_state)
{
	int vco = clk_div->vco;
	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
	uint32_t lanestagger;

	/* Loop filter coefficients per VCO band. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales with the link clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}

/* Build the Broxton PLL hw state for a DP link clock (kHz). */
bool bxt_ddi_dp_set_dpll_hw_state(int clock,
				  struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_clk_div clk_div = {0};

	bxt_ddi_dp_pll_dividers(clock, &clk_div);

	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
}

/*
 * Build the Broxton PLL hw state for an HDMI pixel clock (kHz).
 *
 * NOTE(review): the return value of bxt_ddi_hdmi_pll_dividers() is ignored
 * here; on a divider-search failure clk_div stays zeroed and the error is
 * only caught later by the "Invalid VCO" check. Looks like a candidate for
 * propagating the failure directly — confirm against upstream history.
 */
static bool
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state, int clock,
			       struct intel_dpll_hw_state *dpll_hw_state)
{
	struct bxt_clk_div clk_div = { };

	bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);

	return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
}

/*
 * Broxton implementation of the shared-DPLL get_dpll hook. PLLs are not
 * shared here: each port has its own PLL (1:1 mapping), so the PLL is
 * selected by port number rather than searched for.
 *
 * Returns the port's PLL with a reference taken, or NULL if the hw state
 * could not be computed.
 */
static struct intel_shared_dpll *
bxt_get_dpll(struct intel_crtc *crtc,
	     struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
{
	struct intel_dpll_hw_state dpll_hw_state = { };
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_digital_port *intel_dig_port;
	struct intel_shared_dpll *pll;
	int i, clock = crtc_state->port_clock;

	if (encoder->type == INTEL_OUTPUT_HDMI &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
					    &dpll_hw_state))
		return NULL;

	if ((encoder->type == INTEL_OUTPUT_DP ||
	     encoder->type == INTEL_OUTPUT_EDP) &&
	    !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
		return NULL;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state = dpll_hw_state;

	/* MST encoders hang off a primary digital port; resolve it first. */
	if (encoder->type == INTEL_OUTPUT_DP_MST) {
		struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);

		intel_dig_port = intel_mst->primary;
	} else
		intel_dig_port = enc_to_dig_port(&encoder->base);

	/* 1:1 mapping between ports and PLLs */
	i = (enum intel_dpll_id) intel_dig_port->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, i);

	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->name);

	intel_reference_shared_dpll(pll, crtc_state);

	return pll;
}

static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};

/*
 * Sanity-check the BIOS-programmed LCPLL on pre-gen9 DDI platforms;
 * only reports problems, never reprograms the PLL.
 */
static void intel_ddi_pll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_GEN(dev_priv) < 9) {
		uint32_t val = I915_READ(LCPLL_CTL);

		/*
		 * The LCPLL register should be turned on by the BIOS. For now
		 * let's just check its state and print errors in case
		 * something is wrong. Don't even try to turn it on.
		 */

		if (val & LCPLL_CD_SOURCE_FCLK)
			DRM_ERROR("CDCLK source is not LCPLL\n");

		if (val & LCPLL_PLL_DISABLE)
			DRM_ERROR("LCPLL is disabled\n");
	}
}

/* Static description of one platform DPLL: name, id, vtable and flags. */
struct dpll_info {
	const char *name;
	const int id;
	const struct intel_shared_dpll_funcs *funcs;
	uint32_t flags;		/* e.g. INTEL_DPLL_ALWAYS_ON */
};

/* Per-platform DPLL manager: the PLL table plus the PLL-selection hook. */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;	/* terminated by id == -1 */

	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
					      struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);
};

static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
	{ "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
	{ NULL, -1, NULL, 0 },
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dpll = ibx_get_dpll,
};

static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
	{ "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
	{ "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
	{ "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dpll = hsw_get_dpll,
};

static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
	{ "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dpll = skl_get_dpll,
};

/* Broxton reuses the SKL DPLL ids for its per-port PLLs. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
	{ "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
	{ NULL, -1, NULL, },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dpll = bxt_get_dpll,
};

/*
 * Select the platform's DPLL manager and populate
 * dev_priv->shared_dplls[] from its table. Platforms without a manager
 * end up with num_shared_dpll == 0.
 */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr) {
		dev_priv->num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	for (i = 0; dpll_info[i].id >= 0; i++) {
		/* Tables are expected to be indexed by their own ids. */
		WARN_ON(i != dpll_info[i].id);

		dev_priv->shared_dplls[i].id = dpll_info[i].id;
		dev_priv->shared_dplls[i].name = dpll_info[i].name;
		dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
		dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
	}

	dev_priv->dpll_mgr = dpll_mgr;
	dev_priv->num_shared_dpll = i;
	/* DragonFly lock init — presumably replaces upstream mutex_init; verify. */
	lockinit(&dev_priv->dpll_lock, "dpll_lock", 0, LK_CANRECURSE);

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);

	/* FIXME: Move this to a more suitable place */
	if (HAS_DDI(dev_priv))
		intel_ddi_pll_init(dev);
}

/*
 * Dispatch PLL selection to the platform's get_dpll hook.
 *
 * Returns the selected PLL (referenced for this CRTC by the hook) or NULL
 * if no manager is registered or the hook fails.
 */
struct intel_shared_dpll *
intel_get_shared_dpll(struct intel_crtc *crtc,
		      struct intel_crtc_state *crtc_state,
		      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;

	if (WARN_ON(!dpll_mgr))
		return NULL;

	return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
}