1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <ddb/ddb.h> 28 #include <sys/limits.h> 29 30 #include <drm/drmP.h> 31 #include <drm/drm_edid.h> 32 #include "intel_drv.h" 33 #include <drm/i915_drm.h> 34 #include "i915_drv.h" 35 #include <drm/drm_dp_helper.h> 36 #include <drm/drm_crtc_helper.h> 37 38 #include <linux/err.h> 39 40 bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 41 static void intel_increase_pllclock(struct drm_crtc *crtc); 42 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 43 44 typedef struct { 45 /* given values */ 46 int n; 47 int m1, m2; 48 int p1, p2; 49 /* derived values */ 50 int dot; 51 int vco; 52 int m; 53 int p; 54 } intel_clock_t; 55 56 typedef struct { 57 int min, max; 58 } intel_range_t; 59 60 typedef struct { 61 int dot_limit; 62 int p2_slow, p2_fast; 63 } intel_p2_t; 64 65 #define INTEL_P2_NUM 2 66 typedef struct intel_limit intel_limit_t; 67 struct intel_limit { 68 intel_range_t dot, vco, n, m, m1, m2, p, p1; 69 intel_p2_t p2; 70 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 71 int, int, intel_clock_t *, intel_clock_t *); 72 }; 73 74 /* FDI */ 75 #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 76 77 int 78 intel_pch_rawclk(struct drm_device *dev) 79 { 80 struct drm_i915_private *dev_priv = dev->dev_private; 81 82 WARN_ON(!HAS_PCH_SPLIT(dev)); 83 84 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 85 } 86 87 static bool 88 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 89 int target, int refclk, intel_clock_t *match_clock, 90 intel_clock_t *best_clock); 91 static bool 92 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 93 int target, int refclk, intel_clock_t *match_clock, 94 intel_clock_t *best_clock); 95 96 static bool 97 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 98 int target, int refclk, intel_clock_t *match_clock, 99 intel_clock_t *best_clock); 100 static bool 101 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, 102 int target, int refclk, intel_clock_t *match_clock, 103 intel_clock_t *best_clock); 104 105 static bool 106 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, 107 int target, int refclk, intel_clock_t *match_clock, 108 intel_clock_t *best_clock); 109 110 static inline u32 /* units of 100MHz */ 111 intel_fdi_link_freq(struct drm_device *dev) 112 { 113 if (IS_GEN5(dev)) { 114 struct drm_i915_private *dev_priv = dev->dev_private; 115 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 116 } else 117 return 27; 118 } 119 120 static const intel_limit_t intel_limits_i8xx_dvo = { 121 .dot = { .min = 25000, .max = 350000 }, 122 .vco = { .min = 930000, .max = 1400000 }, 123 .n = { .min = 3, .max = 16 }, 124 .m = { .min = 96, .max = 140 }, 125 .m1 = { .min = 18, .max = 26 }, 126 .m2 = { .min = 6, .max = 16 }, 127 .p = { .min = 4, .max = 128 }, 128 .p1 = { .min = 2, .max = 33 }, 129 .p2 = { .dot_limit = 165000, 130 .p2_slow = 4, .p2_fast = 2 }, 131 .find_pll = intel_find_best_PLL, 132 }; 133 134 static const intel_limit_t intel_limits_i8xx_lvds = { 135 .dot = { .min = 25000, .max = 350000 }, 136 .vco = { .min = 930000, .max = 1400000 }, 137 .n = { .min = 3, .max = 16 }, 138 .m = { .min = 96, .max = 140 }, 139 .m1 = { .min = 18, .max = 26 }, 140 .m2 = { .min = 6, .max = 16 }, 141 .p = { .min = 4, .max = 128 }, 142 .p1 = { .min = 1, .max = 6 }, 143 .p2 = { .dot_limit = 165000, 144 .p2_slow = 14, .p2_fast = 7 }, 145 .find_pll = intel_find_best_PLL, 146 }; 147 148 static const intel_limit_t intel_limits_i9xx_sdvo = { 149 .dot = { .min = 20000, .max = 400000 }, 150 .vco = { .min = 1400000, .max = 2800000 }, 151 .n = { .min = 1, .max = 6 }, 152 .m = { .min = 70, .max = 120 }, 153 .m1 = { .min = 8, .max = 18 }, 154 .m2 = { .min = 3, .max = 7 }, 155 .p = { .min = 5, .max = 80 }, 156 .p1 = { .min = 1, .max = 8 }, 157 .p2 = { .dot_limit = 200000, 158 .p2_slow = 10, .p2_fast = 5 }, 159 .find_pll = intel_find_best_PLL, 160 }; 161 162 static const intel_limit_t intel_limits_i9xx_lvds = { 163 .dot = { .min = 20000, .max = 400000 }, 164 .vco = { .min = 1400000, .max = 2800000 }, 165 .n = { .min = 1, .max = 6 }, 166 .m = { .min = 70, .max = 120 }, 167 .m1 = { .min = 10, .max = 22 }, 168 .m2 = { .min = 5, .max = 9 }, 169 .p = { .min = 7, .max = 98 }, 170 .p1 = { .min = 1, .max = 8 }, 171 .p2 = { .dot_limit = 112000, 172 .p2_slow = 14, .p2_fast = 7 }, 173 .find_pll = intel_find_best_PLL, 174 }; 175 176 177 static const intel_limit_t intel_limits_g4x_sdvo = { 178 .dot = { .min = 25000, .max = 270000 }, 179 .vco = { .min = 1750000, .max = 3500000}, 180 .n = { .min = 1, .max = 4 }, 181 .m = { .min = 104, .max = 138 }, 182 .m1 = { .min = 17, .max = 23 }, 183 .m2 = { .min = 5, .max = 11 }, 184 .p = { .min = 10, .max = 30 }, 185 .p1 = { .min = 1, .max = 3}, 186 .p2 = { .dot_limit = 270000, 187 .p2_slow = 10, 188 .p2_fast = 10 189 }, 190 .find_pll = intel_g4x_find_best_PLL, 191 }; 192 193 static const intel_limit_t intel_limits_g4x_hdmi = { 194 .dot = { .min = 22000, .max = 400000 }, 195 .vco = { .min = 1750000, .max = 3500000}, 196 .n = { .min = 1, .max = 4 }, 197 .m = { .min = 104, .max = 138 }, 198 .m1 = { .min = 16, .max = 23 }, 199 .m2 = { .min = 5, .max = 11 }, 200 .p = { .min = 5, .max = 80 }, 201 .p1 = { .min = 1, .max = 8}, 202 .p2 = { .dot_limit = 165000, 203 .p2_slow = 10, .p2_fast = 5 }, 204 .find_pll = intel_g4x_find_best_PLL, 205 }; 206 207 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 208 .dot = { .min = 20000, .max = 115000 }, 209 .vco = { .min = 1750000, .max = 3500000 }, 210 .n = { .min = 1, .max = 3 }, 211 .m = { .min = 104, .max = 138 }, 212 .m1 = { .min = 17, .max = 23 }, 213 .m2 = { .min = 5, .max = 11 }, 214 .p = { .min = 28, .max = 112 }, 215 .p1 = { .min = 2, .max = 8 }, 216 .p2 = { .dot_limit = 0, 217 .p2_slow = 14, .p2_fast = 14 218 }, 219 .find_pll = intel_g4x_find_best_PLL, 220 }; 221 222 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 223 .dot = { .min = 80000, .max = 224000 }, 224 .vco = { .min = 1750000, .max = 3500000 }, 225 .n = { .min = 1, .max = 3 }, 226 .m = { .min = 104, .max = 138 }, 227 .m1 = { .min = 17, .max = 23 }, 228 .m2 = { .min = 5, .max = 11 }, 229 .p = { .min = 14, .max = 42 }, 230 .p1 = { .min = 2, .max = 6 }, 231 .p2 = { .dot_limit = 0, 232 .p2_slow = 7, .p2_fast = 7 233 }, 234 .find_pll = intel_g4x_find_best_PLL, 235 }; 236 237 static const intel_limit_t intel_limits_g4x_display_port = { 238 .dot = { .min = 161670, .max = 227000 }, 239 .vco = { .min = 1750000, .max = 3500000}, 240 .n = { .min = 1, .max = 2 }, 241 .m = { .min = 97, .max = 108 }, 242 .m1 = { .min = 0x10, .max = 0x12 }, 243 .m2 = { .min = 0x05, .max = 0x06 }, 244 .p = { .min = 10, .max = 20 }, 245 .p1 = { .min = 1, .max = 2}, 246 .p2 = { .dot_limit = 0, 247 .p2_slow = 10, .p2_fast = 10 }, 248 .find_pll = intel_find_pll_g4x_dp, 249 }; 250 251 static const intel_limit_t intel_limits_pineview_sdvo = { 252 .dot = { .min = 20000, .max = 400000}, 253 .vco = { .min = 1700000, .max = 3500000 }, 254 /* Pineview's Ncounter is a ring counter */ 255 .n = { .min = 3, .max = 6 }, 256 .m = { .min = 2, .max = 256 }, 257 /* Pineview only has one combined m divider, which we treat as m2. */ 258 .m1 = { .min = 0, .max = 0 }, 259 .m2 = { .min = 0, .max = 254 }, 260 .p = { .min = 5, .max = 80 }, 261 .p1 = { .min = 1, .max = 8 }, 262 .p2 = { .dot_limit = 200000, 263 .p2_slow = 10, .p2_fast = 5 }, 264 .find_pll = intel_find_best_PLL, 265 }; 266 267 static const intel_limit_t intel_limits_pineview_lvds = { 268 .dot = { .min = 20000, .max = 400000 }, 269 .vco = { .min = 1700000, .max = 3500000 }, 270 .n = { .min = 3, .max = 6 }, 271 .m = { .min = 2, .max = 256 }, 272 .m1 = { .min = 0, .max = 0 }, 273 .m2 = { .min = 0, .max = 254 }, 274 .p = { .min = 7, .max = 112 }, 275 .p1 = { .min = 1, .max = 8 }, 276 .p2 = { .dot_limit = 112000, 277 .p2_slow = 14, .p2_fast = 14 }, 278 .find_pll = intel_find_best_PLL, 279 }; 280 281 /* Ironlake / Sandybridge 282 * 283 * We calculate clock using (register_value + 2) for N/M1/M2, so here 284 * the range value for them is (actual_value - 2). 285 */ 286 static const intel_limit_t intel_limits_ironlake_dac = { 287 .dot = { .min = 25000, .max = 350000 }, 288 .vco = { .min = 1760000, .max = 3510000 }, 289 .n = { .min = 1, .max = 5 }, 290 .m = { .min = 79, .max = 127 }, 291 .m1 = { .min = 12, .max = 22 }, 292 .m2 = { .min = 5, .max = 9 }, 293 .p = { .min = 5, .max = 80 }, 294 .p1 = { .min = 1, .max = 8 }, 295 .p2 = { .dot_limit = 225000, 296 .p2_slow = 10, .p2_fast = 5 }, 297 .find_pll = intel_g4x_find_best_PLL, 298 }; 299 300 static const intel_limit_t intel_limits_ironlake_single_lvds = { 301 .dot = { .min = 25000, .max = 350000 }, 302 .vco = { .min = 1760000, .max = 3510000 }, 303 .n = { .min = 1, .max = 3 }, 304 .m = { .min = 79, .max = 118 }, 305 .m1 = { .min = 12, .max = 22 }, 306 .m2 = { .min = 5, .max = 9 }, 307 .p = { .min = 28, .max = 112 }, 308 .p1 = { .min = 2, .max = 8 }, 309 .p2 = { .dot_limit = 225000, 310 .p2_slow = 14, .p2_fast = 14 }, 311 .find_pll = intel_g4x_find_best_PLL, 312 }; 313 314 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 315 .dot = { .min = 25000, .max = 350000 }, 316 .vco = { .min = 1760000, .max = 3510000 }, 317 .n = { .min = 1, .max = 3 }, 318 .m = { .min = 79, .max = 127 }, 319 .m1 = { .min = 12, .max = 22 }, 320 .m2 = { .min = 5, .max = 9 }, 321 .p = { .min = 14, .max = 56 }, 322 .p1 = { .min = 2, .max = 8 }, 323 .p2 = { .dot_limit = 225000, 324 .p2_slow = 7, .p2_fast = 7 }, 325 .find_pll = intel_g4x_find_best_PLL, 326 }; 327 328 /* LVDS 100mhz refclk limits. */ 329 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 330 .dot = { .min = 25000, .max = 350000 }, 331 .vco = { .min = 1760000, .max = 3510000 }, 332 .n = { .min = 1, .max = 2 }, 333 .m = { .min = 79, .max = 126 }, 334 .m1 = { .min = 12, .max = 22 }, 335 .m2 = { .min = 5, .max = 9 }, 336 .p = { .min = 28, .max = 112 }, 337 .p1 = { .min = 2, .max = 8 }, 338 .p2 = { .dot_limit = 225000, 339 .p2_slow = 14, .p2_fast = 14 }, 340 .find_pll = intel_g4x_find_best_PLL, 341 }; 342 343 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 344 .dot = { .min = 25000, .max = 350000 }, 345 .vco = { .min = 1760000, .max = 3510000 }, 346 .n = { .min = 1, .max = 3 }, 347 .m = { .min = 79, .max = 126 }, 348 .m1 = { .min = 12, .max = 22 }, 349 .m2 = { .min = 5, .max = 9 }, 350 .p = { .min = 14, .max = 42 }, 351 .p1 = { .min = 2, .max = 6 }, 352 .p2 = { .dot_limit = 225000, 353 .p2_slow = 7, .p2_fast = 7 }, 354 .find_pll = intel_g4x_find_best_PLL, 355 }; 356 357 static const intel_limit_t intel_limits_ironlake_display_port = { 358 .dot = { .min = 25000, .max = 350000 }, 359 .vco = { .min = 1760000, .max = 3510000}, 360 .n = { .min = 1, .max = 2 }, 361 .m = { .min = 81, .max = 90 }, 362 .m1 = { .min = 12, .max = 22 }, 363 .m2 = { .min = 5, .max = 9 }, 364 .p = { .min = 10, .max = 20 }, 365 .p1 = { .min = 1, .max = 2}, 366 .p2 = { .dot_limit = 0, 367 .p2_slow = 10, .p2_fast = 10 }, 368 .find_pll = intel_find_pll_ironlake_dp, 369 }; 370 371 static const intel_limit_t intel_limits_vlv_dac = { 372 .dot = { .min = 25000, .max = 270000 }, 373 .vco = { .min = 4000000, .max = 6000000 }, 374 .n = { .min = 1, .max = 7 }, 375 .m = { .min = 22, .max = 450 }, /* guess */ 376 .m1 = { .min = 2, .max = 3 }, 377 .m2 = { .min = 11, .max = 156 }, 378 .p = { .min = 10, .max = 30 }, 379 .p1 = { .min = 2, .max = 3 }, 380 .p2 = { .dot_limit = 270000, 381 .p2_slow = 2, .p2_fast = 20 }, 382 .find_pll = intel_vlv_find_best_pll, 383 }; 384 385 static const intel_limit_t intel_limits_vlv_hdmi = { 386 .dot = { .min = 20000, .max = 165000 }, 387 .vco = { .min = 4000000, .max = 5994000}, 388 .n = { .min = 1, .max = 7 }, 389 .m = { .min = 60, .max = 300 }, /* guess */ 390 .m1 = { .min = 2, .max = 3 }, 391 .m2 = { .min = 11, .max = 156 }, 392 .p = { .min = 10, .max = 30 }, 393 .p1 = { .min = 2, .max = 3 }, 394 .p2 = { .dot_limit = 270000, 395 .p2_slow = 2, .p2_fast = 20 }, 396 .find_pll = intel_vlv_find_best_pll, 397 }; 398 399 static const intel_limit_t intel_limits_vlv_dp = { 400 .dot = { .min = 25000, .max = 270000 }, 401 .vco = { .min = 4000000, .max = 6000000 }, 402 .n = { .min = 1, .max = 7 }, 403 .m = { .min = 22, .max = 450 }, 404 .m1 = { .min = 2, .max = 3 }, 405 .m2 = { .min = 11, .max = 156 }, 406 .p = { .min = 10, .max = 30 }, 407 .p1 = { .min = 2, .max = 3 }, 408 .p2 = { .dot_limit = 270000, 409 .p2_slow = 2, .p2_fast = 20 }, 410 .find_pll = intel_vlv_find_best_pll, 411 }; 412 413 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) 414 { 415 u32 val = 0; 416 417 spin_lock(&dev_priv->dpio_lock); 418 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 419 DRM_ERROR("DPIO idle wait timed out\n"); 420 goto out_unlock; 421 } 422 423 I915_WRITE(DPIO_REG, reg); 424 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | 425 DPIO_BYTE); 426 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 427 DRM_ERROR("DPIO read wait timed out\n"); 428 goto out_unlock; 429 } 430 val = I915_READ(DPIO_DATA); 431 432 out_unlock: 433 spin_unlock(&dev_priv->dpio_lock); 434 return val; 435 } 436 437 static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, 438 u32 val) 439 { 440 spin_lock(&dev_priv->dpio_lock); 441 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 442 DRM_ERROR("DPIO idle wait timed out\n"); 443 goto out_unlock; 444 } 445 446 I915_WRITE(DPIO_DATA, val); 447 I915_WRITE(DPIO_REG, reg); 448 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | 449 DPIO_BYTE); 450 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) 451 DRM_ERROR("DPIO write wait timed out\n"); 452 453 out_unlock: 454 spin_unlock(&dev_priv->dpio_lock); 455 } 456 457 static void vlv_init_dpio(struct drm_device *dev) 458 { 459 struct drm_i915_private *dev_priv = dev->dev_private; 460 461 /* Reset the DPIO config */ 462 I915_WRITE(DPIO_CTL, 0); 463 POSTING_READ(DPIO_CTL); 464 I915_WRITE(DPIO_CTL, 1); 465 POSTING_READ(DPIO_CTL); 466 } 467 468 static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) 469 { 470 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); 471 return 1; 472 } 473 474 static const struct dmi_system_id intel_dual_link_lvds[] = { 475 { 476 .callback = intel_dual_link_lvds_callback, 477 .ident = "Apple MacBook Pro (Core i5/i7 Series)", 478 .matches = { 479 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 480 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), 481 }, 482 }, 483 { } /* terminating entry */ 484 }; 485 486 static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, 487 unsigned int reg) 488 { 489 unsigned int val; 490 491 /* use the module option value if specified */ 492 if (i915_lvds_channel_mode > 0) 493 return i915_lvds_channel_mode == 2; 494 495 if (dmi_check_system(intel_dual_link_lvds)) 496 return true; 497 498 if (dev_priv->lvds_val) 499 val = dev_priv->lvds_val; 500 else { 501 /* BIOS should set the proper LVDS register value at boot, but 502 * in reality, it doesn't set the value when the lid is closed; 503 * we need to check "the value to be set" in VBT when LVDS 504 * register is uninitialized. 505 */ 506 val = I915_READ(reg); 507 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) 508 val = dev_priv->bios_lvds_val; 509 dev_priv->lvds_val = val; 510 } 511 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; 512 } 513 514 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 515 int refclk) 516 { 517 struct drm_device *dev = crtc->dev; 518 struct drm_i915_private *dev_priv = dev->dev_private; 519 const intel_limit_t *limit; 520 521 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 522 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { 523 /* LVDS dual channel */ 524 if (refclk == 100000) 525 limit = &intel_limits_ironlake_dual_lvds_100m; 526 else 527 limit = &intel_limits_ironlake_dual_lvds; 528 } else { 529 if (refclk == 100000) 530 limit = &intel_limits_ironlake_single_lvds_100m; 531 else 532 limit = &intel_limits_ironlake_single_lvds; 533 } 534 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 535 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 536 limit = &intel_limits_ironlake_display_port; 537 else 538 limit = &intel_limits_ironlake_dac; 539 540 return limit; 541 } 542 543 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 544 { 545 struct drm_device *dev = crtc->dev; 546 struct drm_i915_private *dev_priv = dev->dev_private; 547 const intel_limit_t *limit; 548 549 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 550 if (is_dual_link_lvds(dev_priv, LVDS)) 551 /* LVDS with dual channel */ 552 limit = &intel_limits_g4x_dual_channel_lvds; 553 else 554 /* LVDS with dual channel */ 555 limit = &intel_limits_g4x_single_channel_lvds; 556 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 557 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 558 limit = &intel_limits_g4x_hdmi; 559 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 560 limit = &intel_limits_g4x_sdvo; 561 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 562 limit = &intel_limits_g4x_display_port; 563 } else /* The option is for other outputs */ 564 limit = &intel_limits_i9xx_sdvo; 565 566 return limit; 567 } 568 569 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 570 { 571 struct drm_device *dev = crtc->dev; 572 const intel_limit_t *limit; 573 574 if (HAS_PCH_SPLIT(dev)) 575 limit = intel_ironlake_limit(crtc, refclk); 576 else if (IS_G4X(dev)) { 577 limit = intel_g4x_limit(crtc); 578 } else if (IS_PINEVIEW(dev)) { 579 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 580 limit = &intel_limits_pineview_lvds; 581 else 582 limit = &intel_limits_pineview_sdvo; 583 } else if (IS_VALLEYVIEW(dev)) { 584 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) 585 limit = &intel_limits_vlv_dac; 586 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 587 limit = &intel_limits_vlv_hdmi; 588 else 589 limit = &intel_limits_vlv_dp; 590 } else if (!IS_GEN2(dev)) { 591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 592 limit = &intel_limits_i9xx_lvds; 593 else 594 limit = &intel_limits_i9xx_sdvo; 595 } else { 596 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 597 limit = &intel_limits_i8xx_lvds; 598 else 599 limit = &intel_limits_i8xx_dvo; 600 } 601 return limit; 602 } 603 604 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 605 static void pineview_clock(int refclk, intel_clock_t *clock) 606 { 607 clock->m = clock->m2 + 2; 608 clock->p = clock->p1 * clock->p2; 609 clock->vco = refclk * clock->m / clock->n; 610 clock->dot = clock->vco / clock->p; 611 } 612 613 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 614 { 615 if (IS_PINEVIEW(dev)) { 616 pineview_clock(refclk, clock); 617 return; 618 } 619 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 620 clock->p = clock->p1 * clock->p2; 621 clock->vco = refclk * clock->m / (clock->n + 2); 622 clock->dot = clock->vco / clock->p; 623 } 624 625 /** 626 * Returns whether any output on the specified pipe is of the specified type 627 */ 628 bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 629 { 630 struct drm_device *dev = crtc->dev; 631 struct intel_encoder *encoder; 632 633 for_each_encoder_on_crtc(dev, crtc, encoder) 634 if (encoder->type == type) 635 return true; 636 637 return false; 638 } 639 640 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 641 /** 642 * Returns whether the given set of divisors are valid for a given refclk with 643 * the given connectors. 644 */ 645 646 static bool intel_PLL_is_valid(struct drm_device *dev, 647 const intel_limit_t *limit, 648 const intel_clock_t *clock) 649 { 650 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 651 INTELPllInvalid("p1 out of range\n"); 652 if (clock->p < limit->p.min || limit->p.max < clock->p) 653 INTELPllInvalid("p out of range\n"); 654 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 655 INTELPllInvalid("m2 out of range\n"); 656 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 657 INTELPllInvalid("m1 out of range\n"); 658 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 659 INTELPllInvalid("m1 <= m2\n"); 660 if (clock->m < limit->m.min || limit->m.max < clock->m) 661 INTELPllInvalid("m out of range\n"); 662 if (clock->n < limit->n.min || limit->n.max < clock->n) 663 INTELPllInvalid("n out of range\n"); 664 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 665 INTELPllInvalid("vco out of range\n"); 666 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 667 * connector, etc., rather than just a single range. 668 */ 669 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 670 INTELPllInvalid("dot out of range\n"); 671 672 return true; 673 } 674 675 static bool 676 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 677 int target, int refclk, intel_clock_t *match_clock, 678 intel_clock_t *best_clock) 679 680 { 681 struct drm_device *dev = crtc->dev; 682 struct drm_i915_private *dev_priv = dev->dev_private; 683 intel_clock_t clock; 684 int err = target; 685 686 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 687 (I915_READ(LVDS)) != 0) { 688 /* 689 * For LVDS, if the panel is on, just rely on its current 690 * settings for dual-channel. We haven't figured out how to 691 * reliably set up different single/dual channel state, if we 692 * even can. 693 */ 694 if (is_dual_link_lvds(dev_priv, LVDS)) 695 clock.p2 = limit->p2.p2_fast; 696 else 697 clock.p2 = limit->p2.p2_slow; 698 } else { 699 if (target < limit->p2.dot_limit) 700 clock.p2 = limit->p2.p2_slow; 701 else 702 clock.p2 = limit->p2.p2_fast; 703 } 704 705 memset(best_clock, 0, sizeof(*best_clock)); 706 707 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 708 clock.m1++) { 709 for (clock.m2 = limit->m2.min; 710 clock.m2 <= limit->m2.max; clock.m2++) { 711 /* m1 is always 0 in Pineview */ 712 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) 713 break; 714 for (clock.n = limit->n.min; 715 clock.n <= limit->n.max; clock.n++) { 716 for (clock.p1 = limit->p1.min; 717 clock.p1 <= limit->p1.max; clock.p1++) { 718 int this_err; 719 720 intel_clock(dev, refclk, &clock); 721 if (!intel_PLL_is_valid(dev, limit, 722 &clock)) 723 continue; 724 if (match_clock && 725 clock.p != match_clock->p) 726 continue; 727 728 this_err = abs(clock.dot - target); 729 if (this_err < err) { 730 *best_clock = clock; 731 err = this_err; 732 } 733 } 734 } 735 } 736 } 737 738 return (err != target); 739 } 740 741 static bool 742 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 743 int target, int refclk, intel_clock_t *match_clock, 744 intel_clock_t *best_clock) 745 { 746 struct drm_device *dev = crtc->dev; 747 struct drm_i915_private *dev_priv = dev->dev_private; 748 intel_clock_t clock; 749 int max_n; 750 bool found; 751 /* approximately equals target * 0.00585 */ 752 int err_most = (target >> 8) + (target >> 9); 753 found = false; 754 755 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 756 int lvds_reg; 757 758 if (HAS_PCH_SPLIT(dev)) 759 lvds_reg = PCH_LVDS; 760 else 761 lvds_reg = LVDS; 762 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 763 LVDS_CLKB_POWER_UP) 764 clock.p2 = limit->p2.p2_fast; 765 else 766 clock.p2 = limit->p2.p2_slow; 767 } else { 768 if (target < limit->p2.dot_limit) 769 clock.p2 = limit->p2.p2_slow; 770 else 771 clock.p2 = limit->p2.p2_fast; 772 } 773 774 memset(best_clock, 0, sizeof(*best_clock)); 775 max_n = limit->n.max; 776 /* based on hardware requirement, prefer smaller n to precision */ 777 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 778 /* based on hardware requirement, prefere larger m1,m2 */ 779 for (clock.m1 = limit->m1.max; 780 clock.m1 >= limit->m1.min; clock.m1--) { 781 for (clock.m2 = limit->m2.max; 782 clock.m2 >= limit->m2.min; clock.m2--) { 783 for (clock.p1 = limit->p1.max; 784 clock.p1 >= limit->p1.min; clock.p1--) { 785 int this_err; 786 787 intel_clock(dev, refclk, &clock); 788 if (!intel_PLL_is_valid(dev, limit, 789 &clock)) 790 continue; 791 if (match_clock && 792 clock.p != match_clock->p) 793 continue; 794 795 this_err = abs(clock.dot - target); 796 if (this_err < err_most) { 797 *best_clock = clock; 798 err_most = this_err; 799 max_n = clock.n; 800 found = true; 801 } 802 } 803 } 804 } 805 } 806 return found; 807 } 808 809 static bool 810 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 811 int target, int refclk, intel_clock_t *match_clock, 812 intel_clock_t *best_clock) 813 { 814 struct drm_device *dev = crtc->dev; 815 intel_clock_t clock; 816 817 if (target < 200000) { 818 clock.n = 1; 819 clock.p1 = 2; 820 clock.p2 = 10; 821 clock.m1 = 12; 822 clock.m2 = 9; 823 } else { 824 clock.n = 2; 825 clock.p1 = 1; 826 clock.p2 = 10; 827 clock.m1 = 14; 828 clock.m2 = 8; 829 } 830 intel_clock(dev, refclk, &clock); 831 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 832 return true; 833 } 834 835 /* DisplayPort has only two frequencies, 162MHz and 270MHz */ 836 static bool 837 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 838 int target, int refclk, intel_clock_t *match_clock, 839 intel_clock_t *best_clock) 840 { 841 intel_clock_t clock; 842 if (target < 200000) { 843 clock.p1 = 2; 844 clock.p2 = 10; 845 clock.n = 2; 846 clock.m1 = 23; 847 clock.m2 = 8; 848 } else { 849 clock.p1 = 1; 850 clock.p2 = 10; 851 clock.n = 1; 852 clock.m1 = 14; 853 clock.m2 = 2; 854 } 855 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 856 clock.p = (clock.p1 * clock.p2); 857 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 858 clock.vco = 0; 859 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 860 return true; 861 } 862 863 static bool 864 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, 865 int target, int refclk, intel_clock_t *match_clock, 866 intel_clock_t *best_clock) 867 { 868 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; 869 u32 m, n, fastclk; 870 u32 updrate, minupdate, fracbits, p; 871 unsigned long bestppm, ppm, absppm; 872 int dotclk, flag; 873 874 flag = 0; 875 dotclk = target * 1000; 876 bestppm = 1000000; 877 ppm = absppm = 0; 878 fastclk = dotclk / (2*100); 879 updrate = 0; 880 minupdate = 19200; 881 fracbits = 1; 882 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; 883 bestm1 = bestm2 = bestp1 = bestp2 = 0; 884 885 /* based on hardware requirement, prefer smaller n to precision */ 886 for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { 887 updrate = refclk / n; 888 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { 889 for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { 890 if (p2 > 10) 891 p2 = p2 - 1; 892 p = p1 * p2; 893 /* based on hardware requirement, prefer bigger m1,m2 values */ 894 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { 895 m2 = (((2*(fastclk * p * n / m1 )) + 896 refclk) / (2*refclk)); 897 m = m1 * m2; 898 vco = updrate * m; 899 if (vco >= limit->vco.min && vco < limit->vco.max) { 900 ppm = 1000000 * ((vco / p) - fastclk) / fastclk; 901 absppm = (ppm > 0) ? ppm : (-ppm); 902 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { 903 bestppm = 0; 904 flag = 1; 905 } 906 if (absppm < bestppm - 10) { 907 bestppm = absppm; 908 flag = 1; 909 } 910 if (flag) { 911 bestn = n; 912 bestm1 = m1; 913 bestm2 = m2; 914 bestp1 = p1; 915 bestp2 = p2; 916 flag = 0; 917 } 918 } 919 } 920 } 921 } 922 } 923 best_clock->n = bestn; 924 best_clock->m1 = bestm1; 925 best_clock->m2 = bestm2; 926 best_clock->p1 = bestp1; 927 best_clock->p2 = bestp2; 928 929 return true; 930 } 931 932 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 933 enum i915_pipe pipe) 934 { 935 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 936 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 937 938 return intel_crtc->cpu_transcoder; 939 } 940 941 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) 942 { 943 struct drm_i915_private *dev_priv = dev->dev_private; 944 u32 frame, frame_reg = PIPEFRAME(pipe); 945 946 frame = I915_READ(frame_reg); 947 948 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) 949 DRM_DEBUG_KMS("vblank wait timed out\n"); 950 } 951 952 /** 953 * intel_wait_for_vblank - wait for vblank on a given pipe 954 * @dev: drm device 955 * @pipe: pipe to wait for 956 * 957 * Wait for vblank to occur on a given pipe. Needed for various bits of 958 * mode setting code. 959 */ 960 void intel_wait_for_vblank(struct drm_device *dev, int pipe) 961 { 962 struct drm_i915_private *dev_priv = dev->dev_private; 963 int pipestat_reg = PIPESTAT(pipe); 964 965 if (INTEL_INFO(dev)->gen >= 5) { 966 ironlake_wait_for_vblank(dev, pipe); 967 return; 968 } 969 970 /* Clear existing vblank status. Note this will clear any other 971 * sticky status fields as well. 972 * 973 * This races with i915_driver_irq_handler() with the result 974 * that either function could miss a vblank event. Here it is not 975 * fatal, as we will either wait upon the next vblank interrupt or 976 * timeout. Generally speaking intel_wait_for_vblank() is only 977 * called during modeset at which time the GPU should be idle and 978 * should *not* be performing page flips and thus not waiting on 979 * vblanks... 980 * Currently, the result of us stealing a vblank from the irq 981 * handler is that a single frame will be skipped during swapbuffers. 982 */ 983 I915_WRITE(pipestat_reg, 984 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); 985 986 /* Wait for vblank interrupt bit to set */ 987 if (wait_for(I915_READ(pipestat_reg) & 988 PIPE_VBLANK_INTERRUPT_STATUS, 989 50)) 990 DRM_DEBUG_KMS("vblank wait timed out\n"); 991 } 992 993 /* 994 * intel_wait_for_pipe_off - wait for pipe to turn off 995 * @dev: drm device 996 * @pipe: pipe to wait for 997 * 998 * After disabling a pipe, we can't wait for vblank in the usual way, 999 * spinning on the vblank interrupt status bit, since we won't actually 1000 * see an interrupt when the pipe is disabled. 1001 * 1002 * On Gen4 and above: 1003 * wait for the pipe register state bit to turn off 1004 * 1005 * Otherwise: 1006 * wait for the display line value to settle (it usually 1007 * ends up stopping at the start of the next frame). 1008 * 1009 */ 1010 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1011 { 1012 struct drm_i915_private *dev_priv = dev->dev_private; 1013 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1014 pipe); 1015 1016 if (INTEL_INFO(dev)->gen >= 4) { 1017 int reg = PIPECONF(cpu_transcoder); 1018 1019 /* Wait for the Pipe State to go off */ 1020 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1021 100)) 1022 WARN(1, "pipe_off wait timed out\n"); 1023 } else { 1024 u32 last_line, line_mask; 1025 int reg = PIPEDSL(pipe); 1026 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1027 1028 if (IS_GEN2(dev)) 1029 line_mask = DSL_LINEMASK_GEN2; 1030 else 1031 line_mask = DSL_LINEMASK_GEN3; 1032 1033 /* Wait for the display line to settle */ 1034 do { 1035 last_line = I915_READ(reg) & line_mask; 1036 mdelay(5); 1037 } while (((I915_READ(reg) & line_mask) != last_line) && 1038 time_after(timeout, jiffies)); 1039 if (time_after(jiffies, timeout)) 1040 WARN(1, "pipe_off wait timed out\n"); 1041 } 1042 } 1043 1044 static const char *state_string(bool enabled) 1045 { 1046 return enabled ? "on" : "off"; 1047 } 1048 1049 /* Only for pre-ILK configs */ 1050 static void assert_pll(struct drm_i915_private *dev_priv, 1051 enum i915_pipe pipe, bool state) 1052 { 1053 int reg; 1054 u32 val; 1055 bool cur_state; 1056 1057 reg = DPLL(pipe); 1058 val = I915_READ(reg); 1059 cur_state = !!(val & DPLL_VCO_ENABLE); 1060 WARN(cur_state != state, 1061 "PLL state assertion failure (expected %s, current %s)\n", 1062 state_string(state), state_string(cur_state)); 1063 } 1064 #define assert_pll_enabled(d, p) assert_pll(d, p, true) 1065 #define assert_pll_disabled(d, p) assert_pll(d, p, false) 1066 1067 /* For ILK+ */ 1068 static void assert_pch_pll(struct drm_i915_private *dev_priv, 1069 struct intel_pch_pll *pll, 1070 struct intel_crtc *crtc, 1071 bool state) 1072 { 1073 u32 val; 1074 bool cur_state; 1075 1076 if (HAS_PCH_LPT(dev_priv->dev)) { 1077 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); 1078 return; 1079 } 1080 1081 if (WARN (!pll, 1082 "asserting PCH PLL %s with no PLL\n", state_string(state))) 1083 return; 1084 1085 val = I915_READ(pll->pll_reg); 1086 cur_state = !!(val & DPLL_VCO_ENABLE); 1087 WARN(cur_state != state, 1088 "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n", 1089 pll->pll_reg, state_string(state), state_string(cur_state), val); 1090 1091 /* Make sure the selected PLL is correctly attached to the transcoder */ 1092 if (crtc && HAS_PCH_CPT(dev_priv->dev)) { 1093 u32 pch_dpll; 1094 1095 pch_dpll = I915_READ(PCH_DPLL_SEL); 1096 cur_state = pll->pll_reg == _PCH_DPLL_B; 1097 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state, 1098 "PLL[%d] not attached to this transcoder %d: %08x\n", 1099 cur_state, crtc->pipe, pch_dpll)) { 1100 cur_state = !!(val >> (4*crtc->pipe + 3)); 1101 WARN(cur_state != state, 1102 "PLL[%d] not %s on this transcoder %d: %08x\n", 1103 pll->pll_reg == _PCH_DPLL_B, 1104 state_string(state), 1105 crtc->pipe, 1106 val); 1107 } 1108 } 1109 } 1110 #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) 1111 #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) 1112 1113 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1114 enum i915_pipe pipe, bool state) 1115 { 1116 int reg; 1117 u32 val; 1118 bool cur_state; 1119 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1120 pipe); 1121 1122 if (IS_HASWELL(dev_priv->dev)) { 1123 /* On Haswell, DDI is used instead of FDI_TX_CTL */ 1124 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1125 val = I915_READ(reg); 1126 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1127 } else { 1128 reg = FDI_TX_CTL(pipe); 1129 val = I915_READ(reg); 1130 cur_state = !!(val & FDI_TX_ENABLE); 1131 } 1132 WARN(cur_state != state, 1133 "FDI TX state assertion failure (expected %s, current %s)\n", 1134 state_string(state), state_string(cur_state)); 1135 } 1136 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1137 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1138 1139 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1140 enum i915_pipe pipe, bool state) 1141 { 1142 int reg; 1143 u32 val; 1144 bool cur_state; 1145 1146 reg = FDI_RX_CTL(pipe); 1147 val = I915_READ(reg); 1148 cur_state = !!(val & FDI_RX_ENABLE); 1149 WARN(cur_state != state, 1150 "FDI RX state assertion failure (expected %s, current %s)\n", 1151 state_string(state), state_string(cur_state)); 1152 } 1153 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1154 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1155 1156 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1157 enum i915_pipe pipe) 1158 { 1159 int reg; 1160 u32 val; 1161 1162 /* ILK FDI PLL is always enabled */ 1163 if (dev_priv->info->gen == 5) 1164 return; 1165 1166 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1167 if (IS_HASWELL(dev_priv->dev)) 1168 return; 1169 1170 reg = FDI_TX_CTL(pipe); 1171 val = I915_READ(reg); 1172 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1173 } 1174 1175 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, 1176 enum i915_pipe pipe) 1177 { 1178 int reg; 1179 u32 val; 1180 1181 reg = FDI_RX_CTL(pipe); 1182 val = I915_READ(reg); 1183 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1184 } 1185 1186 static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1187 enum i915_pipe pipe) 1188 { 1189 int pp_reg, lvds_reg; 1190 u32 val; 1191 enum i915_pipe panel_pipe = PIPE_A; 1192 bool locked = true; 1193 1194 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1195 pp_reg = PCH_PP_CONTROL; 1196 lvds_reg = PCH_LVDS; 1197 } else { 1198 pp_reg = PP_CONTROL; 1199 lvds_reg = LVDS; 1200 } 1201 1202 val = I915_READ(pp_reg); 1203 if (!(val & PANEL_POWER_ON) || 1204 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 1205 locked = false; 1206 1207 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) 1208 panel_pipe = PIPE_B; 1209 1210 WARN(panel_pipe == pipe && locked, 1211 "panel assertion failure, pipe %c regs locked\n", 1212 pipe_name(pipe)); 1213 } 1214 1215 void assert_pipe(struct drm_i915_private *dev_priv, 1216 enum i915_pipe pipe, bool state) 1217 { 1218 int reg; 1219 u32 val; 1220 bool cur_state; 1221 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1222 pipe); 1223 1224 /* if we need the pipe A quirk it must be always on */ 1225 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1226 state = true; 1227 1228 reg = PIPECONF(cpu_transcoder); 1229 val = I915_READ(reg); 1230 cur_state = !!(val & PIPECONF_ENABLE); 1231 WARN(cur_state != state, 1232 "pipe %c assertion failure (expected %s, current %s)\n", 1233 pipe_name(pipe), state_string(state), state_string(cur_state)); 1234 } 1235 1236 static void assert_plane(struct drm_i915_private *dev_priv, 1237 enum plane plane, bool state) 1238 { 1239 int reg; 1240 u32 val; 1241 bool cur_state; 1242 1243 reg = DSPCNTR(plane); 1244 val = I915_READ(reg); 1245 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1246 WARN(cur_state != state, 1247 "plane %c assertion failure (expected %s, current %s)\n", 1248 plane_name(plane), state_string(state), state_string(cur_state)); 1249 } 1250 1251 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1252 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1253 1254 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1255 enum i915_pipe pipe) 1256 { 1257 int reg, i; 1258 u32 val; 1259 int cur_pipe; 1260 1261 /* Planes are fixed to pipes on ILK+ */ 1262 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1263 reg = DSPCNTR(pipe); 1264 val = I915_READ(reg); 1265 WARN((val & DISPLAY_PLANE_ENABLE), 1266 "plane %c assertion failure, should be disabled but not\n", 1267 plane_name(pipe)); 1268 return; 1269 } 1270 1271 /* Need to check both planes against the pipe */ 1272 for (i = 0; i < 2; i++) { 1273 reg = DSPCNTR(i); 1274 val = I915_READ(reg); 1275 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1276 DISPPLANE_SEL_PIPE_SHIFT; 1277 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1278 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1279 plane_name(i), pipe_name(pipe)); 1280 } 1281 } 1282 1283 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1284 { 1285 u32 val; 1286 bool enabled; 1287 1288 if (HAS_PCH_LPT(dev_priv->dev)) { 1289 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); 1290 return; 1291 } 1292 1293 val = I915_READ(PCH_DREF_CONTROL); 1294 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1295 DREF_SUPERSPREAD_SOURCE_MASK)); 1296 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1297 } 1298 1299 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, 1300 enum i915_pipe pipe) 1301 { 1302 int reg; 1303 u32 val; 1304 bool enabled; 1305 1306 reg = TRANSCONF(pipe); 1307 val = I915_READ(reg); 1308 enabled = !!(val & TRANS_ENABLE); 1309 WARN(enabled, 1310 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1311 pipe_name(pipe)); 1312 } 1313 1314 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1315 enum i915_pipe pipe, u32 port_sel, u32 val) 1316 { 1317 if ((val & DP_PORT_EN) == 0) 1318 return false; 1319 1320 if (HAS_PCH_CPT(dev_priv->dev)) { 1321 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1322 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1323 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1324 return false; 1325 } else { 1326 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1327 return false; 1328 } 1329 return true; 1330 } 1331 1332 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1333 enum i915_pipe pipe, u32 val) 1334 { 1335 if ((val & PORT_ENABLE) == 0) 1336 return false; 1337 1338 if (HAS_PCH_CPT(dev_priv->dev)) { 1339 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1340 return false; 1341 } else { 1342 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) 1343 return false; 1344 } 1345 return true; 1346 } 1347 1348 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1349 enum i915_pipe pipe, u32 val) 1350 { 1351 if ((val & LVDS_PORT_EN) == 0) 1352 return false; 1353 1354 if (HAS_PCH_CPT(dev_priv->dev)) { 1355 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1356 return false; 1357 } else { 1358 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1359 return false; 1360 } 1361 return true; 1362 } 1363 1364 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1365 enum i915_pipe pipe, u32 val) 1366 { 1367 if ((val & ADPA_DAC_ENABLE) == 0) 1368 return false; 1369 if (HAS_PCH_CPT(dev_priv->dev)) { 1370 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1371 return false; 1372 } else { 1373 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1374 return false; 1375 } 1376 return true; 1377 } 1378 1379 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1380 enum i915_pipe pipe, int reg, u32 port_sel) 1381 { 1382 u32 val = I915_READ(reg); 1383 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1384 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1385 reg, pipe_name(pipe)); 1386 1387 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1388 && (val & DP_PIPEB_SELECT), 1389 "IBX PCH dp port still using transcoder B\n"); 1390 } 1391 1392 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1393 enum i915_pipe pipe, int reg) 1394 { 1395 u32 val = I915_READ(reg); 1396 WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1397 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1398 reg, pipe_name(pipe)); 1399 1400 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 1401 && (val & SDVO_PIPE_B_SELECT), 1402 "IBX PCH hdmi port still using transcoder B\n"); 1403 } 1404 1405 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1406 enum i915_pipe pipe) 1407 { 1408 int reg; 1409 u32 val; 1410 1411 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1412 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1413 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1414 1415 reg = PCH_ADPA; 1416 val = I915_READ(reg); 1417 WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1418 "PCH VGA enabled on transcoder %c, should be disabled\n", 1419 pipe_name(pipe)); 1420 1421 reg = PCH_LVDS; 1422 val = I915_READ(reg); 1423 WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1424 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1425 pipe_name(pipe)); 1426 1427 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1428 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1429 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1430 } 1431 1432 /** 1433 * intel_enable_pll - enable a PLL 1434 * @dev_priv: i915 private structure 1435 * @pipe: pipe PLL to enable 1436 * 1437 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to 1438 * make sure the PLL reg is writable first though, since the panel write 1439 * protect mechanism may be enabled. 1440 * 1441 * Note! This is for pre-ILK only. 1442 * 1443 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. 1444 */ 1445 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1446 { 1447 int reg; 1448 u32 val; 1449 1450 /* No really, not for ILK+ */ 1451 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); 1452 1453 /* PLL is protected by panel, make sure we can write it */ 1454 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1455 assert_panel_unlocked(dev_priv, pipe); 1456 1457 reg = DPLL(pipe); 1458 val = I915_READ(reg); 1459 val |= DPLL_VCO_ENABLE; 1460 1461 /* We do this three times for luck */ 1462 I915_WRITE(reg, val); 1463 POSTING_READ(reg); 1464 udelay(150); /* wait for warmup */ 1465 I915_WRITE(reg, val); 1466 POSTING_READ(reg); 1467 udelay(150); /* wait for warmup */ 1468 I915_WRITE(reg, val); 1469 POSTING_READ(reg); 1470 udelay(150); /* wait for warmup */ 1471 } 1472 1473 /** 1474 * intel_disable_pll - disable a PLL 1475 * @dev_priv: i915 private structure 1476 * @pipe: pipe PLL to disable 1477 * 1478 * Disable the PLL for @pipe, making sure the pipe is off first. 1479 * 1480 * Note! This is for pre-ILK only. 1481 */ 1482 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1483 { 1484 int reg; 1485 u32 val; 1486 1487 /* Don't disable pipe A or pipe A PLLs if needed */ 1488 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1489 return; 1490 1491 /* Make sure the pipe isn't still relying on us */ 1492 assert_pipe_disabled(dev_priv, pipe); 1493 1494 reg = DPLL(pipe); 1495 val = I915_READ(reg); 1496 val &= ~DPLL_VCO_ENABLE; 1497 I915_WRITE(reg, val); 1498 POSTING_READ(reg); 1499 } 1500 1501 /* SBI access */ 1502 static void 1503 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 1504 enum intel_sbi_destination destination) 1505 { 1506 u32 tmp; 1507 1508 spin_lock(&dev_priv->dpio_lock); 1509 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1510 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1511 goto out_unlock; 1512 } 1513 1514 I915_WRITE(SBI_ADDR, (reg << 16)); 1515 I915_WRITE(SBI_DATA, value); 1516 1517 if (destination == SBI_ICLK) 1518 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR; 1519 else 1520 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; 1521 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); 1522 1523 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1524 100)) { 1525 DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); 1526 goto out_unlock; 1527 } 1528 1529 out_unlock: 1530 spin_unlock(&dev_priv->dpio_lock); 1531 } 1532 1533 static u32 1534 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 1535 enum intel_sbi_destination destination) 1536 { 1537 u32 value = 0; 1538 1539 spin_lock(&dev_priv->dpio_lock); 1540 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1541 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1542 goto out_unlock; 1543 } 1544 1545 I915_WRITE(SBI_ADDR, (reg << 16)); 1546 1547 if (destination == SBI_ICLK) 1548 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; 1549 else 1550 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; 1551 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); 1552 1553 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1554 100)) { 1555 DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); 1556 goto out_unlock; 1557 } 1558 1559 value = I915_READ(SBI_DATA); 1560 1561 out_unlock: 1562 spin_unlock(&dev_priv->dpio_lock); 1563 return value; 1564 } 1565 1566 /** 1567 * ironlake_enable_pch_pll - enable PCH PLL 1568 * @dev_priv: i915 private structure 1569 * @pipe: pipe PLL to enable 1570 * 1571 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1572 * drives the transcoder clock. 1573 */ 1574 static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) 1575 { 1576 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1577 struct intel_pch_pll *pll; 1578 int reg; 1579 u32 val; 1580 1581 /* PCH PLLs only available on ILK, SNB and IVB */ 1582 BUG_ON(dev_priv->info->gen < 5); 1583 pll = intel_crtc->pch_pll; 1584 if (pll == NULL) 1585 return; 1586 1587 if (WARN_ON(pll->refcount == 0)) 1588 return; 1589 1590 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n", 1591 pll->pll_reg, pll->active, pll->on, 1592 intel_crtc->base.base.id); 1593 1594 /* PCH refclock must be enabled first */ 1595 assert_pch_refclk_enabled(dev_priv); 1596 1597 if (pll->active++ && pll->on) { 1598 assert_pch_pll_enabled(dev_priv, pll, NULL); 1599 return; 1600 } 1601 1602 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg); 1603 1604 reg = pll->pll_reg; 1605 val = I915_READ(reg); 1606 val |= DPLL_VCO_ENABLE; 1607 I915_WRITE(reg, val); 1608 POSTING_READ(reg); 1609 udelay(200); 1610 1611 pll->on = true; 1612 } 1613 1614 static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) 1615 { 1616 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1617 struct intel_pch_pll *pll = intel_crtc->pch_pll; 1618 int reg; 1619 u32 val; 1620 1621 /* PCH only available on ILK+ */ 1622 BUG_ON(dev_priv->info->gen < 5); 1623 if (pll == NULL) 1624 return; 1625 1626 if (WARN_ON(pll->refcount == 0)) 1627 return; 1628 1629 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n", 1630 pll->pll_reg, pll->active, pll->on, 1631 intel_crtc->base.base.id); 1632 1633 if (WARN_ON(pll->active == 0)) { 1634 assert_pch_pll_disabled(dev_priv, pll, NULL); 1635 return; 1636 } 1637 1638 if (--pll->active) { 1639 assert_pch_pll_enabled(dev_priv, pll, NULL); 1640 return; 1641 } 1642 1643 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg); 1644 1645 /* Make sure transcoder isn't still depending on us */ 1646 assert_transcoder_disabled(dev_priv, intel_crtc->pipe); 1647 1648 reg = pll->pll_reg; 1649 val = I915_READ(reg); 1650 val &= ~DPLL_VCO_ENABLE; 1651 I915_WRITE(reg, val); 1652 POSTING_READ(reg); 1653 udelay(200); 1654 1655 pll->on = false; 1656 } 1657 1658 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1659 enum i915_pipe pipe) 1660 { 1661 struct drm_device *dev = dev_priv->dev; 1662 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1663 uint32_t reg, val, pipeconf_val; 1664 1665 /* PCH only available on ILK+ */ 1666 BUG_ON(dev_priv->info->gen < 5); 1667 1668 /* Make sure PCH DPLL is enabled */ 1669 assert_pch_pll_enabled(dev_priv, 1670 to_intel_crtc(crtc)->pch_pll, 1671 to_intel_crtc(crtc)); 1672 1673 /* FDI must be feeding us bits for PCH ports */ 1674 assert_fdi_tx_enabled(dev_priv, pipe); 1675 assert_fdi_rx_enabled(dev_priv, pipe); 1676 1677 if (HAS_PCH_CPT(dev)) { 1678 /* Workaround: Set the timing override bit before enabling the 1679 * pch transcoder. */ 1680 reg = TRANS_CHICKEN2(pipe); 1681 val = I915_READ(reg); 1682 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1683 I915_WRITE(reg, val); 1684 } 1685 1686 reg = TRANSCONF(pipe); 1687 val = I915_READ(reg); 1688 pipeconf_val = I915_READ(PIPECONF(pipe)); 1689 1690 if (HAS_PCH_IBX(dev_priv->dev)) { 1691 /* 1692 * make the BPC in transcoder be consistent with 1693 * that in pipeconf reg. 1694 */ 1695 val &= ~PIPE_BPC_MASK; 1696 val |= pipeconf_val & PIPE_BPC_MASK; 1697 } 1698 1699 val &= ~TRANS_INTERLACE_MASK; 1700 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1701 if (HAS_PCH_IBX(dev_priv->dev) && 1702 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1703 val |= TRANS_LEGACY_INTERLACED_ILK; 1704 else 1705 val |= TRANS_INTERLACED; 1706 else 1707 val |= TRANS_PROGRESSIVE; 1708 1709 I915_WRITE(reg, val | TRANS_ENABLE); 1710 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1711 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1712 } 1713 1714 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1715 enum transcoder cpu_transcoder) 1716 { 1717 u32 val, pipeconf_val; 1718 1719 /* PCH only available on ILK+ */ 1720 BUG_ON(dev_priv->info->gen < 5); 1721 1722 /* FDI must be feeding us bits for PCH ports */ 1723 assert_fdi_tx_enabled(dev_priv, cpu_transcoder); 1724 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1725 1726 /* Workaround: set timing override bit. */ 1727 val = I915_READ(_TRANSA_CHICKEN2); 1728 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1729 I915_WRITE(_TRANSA_CHICKEN2, val); 1730 1731 val = TRANS_ENABLE; 1732 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1733 1734 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1735 PIPECONF_INTERLACED_ILK) 1736 val |= TRANS_INTERLACED; 1737 else 1738 val |= TRANS_PROGRESSIVE; 1739 1740 I915_WRITE(TRANSCONF(TRANSCODER_A), val); 1741 if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) 1742 DRM_ERROR("Failed to enable PCH transcoder\n"); 1743 } 1744 1745 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1746 enum i915_pipe pipe) 1747 { 1748 struct drm_device *dev = dev_priv->dev; 1749 uint32_t reg, val; 1750 1751 /* FDI relies on the transcoder */ 1752 assert_fdi_tx_disabled(dev_priv, pipe); 1753 assert_fdi_rx_disabled(dev_priv, pipe); 1754 1755 /* Ports must be off as well */ 1756 assert_pch_ports_disabled(dev_priv, pipe); 1757 1758 reg = TRANSCONF(pipe); 1759 val = I915_READ(reg); 1760 val &= ~TRANS_ENABLE; 1761 I915_WRITE(reg, val); 1762 /* wait for PCH transcoder off, transcoder state */ 1763 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1764 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1765 1766 if (!HAS_PCH_IBX(dev)) { 1767 /* Workaround: Clear the timing override chicken bit again. */ 1768 reg = TRANS_CHICKEN2(pipe); 1769 val = I915_READ(reg); 1770 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1771 I915_WRITE(reg, val); 1772 } 1773 } 1774 1775 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1776 { 1777 u32 val; 1778 1779 val = I915_READ(_TRANSACONF); 1780 val &= ~TRANS_ENABLE; 1781 I915_WRITE(_TRANSACONF, val); 1782 /* wait for PCH transcoder off, transcoder state */ 1783 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) 1784 DRM_ERROR("Failed to disable PCH transcoder\n"); 1785 1786 /* Workaround: clear timing override bit. */ 1787 val = I915_READ(_TRANSA_CHICKEN2); 1788 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1789 I915_WRITE(_TRANSA_CHICKEN2, val); 1790 } 1791 1792 /** 1793 * intel_enable_pipe - enable a pipe, asserting requirements 1794 * @dev_priv: i915 private structure 1795 * @pipe: pipe to enable 1796 * @pch_port: on ILK+, is this pipe driving a PCH port or not 1797 * 1798 * Enable @pipe, making sure that various hardware specific requirements 1799 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1800 * 1801 * @pipe should be %PIPE_A or %PIPE_B. 1802 * 1803 * Will wait until the pipe is actually running (i.e. first vblank) before 1804 * returning. 1805 */ 1806 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 1807 bool pch_port) 1808 { 1809 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1810 pipe); 1811 enum transcoder pch_transcoder; 1812 int reg; 1813 u32 val; 1814 1815 if (IS_HASWELL(dev_priv->dev)) 1816 pch_transcoder = TRANSCODER_A; 1817 else 1818 pch_transcoder = pipe; 1819 1820 /* 1821 * A pipe without a PLL won't actually be able to drive bits from 1822 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1823 * need the check. 1824 */ 1825 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1826 assert_pll_enabled(dev_priv, pipe); 1827 else { 1828 if (pch_port) { 1829 /* if driving the PCH, we need FDI enabled */ 1830 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1831 assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); 1832 } 1833 /* FIXME: assert CPU port conditions for SNB+ */ 1834 } 1835 1836 reg = PIPECONF(cpu_transcoder); 1837 val = I915_READ(reg); 1838 if (val & PIPECONF_ENABLE) 1839 return; 1840 1841 I915_WRITE(reg, val | PIPECONF_ENABLE); 1842 intel_wait_for_vblank(dev_priv->dev, pipe); 1843 } 1844 1845 /** 1846 * intel_disable_pipe - disable a pipe, asserting requirements 1847 * @dev_priv: i915 private structure 1848 * @pipe: pipe to disable 1849 * 1850 * Disable @pipe, making sure that various hardware specific requirements 1851 * are met, if applicable, e.g. plane disabled, panel fitter off, etc. 1852 * 1853 * @pipe should be %PIPE_A or %PIPE_B. 1854 * 1855 * Will wait until the pipe has shut down before returning. 1856 */ 1857 static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1858 enum i915_pipe pipe) 1859 { 1860 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1861 pipe); 1862 int reg; 1863 u32 val; 1864 1865 /* 1866 * Make sure planes won't keep trying to pump pixels to us, 1867 * or we might hang the display. 1868 */ 1869 assert_planes_disabled(dev_priv, pipe); 1870 1871 /* Don't disable pipe A or pipe A PLLs if needed */ 1872 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1873 return; 1874 1875 reg = PIPECONF(cpu_transcoder); 1876 val = I915_READ(reg); 1877 if ((val & PIPECONF_ENABLE) == 0) 1878 return; 1879 1880 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 1881 intel_wait_for_pipe_off(dev_priv->dev, pipe); 1882 } 1883 1884 /* 1885 * Plane regs are double buffered, going from enabled->disabled needs a 1886 * trigger in order to latch. The display address reg provides this. 1887 */ 1888 void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1889 enum plane plane) 1890 { 1891 if (dev_priv->info->gen >= 4) 1892 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1893 else 1894 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1895 } 1896 1897 /** 1898 * intel_enable_plane - enable a display plane on a given pipe 1899 * @dev_priv: i915 private structure 1900 * @plane: plane to enable 1901 * @pipe: pipe being fed 1902 * 1903 * Enable @plane on @pipe, making sure that @pipe is running first. 1904 */ 1905 static void intel_enable_plane(struct drm_i915_private *dev_priv, 1906 enum plane plane, enum i915_pipe pipe) 1907 { 1908 int reg; 1909 u32 val; 1910 1911 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1912 assert_pipe_enabled(dev_priv, pipe); 1913 1914 reg = DSPCNTR(plane); 1915 val = I915_READ(reg); 1916 if (val & DISPLAY_PLANE_ENABLE) 1917 return; 1918 1919 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1920 intel_flush_display_plane(dev_priv, plane); 1921 intel_wait_for_vblank(dev_priv->dev, pipe); 1922 } 1923 1924 /** 1925 * intel_disable_plane - disable a display plane 1926 * @dev_priv: i915 private structure 1927 * @plane: plane to disable 1928 * @pipe: pipe consuming the data 1929 * 1930 * Disable @plane; should be an independent operation. 1931 */ 1932 static void intel_disable_plane(struct drm_i915_private *dev_priv, 1933 enum plane plane, enum i915_pipe pipe) 1934 { 1935 int reg; 1936 u32 val; 1937 1938 reg = DSPCNTR(plane); 1939 val = I915_READ(reg); 1940 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1941 return; 1942 1943 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1944 intel_flush_display_plane(dev_priv, plane); 1945 intel_wait_for_vblank(dev_priv->dev, pipe); 1946 } 1947 1948 int 1949 intel_pin_and_fence_fb_obj(struct drm_device *dev, 1950 struct drm_i915_gem_object *obj, 1951 struct intel_ring_buffer *pipelined) 1952 { 1953 struct drm_i915_private *dev_priv = dev->dev_private; 1954 u32 alignment; 1955 int ret; 1956 1957 switch (obj->tiling_mode) { 1958 case I915_TILING_NONE: 1959 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1960 alignment = 128 * 1024; 1961 else if (INTEL_INFO(dev)->gen >= 4) 1962 alignment = 4 * 1024; 1963 else 1964 alignment = 64 * 1024; 1965 break; 1966 case I915_TILING_X: 1967 /* pin() will align the object as required by fence */ 1968 alignment = 0; 1969 break; 1970 case I915_TILING_Y: 1971 /* FIXME: Is this true? */ 1972 DRM_ERROR("Y tiled not allowed for scan out buffers\n"); 1973 return -EINVAL; 1974 default: 1975 BUG(); 1976 } 1977 1978 dev_priv->mm.interruptible = false; 1979 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); 1980 if (ret) 1981 goto err_interruptible; 1982 1983 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1984 * fence, whereas 965+ only requires a fence if using 1985 * framebuffer compression. For simplicity, we always install 1986 * a fence as the cost is not that onerous. 1987 */ 1988 ret = i915_gem_object_get_fence(obj); 1989 if (ret) 1990 goto err_unpin; 1991 1992 i915_gem_object_pin_fence(obj); 1993 1994 dev_priv->mm.interruptible = true; 1995 return 0; 1996 1997 err_unpin: 1998 i915_gem_object_unpin(obj); 1999 err_interruptible: 2000 dev_priv->mm.interruptible = true; 2001 return ret; 2002 } 2003 2004 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 2005 { 2006 i915_gem_object_unpin_fence(obj); 2007 i915_gem_object_unpin(obj); 2008 } 2009 2010 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 2011 * is assumed to be a power-of-two. */ 2012 unsigned long intel_gen4_compute_page_offset(int *x, int *y, 2013 unsigned int tiling_mode, 2014 unsigned int cpp, 2015 unsigned int pitch) 2016 { 2017 if (tiling_mode != I915_TILING_NONE) { 2018 unsigned int tile_rows, tiles; 2019 2020 tile_rows = *y / 8; 2021 *y %= 8; 2022 2023 tiles = *x / (512/cpp); 2024 *x %= 512/cpp; 2025 2026 return tile_rows * pitch * 8 + tiles * 4096; 2027 } else { 2028 unsigned int offset; 2029 2030 offset = *y * pitch + *x * cpp; 2031 *y = 0; 2032 *x = (offset & 4095) / cpp; 2033 return offset & -4096; 2034 } 2035 } 2036 2037 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2038 int x, int y) 2039 { 2040 struct drm_device *dev = crtc->dev; 2041 struct drm_i915_private *dev_priv = dev->dev_private; 2042 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2043 struct intel_framebuffer *intel_fb; 2044 struct drm_i915_gem_object *obj; 2045 int plane = intel_crtc->plane; 2046 unsigned long linear_offset; 2047 u32 dspcntr; 2048 u32 reg; 2049 2050 switch (plane) { 2051 case 0: 2052 case 1: 2053 break; 2054 default: 2055 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 2056 return -EINVAL; 2057 } 2058 2059 intel_fb = to_intel_framebuffer(fb); 2060 obj = intel_fb->obj; 2061 2062 reg = DSPCNTR(plane); 2063 dspcntr = I915_READ(reg); 2064 /* Mask out pixel format bits in case we change it */ 2065 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2066 switch (fb->pixel_format) { 2067 case DRM_FORMAT_C8: 2068 dspcntr |= DISPPLANE_8BPP; 2069 break; 2070 case DRM_FORMAT_XRGB1555: 2071 case DRM_FORMAT_ARGB1555: 2072 dspcntr |= DISPPLANE_BGRX555; 2073 break; 2074 case DRM_FORMAT_RGB565: 2075 dspcntr |= DISPPLANE_BGRX565; 2076 break; 2077 case DRM_FORMAT_XRGB8888: 2078 case DRM_FORMAT_ARGB8888: 2079 dspcntr |= DISPPLANE_BGRX888; 2080 break; 2081 case DRM_FORMAT_XBGR8888: 2082 case DRM_FORMAT_ABGR8888: 2083 dspcntr |= DISPPLANE_RGBX888; 2084 break; 2085 case DRM_FORMAT_XRGB2101010: 2086 case DRM_FORMAT_ARGB2101010: 2087 dspcntr |= DISPPLANE_BGRX101010; 2088 break; 2089 case DRM_FORMAT_XBGR2101010: 2090 case DRM_FORMAT_ABGR2101010: 2091 dspcntr |= DISPPLANE_RGBX101010; 2092 break; 2093 default: 2094 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); 2095 return -EINVAL; 2096 } 2097 2098 if (INTEL_INFO(dev)->gen >= 4) { 2099 if (obj->tiling_mode != I915_TILING_NONE) 2100 dspcntr |= DISPPLANE_TILED; 2101 else 2102 dspcntr &= ~DISPPLANE_TILED; 2103 } 2104 2105 I915_WRITE(reg, dspcntr); 2106 2107 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2108 2109 if (INTEL_INFO(dev)->gen >= 4) { 2110 intel_crtc->dspaddr_offset = 2111 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 2112 fb->bits_per_pixel / 8, 2113 fb->pitches[0]); 2114 linear_offset -= intel_crtc->dspaddr_offset; 2115 } else { 2116 intel_crtc->dspaddr_offset = linear_offset; 2117 } 2118 2119 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2120 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2121 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2122 if (INTEL_INFO(dev)->gen >= 4) { 2123 I915_MODIFY_DISPBASE(DSPSURF(plane), 2124 obj->gtt_offset + intel_crtc->dspaddr_offset); 2125 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2126 I915_WRITE(DSPLINOFF(plane), linear_offset); 2127 } else 2128 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); 2129 POSTING_READ(reg); 2130 2131 return 0; 2132 } 2133 2134 static int ironlake_update_plane(struct drm_crtc *crtc, 2135 struct drm_framebuffer *fb, int x, int y) 2136 { 2137 struct drm_device *dev = crtc->dev; 2138 struct drm_i915_private *dev_priv = dev->dev_private; 2139 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2140 struct intel_framebuffer *intel_fb; 2141 struct drm_i915_gem_object *obj; 2142 int plane = intel_crtc->plane; 2143 unsigned long linear_offset; 2144 u32 dspcntr; 2145 u32 reg; 2146 2147 switch (plane) { 2148 case 0: 2149 case 1: 2150 case 2: 2151 break; 2152 default: 2153 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 2154 return -EINVAL; 2155 } 2156 2157 intel_fb = to_intel_framebuffer(fb); 2158 obj = intel_fb->obj; 2159 2160 reg = DSPCNTR(plane); 2161 dspcntr = I915_READ(reg); 2162 /* Mask out pixel format bits in case we change it */ 2163 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2164 switch (fb->pixel_format) { 2165 case DRM_FORMAT_C8: 2166 dspcntr |= DISPPLANE_8BPP; 2167 break; 2168 case DRM_FORMAT_RGB565: 2169 dspcntr |= DISPPLANE_BGRX565; 2170 break; 2171 case DRM_FORMAT_XRGB8888: 2172 case DRM_FORMAT_ARGB8888: 2173 dspcntr |= DISPPLANE_BGRX888; 2174 break; 2175 case DRM_FORMAT_XBGR8888: 2176 case DRM_FORMAT_ABGR8888: 2177 dspcntr |= DISPPLANE_RGBX888; 2178 break; 2179 case DRM_FORMAT_XRGB2101010: 2180 case DRM_FORMAT_ARGB2101010: 2181 dspcntr |= DISPPLANE_BGRX101010; 2182 break; 2183 case DRM_FORMAT_XBGR2101010: 2184 case DRM_FORMAT_ABGR2101010: 2185 dspcntr |= DISPPLANE_RGBX101010; 2186 break; 2187 default: 2188 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format); 2189 return -EINVAL; 2190 } 2191 2192 if (obj->tiling_mode != I915_TILING_NONE) 2193 dspcntr |= DISPPLANE_TILED; 2194 else 2195 dspcntr &= ~DISPPLANE_TILED; 2196 2197 /* must disable */ 2198 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2199 2200 I915_WRITE(reg, dspcntr); 2201 2202 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2203 intel_crtc->dspaddr_offset = 2204 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 2205 fb->bits_per_pixel / 8, 2206 fb->pitches[0]); 2207 linear_offset -= intel_crtc->dspaddr_offset; 2208 2209 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2210 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); 2211 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2212 I915_MODIFY_DISPBASE(DSPSURF(plane), 2213 obj->gtt_offset + intel_crtc->dspaddr_offset); 2214 if (IS_HASWELL(dev)) { 2215 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2216 } else { 2217 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2218 I915_WRITE(DSPLINOFF(plane), linear_offset); 2219 } 2220 POSTING_READ(reg); 2221 2222 return 0; 2223 } 2224 2225 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 2226 static int 2227 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2228 int x, int y, enum mode_set_atomic state) 2229 { 2230 struct drm_device *dev = crtc->dev; 2231 struct drm_i915_private *dev_priv = dev->dev_private; 2232 2233 if (dev_priv->display.disable_fbc) 2234 dev_priv->display.disable_fbc(dev); 2235 intel_increase_pllclock(crtc); 2236 2237 return dev_priv->display.update_plane(crtc, fb, x, y); 2238 } 2239 2240 static int 2241 intel_finish_fb(struct drm_framebuffer *old_fb) 2242 { 2243 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 2244 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2245 bool was_interruptible = dev_priv->mm.interruptible; 2246 int ret; 2247 2248 wait_event(dev_priv->pending_flip_queue, 2249 atomic_read(&dev_priv->mm.wedged) || 2250 atomic_read(&obj->pending_flip) == 0); 2251 2252 /* Big Hammer, we also need to ensure that any pending 2253 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 2254 * current scanout is retired before unpinning the old 2255 * framebuffer. 2256 * 2257 * This should only fail upon a hung GPU, in which case we 2258 * can safely continue. 2259 */ 2260 dev_priv->mm.interruptible = false; 2261 ret = i915_gem_object_finish_gpu(obj); 2262 dev_priv->mm.interruptible = was_interruptible; 2263 2264 return ret; 2265 } 2266 2267 static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y) 2268 { 2269 struct drm_device *dev = crtc->dev; 2270 #if 0 2271 struct drm_i915_master_private *master_priv; 2272 #else 2273 drm_i915_private_t *dev_priv = dev->dev_private; 2274 #endif 2275 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2276 2277 #if 0 2278 if (!dev->primary->master) 2279 return; 2280 2281 master_priv = dev->primary->master->driver_priv; 2282 if (!master_priv->sarea_priv) 2283 return; 2284 #else 2285 if (!dev_priv->sarea_priv) 2286 return; 2287 #endif 2288 2289 switch (intel_crtc->pipe) { 2290 case 0: 2291 #if 0 2292 master_priv->sarea_priv->pipeA_x = x; 2293 master_priv->sarea_priv->pipeA_y = y; 2294 #else 2295 dev_priv->sarea_priv->planeA_x = x; 2296 dev_priv->sarea_priv->planeA_y = y; 2297 #endif 2298 break; 2299 case 1: 2300 #if 0 2301 master_priv->sarea_priv->pipeB_x = x; 2302 master_priv->sarea_priv->pipeB_y = y; 2303 #else 2304 dev_priv->sarea_priv->planeB_x = x; 2305 dev_priv->sarea_priv->planeB_y = y; 2306 #endif 2307 break; 2308 default: 2309 break; 2310 } 2311 } 2312 2313 static int 2314 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2315 struct drm_framebuffer *fb) 2316 { 2317 struct drm_device *dev = crtc->dev; 2318 struct drm_i915_private *dev_priv = dev->dev_private; 2319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2320 struct drm_framebuffer *old_fb; 2321 int ret; 2322 2323 /* no fb bound */ 2324 if (!fb) { 2325 DRM_ERROR("No FB bound\n"); 2326 return 0; 2327 } 2328 2329 if(intel_crtc->plane > dev_priv->num_pipe) { 2330 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", 2331 intel_crtc->plane, 2332 dev_priv->num_pipe); 2333 return -EINVAL; 2334 } 2335 2336 DRM_LOCK(dev); 2337 ret = intel_pin_and_fence_fb_obj(dev, 2338 to_intel_framebuffer(fb)->obj, 2339 NULL); 2340 if (ret != 0) { 2341 DRM_UNLOCK(dev); 2342 DRM_ERROR("pin & fence failed\n"); 2343 return ret; 2344 } 2345 2346 if (crtc->fb) 2347 intel_finish_fb(crtc->fb); 2348 2349 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2350 if (ret) { 2351 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); 2352 DRM_UNLOCK(dev); 2353 DRM_ERROR("failed to update base address\n"); 2354 return ret; 2355 } 2356 2357 old_fb = crtc->fb; 2358 crtc->fb = fb; 2359 crtc->x = x; 2360 crtc->y = y; 2361 2362 if (old_fb) { 2363 intel_wait_for_vblank(dev, intel_crtc->pipe); 2364 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2365 } 2366 2367 intel_update_fbc(dev); 2368 DRM_UNLOCK(dev); 2369 2370 intel_crtc_update_sarea_pos(crtc, x, y); 2371 2372 return 0; 2373 } 2374 2375 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) 2376 { 2377 struct drm_device *dev = crtc->dev; 2378 struct drm_i915_private *dev_priv = dev->dev_private; 2379 u32 dpa_ctl; 2380 2381 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); 2382 dpa_ctl = I915_READ(DP_A); 2383 dpa_ctl &= ~DP_PLL_FREQ_MASK; 2384 2385 if (clock < 200000) { 2386 u32 temp; 2387 dpa_ctl |= DP_PLL_FREQ_160MHZ; 2388 /* workaround for 160Mhz: 2389 1) program 0x4600c bits 15:0 = 0x8124 2390 2) program 0x46010 bit 0 = 1 2391 3) program 0x46034 bit 24 = 1 2392 4) program 0x64000 bit 14 = 1 2393 */ 2394 temp = I915_READ(0x4600c); 2395 temp &= 0xffff0000; 2396 I915_WRITE(0x4600c, temp | 0x8124); 2397 2398 temp = I915_READ(0x46010); 2399 I915_WRITE(0x46010, temp | 1); 2400 2401 temp = I915_READ(0x46034); 2402 I915_WRITE(0x46034, temp | (1 << 24)); 2403 } else { 2404 dpa_ctl |= DP_PLL_FREQ_270MHZ; 2405 } 2406 I915_WRITE(DP_A, dpa_ctl); 2407 2408 POSTING_READ(DP_A); 2409 udelay(500); 2410 } 2411 2412 static void intel_fdi_normal_train(struct drm_crtc *crtc) 2413 { 2414 struct drm_device *dev = crtc->dev; 2415 struct drm_i915_private *dev_priv = dev->dev_private; 2416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2417 int pipe = intel_crtc->pipe; 2418 u32 reg, temp; 2419 2420 /* enable normal train */ 2421 reg = FDI_TX_CTL(pipe); 2422 temp = I915_READ(reg); 2423 if (IS_IVYBRIDGE(dev)) { 2424 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2425 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 2426 } else { 2427 temp &= ~FDI_LINK_TRAIN_NONE; 2428 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 2429 } 2430 I915_WRITE(reg, temp); 2431 2432 reg = FDI_RX_CTL(pipe); 2433 temp = I915_READ(reg); 2434 if (HAS_PCH_CPT(dev)) { 2435 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2436 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 2437 } else { 2438 temp &= ~FDI_LINK_TRAIN_NONE; 2439 temp |= FDI_LINK_TRAIN_NONE; 2440 } 2441 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 2442 2443 /* wait one idle pattern time */ 2444 POSTING_READ(reg); 2445 udelay(1000); 2446 2447 /* IVB wants error correction enabled */ 2448 if (IS_IVYBRIDGE(dev)) 2449 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 2450 FDI_FE_ERRC_ENABLE); 2451 } 2452 2453 static void ivb_modeset_global_resources(struct drm_device *dev) 2454 { 2455 struct drm_i915_private *dev_priv = dev->dev_private; 2456 struct intel_crtc *pipe_B_crtc = 2457 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); 2458 struct intel_crtc *pipe_C_crtc = 2459 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); 2460 uint32_t temp; 2461 2462 /* When everything is off disable fdi C so that we could enable fdi B 2463 * with all lanes. XXX: This misses the case where a pipe is not using 2464 * any pch resources and so doesn't need any fdi lanes. */ 2465 if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { 2466 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 2467 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 2468 2469 temp = I915_READ(SOUTH_CHICKEN1); 2470 temp &= ~FDI_BC_BIFURCATION_SELECT; 2471 DRM_DEBUG_KMS("disabling fdi C rx\n"); 2472 I915_WRITE(SOUTH_CHICKEN1, temp); 2473 } 2474 } 2475 2476 /* The FDI link training functions for ILK/Ibexpeak. */ 2477 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 2478 { 2479 struct drm_device *dev = crtc->dev; 2480 struct drm_i915_private *dev_priv = dev->dev_private; 2481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2482 int pipe = intel_crtc->pipe; 2483 int plane = intel_crtc->plane; 2484 u32 reg, temp, tries; 2485 2486 /* FDI needs bits from pipe & plane first */ 2487 assert_pipe_enabled(dev_priv, pipe); 2488 assert_plane_enabled(dev_priv, plane); 2489 2490 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2491 for train result */ 2492 reg = FDI_RX_IMR(pipe); 2493 temp = I915_READ(reg); 2494 temp &= ~FDI_RX_SYMBOL_LOCK; 2495 temp &= ~FDI_RX_BIT_LOCK; 2496 I915_WRITE(reg, temp); 2497 I915_READ(reg); 2498 udelay(150); 2499 2500 /* enable CPU FDI TX and PCH FDI RX */ 2501 reg = FDI_TX_CTL(pipe); 2502 temp = I915_READ(reg); 2503 temp &= ~(7 << 19); 2504 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2505 temp &= ~FDI_LINK_TRAIN_NONE; 2506 temp |= FDI_LINK_TRAIN_PATTERN_1; 2507 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2508 2509 reg = FDI_RX_CTL(pipe); 2510 temp = I915_READ(reg); 2511 temp &= ~FDI_LINK_TRAIN_NONE; 2512 temp |= FDI_LINK_TRAIN_PATTERN_1; 2513 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2514 2515 POSTING_READ(reg); 2516 udelay(150); 2517 2518 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2519 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2520 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2521 FDI_RX_PHASE_SYNC_POINTER_EN); 2522 2523 reg = FDI_RX_IIR(pipe); 2524 for (tries = 0; tries < 5; tries++) { 2525 temp = I915_READ(reg); 2526 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2527 2528 if ((temp & FDI_RX_BIT_LOCK)) { 2529 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2530 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2531 break; 2532 } 2533 } 2534 if (tries == 5) 2535 DRM_ERROR("FDI train 1 fail!\n"); 2536 2537 /* Train 2 */ 2538 reg = FDI_TX_CTL(pipe); 2539 temp = I915_READ(reg); 2540 temp &= ~FDI_LINK_TRAIN_NONE; 2541 temp |= FDI_LINK_TRAIN_PATTERN_2; 2542 I915_WRITE(reg, temp); 2543 2544 reg = FDI_RX_CTL(pipe); 2545 temp = I915_READ(reg); 2546 temp &= ~FDI_LINK_TRAIN_NONE; 2547 temp |= FDI_LINK_TRAIN_PATTERN_2; 2548 I915_WRITE(reg, temp); 2549 2550 POSTING_READ(reg); 2551 udelay(150); 2552 2553 reg = FDI_RX_IIR(pipe); 2554 for (tries = 0; tries < 5; tries++) { 2555 temp = I915_READ(reg); 2556 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2557 2558 if (temp & FDI_RX_SYMBOL_LOCK) { 2559 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2560 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2561 break; 2562 } 2563 } 2564 if (tries == 5) 2565 DRM_ERROR("FDI train 2 fail!\n"); 2566 2567 DRM_DEBUG_KMS("FDI train done\n"); 2568 2569 } 2570 2571 static const int snb_b_fdi_train_param[] = { 2572 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2573 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2574 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2575 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 2576 }; 2577 2578 /* The FDI link training functions for SNB/Cougarpoint. */ 2579 static void gen6_fdi_link_train(struct drm_crtc *crtc) 2580 { 2581 struct drm_device *dev = crtc->dev; 2582 struct drm_i915_private *dev_priv = dev->dev_private; 2583 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2584 int pipe = intel_crtc->pipe; 2585 u32 reg, temp, i, retry; 2586 2587 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2588 for train result */ 2589 reg = FDI_RX_IMR(pipe); 2590 temp = I915_READ(reg); 2591 temp &= ~FDI_RX_SYMBOL_LOCK; 2592 temp &= ~FDI_RX_BIT_LOCK; 2593 I915_WRITE(reg, temp); 2594 2595 POSTING_READ(reg); 2596 udelay(150); 2597 2598 /* enable CPU FDI TX and PCH FDI RX */ 2599 reg = FDI_TX_CTL(pipe); 2600 temp = I915_READ(reg); 2601 temp &= ~(7 << 19); 2602 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2603 temp &= ~FDI_LINK_TRAIN_NONE; 2604 temp |= FDI_LINK_TRAIN_PATTERN_1; 2605 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2606 /* SNB-B */ 2607 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2608 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2609 2610 I915_WRITE(FDI_RX_MISC(pipe), 2611 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 2612 2613 reg = FDI_RX_CTL(pipe); 2614 temp = I915_READ(reg); 2615 if (HAS_PCH_CPT(dev)) { 2616 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2617 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2618 } else { 2619 temp &= ~FDI_LINK_TRAIN_NONE; 2620 temp |= FDI_LINK_TRAIN_PATTERN_1; 2621 } 2622 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2623 2624 POSTING_READ(reg); 2625 udelay(150); 2626 2627 for (i = 0; i < 4; i++) { 2628 reg = FDI_TX_CTL(pipe); 2629 temp = I915_READ(reg); 2630 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2631 temp |= snb_b_fdi_train_param[i]; 2632 I915_WRITE(reg, temp); 2633 2634 POSTING_READ(reg); 2635 udelay(500); 2636 2637 for (retry = 0; retry < 5; retry++) { 2638 reg = FDI_RX_IIR(pipe); 2639 temp = I915_READ(reg); 2640 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2641 if (temp & FDI_RX_BIT_LOCK) { 2642 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2643 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2644 break; 2645 } 2646 udelay(50); 2647 } 2648 if (retry < 5) 2649 break; 2650 } 2651 if (i == 4) 2652 DRM_ERROR("FDI train 1 fail!\n"); 2653 2654 /* Train 2 */ 2655 reg = FDI_TX_CTL(pipe); 2656 temp = I915_READ(reg); 2657 temp &= ~FDI_LINK_TRAIN_NONE; 2658 temp |= FDI_LINK_TRAIN_PATTERN_2; 2659 if (IS_GEN6(dev)) { 2660 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2661 /* SNB-B */ 2662 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2663 } 2664 I915_WRITE(reg, temp); 2665 2666 reg = FDI_RX_CTL(pipe); 2667 temp = I915_READ(reg); 2668 if (HAS_PCH_CPT(dev)) { 2669 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2670 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2671 } else { 2672 temp &= ~FDI_LINK_TRAIN_NONE; 2673 temp |= FDI_LINK_TRAIN_PATTERN_2; 2674 } 2675 I915_WRITE(reg, temp); 2676 2677 POSTING_READ(reg); 2678 udelay(150); 2679 2680 for (i = 0; i < 4; i++) { 2681 reg = FDI_TX_CTL(pipe); 2682 temp = I915_READ(reg); 2683 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2684 temp |= snb_b_fdi_train_param[i]; 2685 I915_WRITE(reg, temp); 2686 2687 POSTING_READ(reg); 2688 udelay(500); 2689 2690 for (retry = 0; retry < 5; retry++) { 2691 reg = FDI_RX_IIR(pipe); 2692 temp = I915_READ(reg); 2693 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2694 if (temp & FDI_RX_SYMBOL_LOCK) { 2695 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2696 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2697 break; 2698 } 2699 udelay(50); 2700 } 2701 if (retry < 5) 2702 break; 2703 } 2704 if (i == 4) 2705 DRM_ERROR("FDI train 2 fail!\n"); 2706 2707 DRM_DEBUG_KMS("FDI train done.\n"); 2708 } 2709 2710 /* Manual link training for Ivy Bridge A0 parts */ 2711 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 2712 { 2713 struct drm_device *dev = crtc->dev; 2714 struct drm_i915_private *dev_priv = dev->dev_private; 2715 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2716 int pipe = intel_crtc->pipe; 2717 u32 reg, temp, i; 2718 2719 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2720 for train result */ 2721 reg = FDI_RX_IMR(pipe); 2722 temp = I915_READ(reg); 2723 temp &= ~FDI_RX_SYMBOL_LOCK; 2724 temp &= ~FDI_RX_BIT_LOCK; 2725 I915_WRITE(reg, temp); 2726 2727 POSTING_READ(reg); 2728 udelay(150); 2729 2730 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 2731 I915_READ(FDI_RX_IIR(pipe))); 2732 2733 /* enable CPU FDI TX and PCH FDI RX */ 2734 reg = FDI_TX_CTL(pipe); 2735 temp = I915_READ(reg); 2736 temp &= ~(7 << 19); 2737 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2738 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2739 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2740 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2741 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2742 temp |= FDI_COMPOSITE_SYNC; 2743 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2744 2745 I915_WRITE(FDI_RX_MISC(pipe), 2746 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 2747 2748 reg = FDI_RX_CTL(pipe); 2749 temp = I915_READ(reg); 2750 temp &= ~FDI_LINK_TRAIN_AUTO; 2751 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2752 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2753 temp |= FDI_COMPOSITE_SYNC; 2754 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2755 2756 POSTING_READ(reg); 2757 udelay(150); 2758 2759 for (i = 0; i < 4; i++) { 2760 reg = FDI_TX_CTL(pipe); 2761 temp = I915_READ(reg); 2762 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2763 temp |= snb_b_fdi_train_param[i]; 2764 I915_WRITE(reg, temp); 2765 2766 POSTING_READ(reg); 2767 udelay(500); 2768 2769 reg = FDI_RX_IIR(pipe); 2770 temp = I915_READ(reg); 2771 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2772 2773 if (temp & FDI_RX_BIT_LOCK || 2774 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2775 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2776 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); 2777 break; 2778 } 2779 } 2780 if (i == 4) 2781 DRM_ERROR("FDI train 1 fail!\n"); 2782 2783 /* Train 2 */ 2784 reg = FDI_TX_CTL(pipe); 2785 temp = I915_READ(reg); 2786 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2787 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 2788 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2789 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2790 I915_WRITE(reg, temp); 2791 2792 reg = FDI_RX_CTL(pipe); 2793 temp = I915_READ(reg); 2794 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2795 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2796 I915_WRITE(reg, temp); 2797 2798 POSTING_READ(reg); 2799 udelay(150); 2800 2801 for (i = 0; i < 4; i++) { 2802 reg = FDI_TX_CTL(pipe); 2803 temp = I915_READ(reg); 2804 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2805 temp |= snb_b_fdi_train_param[i]; 2806 I915_WRITE(reg, temp); 2807 2808 POSTING_READ(reg); 2809 udelay(500); 2810 2811 reg = FDI_RX_IIR(pipe); 2812 temp = I915_READ(reg); 2813 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2814 2815 if (temp & FDI_RX_SYMBOL_LOCK) { 2816 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2817 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); 2818 break; 2819 } 2820 } 2821 if (i == 4) 2822 DRM_ERROR("FDI train 2 fail!\n"); 2823 2824 DRM_DEBUG_KMS("FDI train done.\n"); 2825 } 2826 2827 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 2828 { 2829 struct drm_device *dev = intel_crtc->base.dev; 2830 struct drm_i915_private *dev_priv = dev->dev_private; 2831 int pipe = intel_crtc->pipe; 2832 u32 reg, temp; 2833 2834 2835 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2836 reg = FDI_RX_CTL(pipe); 2837 temp = I915_READ(reg); 2838 temp &= ~((0x7 << 19) | (0x7 << 16)); 2839 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2840 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2841 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2842 2843 POSTING_READ(reg); 2844 udelay(200); 2845 2846 /* Switch from Rawclk to PCDclk */ 2847 temp = I915_READ(reg); 2848 I915_WRITE(reg, temp | FDI_PCDCLK); 2849 2850 POSTING_READ(reg); 2851 udelay(200); 2852 2853 /* On Haswell, the PLL configuration for ports and pipes is handled 2854 * separately, as part of DDI setup */ 2855 if (!IS_HASWELL(dev)) { 2856 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2857 reg = FDI_TX_CTL(pipe); 2858 temp = I915_READ(reg); 2859 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2860 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2861 2862 POSTING_READ(reg); 2863 udelay(100); 2864 } 2865 } 2866 } 2867 2868 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 2869 { 2870 struct drm_device *dev = intel_crtc->base.dev; 2871 struct drm_i915_private *dev_priv = dev->dev_private; 2872 int pipe = intel_crtc->pipe; 2873 u32 reg, temp; 2874 2875 /* Switch from PCDclk to Rawclk */ 2876 reg = FDI_RX_CTL(pipe); 2877 temp = I915_READ(reg); 2878 I915_WRITE(reg, temp & ~FDI_PCDCLK); 2879 2880 /* Disable CPU FDI TX PLL */ 2881 reg = FDI_TX_CTL(pipe); 2882 temp = I915_READ(reg); 2883 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 2884 2885 POSTING_READ(reg); 2886 udelay(100); 2887 2888 reg = FDI_RX_CTL(pipe); 2889 temp = I915_READ(reg); 2890 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2891 2892 /* Wait for the clocks to turn off. */ 2893 POSTING_READ(reg); 2894 udelay(100); 2895 } 2896 2897 static void ironlake_fdi_disable(struct drm_crtc *crtc) 2898 { 2899 struct drm_device *dev = crtc->dev; 2900 struct drm_i915_private *dev_priv = dev->dev_private; 2901 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2902 int pipe = intel_crtc->pipe; 2903 u32 reg, temp; 2904 2905 /* disable CPU FDI tx and PCH FDI rx */ 2906 reg = FDI_TX_CTL(pipe); 2907 temp = I915_READ(reg); 2908 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 2909 POSTING_READ(reg); 2910 2911 reg = FDI_RX_CTL(pipe); 2912 temp = I915_READ(reg); 2913 temp &= ~(0x7 << 16); 2914 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2915 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2916 2917 POSTING_READ(reg); 2918 udelay(100); 2919 2920 /* Ironlake workaround, disable clock pointer after downing FDI */ 2921 if (HAS_PCH_IBX(dev)) { 2922 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2923 } 2924 2925 /* still set train pattern 1 */ 2926 reg = FDI_TX_CTL(pipe); 2927 temp = I915_READ(reg); 2928 temp &= ~FDI_LINK_TRAIN_NONE; 2929 temp |= FDI_LINK_TRAIN_PATTERN_1; 2930 I915_WRITE(reg, temp); 2931 2932 reg = FDI_RX_CTL(pipe); 2933 temp = I915_READ(reg); 2934 if (HAS_PCH_CPT(dev)) { 2935 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2936 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2937 } else { 2938 temp &= ~FDI_LINK_TRAIN_NONE; 2939 temp |= FDI_LINK_TRAIN_PATTERN_1; 2940 } 2941 /* BPC in FDI rx is consistent with that in PIPECONF */ 2942 temp &= ~(0x07 << 16); 2943 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2944 I915_WRITE(reg, temp); 2945 2946 POSTING_READ(reg); 2947 udelay(100); 2948 } 2949 2950 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 2951 { 2952 struct drm_device *dev = crtc->dev; 2953 struct drm_i915_private *dev_priv = dev->dev_private; 2954 bool pending; 2955 2956 if (atomic_read(&dev_priv->mm.wedged)) 2957 return false; 2958 2959 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 2960 pending = to_intel_crtc(crtc)->unpin_work != NULL; 2961 lockmgr(&dev->event_lock, LK_RELEASE); 2962 2963 return pending; 2964 } 2965 2966 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2967 { 2968 struct drm_device *dev = crtc->dev; 2969 struct drm_i915_private *dev_priv = dev->dev_private; 2970 2971 if (crtc->fb == NULL) 2972 return; 2973 2974 wait_event(dev_priv->pending_flip_queue, 2975 !intel_crtc_has_pending_flip(crtc)); 2976 2977 DRM_LOCK(dev); 2978 intel_finish_fb(crtc->fb); 2979 DRM_UNLOCK(dev); 2980 } 2981 2982 static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) 2983 { 2984 struct drm_device *dev = crtc->dev; 2985 struct intel_encoder *intel_encoder; 2986 2987 /* 2988 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2989 * must be driven by its own crtc; no sharing is possible. 2990 */ 2991 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2992 switch (intel_encoder->type) { 2993 case INTEL_OUTPUT_EDP: 2994 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 2995 return false; 2996 continue; 2997 } 2998 } 2999 3000 return true; 3001 } 3002 3003 static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) 3004 { 3005 return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); 3006 } 3007 3008 /* Program iCLKIP clock to the desired frequency */ 3009 static void lpt_program_iclkip(struct drm_crtc *crtc) 3010 { 3011 struct drm_device *dev = crtc->dev; 3012 struct drm_i915_private *dev_priv = dev->dev_private; 3013 u32 divsel, phaseinc, auxdiv, phasedir = 0; 3014 u32 temp; 3015 3016 /* It is necessary to ungate the pixclk gate prior to programming 3017 * the divisors, and gate it back when it is done. 3018 */ 3019 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 3020 3021 /* Disable SSCCTL */ 3022 intel_sbi_write(dev_priv, SBI_SSCCTL6, 3023 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | 3024 SBI_SSCCTL_DISABLE, 3025 SBI_ICLK); 3026 3027 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 3028 if (crtc->mode.clock == 20000) { 3029 auxdiv = 1; 3030 divsel = 0x41; 3031 phaseinc = 0x20; 3032 } else { 3033 /* The iCLK virtual clock root frequency is in MHz, 3034 * but the crtc->mode.clock in in KHz. To get the divisors, 3035 * it is necessary to divide one by another, so we 3036 * convert the virtual clock precision to KHz here for higher 3037 * precision. 3038 */ 3039 u32 iclk_virtual_root_freq = 172800 * 1000; 3040 u32 iclk_pi_range = 64; 3041 u32 desired_divisor, msb_divisor_value, pi_value; 3042 3043 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); 3044 msb_divisor_value = desired_divisor / iclk_pi_range; 3045 pi_value = desired_divisor % iclk_pi_range; 3046 3047 auxdiv = 0; 3048 divsel = msb_divisor_value - 2; 3049 phaseinc = pi_value; 3050 } 3051 3052 /* This should not happen with any sane values */ 3053 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 3054 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 3055 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 3056 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3057 3058 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3059 crtc->mode.clock, 3060 auxdiv, 3061 divsel, 3062 phasedir, 3063 phaseinc); 3064 3065 /* Program SSCDIVINTPHASE6 */ 3066 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 3067 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 3068 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 3069 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 3070 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 3071 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 3072 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 3073 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 3074 3075 /* Program SSCAUXDIV */ 3076 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 3077 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 3078 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 3079 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 3080 3081 /* Enable modulator and associated divider */ 3082 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 3083 temp &= ~SBI_SSCCTL_DISABLE; 3084 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 3085 3086 /* Wait for initialization time */ 3087 udelay(24); 3088 3089 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 3090 } 3091 3092 /* 3093 * Enable PCH resources required for PCH ports: 3094 * - PCH PLLs 3095 * - FDI training & RX/TX 3096 * - update transcoder timings 3097 * - DP transcoding bits 3098 * - transcoder 3099 */ 3100 static void ironlake_pch_enable(struct drm_crtc *crtc) 3101 { 3102 struct drm_device *dev = crtc->dev; 3103 struct drm_i915_private *dev_priv = dev->dev_private; 3104 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3105 int pipe = intel_crtc->pipe; 3106 u32 reg, temp; 3107 3108 assert_transcoder_disabled(dev_priv, pipe); 3109 3110 /* Write the TU size bits before fdi link training, so that error 3111 * detection works. */ 3112 I915_WRITE(FDI_RX_TUSIZE1(pipe), 3113 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 3114 3115 /* For PCH output, training FDI link */ 3116 dev_priv->display.fdi_link_train(crtc); 3117 3118 /* XXX: pch pll's can be enabled any time before we enable the PCH 3119 * transcoder, and we actually should do this to not upset any PCH 3120 * transcoder that already use the clock when we share it. 3121 * 3122 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll 3123 * unconditionally resets the pll - we need that to have the right LVDS 3124 * enable sequence. */ 3125 ironlake_enable_pch_pll(intel_crtc); 3126 3127 if (HAS_PCH_CPT(dev)) { 3128 u32 sel; 3129 3130 temp = I915_READ(PCH_DPLL_SEL); 3131 switch (pipe) { 3132 default: 3133 case 0: 3134 temp |= TRANSA_DPLL_ENABLE; 3135 sel = TRANSA_DPLLB_SEL; 3136 break; 3137 case 1: 3138 temp |= TRANSB_DPLL_ENABLE; 3139 sel = TRANSB_DPLLB_SEL; 3140 break; 3141 case 2: 3142 temp |= TRANSC_DPLL_ENABLE; 3143 sel = TRANSC_DPLLB_SEL; 3144 break; 3145 } 3146 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B) 3147 temp |= sel; 3148 else 3149 temp &= ~sel; 3150 I915_WRITE(PCH_DPLL_SEL, temp); 3151 } 3152 3153 /* set transcoder timing, panel must allow it */ 3154 assert_panel_unlocked(dev_priv, pipe); 3155 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 3156 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); 3157 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); 3158 3159 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); 3160 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 3161 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 3162 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 3163 3164 intel_fdi_normal_train(crtc); 3165 3166 /* For PCH DP, enable TRANS_DP_CTL */ 3167 if (HAS_PCH_CPT(dev) && 3168 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3169 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3170 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 3171 reg = TRANS_DP_CTL(pipe); 3172 temp = I915_READ(reg); 3173 temp &= ~(TRANS_DP_PORT_SEL_MASK | 3174 TRANS_DP_SYNC_MASK | 3175 TRANS_DP_BPC_MASK); 3176 temp |= (TRANS_DP_OUTPUT_ENABLE | 3177 TRANS_DP_ENH_FRAMING); 3178 temp |= bpc << 9; /* same format but at 11:9 */ 3179 3180 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 3181 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 3182 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 3183 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 3184 3185 switch (intel_trans_dp_port_sel(crtc)) { 3186 case PCH_DP_B: 3187 temp |= TRANS_DP_PORT_SEL_B; 3188 break; 3189 case PCH_DP_C: 3190 temp |= TRANS_DP_PORT_SEL_C; 3191 break; 3192 case PCH_DP_D: 3193 temp |= TRANS_DP_PORT_SEL_D; 3194 break; 3195 default: 3196 BUG(); 3197 } 3198 3199 I915_WRITE(reg, temp); 3200 } 3201 3202 ironlake_enable_pch_transcoder(dev_priv, pipe); 3203 } 3204 3205 static void lpt_pch_enable(struct drm_crtc *crtc) 3206 { 3207 struct drm_device *dev = crtc->dev; 3208 struct drm_i915_private *dev_priv = dev->dev_private; 3209 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3210 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 3211 3212 assert_transcoder_disabled(dev_priv, TRANSCODER_A); 3213 3214 lpt_program_iclkip(crtc); 3215 3216 /* Set transcoder timing. */ 3217 I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder))); 3218 I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder))); 3219 I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder))); 3220 3221 I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder))); 3222 I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder))); 3223 I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder))); 3224 I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder))); 3225 3226 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 3227 } 3228 3229 static void intel_put_pch_pll(struct intel_crtc *intel_crtc) 3230 { 3231 struct intel_pch_pll *pll = intel_crtc->pch_pll; 3232 3233 if (pll == NULL) 3234 return; 3235 3236 if (pll->refcount == 0) { 3237 WARN(1, "bad PCH PLL refcount\n"); 3238 return; 3239 } 3240 3241 --pll->refcount; 3242 intel_crtc->pch_pll = NULL; 3243 } 3244 3245 static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) 3246 { 3247 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 3248 struct intel_pch_pll *pll; 3249 int i; 3250 3251 pll = intel_crtc->pch_pll; 3252 if (pll) { 3253 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", 3254 intel_crtc->base.base.id, pll->pll_reg); 3255 goto prepare; 3256 } 3257 3258 if (HAS_PCH_IBX(dev_priv->dev)) { 3259 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3260 i = intel_crtc->pipe; 3261 pll = &dev_priv->pch_plls[i]; 3262 3263 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", 3264 intel_crtc->base.base.id, pll->pll_reg); 3265 3266 goto found; 3267 } 3268 3269 for (i = 0; i < dev_priv->num_pch_pll; i++) { 3270 pll = &dev_priv->pch_plls[i]; 3271 3272 /* Only want to check enabled timings first */ 3273 if (pll->refcount == 0) 3274 continue; 3275 3276 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && 3277 fp == I915_READ(pll->fp0_reg)) { 3278 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", 3279 intel_crtc->base.base.id, 3280 pll->pll_reg, pll->refcount, pll->active); 3281 3282 goto found; 3283 } 3284 } 3285 3286 /* Ok no matching timings, maybe there's a free one? */ 3287 for (i = 0; i < dev_priv->num_pch_pll; i++) { 3288 pll = &dev_priv->pch_plls[i]; 3289 if (pll->refcount == 0) { 3290 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", 3291 intel_crtc->base.base.id, pll->pll_reg); 3292 goto found; 3293 } 3294 } 3295 3296 return NULL; 3297 3298 found: 3299 intel_crtc->pch_pll = pll; 3300 pll->refcount++; 3301 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); 3302 prepare: /* separate function? */ 3303 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); 3304 3305 /* Wait for the clocks to stabilize before rewriting the regs */ 3306 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); 3307 POSTING_READ(pll->pll_reg); 3308 udelay(150); 3309 3310 I915_WRITE(pll->fp0_reg, fp); 3311 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); 3312 pll->on = false; 3313 return pll; 3314 } 3315 3316 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 3317 { 3318 struct drm_i915_private *dev_priv = dev->dev_private; 3319 int dslreg = PIPEDSL(pipe); 3320 u32 temp; 3321 3322 temp = I915_READ(dslreg); 3323 udelay(500); 3324 if (wait_for(I915_READ(dslreg) != temp, 5)) { 3325 if (wait_for(I915_READ(dslreg) != temp, 5)) 3326 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); 3327 } 3328 } 3329 3330 static void ironlake_crtc_enable(struct drm_crtc *crtc) 3331 { 3332 struct drm_device *dev = crtc->dev; 3333 struct drm_i915_private *dev_priv = dev->dev_private; 3334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3335 struct intel_encoder *encoder; 3336 int pipe = intel_crtc->pipe; 3337 int plane = intel_crtc->plane; 3338 u32 temp; 3339 bool is_pch_port; 3340 3341 WARN_ON(!crtc->enabled); 3342 3343 if (intel_crtc->active) 3344 return; 3345 3346 intel_crtc->active = true; 3347 intel_update_watermarks(dev); 3348 3349 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 3350 temp = I915_READ(PCH_LVDS); 3351 if ((temp & LVDS_PORT_EN) == 0) 3352 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 3353 } 3354 3355 is_pch_port = ironlake_crtc_driving_pch(crtc); 3356 3357 if (is_pch_port) { 3358 /* Note: FDI PLL enabling _must_ be done before we enable the 3359 * cpu pipes, hence this is separate from all the other fdi/pch 3360 * enabling. */ 3361 ironlake_fdi_pll_enable(intel_crtc); 3362 } else { 3363 assert_fdi_tx_disabled(dev_priv, pipe); 3364 assert_fdi_rx_disabled(dev_priv, pipe); 3365 } 3366 3367 for_each_encoder_on_crtc(dev, crtc, encoder) 3368 if (encoder->pre_enable) 3369 encoder->pre_enable(encoder); 3370 3371 /* Enable panel fitting for LVDS */ 3372 if (dev_priv->pch_pf_size && 3373 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 3374 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3375 /* Force use of hard-coded filter coefficients 3376 * as some pre-programmed values are broken, 3377 * e.g. x201. 3378 */ 3379 if (IS_IVYBRIDGE(dev)) 3380 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 3381 PF_PIPE_SEL_IVB(pipe)); 3382 else 3383 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 3384 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 3385 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 3386 } 3387 3388 /* 3389 * On ILK+ LUT must be loaded before the pipe is running but with 3390 * clocks enabled 3391 */ 3392 intel_crtc_load_lut(crtc); 3393 3394 intel_enable_pipe(dev_priv, pipe, is_pch_port); 3395 intel_enable_plane(dev_priv, plane, pipe); 3396 3397 if (is_pch_port) 3398 ironlake_pch_enable(crtc); 3399 3400 DRM_LOCK(dev); 3401 intel_update_fbc(dev); 3402 DRM_UNLOCK(dev); 3403 3404 intel_crtc_update_cursor(crtc, true); 3405 3406 for_each_encoder_on_crtc(dev, crtc, encoder) 3407 encoder->enable(encoder); 3408 3409 if (HAS_PCH_CPT(dev)) 3410 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 3411 3412 /* 3413 * There seems to be a race in PCH platform hw (at least on some 3414 * outputs) where an enabled pipe still completes any pageflip right 3415 * away (as if the pipe is off) instead of waiting for vblank. As soon 3416 * as the first vblank happend, everything works as expected. Hence just 3417 * wait for one vblank before returning to avoid strange things 3418 * happening. 3419 */ 3420 intel_wait_for_vblank(dev, intel_crtc->pipe); 3421 } 3422 3423 static void haswell_crtc_enable(struct drm_crtc *crtc) 3424 { 3425 struct drm_device *dev = crtc->dev; 3426 struct drm_i915_private *dev_priv = dev->dev_private; 3427 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3428 struct intel_encoder *encoder; 3429 int pipe = intel_crtc->pipe; 3430 int plane = intel_crtc->plane; 3431 bool is_pch_port; 3432 3433 WARN_ON(!crtc->enabled); 3434 3435 if (intel_crtc->active) 3436 return; 3437 3438 intel_crtc->active = true; 3439 intel_update_watermarks(dev); 3440 3441 is_pch_port = haswell_crtc_driving_pch(crtc); 3442 3443 if (is_pch_port) 3444 dev_priv->display.fdi_link_train(crtc); 3445 3446 for_each_encoder_on_crtc(dev, crtc, encoder) 3447 if (encoder->pre_enable) 3448 encoder->pre_enable(encoder); 3449 3450 intel_ddi_enable_pipe_clock(intel_crtc); 3451 3452 /* Enable panel fitting for eDP */ 3453 if (dev_priv->pch_pf_size && 3454 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 3455 /* Force use of hard-coded filter coefficients 3456 * as some pre-programmed values are broken, 3457 * e.g. x201. 3458 */ 3459 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 3460 PF_PIPE_SEL_IVB(pipe)); 3461 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 3462 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 3463 } 3464 3465 /* 3466 * On ILK+ LUT must be loaded before the pipe is running but with 3467 * clocks enabled 3468 */ 3469 intel_crtc_load_lut(crtc); 3470 3471 intel_ddi_set_pipe_settings(crtc); 3472 intel_ddi_enable_pipe_func(crtc); 3473 3474 intel_enable_pipe(dev_priv, pipe, is_pch_port); 3475 intel_enable_plane(dev_priv, plane, pipe); 3476 3477 if (is_pch_port) 3478 lpt_pch_enable(crtc); 3479 3480 DRM_LOCK(dev); 3481 intel_update_fbc(dev); 3482 DRM_UNLOCK(dev); 3483 3484 intel_crtc_update_cursor(crtc, true); 3485 3486 for_each_encoder_on_crtc(dev, crtc, encoder) 3487 encoder->enable(encoder); 3488 3489 /* 3490 * There seems to be a race in PCH platform hw (at least on some 3491 * outputs) where an enabled pipe still completes any pageflip right 3492 * away (as if the pipe is off) instead of waiting for vblank. As soon 3493 * as the first vblank happend, everything works as expected. Hence just 3494 * wait for one vblank before returning to avoid strange things 3495 * happening. 3496 */ 3497 intel_wait_for_vblank(dev, intel_crtc->pipe); 3498 } 3499 3500 static void ironlake_crtc_disable(struct drm_crtc *crtc) 3501 { 3502 struct drm_device *dev = crtc->dev; 3503 struct drm_i915_private *dev_priv = dev->dev_private; 3504 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3505 struct intel_encoder *encoder; 3506 int pipe = intel_crtc->pipe; 3507 int plane = intel_crtc->plane; 3508 u32 reg, temp; 3509 3510 3511 if (!intel_crtc->active) 3512 return; 3513 3514 for_each_encoder_on_crtc(dev, crtc, encoder) 3515 encoder->disable(encoder); 3516 3517 intel_crtc_wait_for_pending_flips(crtc); 3518 drm_vblank_off(dev, pipe); 3519 intel_crtc_update_cursor(crtc, false); 3520 3521 intel_disable_plane(dev_priv, plane, pipe); 3522 3523 if (dev_priv->cfb_plane == plane) 3524 intel_disable_fbc(dev); 3525 3526 intel_disable_pipe(dev_priv, pipe); 3527 3528 /* Disable PF */ 3529 I915_WRITE(PF_CTL(pipe), 0); 3530 I915_WRITE(PF_WIN_SZ(pipe), 0); 3531 3532 for_each_encoder_on_crtc(dev, crtc, encoder) 3533 if (encoder->post_disable) 3534 encoder->post_disable(encoder); 3535 3536 ironlake_fdi_disable(crtc); 3537 3538 ironlake_disable_pch_transcoder(dev_priv, pipe); 3539 3540 if (HAS_PCH_CPT(dev)) { 3541 /* disable TRANS_DP_CTL */ 3542 reg = TRANS_DP_CTL(pipe); 3543 temp = I915_READ(reg); 3544 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 3545 temp |= TRANS_DP_PORT_SEL_NONE; 3546 I915_WRITE(reg, temp); 3547 3548 /* disable DPLL_SEL */ 3549 temp = I915_READ(PCH_DPLL_SEL); 3550 switch (pipe) { 3551 case 0: 3552 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 3553 break; 3554 case 1: 3555 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 3556 break; 3557 case 2: 3558 /* C shares PLL A or B */ 3559 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 3560 break; 3561 default: 3562 BUG(); /* wtf */ 3563 } 3564 I915_WRITE(PCH_DPLL_SEL, temp); 3565 } 3566 3567 /* disable PCH DPLL */ 3568 intel_disable_pch_pll(intel_crtc); 3569 3570 ironlake_fdi_pll_disable(intel_crtc); 3571 3572 intel_crtc->active = false; 3573 intel_update_watermarks(dev); 3574 3575 DRM_LOCK(dev); 3576 intel_update_fbc(dev); 3577 DRM_UNLOCK(dev); 3578 } 3579 3580 static void haswell_crtc_disable(struct drm_crtc *crtc) 3581 { 3582 struct drm_device *dev = crtc->dev; 3583 struct drm_i915_private *dev_priv = dev->dev_private; 3584 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3585 struct intel_encoder *encoder; 3586 int pipe = intel_crtc->pipe; 3587 int plane = intel_crtc->plane; 3588 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 3589 bool is_pch_port; 3590 3591 if (!intel_crtc->active) 3592 return; 3593 3594 is_pch_port = haswell_crtc_driving_pch(crtc); 3595 3596 for_each_encoder_on_crtc(dev, crtc, encoder) 3597 encoder->disable(encoder); 3598 3599 intel_crtc_wait_for_pending_flips(crtc); 3600 drm_vblank_off(dev, pipe); 3601 intel_crtc_update_cursor(crtc, false); 3602 3603 intel_disable_plane(dev_priv, plane, pipe); 3604 3605 if (dev_priv->cfb_plane == plane) 3606 intel_disable_fbc(dev); 3607 3608 intel_disable_pipe(dev_priv, pipe); 3609 3610 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 3611 3612 /* Disable PF */ 3613 I915_WRITE(PF_CTL(pipe), 0); 3614 I915_WRITE(PF_WIN_SZ(pipe), 0); 3615 3616 intel_ddi_disable_pipe_clock(intel_crtc); 3617 3618 for_each_encoder_on_crtc(dev, crtc, encoder) 3619 if (encoder->post_disable) 3620 encoder->post_disable(encoder); 3621 3622 if (is_pch_port) { 3623 lpt_disable_pch_transcoder(dev_priv); 3624 intel_ddi_fdi_disable(crtc); 3625 } 3626 3627 intel_crtc->active = false; 3628 intel_update_watermarks(dev); 3629 3630 DRM_LOCK(dev); 3631 intel_update_fbc(dev); 3632 DRM_UNLOCK(dev); 3633 } 3634 3635 static void ironlake_crtc_off(struct drm_crtc *crtc) 3636 { 3637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3638 intel_put_pch_pll(intel_crtc); 3639 } 3640 3641 static void haswell_crtc_off(struct drm_crtc *crtc) 3642 { 3643 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3644 3645 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might 3646 * start using it. */ 3647 intel_crtc->cpu_transcoder = intel_crtc->pipe; 3648 3649 intel_ddi_put_crtc_pll(crtc); 3650 } 3651 3652 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 3653 { 3654 if (!enable && intel_crtc->overlay) { 3655 struct drm_device *dev = intel_crtc->base.dev; 3656 struct drm_i915_private *dev_priv = dev->dev_private; 3657 3658 DRM_LOCK(dev); 3659 dev_priv->mm.interruptible = false; 3660 (void) intel_overlay_switch_off(intel_crtc->overlay); 3661 dev_priv->mm.interruptible = true; 3662 DRM_UNLOCK(dev); 3663 } 3664 3665 /* Let userspace switch the overlay on again. In most cases userspace 3666 * has to recompute where to put it anyway. 3667 */ 3668 } 3669 3670 static void i9xx_crtc_enable(struct drm_crtc *crtc) 3671 { 3672 struct drm_device *dev = crtc->dev; 3673 struct drm_i915_private *dev_priv = dev->dev_private; 3674 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3675 struct intel_encoder *encoder; 3676 int pipe = intel_crtc->pipe; 3677 int plane = intel_crtc->plane; 3678 3679 WARN_ON(!crtc->enabled); 3680 3681 if (intel_crtc->active) 3682 return; 3683 3684 intel_crtc->active = true; 3685 intel_update_watermarks(dev); 3686 3687 intel_enable_pll(dev_priv, pipe); 3688 intel_enable_pipe(dev_priv, pipe, false); 3689 intel_enable_plane(dev_priv, plane, pipe); 3690 3691 intel_crtc_load_lut(crtc); 3692 intel_update_fbc(dev); 3693 3694 /* Give the overlay scaler a chance to enable if it's on this pipe */ 3695 intel_crtc_dpms_overlay(intel_crtc, true); 3696 intel_crtc_update_cursor(crtc, true); 3697 3698 for_each_encoder_on_crtc(dev, crtc, encoder) 3699 encoder->enable(encoder); 3700 } 3701 3702 static void i9xx_crtc_disable(struct drm_crtc *crtc) 3703 { 3704 struct drm_device *dev = crtc->dev; 3705 struct drm_i915_private *dev_priv = dev->dev_private; 3706 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3707 struct intel_encoder *encoder; 3708 int pipe = intel_crtc->pipe; 3709 int plane = intel_crtc->plane; 3710 u32 pctl; 3711 3712 3713 if (!intel_crtc->active) 3714 return; 3715 3716 for_each_encoder_on_crtc(dev, crtc, encoder) 3717 encoder->disable(encoder); 3718 3719 /* Give the overlay scaler a chance to disable if it's on this pipe */ 3720 intel_crtc_wait_for_pending_flips(crtc); 3721 drm_vblank_off(dev, pipe); 3722 intel_crtc_dpms_overlay(intel_crtc, false); 3723 intel_crtc_update_cursor(crtc, false); 3724 3725 if (dev_priv->cfb_plane == plane) 3726 intel_disable_fbc(dev); 3727 3728 intel_disable_plane(dev_priv, plane, pipe); 3729 intel_disable_pipe(dev_priv, pipe); 3730 3731 /* Disable pannel fitter if it is on this pipe. */ 3732 pctl = I915_READ(PFIT_CONTROL); 3733 if ((pctl & PFIT_ENABLE) && 3734 ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) 3735 I915_WRITE(PFIT_CONTROL, 0); 3736 3737 intel_disable_pll(dev_priv, pipe); 3738 3739 intel_crtc->active = false; 3740 intel_update_fbc(dev); 3741 intel_update_watermarks(dev); 3742 } 3743 3744 static void i9xx_crtc_off(struct drm_crtc *crtc) 3745 { 3746 } 3747 3748 static void intel_crtc_update_sarea(struct drm_crtc *crtc, 3749 bool enabled) 3750 { 3751 struct drm_device *dev = crtc->dev; 3752 struct drm_i915_private *dev_priv = dev->dev_private; 3753 #if 0 3754 struct drm_i915_master_private *master_priv; 3755 #endif 3756 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3757 int pipe = intel_crtc->pipe; 3758 3759 #if 0 3760 if (!dev->primary->master) 3761 return; 3762 3763 master_priv = dev->primary->master->driver_priv; 3764 if (!master_priv->sarea_priv) 3765 return; 3766 #else 3767 if (!dev_priv->sarea_priv) 3768 return; 3769 #endif 3770 3771 switch (pipe) { 3772 case 0: 3773 #if 0 3774 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 3775 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; 3776 #else 3777 dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; 3778 dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; 3779 #endif 3780 break; 3781 case 1: 3782 #if 0 3783 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; 3784 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; 3785 #else 3786 dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; 3787 dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0; 3788 #endif 3789 break; 3790 default: 3791 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); 3792 break; 3793 } 3794 } 3795 3796 /** 3797 * Sets the power management mode of the pipe and plane. 3798 */ 3799 void intel_crtc_update_dpms(struct drm_crtc *crtc) 3800 { 3801 struct drm_device *dev = crtc->dev; 3802 struct drm_i915_private *dev_priv = dev->dev_private; 3803 struct intel_encoder *intel_encoder; 3804 bool enable = false; 3805 3806 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 3807 enable |= intel_encoder->connectors_active; 3808 3809 if (enable) 3810 dev_priv->display.crtc_enable(crtc); 3811 else 3812 dev_priv->display.crtc_disable(crtc); 3813 3814 intel_crtc_update_sarea(crtc, enable); 3815 } 3816 3817 static void intel_crtc_noop(struct drm_crtc *crtc) 3818 { 3819 } 3820 3821 static void intel_crtc_disable(struct drm_crtc *crtc) 3822 { 3823 struct drm_device *dev = crtc->dev; 3824 struct drm_connector *connector; 3825 struct drm_i915_private *dev_priv = dev->dev_private; 3826 3827 /* crtc should still be enabled when we disable it. */ 3828 WARN_ON(!crtc->enabled); 3829 3830 dev_priv->display.crtc_disable(crtc); 3831 intel_crtc_update_sarea(crtc, false); 3832 dev_priv->display.off(crtc); 3833 3834 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 3835 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 3836 3837 if (crtc->fb) { 3838 DRM_LOCK(dev); 3839 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 3840 DRM_UNLOCK(dev); 3841 crtc->fb = NULL; 3842 } 3843 3844 /* Update computed state. */ 3845 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3846 if (!connector->encoder || !connector->encoder->crtc) 3847 continue; 3848 3849 if (connector->encoder->crtc != crtc) 3850 continue; 3851 3852 connector->dpms = DRM_MODE_DPMS_OFF; 3853 to_intel_encoder(connector->encoder)->connectors_active = false; 3854 } 3855 } 3856 3857 void intel_modeset_disable(struct drm_device *dev) 3858 { 3859 struct drm_crtc *crtc; 3860 3861 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3862 if (crtc->enabled) 3863 intel_crtc_disable(crtc); 3864 } 3865 } 3866 3867 void intel_encoder_noop(struct drm_encoder *encoder) 3868 { 3869 } 3870 3871 void intel_encoder_destroy(struct drm_encoder *encoder) 3872 { 3873 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3874 3875 drm_encoder_cleanup(encoder); 3876 drm_free(intel_encoder, DRM_MEM_KMS); 3877 } 3878 3879 /* Simple dpms helper for encodres with just one connector, no cloning and only 3880 * one kind of off state. It clamps all !ON modes to fully OFF and changes the 3881 * state of the entire output pipe. */ 3882 void intel_encoder_dpms(struct intel_encoder *encoder, int mode) 3883 { 3884 if (mode == DRM_MODE_DPMS_ON) { 3885 encoder->connectors_active = true; 3886 3887 intel_crtc_update_dpms(encoder->base.crtc); 3888 } else { 3889 encoder->connectors_active = false; 3890 3891 intel_crtc_update_dpms(encoder->base.crtc); 3892 } 3893 } 3894 3895 /* Cross check the actual hw state with our own modeset state tracking (and it's 3896 * internal consistency). */ 3897 static void intel_connector_check_state(struct intel_connector *connector) 3898 { 3899 if (connector->get_hw_state(connector)) { 3900 struct intel_encoder *encoder = connector->encoder; 3901 struct drm_crtc *crtc; 3902 bool encoder_enabled; 3903 enum i915_pipe pipe; 3904 3905 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 3906 connector->base.base.id, 3907 drm_get_connector_name(&connector->base)); 3908 3909 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, 3910 "wrong connector dpms state\n"); 3911 WARN(connector->base.encoder != &encoder->base, 3912 "active connector not linked to encoder\n"); 3913 WARN(!encoder->connectors_active, 3914 "encoder->connectors_active not set\n"); 3915 3916 encoder_enabled = encoder->get_hw_state(encoder, &pipe); 3917 WARN(!encoder_enabled, "encoder not enabled\n"); 3918 if (WARN_ON(!encoder->base.crtc)) 3919 return; 3920 3921 crtc = encoder->base.crtc; 3922 3923 WARN(!crtc->enabled, "crtc not enabled\n"); 3924 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); 3925 WARN(pipe != to_intel_crtc(crtc)->pipe, 3926 "encoder active on the wrong pipe\n"); 3927 } 3928 } 3929 3930 /* Even simpler default implementation, if there's really no special case to 3931 * consider. */ 3932 void intel_connector_dpms(struct drm_connector *connector, int mode) 3933 { 3934 struct intel_encoder *encoder = intel_attached_encoder(connector); 3935 3936 /* All the simple cases only support two dpms states. */ 3937 if (mode != DRM_MODE_DPMS_ON) 3938 mode = DRM_MODE_DPMS_OFF; 3939 3940 if (mode == connector->dpms) 3941 return; 3942 3943 connector->dpms = mode; 3944 3945 /* Only need to change hw state when actually enabled */ 3946 if (encoder->base.crtc) 3947 intel_encoder_dpms(encoder, mode); 3948 else 3949 WARN_ON(encoder->connectors_active != false); 3950 3951 intel_modeset_check_state(connector->dev); 3952 } 3953 3954 /* Simple connector->get_hw_state implementation for encoders that support only 3955 * one connector and no cloning and hence the encoder state determines the state 3956 * of the connector. */ 3957 bool intel_connector_get_hw_state(struct intel_connector *connector) 3958 { 3959 enum i915_pipe pipe = 0; 3960 struct intel_encoder *encoder = connector->encoder; 3961 3962 return encoder->get_hw_state(encoder, &pipe); 3963 } 3964 3965 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, 3966 const struct drm_display_mode *mode, 3967 struct drm_display_mode *adjusted_mode) 3968 { 3969 struct drm_device *dev = crtc->dev; 3970 3971 if (HAS_PCH_SPLIT(dev)) { 3972 /* FDI link clock is fixed at 2.7G */ 3973 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 3974 return false; 3975 } 3976 3977 /* All interlaced capable intel hw wants timings in frames. Note though 3978 * that intel_lvds_mode_fixup does some funny tricks with the crtc 3979 * timings, so we need to be careful not to clobber these.*/ 3980 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) 3981 drm_mode_set_crtcinfo(adjusted_mode, 0); 3982 3983 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes 3984 * with a hsync front porch of 0. 3985 */ 3986 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 3987 adjusted_mode->hsync_start == adjusted_mode->hdisplay) 3988 return false; 3989 3990 return true; 3991 } 3992 3993 static int valleyview_get_display_clock_speed(struct drm_device *dev) 3994 { 3995 return 400000; /* FIXME */ 3996 } 3997 3998 static int i945_get_display_clock_speed(struct drm_device *dev) 3999 { 4000 return 400000; 4001 } 4002 4003 static int i915_get_display_clock_speed(struct drm_device *dev) 4004 { 4005 return 333000; 4006 } 4007 4008 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 4009 { 4010 return 200000; 4011 } 4012 4013 static int i915gm_get_display_clock_speed(struct drm_device *dev) 4014 { 4015 u16 gcfgc = 0; 4016 4017 gcfgc = pci_read_config(dev->dev, GCFGC, 2); 4018 4019 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 4020 return 133000; 4021 else { 4022 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 4023 case GC_DISPLAY_CLOCK_333_MHZ: 4024 return 333000; 4025 default: 4026 case GC_DISPLAY_CLOCK_190_200_MHZ: 4027 return 190000; 4028 } 4029 } 4030 } 4031 4032 static int i865_get_display_clock_speed(struct drm_device *dev) 4033 { 4034 return 266000; 4035 } 4036 4037 static int i855_get_display_clock_speed(struct drm_device *dev) 4038 { 4039 u16 hpllcc = 0; 4040 /* Assume that the hardware is in the high speed state. This 4041 * should be the default. 4042 */ 4043 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 4044 case GC_CLOCK_133_200: 4045 case GC_CLOCK_100_200: 4046 return 200000; 4047 case GC_CLOCK_166_250: 4048 return 250000; 4049 case GC_CLOCK_100_133: 4050 return 133000; 4051 } 4052 4053 /* Shouldn't happen */ 4054 return 0; 4055 } 4056 4057 static int i830_get_display_clock_speed(struct drm_device *dev) 4058 { 4059 return 133000; 4060 } 4061 4062 struct fdi_m_n { 4063 u32 tu; 4064 u32 gmch_m; 4065 u32 gmch_n; 4066 u32 link_m; 4067 u32 link_n; 4068 }; 4069 4070 static void 4071 fdi_reduce_ratio(u32 *num, u32 *den) 4072 { 4073 while (*num > 0xffffff || *den > 0xffffff) { 4074 *num >>= 1; 4075 *den >>= 1; 4076 } 4077 } 4078 4079 static void 4080 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 4081 int link_clock, struct fdi_m_n *m_n) 4082 { 4083 m_n->tu = 64; /* default size */ 4084 4085 /* BUG_ON(pixel_clock > INT_MAX / 36); */ 4086 m_n->gmch_m = bits_per_pixel * pixel_clock; 4087 m_n->gmch_n = link_clock * nlanes * 8; 4088 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 4089 4090 m_n->link_m = pixel_clock; 4091 m_n->link_n = link_clock; 4092 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 4093 } 4094 4095 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4096 { 4097 if (i915_panel_use_ssc >= 0) 4098 return i915_panel_use_ssc != 0; 4099 return dev_priv->lvds_use_ssc 4100 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4101 } 4102 4103 /** 4104 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send 4105 * @crtc: CRTC structure 4106 * @mode: requested mode 4107 * 4108 * A pipe may be connected to one or more outputs. Based on the depth of the 4109 * attached framebuffer, choose a good color depth to use on the pipe. 4110 * 4111 * If possible, match the pipe depth to the fb depth. In some cases, this 4112 * isn't ideal, because the connected output supports a lesser or restricted 4113 * set of depths. Resolve that here: 4114 * LVDS typically supports only 6bpc, so clamp down in that case 4115 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc 4116 * Displays may support a restricted set as well, check EDID and clamp as 4117 * appropriate. 4118 * DP may want to dither down to 6bpc to fit larger modes 4119 * 4120 * RETURNS: 4121 * Dithering requirement (i.e. false if display bpc and pipe bpc match, 4122 * true if they don't match). 4123 */ 4124 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 4125 struct drm_framebuffer *fb, 4126 unsigned int *pipe_bpp, 4127 struct drm_display_mode *mode) 4128 { 4129 struct drm_device *dev = crtc->dev; 4130 struct drm_i915_private *dev_priv = dev->dev_private; 4131 struct drm_connector *connector; 4132 struct intel_encoder *intel_encoder; 4133 unsigned int display_bpc = UINT_MAX, bpc; 4134 4135 /* Walk the encoders & connectors on this crtc, get min bpc */ 4136 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 4137 4138 if (intel_encoder->type == INTEL_OUTPUT_LVDS) { 4139 unsigned int lvds_bpc; 4140 4141 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == 4142 LVDS_A3_POWER_UP) 4143 lvds_bpc = 8; 4144 else 4145 lvds_bpc = 6; 4146 4147 if (lvds_bpc < display_bpc) { 4148 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 4149 display_bpc = lvds_bpc; 4150 } 4151 continue; 4152 } 4153 4154 /* Not one of the known troublemakers, check the EDID */ 4155 list_for_each_entry(connector, &dev->mode_config.connector_list, 4156 head) { 4157 if (connector->encoder != &intel_encoder->base) 4158 continue; 4159 4160 /* Don't use an invalid EDID bpc value */ 4161 if (connector->display_info.bpc && 4162 connector->display_info.bpc < display_bpc) { 4163 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4164 display_bpc = connector->display_info.bpc; 4165 } 4166 } 4167 4168 if (intel_encoder->type == INTEL_OUTPUT_EDP) { 4169 /* Use VBT settings if we have an eDP panel */ 4170 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 4171 4172 if (edp_bpc && edp_bpc < display_bpc) { 4173 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 4174 display_bpc = edp_bpc; 4175 } 4176 continue; 4177 } 4178 4179 /* 4180 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 4181 * through, clamp it down. (Note: >12bpc will be caught below.) 4182 */ 4183 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 4184 if (display_bpc > 8 && display_bpc < 12) { 4185 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); 4186 display_bpc = 12; 4187 } else { 4188 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); 4189 display_bpc = 8; 4190 } 4191 } 4192 } 4193 4194 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4195 DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); 4196 display_bpc = 6; 4197 } 4198 4199 /* 4200 * We could just drive the pipe at the highest bpc all the time and 4201 * enable dithering as needed, but that costs bandwidth. So choose 4202 * the minimum value that expresses the full color range of the fb but 4203 * also stays within the max display bpc discovered above. 4204 */ 4205 4206 switch (fb->depth) { 4207 case 8: 4208 bpc = 8; /* since we go through a colormap */ 4209 break; 4210 case 15: 4211 case 16: 4212 bpc = 6; /* min is 18bpp */ 4213 break; 4214 case 24: 4215 bpc = 8; 4216 break; 4217 case 30: 4218 bpc = 10; 4219 break; 4220 case 48: 4221 bpc = 12; 4222 break; 4223 default: 4224 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); 4225 bpc = min((unsigned int)8, display_bpc); 4226 break; 4227 } 4228 4229 display_bpc = min(display_bpc, bpc); 4230 4231 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", 4232 bpc, display_bpc); 4233 4234 *pipe_bpp = display_bpc * 3; 4235 4236 return display_bpc != bpc; 4237 } 4238 4239 static int vlv_get_refclk(struct drm_crtc *crtc) 4240 { 4241 struct drm_device *dev = crtc->dev; 4242 struct drm_i915_private *dev_priv = dev->dev_private; 4243 int refclk = 27000; /* for DP & HDMI */ 4244 4245 return 100000; /* only one validated so far */ 4246 4247 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 4248 refclk = 96000; 4249 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 4250 if (intel_panel_use_ssc(dev_priv)) 4251 refclk = 100000; 4252 else 4253 refclk = 96000; 4254 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 4255 refclk = 100000; 4256 } 4257 4258 return refclk; 4259 } 4260 4261 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 4262 { 4263 struct drm_device *dev = crtc->dev; 4264 struct drm_i915_private *dev_priv = dev->dev_private; 4265 int refclk; 4266 4267 if (IS_VALLEYVIEW(dev)) { 4268 refclk = vlv_get_refclk(crtc); 4269 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4270 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 4271 refclk = dev_priv->lvds_ssc_freq * 1000; 4272 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 4273 refclk / 1000); 4274 } else if (!IS_GEN2(dev)) { 4275 refclk = 96000; 4276 } else { 4277 refclk = 48000; 4278 } 4279 4280 return refclk; 4281 } 4282 4283 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, 4284 intel_clock_t *clock) 4285 { 4286 /* SDVO TV has fixed PLL values depend on its clock range, 4287 this mirrors vbios setting. */ 4288 if (adjusted_mode->clock >= 100000 4289 && adjusted_mode->clock < 140500) { 4290 clock->p1 = 2; 4291 clock->p2 = 10; 4292 clock->n = 3; 4293 clock->m1 = 16; 4294 clock->m2 = 8; 4295 } else if (adjusted_mode->clock >= 140500 4296 && adjusted_mode->clock <= 200000) { 4297 clock->p1 = 1; 4298 clock->p2 = 10; 4299 clock->n = 6; 4300 clock->m1 = 12; 4301 clock->m2 = 8; 4302 } 4303 } 4304 4305 static void i9xx_update_pll_dividers(struct drm_crtc *crtc, 4306 intel_clock_t *clock, 4307 intel_clock_t *reduced_clock) 4308 { 4309 struct drm_device *dev = crtc->dev; 4310 struct drm_i915_private *dev_priv = dev->dev_private; 4311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4312 int pipe = intel_crtc->pipe; 4313 u32 fp, fp2 = 0; 4314 4315 if (IS_PINEVIEW(dev)) { 4316 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; 4317 if (reduced_clock) 4318 fp2 = (1 << reduced_clock->n) << 16 | 4319 reduced_clock->m1 << 8 | reduced_clock->m2; 4320 } else { 4321 fp = clock->n << 16 | clock->m1 << 8 | clock->m2; 4322 if (reduced_clock) 4323 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | 4324 reduced_clock->m2; 4325 } 4326 4327 I915_WRITE(FP0(pipe), fp); 4328 4329 intel_crtc->lowfreq_avail = false; 4330 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4331 reduced_clock && i915_powersave) { 4332 I915_WRITE(FP1(pipe), fp2); 4333 intel_crtc->lowfreq_avail = true; 4334 } else { 4335 I915_WRITE(FP1(pipe), fp); 4336 } 4337 } 4338 4339 static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, 4340 struct drm_display_mode *adjusted_mode) 4341 { 4342 struct drm_device *dev = crtc->dev; 4343 struct drm_i915_private *dev_priv = dev->dev_private; 4344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4345 int pipe = intel_crtc->pipe; 4346 u32 temp; 4347 4348 temp = I915_READ(LVDS); 4349 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 4350 if (pipe == 1) { 4351 temp |= LVDS_PIPEB_SELECT; 4352 } else { 4353 temp &= ~LVDS_PIPEB_SELECT; 4354 } 4355 /* set the corresponsding LVDS_BORDER bit */ 4356 temp |= dev_priv->lvds_border_bits; 4357 /* Set the B0-B3 data pairs corresponding to whether we're going to 4358 * set the DPLLs for dual-channel mode or not. 4359 */ 4360 if (clock->p2 == 7) 4361 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 4362 else 4363 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 4364 4365 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 4366 * appropriately here, but we need to look more thoroughly into how 4367 * panels behave in the two modes. 4368 */ 4369 /* set the dithering flag on LVDS as needed */ 4370 if (INTEL_INFO(dev)->gen >= 4) { 4371 if (dev_priv->lvds_dither) 4372 temp |= LVDS_ENABLE_DITHER; 4373 else 4374 temp &= ~LVDS_ENABLE_DITHER; 4375 } 4376 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 4377 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 4378 temp |= LVDS_HSYNC_POLARITY; 4379 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 4380 temp |= LVDS_VSYNC_POLARITY; 4381 I915_WRITE(LVDS, temp); 4382 } 4383 4384 static void vlv_update_pll(struct drm_crtc *crtc, 4385 struct drm_display_mode *mode, 4386 struct drm_display_mode *adjusted_mode, 4387 intel_clock_t *clock, intel_clock_t *reduced_clock, 4388 int num_connectors) 4389 { 4390 struct drm_device *dev = crtc->dev; 4391 struct drm_i915_private *dev_priv = dev->dev_private; 4392 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4393 int pipe = intel_crtc->pipe; 4394 u32 dpll, mdiv, pdiv; 4395 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4396 bool is_sdvo; 4397 u32 temp; 4398 4399 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4400 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4401 4402 dpll = DPLL_VGA_MODE_DIS; 4403 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV; 4404 dpll |= DPLL_REFA_CLK_ENABLE_VLV; 4405 dpll |= DPLL_INTEGRATED_CLOCK_VLV; 4406 4407 I915_WRITE(DPLL(pipe), dpll); 4408 POSTING_READ(DPLL(pipe)); 4409 4410 bestn = clock->n; 4411 bestm1 = clock->m1; 4412 bestm2 = clock->m2; 4413 bestp1 = clock->p1; 4414 bestp2 = clock->p2; 4415 4416 /* 4417 * In Valleyview PLL and program lane counter registers are exposed 4418 * through DPIO interface 4419 */ 4420 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4421 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 4422 mdiv |= ((bestn << DPIO_N_SHIFT)); 4423 mdiv |= (1 << DPIO_POST_DIV_SHIFT); 4424 mdiv |= (1 << DPIO_K_SHIFT); 4425 mdiv |= DPIO_ENABLE_CALIBRATION; 4426 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); 4427 4428 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); 4429 4430 pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) | 4431 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | 4432 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) | 4433 (5 << DPIO_CLK_BIAS_CTL_SHIFT); 4434 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); 4435 4436 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b); 4437 4438 dpll |= DPLL_VCO_ENABLE; 4439 I915_WRITE(DPLL(pipe), dpll); 4440 POSTING_READ(DPLL(pipe)); 4441 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 4442 DRM_ERROR("DPLL %d failed to lock\n", pipe); 4443 4444 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620); 4445 4446 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4447 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4448 4449 I915_WRITE(DPLL(pipe), dpll); 4450 4451 /* Wait for the clocks to stabilize. */ 4452 POSTING_READ(DPLL(pipe)); 4453 udelay(150); 4454 4455 temp = 0; 4456 if (is_sdvo) { 4457 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4458 if (temp > 1) 4459 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4460 else 4461 temp = 0; 4462 } 4463 I915_WRITE(DPLL_MD(pipe), temp); 4464 POSTING_READ(DPLL_MD(pipe)); 4465 4466 /* Now program lane control registers */ 4467 if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) 4468 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 4469 { 4470 temp = 0x1000C4; 4471 if(pipe == 1) 4472 temp |= (1 << 21); 4473 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp); 4474 } 4475 if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP)) 4476 { 4477 temp = 0x1000C4; 4478 if(pipe == 1) 4479 temp |= (1 << 21); 4480 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); 4481 } 4482 } 4483 4484 static void i9xx_update_pll(struct drm_crtc *crtc, 4485 struct drm_display_mode *mode, 4486 struct drm_display_mode *adjusted_mode, 4487 intel_clock_t *clock, intel_clock_t *reduced_clock, 4488 int num_connectors) 4489 { 4490 struct drm_device *dev = crtc->dev; 4491 struct drm_i915_private *dev_priv = dev->dev_private; 4492 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4493 int pipe = intel_crtc->pipe; 4494 u32 dpll; 4495 bool is_sdvo; 4496 4497 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4498 4499 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4500 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4501 4502 dpll = DPLL_VGA_MODE_DIS; 4503 4504 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4505 dpll |= DPLLB_MODE_LVDS; 4506 else 4507 dpll |= DPLLB_MODE_DAC_SERIAL; 4508 if (is_sdvo) { 4509 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 4510 if (pixel_multiplier > 1) { 4511 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4512 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 4513 } 4514 dpll |= DPLL_DVO_HIGH_SPEED; 4515 } 4516 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4517 dpll |= DPLL_DVO_HIGH_SPEED; 4518 4519 /* compute bitmask from p1 value */ 4520 if (IS_PINEVIEW(dev)) 4521 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 4522 else { 4523 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4524 if (IS_G4X(dev) && reduced_clock) 4525 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 4526 } 4527 switch (clock->p2) { 4528 case 5: 4529 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 4530 break; 4531 case 7: 4532 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 4533 break; 4534 case 10: 4535 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 4536 break; 4537 case 14: 4538 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 4539 break; 4540 } 4541 if (INTEL_INFO(dev)->gen >= 4) 4542 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 4543 4544 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4545 dpll |= PLL_REF_INPUT_TVCLKINBC; 4546 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4547 /* XXX: just matching BIOS for now */ 4548 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4549 dpll |= 3; 4550 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4551 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4552 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4553 else 4554 dpll |= PLL_REF_INPUT_DREFCLK; 4555 4556 dpll |= DPLL_VCO_ENABLE; 4557 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4558 POSTING_READ(DPLL(pipe)); 4559 udelay(150); 4560 4561 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4562 * This is an exception to the general rule that mode_set doesn't turn 4563 * things on. 4564 */ 4565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4566 intel_update_lvds(crtc, clock, adjusted_mode); 4567 4568 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4569 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4570 4571 I915_WRITE(DPLL(pipe), dpll); 4572 4573 /* Wait for the clocks to stabilize. */ 4574 POSTING_READ(DPLL(pipe)); 4575 udelay(150); 4576 4577 if (INTEL_INFO(dev)->gen >= 4) { 4578 u32 temp = 0; 4579 if (is_sdvo) { 4580 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4581 if (temp > 1) 4582 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4583 else 4584 temp = 0; 4585 } 4586 I915_WRITE(DPLL_MD(pipe), temp); 4587 } else { 4588 /* The pixel multiplier can only be updated once the 4589 * DPLL is enabled and the clocks are stable. 4590 * 4591 * So write it again. 4592 */ 4593 I915_WRITE(DPLL(pipe), dpll); 4594 } 4595 } 4596 4597 static void i8xx_update_pll(struct drm_crtc *crtc, 4598 struct drm_display_mode *adjusted_mode, 4599 intel_clock_t *clock, intel_clock_t *reduced_clock, 4600 int num_connectors) 4601 { 4602 struct drm_device *dev = crtc->dev; 4603 struct drm_i915_private *dev_priv = dev->dev_private; 4604 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4605 int pipe = intel_crtc->pipe; 4606 u32 dpll; 4607 4608 i9xx_update_pll_dividers(crtc, clock, reduced_clock); 4609 4610 dpll = DPLL_VGA_MODE_DIS; 4611 4612 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 4613 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4614 } else { 4615 if (clock->p1 == 2) 4616 dpll |= PLL_P1_DIVIDE_BY_TWO; 4617 else 4618 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 4619 if (clock->p2 == 4) 4620 dpll |= PLL_P2_DIVIDE_BY_4; 4621 } 4622 4623 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4624 /* XXX: just matching BIOS for now */ 4625 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4626 dpll |= 3; 4627 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4628 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4629 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4630 else 4631 dpll |= PLL_REF_INPUT_DREFCLK; 4632 4633 dpll |= DPLL_VCO_ENABLE; 4634 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4635 POSTING_READ(DPLL(pipe)); 4636 udelay(150); 4637 4638 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4639 * This is an exception to the general rule that mode_set doesn't turn 4640 * things on. 4641 */ 4642 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4643 intel_update_lvds(crtc, clock, adjusted_mode); 4644 4645 I915_WRITE(DPLL(pipe), dpll); 4646 4647 /* Wait for the clocks to stabilize. */ 4648 POSTING_READ(DPLL(pipe)); 4649 udelay(150); 4650 4651 /* The pixel multiplier can only be updated once the 4652 * DPLL is enabled and the clocks are stable. 4653 * 4654 * So write it again. 4655 */ 4656 I915_WRITE(DPLL(pipe), dpll); 4657 } 4658 4659 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, 4660 struct drm_display_mode *mode, 4661 struct drm_display_mode *adjusted_mode) 4662 { 4663 struct drm_device *dev = intel_crtc->base.dev; 4664 struct drm_i915_private *dev_priv = dev->dev_private; 4665 enum i915_pipe pipe = intel_crtc->pipe; 4666 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 4667 uint32_t vsyncshift; 4668 4669 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4670 /* the chip adds 2 halflines automatically */ 4671 adjusted_mode->crtc_vtotal -= 1; 4672 adjusted_mode->crtc_vblank_end -= 1; 4673 vsyncshift = adjusted_mode->crtc_hsync_start 4674 - adjusted_mode->crtc_htotal / 2; 4675 } else { 4676 vsyncshift = 0; 4677 } 4678 4679 if (INTEL_INFO(dev)->gen > 3) 4680 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 4681 4682 I915_WRITE(HTOTAL(cpu_transcoder), 4683 (adjusted_mode->crtc_hdisplay - 1) | 4684 ((adjusted_mode->crtc_htotal - 1) << 16)); 4685 I915_WRITE(HBLANK(cpu_transcoder), 4686 (adjusted_mode->crtc_hblank_start - 1) | 4687 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 4688 I915_WRITE(HSYNC(cpu_transcoder), 4689 (adjusted_mode->crtc_hsync_start - 1) | 4690 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 4691 4692 I915_WRITE(VTOTAL(cpu_transcoder), 4693 (adjusted_mode->crtc_vdisplay - 1) | 4694 ((adjusted_mode->crtc_vtotal - 1) << 16)); 4695 I915_WRITE(VBLANK(cpu_transcoder), 4696 (adjusted_mode->crtc_vblank_start - 1) | 4697 ((adjusted_mode->crtc_vblank_end - 1) << 16)); 4698 I915_WRITE(VSYNC(cpu_transcoder), 4699 (adjusted_mode->crtc_vsync_start - 1) | 4700 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 4701 4702 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 4703 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 4704 * documented on the DDI_FUNC_CTL register description, EDP Input Select 4705 * bits. */ 4706 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 4707 (pipe == PIPE_B || pipe == PIPE_C)) 4708 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 4709 4710 /* pipesrc controls the size that is scaled from, which should 4711 * always be the user's requested size. 4712 */ 4713 I915_WRITE(PIPESRC(pipe), 4714 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 4715 } 4716 4717 static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4718 struct drm_display_mode *mode, 4719 struct drm_display_mode *adjusted_mode, 4720 int x, int y, 4721 struct drm_framebuffer *fb) 4722 { 4723 struct drm_device *dev = crtc->dev; 4724 struct drm_i915_private *dev_priv = dev->dev_private; 4725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4726 int pipe = intel_crtc->pipe; 4727 int plane = intel_crtc->plane; 4728 int refclk, num_connectors = 0; 4729 intel_clock_t clock, reduced_clock; 4730 u32 dspcntr, pipeconf; 4731 bool ok, has_reduced_clock = false, is_sdvo = false; 4732 bool is_lvds = false, is_tv = false, is_dp = false; 4733 struct intel_encoder *encoder; 4734 const intel_limit_t *limit; 4735 int ret; 4736 4737 for_each_encoder_on_crtc(dev, crtc, encoder) { 4738 switch (encoder->type) { 4739 case INTEL_OUTPUT_LVDS: 4740 is_lvds = true; 4741 break; 4742 case INTEL_OUTPUT_SDVO: 4743 case INTEL_OUTPUT_HDMI: 4744 is_sdvo = true; 4745 if (encoder->needs_tv_clock) 4746 is_tv = true; 4747 break; 4748 case INTEL_OUTPUT_TVOUT: 4749 is_tv = true; 4750 break; 4751 case INTEL_OUTPUT_DISPLAYPORT: 4752 is_dp = true; 4753 break; 4754 } 4755 4756 num_connectors++; 4757 } 4758 4759 refclk = i9xx_get_refclk(crtc, num_connectors); 4760 4761 /* 4762 * Returns a set of divisors for the desired target clock with the given 4763 * refclk, or FALSE. The returned values represent the clock equation: 4764 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 4765 */ 4766 limit = intel_limit(crtc, refclk); 4767 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, 4768 &clock); 4769 if (!ok) { 4770 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 4771 return -EINVAL; 4772 } 4773 4774 /* Ensure that the cursor is valid for the new mode before changing... */ 4775 intel_crtc_update_cursor(crtc, true); 4776 4777 if (is_lvds && dev_priv->lvds_downclock_avail) { 4778 /* 4779 * Ensure we match the reduced clock's P to the target clock. 4780 * If the clocks don't match, we can't switch the display clock 4781 * by using the FP0/FP1. In such case we will disable the LVDS 4782 * downclock feature. 4783 */ 4784 has_reduced_clock = limit->find_pll(limit, crtc, 4785 dev_priv->lvds_downclock, 4786 refclk, 4787 &clock, 4788 &reduced_clock); 4789 } 4790 4791 if (is_sdvo && is_tv) 4792 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); 4793 4794 if (IS_GEN2(dev)) 4795 i8xx_update_pll(crtc, adjusted_mode, &clock, 4796 has_reduced_clock ? &reduced_clock : NULL, 4797 num_connectors); 4798 else if (IS_VALLEYVIEW(dev)) 4799 vlv_update_pll(crtc, mode, adjusted_mode, &clock, 4800 has_reduced_clock ? &reduced_clock : NULL, 4801 num_connectors); 4802 else 4803 i9xx_update_pll(crtc, mode, adjusted_mode, &clock, 4804 has_reduced_clock ? &reduced_clock : NULL, 4805 num_connectors); 4806 4807 /* setup pipeconf */ 4808 pipeconf = I915_READ(PIPECONF(pipe)); 4809 4810 /* Set up the display plane register */ 4811 dspcntr = DISPPLANE_GAMMA_ENABLE; 4812 4813 if (pipe == 0) 4814 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4815 else 4816 dspcntr |= DISPPLANE_SEL_PIPE_B; 4817 4818 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4819 /* Enable pixel doubling when the dot clock is > 90% of the (display) 4820 * core speed. 4821 * 4822 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the 4823 * pipe == 0 check? 4824 */ 4825 if (mode->clock > 4826 dev_priv->display.get_display_clock_speed(dev) * 9 / 10) 4827 pipeconf |= PIPECONF_DOUBLE_WIDE; 4828 else 4829 pipeconf &= ~PIPECONF_DOUBLE_WIDE; 4830 } 4831 4832 /* default to 8bpc */ 4833 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); 4834 if (is_dp) { 4835 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4836 pipeconf |= PIPECONF_BPP_6 | 4837 PIPECONF_DITHER_EN | 4838 PIPECONF_DITHER_TYPE_SP; 4839 } 4840 } 4841 4842 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 4843 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4844 pipeconf |= PIPECONF_BPP_6 | 4845 PIPECONF_ENABLE | 4846 I965_PIPECONF_ACTIVE; 4847 } 4848 } 4849 4850 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4851 drm_mode_debug_printmodeline(mode); 4852 4853 if (HAS_PIPE_CXSR(dev)) { 4854 if (intel_crtc->lowfreq_avail) { 4855 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 4856 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 4857 } else { 4858 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 4859 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 4860 } 4861 } 4862 4863 pipeconf &= ~PIPECONF_INTERLACE_MASK; 4864 if (!IS_GEN2(dev) && 4865 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 4866 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 4867 else 4868 pipeconf |= PIPECONF_PROGRESSIVE; 4869 4870 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 4871 4872 /* pipesrc and dspsize control the size that is scaled from, 4873 * which should always be the user's requested size. 4874 */ 4875 I915_WRITE(DSPSIZE(plane), 4876 ((mode->vdisplay - 1) << 16) | 4877 (mode->hdisplay - 1)); 4878 I915_WRITE(DSPPOS(plane), 0); 4879 4880 I915_WRITE(PIPECONF(pipe), pipeconf); 4881 POSTING_READ(PIPECONF(pipe)); 4882 intel_enable_pipe(dev_priv, pipe, false); 4883 4884 intel_wait_for_vblank(dev, pipe); 4885 4886 I915_WRITE(DSPCNTR(plane), dspcntr); 4887 POSTING_READ(DSPCNTR(plane)); 4888 4889 ret = intel_pipe_set_base(crtc, x, y, fb); 4890 4891 intel_update_watermarks(dev); 4892 4893 return ret; 4894 } 4895 4896 static void ironlake_init_pch_refclk(struct drm_device *dev) 4897 { 4898 struct drm_i915_private *dev_priv = dev->dev_private; 4899 struct drm_mode_config *mode_config = &dev->mode_config; 4900 struct intel_encoder *encoder; 4901 u32 temp; 4902 bool has_lvds = false; 4903 bool has_cpu_edp = false; 4904 bool has_pch_edp = false; 4905 bool has_panel = false; 4906 bool has_ck505 = false; 4907 bool can_ssc = false; 4908 4909 /* We need to take the global config into account */ 4910 list_for_each_entry(encoder, &mode_config->encoder_list, 4911 base.head) { 4912 switch (encoder->type) { 4913 case INTEL_OUTPUT_LVDS: 4914 has_panel = true; 4915 has_lvds = true; 4916 break; 4917 case INTEL_OUTPUT_EDP: 4918 has_panel = true; 4919 if (intel_encoder_is_pch_edp(&encoder->base)) 4920 has_pch_edp = true; 4921 else 4922 has_cpu_edp = true; 4923 break; 4924 } 4925 } 4926 4927 if (HAS_PCH_IBX(dev)) { 4928 has_ck505 = dev_priv->display_clock_mode; 4929 can_ssc = has_ck505; 4930 } else { 4931 has_ck505 = false; 4932 can_ssc = true; 4933 } 4934 4935 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", 4936 has_panel, has_lvds, has_pch_edp, has_cpu_edp, 4937 has_ck505); 4938 4939 /* Ironlake: try to setup display ref clock before DPLL 4940 * enabling. This is only under driver's control after 4941 * PCH B stepping, previous chipset stepping should be 4942 * ignoring this setting. 4943 */ 4944 temp = I915_READ(PCH_DREF_CONTROL); 4945 /* Always enable nonspread source */ 4946 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 4947 4948 if (has_ck505) 4949 temp |= DREF_NONSPREAD_CK505_ENABLE; 4950 else 4951 temp |= DREF_NONSPREAD_SOURCE_ENABLE; 4952 4953 if (has_panel) { 4954 temp &= ~DREF_SSC_SOURCE_MASK; 4955 temp |= DREF_SSC_SOURCE_ENABLE; 4956 4957 /* SSC must be turned on before enabling the CPU output */ 4958 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 4959 DRM_DEBUG_KMS("Using SSC on panel\n"); 4960 temp |= DREF_SSC1_ENABLE; 4961 } else 4962 temp &= ~DREF_SSC1_ENABLE; 4963 4964 /* Get SSC going before enabling the outputs */ 4965 I915_WRITE(PCH_DREF_CONTROL, temp); 4966 POSTING_READ(PCH_DREF_CONTROL); 4967 udelay(200); 4968 4969 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4970 4971 /* Enable CPU source on CPU attached eDP */ 4972 if (has_cpu_edp) { 4973 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 4974 DRM_DEBUG_KMS("Using SSC on eDP\n"); 4975 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 4976 } 4977 else 4978 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 4979 } else 4980 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 4981 4982 I915_WRITE(PCH_DREF_CONTROL, temp); 4983 POSTING_READ(PCH_DREF_CONTROL); 4984 udelay(200); 4985 } else { 4986 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 4987 4988 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 4989 4990 /* Turn off CPU output */ 4991 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 4992 4993 I915_WRITE(PCH_DREF_CONTROL, temp); 4994 POSTING_READ(PCH_DREF_CONTROL); 4995 udelay(200); 4996 4997 /* Turn off the SSC source */ 4998 temp &= ~DREF_SSC_SOURCE_MASK; 4999 temp |= DREF_SSC_SOURCE_DISABLE; 5000 5001 /* Turn off SSC1 */ 5002 temp &= ~ DREF_SSC1_ENABLE; 5003 5004 I915_WRITE(PCH_DREF_CONTROL, temp); 5005 POSTING_READ(PCH_DREF_CONTROL); 5006 udelay(200); 5007 } 5008 } 5009 5010 /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ 5011 static void lpt_init_pch_refclk(struct drm_device *dev) 5012 { 5013 struct drm_i915_private *dev_priv = dev->dev_private; 5014 struct drm_mode_config *mode_config = &dev->mode_config; 5015 struct intel_encoder *encoder; 5016 bool has_vga = false; 5017 bool is_sdv = false; 5018 u32 tmp; 5019 5020 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5021 switch (encoder->type) { 5022 case INTEL_OUTPUT_ANALOG: 5023 has_vga = true; 5024 break; 5025 } 5026 } 5027 5028 if (!has_vga) 5029 return; 5030 5031 /* XXX: Rip out SDV support once Haswell ships for real. */ 5032 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) 5033 is_sdv = true; 5034 5035 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 5036 tmp &= ~SBI_SSCCTL_DISABLE; 5037 tmp |= SBI_SSCCTL_PATHALT; 5038 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 5039 5040 udelay(24); 5041 5042 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 5043 tmp &= ~SBI_SSCCTL_PATHALT; 5044 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 5045 5046 if (!is_sdv) { 5047 tmp = I915_READ(SOUTH_CHICKEN2); 5048 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 5049 I915_WRITE(SOUTH_CHICKEN2, tmp); 5050 5051 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 5052 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 5053 DRM_ERROR("FDI mPHY reset assert timeout\n"); 5054 5055 tmp = I915_READ(SOUTH_CHICKEN2); 5056 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 5057 I915_WRITE(SOUTH_CHICKEN2, tmp); 5058 5059 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 5060 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 5061 100)) 5062 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 5063 } 5064 5065 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 5066 tmp &= ~(0xFF << 24); 5067 tmp |= (0x12 << 24); 5068 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 5069 5070 if (!is_sdv) { 5071 tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY); 5072 tmp &= ~(0x3 << 6); 5073 tmp |= (1 << 6) | (1 << 0); 5074 intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY); 5075 } 5076 5077 if (is_sdv) { 5078 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); 5079 tmp |= 0x7FFF; 5080 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY); 5081 } 5082 5083 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 5084 tmp |= (1 << 11); 5085 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 5086 5087 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 5088 tmp |= (1 << 11); 5089 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 5090 5091 if (is_sdv) { 5092 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY); 5093 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); 5094 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY); 5095 5096 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY); 5097 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); 5098 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY); 5099 5100 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY); 5101 tmp |= (0x3F << 8); 5102 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY); 5103 5104 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY); 5105 tmp |= (0x3F << 8); 5106 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY); 5107 } 5108 5109 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 5110 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5111 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 5112 5113 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 5114 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 5115 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 5116 5117 if (!is_sdv) { 5118 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 5119 tmp &= ~(7 << 13); 5120 tmp |= (5 << 13); 5121 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 5122 5123 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 5124 tmp &= ~(7 << 13); 5125 tmp |= (5 << 13); 5126 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 5127 } 5128 5129 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 5130 tmp &= ~0xFF; 5131 tmp |= 0x1C; 5132 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 5133 5134 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 5135 tmp &= ~0xFF; 5136 tmp |= 0x1C; 5137 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 5138 5139 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 5140 tmp &= ~(0xFF << 16); 5141 tmp |= (0x1C << 16); 5142 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 5143 5144 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 5145 tmp &= ~(0xFF << 16); 5146 tmp |= (0x1C << 16); 5147 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 5148 5149 if (!is_sdv) { 5150 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 5151 tmp |= (1 << 27); 5152 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 5153 5154 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 5155 tmp |= (1 << 27); 5156 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 5157 5158 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 5159 tmp &= ~(0xF << 28); 5160 tmp |= (4 << 28); 5161 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 5162 5163 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 5164 tmp &= ~(0xF << 28); 5165 tmp |= (4 << 28); 5166 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 5167 } 5168 5169 /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ 5170 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); 5171 tmp |= SBI_DBUFF0_ENABLE; 5172 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); 5173 } 5174 5175 /* 5176 * Initialize reference clocks when the driver loads 5177 */ 5178 void intel_init_pch_refclk(struct drm_device *dev) 5179 { 5180 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 5181 ironlake_init_pch_refclk(dev); 5182 else if (HAS_PCH_LPT(dev)) 5183 lpt_init_pch_refclk(dev); 5184 } 5185 5186 static int ironlake_get_refclk(struct drm_crtc *crtc) 5187 { 5188 struct drm_device *dev = crtc->dev; 5189 struct drm_i915_private *dev_priv = dev->dev_private; 5190 struct intel_encoder *encoder; 5191 struct intel_encoder *edp_encoder = NULL; 5192 int num_connectors = 0; 5193 bool is_lvds = false; 5194 5195 for_each_encoder_on_crtc(dev, crtc, encoder) { 5196 switch (encoder->type) { 5197 case INTEL_OUTPUT_LVDS: 5198 is_lvds = true; 5199 break; 5200 case INTEL_OUTPUT_EDP: 5201 edp_encoder = encoder; 5202 break; 5203 } 5204 num_connectors++; 5205 } 5206 5207 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 5208 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 5209 dev_priv->lvds_ssc_freq); 5210 return dev_priv->lvds_ssc_freq * 1000; 5211 } 5212 5213 return 120000; 5214 } 5215 5216 static void ironlake_set_pipeconf(struct drm_crtc *crtc, 5217 struct drm_display_mode *adjusted_mode, 5218 bool dither) 5219 { 5220 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5221 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5222 int pipe = intel_crtc->pipe; 5223 uint32_t val; 5224 5225 val = I915_READ(PIPECONF(pipe)); 5226 5227 val &= ~PIPE_BPC_MASK; 5228 switch (intel_crtc->bpp) { 5229 case 18: 5230 val |= PIPE_6BPC; 5231 break; 5232 case 24: 5233 val |= PIPE_8BPC; 5234 break; 5235 case 30: 5236 val |= PIPE_10BPC; 5237 break; 5238 case 36: 5239 val |= PIPE_12BPC; 5240 break; 5241 default: 5242 /* Case prevented by intel_choose_pipe_bpp_dither. */ 5243 BUG(); 5244 } 5245 5246 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); 5247 if (dither) 5248 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 5249 5250 val &= ~PIPECONF_INTERLACE_MASK; 5251 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 5252 val |= PIPECONF_INTERLACED_ILK; 5253 else 5254 val |= PIPECONF_PROGRESSIVE; 5255 5256 I915_WRITE(PIPECONF(pipe), val); 5257 POSTING_READ(PIPECONF(pipe)); 5258 } 5259 5260 static void haswell_set_pipeconf(struct drm_crtc *crtc, 5261 struct drm_display_mode *adjusted_mode, 5262 bool dither) 5263 { 5264 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5266 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 5267 uint32_t val; 5268 5269 val = I915_READ(PIPECONF(cpu_transcoder)); 5270 5271 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); 5272 if (dither) 5273 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 5274 5275 val &= ~PIPECONF_INTERLACE_MASK_HSW; 5276 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 5277 val |= PIPECONF_INTERLACED_ILK; 5278 else 5279 val |= PIPECONF_PROGRESSIVE; 5280 5281 I915_WRITE(PIPECONF(cpu_transcoder), val); 5282 POSTING_READ(PIPECONF(cpu_transcoder)); 5283 } 5284 5285 static bool ironlake_compute_clocks(struct drm_crtc *crtc, 5286 struct drm_display_mode *adjusted_mode, 5287 intel_clock_t *clock, 5288 bool *has_reduced_clock, 5289 intel_clock_t *reduced_clock) 5290 { 5291 struct drm_device *dev = crtc->dev; 5292 struct drm_i915_private *dev_priv = dev->dev_private; 5293 struct intel_encoder *intel_encoder; 5294 int refclk; 5295 const intel_limit_t *limit; 5296 bool ret, is_sdvo = false, is_tv = false, is_lvds = false; 5297 5298 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 5299 switch (intel_encoder->type) { 5300 case INTEL_OUTPUT_LVDS: 5301 is_lvds = true; 5302 break; 5303 case INTEL_OUTPUT_SDVO: 5304 case INTEL_OUTPUT_HDMI: 5305 is_sdvo = true; 5306 if (intel_encoder->needs_tv_clock) 5307 is_tv = true; 5308 break; 5309 case INTEL_OUTPUT_TVOUT: 5310 is_tv = true; 5311 break; 5312 } 5313 } 5314 5315 refclk = ironlake_get_refclk(crtc); 5316 5317 /* 5318 * Returns a set of divisors for the desired target clock with the given 5319 * refclk, or FALSE. The returned values represent the clock equation: 5320 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 5321 */ 5322 limit = intel_limit(crtc, refclk); 5323 ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, 5324 clock); 5325 if (!ret) 5326 return false; 5327 5328 if (is_lvds && dev_priv->lvds_downclock_avail) { 5329 /* 5330 * Ensure we match the reduced clock's P to the target clock. 5331 * If the clocks don't match, we can't switch the display clock 5332 * by using the FP0/FP1. In such case we will disable the LVDS 5333 * downclock feature. 5334 */ 5335 *has_reduced_clock = limit->find_pll(limit, crtc, 5336 dev_priv->lvds_downclock, 5337 refclk, 5338 clock, 5339 reduced_clock); 5340 } 5341 5342 if (is_sdvo && is_tv) 5343 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock); 5344 5345 return true; 5346 } 5347 5348 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) 5349 { 5350 struct drm_i915_private *dev_priv = dev->dev_private; 5351 uint32_t temp; 5352 5353 temp = I915_READ(SOUTH_CHICKEN1); 5354 if (temp & FDI_BC_BIFURCATION_SELECT) 5355 return; 5356 5357 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 5358 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 5359 5360 temp |= FDI_BC_BIFURCATION_SELECT; 5361 DRM_DEBUG_KMS("enabling fdi C rx\n"); 5362 I915_WRITE(SOUTH_CHICKEN1, temp); 5363 POSTING_READ(SOUTH_CHICKEN1); 5364 } 5365 5366 static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc) 5367 { 5368 struct drm_device *dev = intel_crtc->base.dev; 5369 struct drm_i915_private *dev_priv = dev->dev_private; 5370 struct intel_crtc *pipe_B_crtc = 5371 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); 5372 5373 DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n", 5374 intel_crtc->pipe, intel_crtc->fdi_lanes); 5375 if (intel_crtc->fdi_lanes > 4) { 5376 DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n", 5377 intel_crtc->pipe, intel_crtc->fdi_lanes); 5378 /* Clamp lanes to avoid programming the hw with bogus values. */ 5379 intel_crtc->fdi_lanes = 4; 5380 5381 return false; 5382 } 5383 5384 if (dev_priv->num_pipe == 2) 5385 return true; 5386 5387 switch (intel_crtc->pipe) { 5388 case PIPE_A: 5389 return true; 5390 case PIPE_B: 5391 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && 5392 intel_crtc->fdi_lanes > 2) { 5393 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", 5394 intel_crtc->pipe, intel_crtc->fdi_lanes); 5395 /* Clamp lanes to avoid programming the hw with bogus values. */ 5396 intel_crtc->fdi_lanes = 2; 5397 5398 return false; 5399 } 5400 5401 if (intel_crtc->fdi_lanes > 2) 5402 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); 5403 else 5404 cpt_enable_fdi_bc_bifurcation(dev); 5405 5406 return true; 5407 case PIPE_C: 5408 if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) { 5409 if (intel_crtc->fdi_lanes > 2) { 5410 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n", 5411 intel_crtc->pipe, intel_crtc->fdi_lanes); 5412 /* Clamp lanes to avoid programming the hw with bogus values. */ 5413 intel_crtc->fdi_lanes = 2; 5414 5415 return false; 5416 } 5417 } else { 5418 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 5419 return false; 5420 } 5421 5422 cpt_enable_fdi_bc_bifurcation(dev); 5423 5424 return true; 5425 default: 5426 BUG(); 5427 } 5428 } 5429 5430 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 5431 { 5432 /* 5433 * Account for spread spectrum to avoid 5434 * oversubscribing the link. Max center spread 5435 * is 2.5%; use 5% for safety's sake. 5436 */ 5437 u32 bps = target_clock * bpp * 21 / 20; 5438 return bps / (link_bw * 8) + 1; 5439 } 5440 5441 static void ironlake_set_m_n(struct drm_crtc *crtc, 5442 struct drm_display_mode *mode, 5443 struct drm_display_mode *adjusted_mode) 5444 { 5445 struct drm_device *dev = crtc->dev; 5446 struct drm_i915_private *dev_priv = dev->dev_private; 5447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5448 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 5449 struct intel_encoder *intel_encoder, *edp_encoder = NULL; 5450 struct fdi_m_n m_n = {0}; 5451 int target_clock, pixel_multiplier, lane, link_bw; 5452 bool is_dp = false, is_cpu_edp = false; 5453 5454 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 5455 switch (intel_encoder->type) { 5456 case INTEL_OUTPUT_DISPLAYPORT: 5457 is_dp = true; 5458 break; 5459 case INTEL_OUTPUT_EDP: 5460 is_dp = true; 5461 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 5462 is_cpu_edp = true; 5463 edp_encoder = intel_encoder; 5464 break; 5465 } 5466 } 5467 5468 /* FDI link */ 5469 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5470 lane = 0; 5471 /* CPU eDP doesn't require FDI link, so just set DP M/N 5472 according to current link config */ 5473 if (is_cpu_edp) { 5474 intel_edp_link_config(edp_encoder, &lane, &link_bw); 5475 } else { 5476 /* FDI is a binary signal running at ~2.7GHz, encoding 5477 * each output octet as 10 bits. The actual frequency 5478 * is stored as a divider into a 100MHz clock, and the 5479 * mode pixel clock is stored in units of 1KHz. 5480 * Hence the bw of each lane in terms of the mode signal 5481 * is: 5482 */ 5483 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 5484 } 5485 5486 /* [e]DP over FDI requires target mode clock instead of link clock. */ 5487 if (edp_encoder) 5488 target_clock = intel_edp_target_clock(edp_encoder, mode); 5489 else if (is_dp) 5490 target_clock = mode->clock; 5491 else 5492 target_clock = adjusted_mode->clock; 5493 5494 if (!lane) 5495 lane = ironlake_get_lanes_required(target_clock, link_bw, 5496 intel_crtc->bpp); 5497 5498 intel_crtc->fdi_lanes = lane; 5499 5500 if (pixel_multiplier > 1) 5501 link_bw *= pixel_multiplier; 5502 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5503 &m_n); 5504 5505 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); 5506 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); 5507 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); 5508 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); 5509 } 5510 5511 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, 5512 struct drm_display_mode *adjusted_mode, 5513 intel_clock_t *clock, u32 fp) 5514 { 5515 struct drm_crtc *crtc = &intel_crtc->base; 5516 struct drm_device *dev = crtc->dev; 5517 struct drm_i915_private *dev_priv = dev->dev_private; 5518 struct intel_encoder *intel_encoder; 5519 uint32_t dpll; 5520 int factor, pixel_multiplier, num_connectors = 0; 5521 bool is_lvds = false, is_sdvo = false, is_tv = false; 5522 bool is_dp = false, is_cpu_edp = false; 5523 5524 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 5525 switch (intel_encoder->type) { 5526 case INTEL_OUTPUT_LVDS: 5527 is_lvds = true; 5528 break; 5529 case INTEL_OUTPUT_SDVO: 5530 case INTEL_OUTPUT_HDMI: 5531 is_sdvo = true; 5532 if (intel_encoder->needs_tv_clock) 5533 is_tv = true; 5534 break; 5535 case INTEL_OUTPUT_TVOUT: 5536 is_tv = true; 5537 break; 5538 case INTEL_OUTPUT_DISPLAYPORT: 5539 is_dp = true; 5540 break; 5541 case INTEL_OUTPUT_EDP: 5542 is_dp = true; 5543 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 5544 is_cpu_edp = true; 5545 break; 5546 } 5547 5548 num_connectors++; 5549 } 5550 5551 /* Enable autotuning of the PLL clock (if permissible) */ 5552 factor = 21; 5553 if (is_lvds) { 5554 if ((intel_panel_use_ssc(dev_priv) && 5555 dev_priv->lvds_ssc_freq == 100) || 5556 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 5557 factor = 25; 5558 } else if (is_sdvo && is_tv) 5559 factor = 20; 5560 5561 if (clock->m < factor * clock->n) 5562 fp |= FP_CB_TUNE; 5563 5564 dpll = 0; 5565 5566 if (is_lvds) 5567 dpll |= DPLLB_MODE_LVDS; 5568 else 5569 dpll |= DPLLB_MODE_DAC_SERIAL; 5570 if (is_sdvo) { 5571 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5572 if (pixel_multiplier > 1) { 5573 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5574 } 5575 dpll |= DPLL_DVO_HIGH_SPEED; 5576 } 5577 if (is_dp && !is_cpu_edp) 5578 dpll |= DPLL_DVO_HIGH_SPEED; 5579 5580 /* compute bitmask from p1 value */ 5581 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5582 /* also FPA1 */ 5583 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5584 5585 switch (clock->p2) { 5586 case 5: 5587 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 5588 break; 5589 case 7: 5590 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 5591 break; 5592 case 10: 5593 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 5594 break; 5595 case 14: 5596 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 5597 break; 5598 } 5599 5600 if (is_sdvo && is_tv) 5601 dpll |= PLL_REF_INPUT_TVCLKINBC; 5602 else if (is_tv) 5603 /* XXX: just matching BIOS for now */ 5604 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 5605 dpll |= 3; 5606 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) 5607 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 5608 else 5609 dpll |= PLL_REF_INPUT_DREFCLK; 5610 5611 return dpll; 5612 } 5613 5614 static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5615 struct drm_display_mode *mode, 5616 struct drm_display_mode *adjusted_mode, 5617 int x, int y, 5618 struct drm_framebuffer *fb) 5619 { 5620 struct drm_device *dev = crtc->dev; 5621 struct drm_i915_private *dev_priv = dev->dev_private; 5622 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5623 int pipe = intel_crtc->pipe; 5624 int plane = intel_crtc->plane; 5625 int num_connectors = 0; 5626 intel_clock_t clock, reduced_clock; 5627 u32 dpll, fp = 0, fp2 = 0; 5628 bool ok, has_reduced_clock = false; 5629 bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5630 struct intel_encoder *encoder; 5631 u32 temp; 5632 int ret; 5633 bool dither, fdi_config_ok; 5634 5635 for_each_encoder_on_crtc(dev, crtc, encoder) { 5636 switch (encoder->type) { 5637 case INTEL_OUTPUT_LVDS: 5638 is_lvds = true; 5639 break; 5640 case INTEL_OUTPUT_DISPLAYPORT: 5641 is_dp = true; 5642 break; 5643 case INTEL_OUTPUT_EDP: 5644 is_dp = true; 5645 if (!intel_encoder_is_pch_edp(&encoder->base)) 5646 is_cpu_edp = true; 5647 break; 5648 } 5649 5650 num_connectors++; 5651 } 5652 5653 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 5654 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 5655 5656 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, 5657 &has_reduced_clock, &reduced_clock); 5658 if (!ok) { 5659 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 5660 return -EINVAL; 5661 } 5662 5663 /* Ensure that the cursor is valid for the new mode before changing... */ 5664 intel_crtc_update_cursor(crtc, true); 5665 5666 /* determine panel color depth */ 5667 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5668 adjusted_mode); 5669 if (is_lvds && dev_priv->lvds_dither) 5670 dither = true; 5671 5672 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5673 if (has_reduced_clock) 5674 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5675 reduced_clock.m2; 5676 5677 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); 5678 5679 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5680 drm_mode_debug_printmodeline(mode); 5681 5682 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 5683 if (!is_cpu_edp) { 5684 struct intel_pch_pll *pll; 5685 5686 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5687 if (pll == NULL) { 5688 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", 5689 pipe); 5690 return -EINVAL; 5691 } 5692 } else 5693 intel_put_pch_pll(intel_crtc); 5694 5695 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 5696 * This is an exception to the general rule that mode_set doesn't turn 5697 * things on. 5698 */ 5699 if (is_lvds) { 5700 temp = I915_READ(PCH_LVDS); 5701 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5702 if (HAS_PCH_CPT(dev)) { 5703 temp &= ~PORT_TRANS_SEL_MASK; 5704 temp |= PORT_TRANS_SEL_CPT(pipe); 5705 } else { 5706 if (pipe == 1) 5707 temp |= LVDS_PIPEB_SELECT; 5708 else 5709 temp &= ~LVDS_PIPEB_SELECT; 5710 } 5711 5712 /* set the corresponsding LVDS_BORDER bit */ 5713 temp |= dev_priv->lvds_border_bits; 5714 /* Set the B0-B3 data pairs corresponding to whether we're going to 5715 * set the DPLLs for dual-channel mode or not. 5716 */ 5717 if (clock.p2 == 7) 5718 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 5719 else 5720 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 5721 5722 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 5723 * appropriately here, but we need to look more thoroughly into how 5724 * panels behave in the two modes. 5725 */ 5726 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5727 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5728 temp |= LVDS_HSYNC_POLARITY; 5729 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5730 temp |= LVDS_VSYNC_POLARITY; 5731 I915_WRITE(PCH_LVDS, temp); 5732 } 5733 5734 if (is_dp && !is_cpu_edp) { 5735 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5736 } else { 5737 /* For non-DP output, clear any trans DP clock recovery setting.*/ 5738 I915_WRITE(TRANSDATA_M1(pipe), 0); 5739 I915_WRITE(TRANSDATA_N1(pipe), 0); 5740 I915_WRITE(TRANSDPLINK_M1(pipe), 0); 5741 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5742 } 5743 5744 if (intel_crtc->pch_pll) { 5745 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5746 5747 /* Wait for the clocks to stabilize. */ 5748 POSTING_READ(intel_crtc->pch_pll->pll_reg); 5749 udelay(150); 5750 5751 /* The pixel multiplier can only be updated once the 5752 * DPLL is enabled and the clocks are stable. 5753 * 5754 * So write it again. 5755 */ 5756 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5757 } 5758 5759 intel_crtc->lowfreq_avail = false; 5760 if (intel_crtc->pch_pll) { 5761 if (is_lvds && has_reduced_clock && i915_powersave) { 5762 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); 5763 intel_crtc->lowfreq_avail = true; 5764 } else { 5765 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); 5766 } 5767 } 5768 5769 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5770 5771 /* Note, this also computes intel_crtc->fdi_lanes which is used below in 5772 * ironlake_check_fdi_lanes. */ 5773 5774 ironlake_set_m_n(crtc, mode, adjusted_mode); 5775 5776 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); 5777 5778 if (is_cpu_edp) 5779 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5780 5781 ironlake_set_pipeconf(crtc, adjusted_mode, dither); 5782 5783 intel_wait_for_vblank(dev, pipe); 5784 5785 /* Set up the display plane register */ 5786 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); 5787 POSTING_READ(DSPCNTR(plane)); 5788 5789 ret = intel_pipe_set_base(crtc, x, y, fb); 5790 5791 intel_update_watermarks(dev); 5792 5793 intel_update_linetime_watermarks(dev, pipe, adjusted_mode); 5794 5795 return fdi_config_ok ? ret : -EINVAL; 5796 } 5797 5798 static int haswell_crtc_mode_set(struct drm_crtc *crtc, 5799 struct drm_display_mode *mode, 5800 struct drm_display_mode *adjusted_mode, 5801 int x, int y, 5802 struct drm_framebuffer *fb) 5803 { 5804 struct drm_device *dev = crtc->dev; 5805 struct drm_i915_private *dev_priv = dev->dev_private; 5806 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5807 int pipe = intel_crtc->pipe; 5808 int plane = intel_crtc->plane; 5809 int num_connectors = 0; 5810 intel_clock_t clock, reduced_clock; 5811 u32 dpll = 0, fp = 0, fp2 = 0; 5812 bool ok, has_reduced_clock = false; 5813 bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5814 struct intel_encoder *encoder; 5815 u32 temp; 5816 int ret; 5817 bool dither; 5818 5819 for_each_encoder_on_crtc(dev, crtc, encoder) { 5820 switch (encoder->type) { 5821 case INTEL_OUTPUT_LVDS: 5822 is_lvds = true; 5823 break; 5824 case INTEL_OUTPUT_DISPLAYPORT: 5825 is_dp = true; 5826 break; 5827 case INTEL_OUTPUT_EDP: 5828 is_dp = true; 5829 if (!intel_encoder_is_pch_edp(&encoder->base)) 5830 is_cpu_edp = true; 5831 break; 5832 } 5833 5834 num_connectors++; 5835 } 5836 5837 if (is_cpu_edp) 5838 intel_crtc->cpu_transcoder = TRANSCODER_EDP; 5839 else 5840 intel_crtc->cpu_transcoder = pipe; 5841 5842 /* We are not sure yet this won't happen. */ 5843 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", 5844 INTEL_PCH_TYPE(dev)); 5845 5846 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", 5847 num_connectors, pipe_name(pipe)); 5848 5849 WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & 5850 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); 5851 5852 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); 5853 5854 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) 5855 return -EINVAL; 5856 5857 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5858 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, 5859 &has_reduced_clock, 5860 &reduced_clock); 5861 if (!ok) { 5862 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 5863 return -EINVAL; 5864 } 5865 } 5866 5867 /* Ensure that the cursor is valid for the new mode before changing... */ 5868 intel_crtc_update_cursor(crtc, true); 5869 5870 /* determine panel color depth */ 5871 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5872 adjusted_mode); 5873 if (is_lvds && dev_priv->lvds_dither) 5874 dither = true; 5875 5876 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5877 drm_mode_debug_printmodeline(mode); 5878 5879 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5880 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5881 if (has_reduced_clock) 5882 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5883 reduced_clock.m2; 5884 5885 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, 5886 fp); 5887 5888 /* CPU eDP is the only output that doesn't need a PCH PLL of its 5889 * own on pre-Haswell/LPT generation */ 5890 if (!is_cpu_edp) { 5891 struct intel_pch_pll *pll; 5892 5893 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5894 if (pll == NULL) { 5895 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", 5896 pipe); 5897 return -EINVAL; 5898 } 5899 } else 5900 intel_put_pch_pll(intel_crtc); 5901 5902 /* The LVDS pin pair needs to be on before the DPLLs are 5903 * enabled. This is an exception to the general rule that 5904 * mode_set doesn't turn things on. 5905 */ 5906 if (is_lvds) { 5907 temp = I915_READ(PCH_LVDS); 5908 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5909 if (HAS_PCH_CPT(dev)) { 5910 temp &= ~PORT_TRANS_SEL_MASK; 5911 temp |= PORT_TRANS_SEL_CPT(pipe); 5912 } else { 5913 if (pipe == 1) 5914 temp |= LVDS_PIPEB_SELECT; 5915 else 5916 temp &= ~LVDS_PIPEB_SELECT; 5917 } 5918 5919 /* set the corresponsding LVDS_BORDER bit */ 5920 temp |= dev_priv->lvds_border_bits; 5921 /* Set the B0-B3 data pairs corresponding to whether 5922 * we're going to set the DPLLs for dual-channel mode or 5923 * not. 5924 */ 5925 if (clock.p2 == 7) 5926 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 5927 else 5928 temp &= ~(LVDS_B0B3_POWER_UP | 5929 LVDS_CLKB_POWER_UP); 5930 5931 /* It would be nice to set 24 vs 18-bit mode 5932 * (LVDS_A3_POWER_UP) appropriately here, but we need to 5933 * look more thoroughly into how panels behave in the 5934 * two modes. 5935 */ 5936 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5937 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5938 temp |= LVDS_HSYNC_POLARITY; 5939 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5940 temp |= LVDS_VSYNC_POLARITY; 5941 I915_WRITE(PCH_LVDS, temp); 5942 } 5943 } 5944 5945 if (is_dp && !is_cpu_edp) { 5946 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5947 } else { 5948 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5949 /* For non-DP output, clear any trans DP clock recovery 5950 * setting.*/ 5951 I915_WRITE(TRANSDATA_M1(pipe), 0); 5952 I915_WRITE(TRANSDATA_N1(pipe), 0); 5953 I915_WRITE(TRANSDPLINK_M1(pipe), 0); 5954 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5955 } 5956 } 5957 5958 intel_crtc->lowfreq_avail = false; 5959 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5960 if (intel_crtc->pch_pll) { 5961 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5962 5963 /* Wait for the clocks to stabilize. */ 5964 POSTING_READ(intel_crtc->pch_pll->pll_reg); 5965 udelay(150); 5966 5967 /* The pixel multiplier can only be updated once the 5968 * DPLL is enabled and the clocks are stable. 5969 * 5970 * So write it again. 5971 */ 5972 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5973 } 5974 5975 if (intel_crtc->pch_pll) { 5976 if (is_lvds && has_reduced_clock && i915_powersave) { 5977 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); 5978 intel_crtc->lowfreq_avail = true; 5979 } else { 5980 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); 5981 } 5982 } 5983 } 5984 5985 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5986 5987 if (!is_dp || is_cpu_edp) 5988 ironlake_set_m_n(crtc, mode, adjusted_mode); 5989 5990 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 5991 if (is_cpu_edp) 5992 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5993 5994 haswell_set_pipeconf(crtc, adjusted_mode, dither); 5995 5996 /* Set up the display plane register */ 5997 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); 5998 POSTING_READ(DSPCNTR(plane)); 5999 6000 ret = intel_pipe_set_base(crtc, x, y, fb); 6001 6002 intel_update_watermarks(dev); 6003 6004 intel_update_linetime_watermarks(dev, pipe, adjusted_mode); 6005 6006 return ret; 6007 } 6008 6009 static int intel_crtc_mode_set(struct drm_crtc *crtc, 6010 struct drm_display_mode *mode, 6011 struct drm_display_mode *adjusted_mode, 6012 int x, int y, 6013 struct drm_framebuffer *fb) 6014 { 6015 struct drm_device *dev = crtc->dev; 6016 struct drm_i915_private *dev_priv = dev->dev_private; 6017 struct drm_encoder_helper_funcs *encoder_funcs; 6018 struct intel_encoder *encoder; 6019 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6020 int pipe = intel_crtc->pipe; 6021 int ret; 6022 6023 drm_vblank_pre_modeset(dev, pipe); 6024 6025 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 6026 x, y, fb); 6027 drm_vblank_post_modeset(dev, pipe); 6028 6029 if (ret != 0) 6030 return ret; 6031 6032 for_each_encoder_on_crtc(dev, crtc, encoder) { 6033 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", 6034 encoder->base.base.id, 6035 drm_get_encoder_name(&encoder->base), 6036 mode->base.id, mode->name); 6037 encoder_funcs = encoder->base.helper_private; 6038 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); 6039 } 6040 6041 return 0; 6042 } 6043 6044 static bool intel_eld_uptodate(struct drm_connector *connector, 6045 int reg_eldv, uint32_t bits_eldv, 6046 int reg_elda, uint32_t bits_elda, 6047 int reg_edid) 6048 { 6049 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6050 uint8_t *eld = connector->eld; 6051 uint32_t i; 6052 6053 i = I915_READ(reg_eldv); 6054 i &= bits_eldv; 6055 6056 if (!eld[0]) 6057 return !i; 6058 6059 if (!i) 6060 return false; 6061 6062 i = I915_READ(reg_elda); 6063 i &= ~bits_elda; 6064 I915_WRITE(reg_elda, i); 6065 6066 for (i = 0; i < eld[2]; i++) 6067 if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) 6068 return false; 6069 6070 return true; 6071 } 6072 6073 static void g4x_write_eld(struct drm_connector *connector, 6074 struct drm_crtc *crtc) 6075 { 6076 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6077 uint8_t *eld = connector->eld; 6078 uint32_t eldv; 6079 uint32_t len; 6080 uint32_t i; 6081 6082 i = I915_READ(G4X_AUD_VID_DID); 6083 6084 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) 6085 eldv = G4X_ELDV_DEVCL_DEVBLC; 6086 else 6087 eldv = G4X_ELDV_DEVCTG; 6088 6089 if (intel_eld_uptodate(connector, 6090 G4X_AUD_CNTL_ST, eldv, 6091 G4X_AUD_CNTL_ST, G4X_ELD_ADDR, 6092 G4X_HDMIW_HDMIEDID)) 6093 return; 6094 6095 i = I915_READ(G4X_AUD_CNTL_ST); 6096 i &= ~(eldv | G4X_ELD_ADDR); 6097 len = (i >> 9) & 0x1f; /* ELD buffer size */ 6098 I915_WRITE(G4X_AUD_CNTL_ST, i); 6099 6100 if (!eld[0]) 6101 return; 6102 6103 len = min_t(uint8_t, eld[2], len); 6104 DRM_DEBUG_DRIVER("ELD size %d\n", len); 6105 for (i = 0; i < len; i++) 6106 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); 6107 6108 i = I915_READ(G4X_AUD_CNTL_ST); 6109 i |= eldv; 6110 I915_WRITE(G4X_AUD_CNTL_ST, i); 6111 } 6112 6113 static void haswell_write_eld(struct drm_connector *connector, 6114 struct drm_crtc *crtc) 6115 { 6116 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6117 uint8_t *eld = connector->eld; 6118 struct drm_device *dev = crtc->dev; 6119 uint32_t eldv; 6120 uint32_t i; 6121 int len; 6122 int pipe = to_intel_crtc(crtc)->pipe; 6123 int tmp; 6124 6125 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); 6126 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); 6127 int aud_config = HSW_AUD_CFG(pipe); 6128 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; 6129 6130 6131 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); 6132 6133 /* Audio output enable */ 6134 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); 6135 tmp = I915_READ(aud_cntrl_st2); 6136 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); 6137 I915_WRITE(aud_cntrl_st2, tmp); 6138 6139 /* Wait for 1 vertical blank */ 6140 intel_wait_for_vblank(dev, pipe); 6141 6142 /* Set ELD valid state */ 6143 tmp = I915_READ(aud_cntrl_st2); 6144 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); 6145 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); 6146 I915_WRITE(aud_cntrl_st2, tmp); 6147 tmp = I915_READ(aud_cntrl_st2); 6148 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); 6149 6150 /* Enable HDMI mode */ 6151 tmp = I915_READ(aud_config); 6152 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); 6153 /* clear N_programing_enable and N_value_index */ 6154 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); 6155 I915_WRITE(aud_config, tmp); 6156 6157 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 6158 6159 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 6160 6161 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 6162 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 6163 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 6164 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 6165 } else 6166 I915_WRITE(aud_config, 0); 6167 6168 if (intel_eld_uptodate(connector, 6169 aud_cntrl_st2, eldv, 6170 aud_cntl_st, IBX_ELD_ADDRESS, 6171 hdmiw_hdmiedid)) 6172 return; 6173 6174 i = I915_READ(aud_cntrl_st2); 6175 i &= ~eldv; 6176 I915_WRITE(aud_cntrl_st2, i); 6177 6178 if (!eld[0]) 6179 return; 6180 6181 i = I915_READ(aud_cntl_st); 6182 i &= ~IBX_ELD_ADDRESS; 6183 I915_WRITE(aud_cntl_st, i); 6184 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ 6185 DRM_DEBUG_DRIVER("port num:%d\n", i); 6186 6187 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ 6188 DRM_DEBUG_DRIVER("ELD size %d\n", len); 6189 for (i = 0; i < len; i++) 6190 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); 6191 6192 i = I915_READ(aud_cntrl_st2); 6193 i |= eldv; 6194 I915_WRITE(aud_cntrl_st2, i); 6195 6196 } 6197 6198 static void ironlake_write_eld(struct drm_connector *connector, 6199 struct drm_crtc *crtc) 6200 { 6201 struct drm_i915_private *dev_priv = connector->dev->dev_private; 6202 uint8_t *eld = connector->eld; 6203 uint32_t eldv; 6204 uint32_t i; 6205 int len; 6206 int hdmiw_hdmiedid; 6207 int aud_config; 6208 int aud_cntl_st; 6209 int aud_cntrl_st2; 6210 int pipe = to_intel_crtc(crtc)->pipe; 6211 6212 if (HAS_PCH_IBX(connector->dev)) { 6213 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); 6214 aud_config = IBX_AUD_CFG(pipe); 6215 aud_cntl_st = IBX_AUD_CNTL_ST(pipe); 6216 aud_cntrl_st2 = IBX_AUD_CNTL_ST2; 6217 } else { 6218 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); 6219 aud_config = CPT_AUD_CFG(pipe); 6220 aud_cntl_st = CPT_AUD_CNTL_ST(pipe); 6221 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; 6222 } 6223 6224 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 6225 6226 i = I915_READ(aud_cntl_st); 6227 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ 6228 if (!i) { 6229 DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); 6230 /* operate blindly on all ports */ 6231 eldv = IBX_ELD_VALIDB; 6232 eldv |= IBX_ELD_VALIDB << 4; 6233 eldv |= IBX_ELD_VALIDB << 8; 6234 } else { 6235 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); 6236 eldv = IBX_ELD_VALIDB << ((i - 1) * 4); 6237 } 6238 6239 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 6240 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 6241 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 6242 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ 6243 } else 6244 I915_WRITE(aud_config, 0); 6245 6246 if (intel_eld_uptodate(connector, 6247 aud_cntrl_st2, eldv, 6248 aud_cntl_st, IBX_ELD_ADDRESS, 6249 hdmiw_hdmiedid)) 6250 return; 6251 6252 i = I915_READ(aud_cntrl_st2); 6253 i &= ~eldv; 6254 I915_WRITE(aud_cntrl_st2, i); 6255 6256 if (!eld[0]) 6257 return; 6258 6259 i = I915_READ(aud_cntl_st); 6260 i &= ~IBX_ELD_ADDRESS; 6261 I915_WRITE(aud_cntl_st, i); 6262 6263 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ 6264 DRM_DEBUG_DRIVER("ELD size %d\n", len); 6265 for (i = 0; i < len; i++) 6266 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); 6267 6268 i = I915_READ(aud_cntrl_st2); 6269 i |= eldv; 6270 I915_WRITE(aud_cntrl_st2, i); 6271 } 6272 6273 void intel_write_eld(struct drm_encoder *encoder, 6274 struct drm_display_mode *mode) 6275 { 6276 struct drm_crtc *crtc = encoder->crtc; 6277 struct drm_connector *connector; 6278 struct drm_device *dev = encoder->dev; 6279 struct drm_i915_private *dev_priv = dev->dev_private; 6280 6281 connector = drm_select_eld(encoder, mode); 6282 if (!connector) 6283 return; 6284 6285 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6286 connector->base.id, 6287 drm_get_connector_name(connector), 6288 connector->encoder->base.id, 6289 drm_get_encoder_name(connector->encoder)); 6290 6291 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 6292 6293 if (dev_priv->display.write_eld) 6294 dev_priv->display.write_eld(connector, crtc); 6295 } 6296 6297 /** Loads the palette/gamma unit for the CRTC with the prepared values */ 6298 void intel_crtc_load_lut(struct drm_crtc *crtc) 6299 { 6300 struct drm_device *dev = crtc->dev; 6301 struct drm_i915_private *dev_priv = dev->dev_private; 6302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6303 int palreg = PALETTE(intel_crtc->pipe); 6304 int i; 6305 6306 /* The clocks have to be on to load the palette. */ 6307 if (!crtc->enabled || !intel_crtc->active) 6308 return; 6309 6310 /* use legacy palette for Ironlake */ 6311 if (HAS_PCH_SPLIT(dev)) 6312 palreg = LGC_PALETTE(intel_crtc->pipe); 6313 6314 for (i = 0; i < 256; i++) { 6315 I915_WRITE(palreg + 4 * i, 6316 (intel_crtc->lut_r[i] << 16) | 6317 (intel_crtc->lut_g[i] << 8) | 6318 intel_crtc->lut_b[i]); 6319 } 6320 } 6321 6322 static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 6323 { 6324 struct drm_device *dev = crtc->dev; 6325 struct drm_i915_private *dev_priv = dev->dev_private; 6326 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6327 bool visible = base != 0; 6328 u32 cntl; 6329 6330 if (intel_crtc->cursor_visible == visible) 6331 return; 6332 6333 cntl = I915_READ(_CURACNTR); 6334 if (visible) { 6335 /* On these chipsets we can only modify the base whilst 6336 * the cursor is disabled. 6337 */ 6338 I915_WRITE(_CURABASE, base); 6339 6340 cntl &= ~(CURSOR_FORMAT_MASK); 6341 /* XXX width must be 64, stride 256 => 0x00 << 28 */ 6342 cntl |= CURSOR_ENABLE | 6343 CURSOR_GAMMA_ENABLE | 6344 CURSOR_FORMAT_ARGB; 6345 } else 6346 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); 6347 I915_WRITE(_CURACNTR, cntl); 6348 6349 intel_crtc->cursor_visible = visible; 6350 } 6351 6352 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 6353 { 6354 struct drm_device *dev = crtc->dev; 6355 struct drm_i915_private *dev_priv = dev->dev_private; 6356 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6357 int pipe = intel_crtc->pipe; 6358 bool visible = base != 0; 6359 6360 if (intel_crtc->cursor_visible != visible) { 6361 uint32_t cntl = I915_READ(CURCNTR(pipe)); 6362 if (base) { 6363 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); 6364 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 6365 cntl |= pipe << 28; /* Connect to correct pipe */ 6366 } else { 6367 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6368 cntl |= CURSOR_MODE_DISABLE; 6369 } 6370 I915_WRITE(CURCNTR(pipe), cntl); 6371 6372 intel_crtc->cursor_visible = visible; 6373 } 6374 /* and commit changes on next vblank */ 6375 I915_WRITE(CURBASE(pipe), base); 6376 } 6377 6378 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) 6379 { 6380 struct drm_device *dev = crtc->dev; 6381 struct drm_i915_private *dev_priv = dev->dev_private; 6382 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6383 int pipe = intel_crtc->pipe; 6384 bool visible = base != 0; 6385 6386 if (intel_crtc->cursor_visible != visible) { 6387 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); 6388 if (base) { 6389 cntl &= ~CURSOR_MODE; 6390 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 6391 } else { 6392 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6393 cntl |= CURSOR_MODE_DISABLE; 6394 } 6395 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6396 6397 intel_crtc->cursor_visible = visible; 6398 } 6399 /* and commit changes on next vblank */ 6400 I915_WRITE(CURBASE_IVB(pipe), base); 6401 } 6402 6403 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 6404 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 6405 bool on) 6406 { 6407 struct drm_device *dev = crtc->dev; 6408 struct drm_i915_private *dev_priv = dev->dev_private; 6409 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6410 int pipe = intel_crtc->pipe; 6411 int x = intel_crtc->cursor_x; 6412 int y = intel_crtc->cursor_y; 6413 u32 base, pos; 6414 bool visible; 6415 6416 pos = 0; 6417 6418 if (on && crtc->enabled && crtc->fb) { 6419 base = intel_crtc->cursor_addr; 6420 if (x > (int) crtc->fb->width) 6421 base = 0; 6422 6423 if (y > (int) crtc->fb->height) 6424 base = 0; 6425 } else 6426 base = 0; 6427 6428 if (x < 0) { 6429 if (x + intel_crtc->cursor_width < 0) 6430 base = 0; 6431 6432 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 6433 x = -x; 6434 } 6435 pos |= x << CURSOR_X_SHIFT; 6436 6437 if (y < 0) { 6438 if (y + intel_crtc->cursor_height < 0) 6439 base = 0; 6440 6441 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 6442 y = -y; 6443 } 6444 pos |= y << CURSOR_Y_SHIFT; 6445 6446 visible = base != 0; 6447 if (!visible && !intel_crtc->cursor_visible) 6448 return; 6449 6450 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 6451 I915_WRITE(CURPOS_IVB(pipe), pos); 6452 ivb_update_cursor(crtc, base); 6453 } else { 6454 I915_WRITE(CURPOS(pipe), pos); 6455 if (IS_845G(dev) || IS_I865G(dev)) 6456 i845_update_cursor(crtc, base); 6457 else 6458 i9xx_update_cursor(crtc, base); 6459 } 6460 } 6461 6462 static int intel_crtc_cursor_set(struct drm_crtc *crtc, 6463 struct drm_file *file, 6464 uint32_t handle, 6465 uint32_t width, uint32_t height) 6466 { 6467 struct drm_device *dev = crtc->dev; 6468 struct drm_i915_private *dev_priv = dev->dev_private; 6469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6470 struct drm_i915_gem_object *obj; 6471 uint32_t addr; 6472 int ret; 6473 6474 /* if we want to turn off the cursor ignore width and height */ 6475 if (!handle) { 6476 DRM_DEBUG_KMS("cursor off\n"); 6477 addr = 0; 6478 obj = NULL; 6479 DRM_LOCK(dev); 6480 goto finish; 6481 } 6482 6483 /* Currently we only support 64x64 cursors */ 6484 if (width != 64 || height != 64) { 6485 DRM_ERROR("we currently only support 64x64 cursors\n"); 6486 return -EINVAL; 6487 } 6488 6489 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 6490 if (&obj->base == NULL) 6491 return -ENOENT; 6492 6493 if (obj->base.size < width * height * 4) { 6494 DRM_ERROR("buffer is to small\n"); 6495 ret = -ENOMEM; 6496 goto fail; 6497 } 6498 6499 /* we only need to pin inside GTT if cursor is non-phy */ 6500 DRM_LOCK(dev); 6501 if (!dev_priv->info->cursor_needs_physical) { 6502 if (obj->tiling_mode) { 6503 DRM_ERROR("cursor cannot be tiled\n"); 6504 ret = -EINVAL; 6505 goto fail_locked; 6506 } 6507 6508 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); 6509 if (ret) { 6510 DRM_ERROR("failed to move cursor bo into the GTT\n"); 6511 goto fail_locked; 6512 } 6513 6514 ret = i915_gem_object_put_fence(obj); 6515 if (ret) { 6516 DRM_ERROR("failed to release fence for cursor"); 6517 goto fail_unpin; 6518 } 6519 6520 addr = obj->gtt_offset; 6521 } else { 6522 int align = IS_I830(dev) ? 16 * 1024 : 256; 6523 ret = i915_gem_attach_phys_object(dev, obj, 6524 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 6525 align); 6526 if (ret) { 6527 DRM_ERROR("failed to attach phys object\n"); 6528 goto fail_locked; 6529 } 6530 addr = obj->phys_obj->handle->busaddr; 6531 } 6532 6533 if (IS_GEN2(dev)) 6534 I915_WRITE(CURSIZE, (height << 12) | width); 6535 6536 finish: 6537 if (intel_crtc->cursor_bo) { 6538 if (dev_priv->info->cursor_needs_physical) { 6539 if (intel_crtc->cursor_bo != obj) 6540 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 6541 } else 6542 i915_gem_object_unpin(intel_crtc->cursor_bo); 6543 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 6544 } 6545 6546 DRM_UNLOCK(dev); 6547 6548 intel_crtc->cursor_addr = addr; 6549 intel_crtc->cursor_bo = obj; 6550 intel_crtc->cursor_width = width; 6551 intel_crtc->cursor_height = height; 6552 6553 intel_crtc_update_cursor(crtc, true); 6554 6555 return 0; 6556 fail_unpin: 6557 i915_gem_object_unpin(obj); 6558 fail_locked: 6559 DRM_UNLOCK(dev); 6560 fail: 6561 drm_gem_object_unreference_unlocked(&obj->base); 6562 return ret; 6563 } 6564 6565 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 6566 { 6567 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6568 6569 intel_crtc->cursor_x = x; 6570 intel_crtc->cursor_y = y; 6571 6572 intel_crtc_update_cursor(crtc, true); 6573 6574 return 0; 6575 } 6576 6577 /** Sets the color ramps on behalf of RandR */ 6578 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 6579 u16 blue, int regno) 6580 { 6581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6582 6583 intel_crtc->lut_r[regno] = red >> 8; 6584 intel_crtc->lut_g[regno] = green >> 8; 6585 intel_crtc->lut_b[regno] = blue >> 8; 6586 } 6587 6588 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 6589 u16 *blue, int regno) 6590 { 6591 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6592 6593 *red = intel_crtc->lut_r[regno] << 8; 6594 *green = intel_crtc->lut_g[regno] << 8; 6595 *blue = intel_crtc->lut_b[regno] << 8; 6596 } 6597 6598 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 6599 u16 *blue, uint32_t start, uint32_t size) 6600 { 6601 int end = (start + size > 256) ? 256 : start + size, i; 6602 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6603 6604 for (i = start; i < end; i++) { 6605 intel_crtc->lut_r[i] = red[i] >> 8; 6606 intel_crtc->lut_g[i] = green[i] >> 8; 6607 intel_crtc->lut_b[i] = blue[i] >> 8; 6608 } 6609 6610 intel_crtc_load_lut(crtc); 6611 } 6612 6613 /** 6614 * Get a pipe with a simple mode set on it for doing load-based monitor 6615 * detection. 6616 * 6617 * It will be up to the load-detect code to adjust the pipe as appropriate for 6618 * its requirements. The pipe will be connected to no other encoders. 6619 * 6620 * Currently this code will only succeed if there is a pipe with no encoders 6621 * configured for it. In the future, it could choose to temporarily disable 6622 * some outputs to free up a pipe for its use. 6623 * 6624 * \return crtc, or NULL if no pipes are available. 6625 */ 6626 6627 /* VESA 640x480x72Hz mode to set on the pipe */ 6628 static struct drm_display_mode load_detect_mode = { 6629 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 6630 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 6631 }; 6632 6633 static struct drm_framebuffer * 6634 intel_framebuffer_create(struct drm_device *dev, 6635 struct drm_mode_fb_cmd2 *mode_cmd, 6636 struct drm_i915_gem_object *obj) 6637 { 6638 struct intel_framebuffer *intel_fb; 6639 int ret; 6640 6641 intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO); 6642 if (!intel_fb) { 6643 drm_gem_object_unreference_unlocked(&obj->base); 6644 return ERR_PTR(-ENOMEM); 6645 } 6646 6647 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 6648 if (ret) { 6649 drm_gem_object_unreference_unlocked(&obj->base); 6650 kfree(intel_fb, DRM_MEM_KMS); 6651 return ERR_PTR(ret); 6652 } 6653 6654 return &intel_fb->base; 6655 } 6656 6657 static u32 6658 intel_framebuffer_pitch_for_width(int width, int bpp) 6659 { 6660 u32 pitch = howmany(width * bpp, 8); 6661 return roundup2(pitch, 64); 6662 } 6663 6664 static u32 6665 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 6666 { 6667 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 6668 return roundup2(pitch * mode->vdisplay, PAGE_SIZE); 6669 } 6670 6671 static struct drm_framebuffer * 6672 intel_framebuffer_create_for_mode(struct drm_device *dev, 6673 struct drm_display_mode *mode, 6674 int depth, int bpp) 6675 { 6676 struct drm_i915_gem_object *obj; 6677 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 6678 6679 obj = i915_gem_alloc_object(dev, 6680 intel_framebuffer_size_for_mode(mode, bpp)); 6681 if (obj == NULL) 6682 return ERR_PTR(-ENOMEM); 6683 6684 mode_cmd.width = mode->hdisplay; 6685 mode_cmd.height = mode->vdisplay; 6686 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 6687 bpp); 6688 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 6689 6690 return intel_framebuffer_create(dev, &mode_cmd, obj); 6691 } 6692 6693 static struct drm_framebuffer * 6694 mode_fits_in_fbdev(struct drm_device *dev, 6695 struct drm_display_mode *mode) 6696 { 6697 struct drm_i915_private *dev_priv = dev->dev_private; 6698 struct drm_i915_gem_object *obj; 6699 struct drm_framebuffer *fb; 6700 6701 if (dev_priv->fbdev == NULL) 6702 return NULL; 6703 6704 obj = dev_priv->fbdev->ifb.obj; 6705 if (obj == NULL) 6706 return NULL; 6707 6708 fb = &dev_priv->fbdev->ifb.base; 6709 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 6710 fb->bits_per_pixel)) 6711 return NULL; 6712 6713 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 6714 return NULL; 6715 6716 return fb; 6717 } 6718 6719 bool intel_get_load_detect_pipe(struct drm_connector *connector, 6720 struct drm_display_mode *mode, 6721 struct intel_load_detect_pipe *old) 6722 { 6723 struct intel_crtc *intel_crtc; 6724 struct intel_encoder *intel_encoder = 6725 intel_attached_encoder(connector); 6726 struct drm_crtc *possible_crtc; 6727 struct drm_encoder *encoder = &intel_encoder->base; 6728 struct drm_crtc *crtc = NULL; 6729 struct drm_device *dev = encoder->dev; 6730 struct drm_framebuffer *fb; 6731 int i = -1; 6732 6733 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6734 connector->base.id, drm_get_connector_name(connector), 6735 encoder->base.id, drm_get_encoder_name(encoder)); 6736 6737 /* 6738 * Algorithm gets a little messy: 6739 * 6740 * - if the connector already has an assigned crtc, use it (but make 6741 * sure it's on first) 6742 * 6743 * - try to find the first unused crtc that can drive this connector, 6744 * and use that if we find one 6745 */ 6746 6747 /* See if we already have a CRTC for this connector */ 6748 if (encoder->crtc) { 6749 crtc = encoder->crtc; 6750 6751 old->dpms_mode = connector->dpms; 6752 old->load_detect_temp = false; 6753 6754 /* Make sure the crtc and connector are running */ 6755 if (connector->dpms != DRM_MODE_DPMS_ON) 6756 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); 6757 6758 return true; 6759 } 6760 6761 /* Find an unused one (if possible) */ 6762 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { 6763 i++; 6764 if (!(encoder->possible_crtcs & (1 << i))) 6765 continue; 6766 if (!possible_crtc->enabled) { 6767 crtc = possible_crtc; 6768 break; 6769 } 6770 } 6771 6772 /* 6773 * If we didn't find an unused CRTC, don't use any. 6774 */ 6775 if (!crtc) { 6776 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 6777 return false; 6778 } 6779 6780 intel_encoder->new_crtc = to_intel_crtc(crtc); 6781 to_intel_connector(connector)->new_encoder = intel_encoder; 6782 6783 intel_crtc = to_intel_crtc(crtc); 6784 old->dpms_mode = connector->dpms; 6785 old->load_detect_temp = true; 6786 old->release_fb = NULL; 6787 6788 if (!mode) 6789 mode = &load_detect_mode; 6790 6791 /* We need a framebuffer large enough to accommodate all accesses 6792 * that the plane may generate whilst we perform load detection. 6793 * We can not rely on the fbcon either being present (we get called 6794 * during its initialisation to detect all boot displays, or it may 6795 * not even exist) or that it is large enough to satisfy the 6796 * requested mode. 6797 */ 6798 fb = mode_fits_in_fbdev(dev, mode); 6799 if (fb == NULL) { 6800 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 6801 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 6802 old->release_fb = fb; 6803 } else 6804 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 6805 if (IS_ERR(fb)) { 6806 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 6807 return false; 6808 } 6809 6810 if (!intel_set_mode(crtc, mode, 0, 0, fb)) { 6811 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 6812 if (old->release_fb) 6813 old->release_fb->funcs->destroy(old->release_fb); 6814 return false; 6815 } 6816 6817 /* let the connector get through one full cycle before testing */ 6818 intel_wait_for_vblank(dev, intel_crtc->pipe); 6819 6820 return true; 6821 } 6822 6823 void intel_release_load_detect_pipe(struct drm_connector *connector, 6824 struct intel_load_detect_pipe *old) 6825 { 6826 struct intel_encoder *intel_encoder = 6827 intel_attached_encoder(connector); 6828 struct drm_encoder *encoder = &intel_encoder->base; 6829 6830 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6831 connector->base.id, drm_get_connector_name(connector), 6832 encoder->base.id, drm_get_encoder_name(encoder)); 6833 6834 if (old->load_detect_temp) { 6835 struct drm_crtc *crtc = encoder->crtc; 6836 6837 to_intel_connector(connector)->new_encoder = NULL; 6838 intel_encoder->new_crtc = NULL; 6839 intel_set_mode(crtc, NULL, 0, 0, NULL); 6840 6841 if (old->release_fb) 6842 old->release_fb->funcs->destroy(old->release_fb); 6843 6844 return; 6845 } 6846 6847 /* Switch crtc and encoder back off if necessary */ 6848 if (old->dpms_mode != DRM_MODE_DPMS_ON) 6849 connector->funcs->dpms(connector, old->dpms_mode); 6850 } 6851 6852 /* Returns the clock of the currently programmed mode of the given pipe. */ 6853 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) 6854 { 6855 struct drm_i915_private *dev_priv = dev->dev_private; 6856 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6857 int pipe = intel_crtc->pipe; 6858 u32 dpll = I915_READ(DPLL(pipe)); 6859 u32 fp; 6860 intel_clock_t clock; 6861 6862 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 6863 fp = I915_READ(FP0(pipe)); 6864 else 6865 fp = I915_READ(FP1(pipe)); 6866 6867 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 6868 if (IS_PINEVIEW(dev)) { 6869 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 6870 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 6871 } else { 6872 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 6873 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 6874 } 6875 6876 if (!IS_GEN2(dev)) { 6877 if (IS_PINEVIEW(dev)) 6878 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 6879 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 6880 else 6881 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 6882 DPLL_FPA01_P1_POST_DIV_SHIFT); 6883 6884 switch (dpll & DPLL_MODE_MASK) { 6885 case DPLLB_MODE_DAC_SERIAL: 6886 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 6887 5 : 10; 6888 break; 6889 case DPLLB_MODE_LVDS: 6890 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 6891 7 : 14; 6892 break; 6893 default: 6894 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 6895 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 6896 return 0; 6897 } 6898 6899 /* XXX: Handle the 100Mhz refclk */ 6900 intel_clock(dev, 96000, &clock); 6901 } else { 6902 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 6903 6904 if (is_lvds) { 6905 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 6906 DPLL_FPA01_P1_POST_DIV_SHIFT); 6907 clock.p2 = 14; 6908 6909 if ((dpll & PLL_REF_INPUT_MASK) == 6910 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 6911 /* XXX: might not be 66MHz */ 6912 intel_clock(dev, 66000, &clock); 6913 } else 6914 intel_clock(dev, 48000, &clock); 6915 } else { 6916 if (dpll & PLL_P1_DIVIDE_BY_TWO) 6917 clock.p1 = 2; 6918 else { 6919 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 6920 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 6921 } 6922 if (dpll & PLL_P2_DIVIDE_BY_4) 6923 clock.p2 = 4; 6924 else 6925 clock.p2 = 2; 6926 6927 intel_clock(dev, 48000, &clock); 6928 } 6929 } 6930 6931 /* XXX: It would be nice to validate the clocks, but we can't reuse 6932 * i830PllIsValid() because it relies on the xf86_config connector 6933 * configuration being accurate, which it isn't necessarily. 6934 */ 6935 6936 return clock.dot; 6937 } 6938 6939 /** Returns the currently programmed mode of the given pipe. */ 6940 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 6941 struct drm_crtc *crtc) 6942 { 6943 struct drm_i915_private *dev_priv = dev->dev_private; 6944 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6945 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 6946 struct drm_display_mode *mode; 6947 int htot = I915_READ(HTOTAL(cpu_transcoder)); 6948 int hsync = I915_READ(HSYNC(cpu_transcoder)); 6949 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 6950 int vsync = I915_READ(VSYNC(cpu_transcoder)); 6951 6952 mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); 6953 if (!mode) 6954 return NULL; 6955 6956 mode->clock = intel_crtc_clock_get(dev, crtc); 6957 mode->hdisplay = (htot & 0xffff) + 1; 6958 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 6959 mode->hsync_start = (hsync & 0xffff) + 1; 6960 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 6961 mode->vdisplay = (vtot & 0xffff) + 1; 6962 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 6963 mode->vsync_start = (vsync & 0xffff) + 1; 6964 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 6965 6966 drm_mode_set_name(mode); 6967 6968 return mode; 6969 } 6970 6971 static void intel_increase_pllclock(struct drm_crtc *crtc) 6972 { 6973 struct drm_device *dev = crtc->dev; 6974 drm_i915_private_t *dev_priv = dev->dev_private; 6975 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6976 int pipe = intel_crtc->pipe; 6977 int dpll_reg = DPLL(pipe); 6978 int dpll; 6979 6980 if (HAS_PCH_SPLIT(dev)) 6981 return; 6982 6983 if (!dev_priv->lvds_downclock_avail) 6984 return; 6985 6986 dpll = I915_READ(dpll_reg); 6987 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 6988 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 6989 6990 assert_panel_unlocked(dev_priv, pipe); 6991 6992 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 6993 I915_WRITE(dpll_reg, dpll); 6994 intel_wait_for_vblank(dev, pipe); 6995 6996 dpll = I915_READ(dpll_reg); 6997 if (dpll & DISPLAY_RATE_SELECT_FPA1) 6998 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 6999 } 7000 } 7001 7002 static void intel_decrease_pllclock(struct drm_crtc *crtc) 7003 { 7004 struct drm_device *dev = crtc->dev; 7005 drm_i915_private_t *dev_priv = dev->dev_private; 7006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7007 7008 if (HAS_PCH_SPLIT(dev)) 7009 return; 7010 7011 if (!dev_priv->lvds_downclock_avail) 7012 return; 7013 7014 /* 7015 * Since this is called by a timer, we should never get here in 7016 * the manual case. 7017 */ 7018 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 7019 int pipe = intel_crtc->pipe; 7020 int dpll_reg = DPLL(pipe); 7021 int dpll; 7022 7023 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 7024 7025 assert_panel_unlocked(dev_priv, pipe); 7026 7027 dpll = I915_READ(dpll_reg); 7028 dpll |= DISPLAY_RATE_SELECT_FPA1; 7029 I915_WRITE(dpll_reg, dpll); 7030 intel_wait_for_vblank(dev, pipe); 7031 dpll = I915_READ(dpll_reg); 7032 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 7033 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 7034 } 7035 } 7036 7037 void intel_mark_busy(struct drm_device *dev) 7038 { 7039 i915_update_gfx_val(dev->dev_private); 7040 } 7041 7042 void intel_mark_idle(struct drm_device *dev) 7043 { 7044 struct drm_crtc *crtc; 7045 7046 if (!i915_powersave) 7047 return; 7048 7049 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 7050 if (!crtc->fb) 7051 continue; 7052 7053 intel_decrease_pllclock(crtc); 7054 } 7055 } 7056 7057 void intel_mark_fb_busy(struct drm_i915_gem_object *obj) 7058 { 7059 struct drm_device *dev = obj->base.dev; 7060 struct drm_crtc *crtc; 7061 7062 if (!i915_powersave) 7063 return; 7064 7065 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 7066 if (!crtc->fb) 7067 continue; 7068 7069 if (to_intel_framebuffer(crtc->fb)->obj == obj) 7070 intel_increase_pllclock(crtc); 7071 } 7072 } 7073 7074 static void intel_crtc_destroy(struct drm_crtc *crtc) 7075 { 7076 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7077 struct drm_device *dev = crtc->dev; 7078 struct intel_unpin_work *work; 7079 7080 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 7081 work = intel_crtc->unpin_work; 7082 intel_crtc->unpin_work = NULL; 7083 lockmgr(&dev->event_lock, LK_RELEASE); 7084 7085 if (work) { 7086 cancel_work_sync(&work->work); 7087 kfree(work, DRM_MEM_KMS); 7088 } 7089 7090 drm_crtc_cleanup(crtc); 7091 7092 drm_free(intel_crtc, DRM_MEM_KMS); 7093 } 7094 7095 static void intel_unpin_work_fn(struct work_struct *__work) 7096 { 7097 struct intel_unpin_work *work = 7098 container_of(__work, struct intel_unpin_work, work); 7099 struct drm_device *dev = work->crtc->dev; 7100 7101 DRM_LOCK(dev); 7102 intel_unpin_fb_obj(work->old_fb_obj); 7103 drm_gem_object_unreference(&work->pending_flip_obj->base); 7104 drm_gem_object_unreference(&work->old_fb_obj->base); 7105 7106 intel_update_fbc(dev); 7107 DRM_UNLOCK(dev); 7108 7109 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); 7110 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); 7111 7112 drm_free(work, DRM_MEM_KMS); 7113 } 7114 7115 static void do_intel_finish_page_flip(struct drm_device *dev, 7116 struct drm_crtc *crtc) 7117 { 7118 drm_i915_private_t *dev_priv = dev->dev_private; 7119 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7120 struct intel_unpin_work *work; 7121 struct drm_i915_gem_object *obj; 7122 7123 /* Ignore early vblank irqs */ 7124 if (intel_crtc == NULL) 7125 return; 7126 7127 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 7128 work = intel_crtc->unpin_work; 7129 7130 /* Ensure we don't miss a work->pending update ... */ 7131 cpu_lfence(); 7132 7133 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 7134 lockmgr(&dev->event_lock, LK_RELEASE); 7135 return; 7136 } 7137 7138 /* and that the unpin work is consistent wrt ->pending. */ 7139 cpu_lfence(); 7140 7141 intel_crtc->unpin_work = NULL; 7142 7143 if (work->event) 7144 drm_send_vblank_event(dev, intel_crtc->pipe, work->event); 7145 7146 drm_vblank_put(dev, intel_crtc->pipe); 7147 7148 lockmgr(&dev->event_lock, LK_RELEASE); 7149 7150 obj = work->old_fb_obj; 7151 7152 atomic_clear_mask(1 << intel_crtc->plane, 7153 &obj->pending_flip.counter); 7154 wake_up(&dev_priv->pending_flip_queue); 7155 7156 queue_work(dev_priv->wq, &work->work); 7157 } 7158 7159 void intel_finish_page_flip(struct drm_device *dev, int pipe) 7160 { 7161 drm_i915_private_t *dev_priv = dev->dev_private; 7162 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 7163 7164 do_intel_finish_page_flip(dev, crtc); 7165 } 7166 7167 void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 7168 { 7169 drm_i915_private_t *dev_priv = dev->dev_private; 7170 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 7171 7172 do_intel_finish_page_flip(dev, crtc); 7173 } 7174 7175 void intel_prepare_page_flip(struct drm_device *dev, int plane) 7176 { 7177 drm_i915_private_t *dev_priv = dev->dev_private; 7178 struct intel_crtc *intel_crtc = 7179 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 7180 7181 /* NB: An MMIO update of the plane base pointer will also 7182 * generate a page-flip completion irq, i.e. every modeset 7183 * is also accompanied by a spurious intel_prepare_page_flip(). 7184 */ 7185 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 7186 if (intel_crtc->unpin_work) 7187 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 7188 lockmgr(&dev->event_lock, LK_RELEASE); 7189 } 7190 7191 inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 7192 { 7193 /* Ensure that the work item is consistent when activating it ... */ 7194 cpu_ccfence(); 7195 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); 7196 /* and that it is marked active as soon as the irq could fire. */ 7197 cpu_ccfence(); 7198 } 7199 7200 static int intel_gen2_queue_flip(struct drm_device *dev, 7201 struct drm_crtc *crtc, 7202 struct drm_framebuffer *fb, 7203 struct drm_i915_gem_object *obj) 7204 { 7205 struct drm_i915_private *dev_priv = dev->dev_private; 7206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7207 u32 flip_mask; 7208 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 7209 int ret; 7210 7211 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7212 if (ret) 7213 goto err; 7214 7215 ret = intel_ring_begin(ring, 6); 7216 if (ret) 7217 goto err_unpin; 7218 7219 /* Can't queue multiple flips, so wait for the previous 7220 * one to finish before executing the next. 7221 */ 7222 if (intel_crtc->plane) 7223 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 7224 else 7225 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 7226 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 7227 intel_ring_emit(ring, MI_NOOP); 7228 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7229 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7230 intel_ring_emit(ring, fb->pitches[0]); 7231 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7232 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7233 7234 intel_mark_page_flip_active(intel_crtc); 7235 intel_ring_advance(ring); 7236 return 0; 7237 7238 err_unpin: 7239 intel_unpin_fb_obj(obj); 7240 err: 7241 return ret; 7242 } 7243 7244 static int intel_gen3_queue_flip(struct drm_device *dev, 7245 struct drm_crtc *crtc, 7246 struct drm_framebuffer *fb, 7247 struct drm_i915_gem_object *obj) 7248 { 7249 struct drm_i915_private *dev_priv = dev->dev_private; 7250 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7251 u32 flip_mask; 7252 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 7253 int ret; 7254 7255 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7256 if (ret) 7257 goto err; 7258 7259 ret = intel_ring_begin(ring, 6); 7260 if (ret) 7261 goto err_unpin; 7262 7263 if (intel_crtc->plane) 7264 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 7265 else 7266 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 7267 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 7268 intel_ring_emit(ring, MI_NOOP); 7269 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 7270 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7271 intel_ring_emit(ring, fb->pitches[0]); 7272 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7273 intel_ring_emit(ring, MI_NOOP); 7274 7275 intel_mark_page_flip_active(intel_crtc); 7276 intel_ring_advance(ring); 7277 return 0; 7278 7279 err_unpin: 7280 intel_unpin_fb_obj(obj); 7281 err: 7282 return ret; 7283 } 7284 7285 static int intel_gen4_queue_flip(struct drm_device *dev, 7286 struct drm_crtc *crtc, 7287 struct drm_framebuffer *fb, 7288 struct drm_i915_gem_object *obj) 7289 { 7290 struct drm_i915_private *dev_priv = dev->dev_private; 7291 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7292 uint32_t pf, pipesrc; 7293 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 7294 int ret; 7295 7296 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7297 if (ret) 7298 goto err; 7299 7300 ret = intel_ring_begin(ring, 4); 7301 if (ret) 7302 goto err_unpin; 7303 7304 /* i965+ uses the linear or tiled offsets from the 7305 * Display Registers (which do not change across a page-flip) 7306 * so we need only reprogram the base address. 7307 */ 7308 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7309 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7310 intel_ring_emit(ring, fb->pitches[0]); 7311 intel_ring_emit(ring, 7312 (obj->gtt_offset + intel_crtc->dspaddr_offset) | 7313 obj->tiling_mode); 7314 7315 /* XXX Enabling the panel-fitter across page-flip is so far 7316 * untested on non-native modes, so ignore it for now. 7317 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 7318 */ 7319 pf = 0; 7320 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7321 intel_ring_emit(ring, pf | pipesrc); 7322 7323 intel_mark_page_flip_active(intel_crtc); 7324 intel_ring_advance(ring); 7325 return 0; 7326 7327 err_unpin: 7328 intel_unpin_fb_obj(obj); 7329 err: 7330 return ret; 7331 } 7332 7333 static int intel_gen6_queue_flip(struct drm_device *dev, 7334 struct drm_crtc *crtc, 7335 struct drm_framebuffer *fb, 7336 struct drm_i915_gem_object *obj) 7337 { 7338 struct drm_i915_private *dev_priv = dev->dev_private; 7339 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7340 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 7341 uint32_t pf, pipesrc; 7342 int ret; 7343 7344 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7345 if (ret) 7346 goto err; 7347 7348 ret = intel_ring_begin(ring, 4); 7349 if (ret) 7350 goto err_unpin; 7351 7352 intel_ring_emit(ring, MI_DISPLAY_FLIP | 7353 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 7354 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 7355 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7356 7357 /* Contrary to the suggestions in the documentation, 7358 * "Enable Panel Fitter" does not seem to be required when page 7359 * flipping with a non-native mode, and worse causes a normal 7360 * modeset to fail. 7361 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 7362 */ 7363 pf = 0; 7364 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7365 intel_ring_emit(ring, pf | pipesrc); 7366 7367 intel_mark_page_flip_active(intel_crtc); 7368 intel_ring_advance(ring); 7369 return 0; 7370 7371 err_unpin: 7372 intel_unpin_fb_obj(obj); 7373 err: 7374 return ret; 7375 } 7376 7377 /* 7378 * On gen7 we currently use the blit ring because (in early silicon at least) 7379 * the render ring doesn't give us interrpts for page flip completion, which 7380 * means clients will hang after the first flip is queued. Fortunately the 7381 * blit ring generates interrupts properly, so use it instead. 7382 */ 7383 static int intel_gen7_queue_flip(struct drm_device *dev, 7384 struct drm_crtc *crtc, 7385 struct drm_framebuffer *fb, 7386 struct drm_i915_gem_object *obj) 7387 { 7388 struct drm_i915_private *dev_priv = dev->dev_private; 7389 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7390 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 7391 uint32_t plane_bit = 0; 7392 int ret; 7393 7394 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7395 if (ret) 7396 goto err; 7397 7398 switch(intel_crtc->plane) { 7399 case PLANE_A: 7400 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 7401 break; 7402 case PLANE_B: 7403 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 7404 break; 7405 case PLANE_C: 7406 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 7407 break; 7408 default: 7409 WARN_ONCE(1, "unknown plane in flip command\n"); 7410 ret = -ENODEV; 7411 goto err_unpin; 7412 } 7413 7414 ret = intel_ring_begin(ring, 4); 7415 if (ret) 7416 goto err_unpin; 7417 7418 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7419 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7420 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7421 intel_ring_emit(ring, (MI_NOOP)); 7422 7423 intel_mark_page_flip_active(intel_crtc); 7424 intel_ring_advance(ring); 7425 return 0; 7426 7427 err_unpin: 7428 intel_unpin_fb_obj(obj); 7429 err: 7430 return ret; 7431 } 7432 7433 static int intel_default_queue_flip(struct drm_device *dev, 7434 struct drm_crtc *crtc, 7435 struct drm_framebuffer *fb, 7436 struct drm_i915_gem_object *obj) 7437 { 7438 return -ENODEV; 7439 } 7440 7441 static int intel_crtc_page_flip(struct drm_crtc *crtc, 7442 struct drm_framebuffer *fb, 7443 struct drm_pending_vblank_event *event) 7444 { 7445 struct drm_device *dev = crtc->dev; 7446 struct drm_i915_private *dev_priv = dev->dev_private; 7447 struct drm_framebuffer *old_fb = crtc->fb; 7448 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; 7449 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7450 struct intel_unpin_work *work; 7451 int ret; 7452 7453 /* Can't change pixel format via MI display flips. */ 7454 if (fb->pixel_format != crtc->fb->pixel_format) 7455 return -EINVAL; 7456 7457 /* 7458 * TILEOFF/LINOFF registers can't be changed via MI display flips. 7459 * Note that pitch changes could also affect these register. 7460 */ 7461 if (INTEL_INFO(dev)->gen > 3 && 7462 (fb->offsets[0] != crtc->fb->offsets[0] || 7463 fb->pitches[0] != crtc->fb->pitches[0])) 7464 return -EINVAL; 7465 7466 work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO); 7467 if (work == NULL) 7468 return -ENOMEM; 7469 7470 work->event = event; 7471 work->crtc = crtc; 7472 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; 7473 INIT_WORK(&work->work, intel_unpin_work_fn); 7474 7475 ret = drm_vblank_get(dev, intel_crtc->pipe); 7476 if (ret) 7477 goto free_work; 7478 7479 /* We borrow the event spin lock for protecting unpin_work */ 7480 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 7481 if (intel_crtc->unpin_work) { 7482 lockmgr(&dev->event_lock, LK_RELEASE); 7483 drm_free(work, DRM_MEM_KMS); 7484 drm_vblank_put(dev, intel_crtc->pipe); 7485 7486 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 7487 return -EBUSY; 7488 } 7489 intel_crtc->unpin_work = work; 7490 lockmgr(&dev->event_lock, LK_RELEASE); 7491 7492 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 7493 flush_workqueue(dev_priv->wq); 7494 7495 ret = i915_mutex_lock_interruptible(dev); 7496 if (ret) 7497 goto cleanup; 7498 7499 /* Reference the objects for the scheduled work. */ 7500 drm_gem_object_reference(&work->old_fb_obj->base); 7501 drm_gem_object_reference(&obj->base); 7502 7503 crtc->fb = fb; 7504 7505 work->pending_flip_obj = obj; 7506 7507 work->enable_stall_check = true; 7508 7509 /* Block clients from rendering to the new back buffer until 7510 * the flip occurs and the object is no longer visible. 7511 */ 7512 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7513 atomic_inc(&intel_crtc->unpin_work_count); 7514 7515 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7516 if (ret) 7517 goto cleanup_pending; 7518 7519 intel_disable_fbc(dev); 7520 intel_mark_fb_busy(obj); 7521 DRM_UNLOCK(dev); 7522 7523 return 0; 7524 7525 cleanup_pending: 7526 atomic_dec(&intel_crtc->unpin_work_count); 7527 crtc->fb = old_fb; 7528 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7529 drm_gem_object_unreference(&work->old_fb_obj->base); 7530 drm_gem_object_unreference(&obj->base); 7531 DRM_UNLOCK(dev); 7532 7533 cleanup: 7534 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 7535 intel_crtc->unpin_work = NULL; 7536 lockmgr(&dev->event_lock, LK_RELEASE); 7537 7538 drm_vblank_put(dev, intel_crtc->pipe); 7539 free_work: 7540 drm_free(work, DRM_MEM_KMS); 7541 7542 return ret; 7543 } 7544 7545 static struct drm_crtc_helper_funcs intel_helper_funcs = { 7546 .mode_set_base_atomic = intel_pipe_set_base_atomic, 7547 .load_lut = intel_crtc_load_lut, 7548 .disable = intel_crtc_noop, 7549 }; 7550 7551 bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) 7552 { 7553 struct intel_encoder *other_encoder; 7554 struct drm_crtc *crtc = &encoder->new_crtc->base; 7555 7556 if (WARN_ON(!crtc)) 7557 return false; 7558 7559 list_for_each_entry(other_encoder, 7560 &crtc->dev->mode_config.encoder_list, 7561 base.head) { 7562 7563 if (&other_encoder->new_crtc->base != crtc || 7564 encoder == other_encoder) 7565 continue; 7566 else 7567 return true; 7568 } 7569 7570 return false; 7571 } 7572 7573 static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, 7574 struct drm_crtc *crtc) 7575 { 7576 struct drm_device *dev; 7577 struct drm_crtc *tmp; 7578 int crtc_mask = 1; 7579 7580 WARN(!crtc, "checking null crtc?\n"); 7581 /* profmakx: this is to prevent the kernel from panicing 7582 */ 7583 if(!crtc) { 7584 return false; 7585 } 7586 7587 7588 dev = crtc->dev; 7589 7590 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { 7591 if (tmp == crtc) 7592 break; 7593 crtc_mask <<= 1; 7594 } 7595 7596 if (encoder->possible_crtcs & crtc_mask) 7597 return true; 7598 return false; 7599 } 7600 7601 /** 7602 * intel_modeset_update_staged_output_state 7603 * 7604 * Updates the staged output configuration state, e.g. after we've read out the 7605 * current hw state. 7606 */ 7607 static void intel_modeset_update_staged_output_state(struct drm_device *dev) 7608 { 7609 struct intel_encoder *encoder; 7610 struct intel_connector *connector; 7611 7612 list_for_each_entry(connector, &dev->mode_config.connector_list, 7613 base.head) { 7614 connector->new_encoder = 7615 to_intel_encoder(connector->base.encoder); 7616 } 7617 7618 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7619 base.head) { 7620 encoder->new_crtc = 7621 to_intel_crtc(encoder->base.crtc); 7622 } 7623 } 7624 7625 /** 7626 * intel_modeset_commit_output_state 7627 * 7628 * This function copies the stage display pipe configuration to the real one. 7629 */ 7630 static void intel_modeset_commit_output_state(struct drm_device *dev) 7631 { 7632 struct intel_encoder *encoder; 7633 struct intel_connector *connector; 7634 7635 list_for_each_entry(connector, &dev->mode_config.connector_list, 7636 base.head) { 7637 connector->base.encoder = &connector->new_encoder->base; 7638 } 7639 7640 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7641 base.head) { 7642 encoder->base.crtc = &encoder->new_crtc->base; 7643 } 7644 } 7645 7646 static struct drm_display_mode * 7647 intel_modeset_adjusted_mode(struct drm_crtc *crtc, 7648 struct drm_display_mode *mode) 7649 { 7650 struct drm_device *dev = crtc->dev; 7651 struct drm_display_mode *adjusted_mode; 7652 struct drm_encoder_helper_funcs *encoder_funcs; 7653 struct intel_encoder *encoder; 7654 7655 adjusted_mode = drm_mode_duplicate(dev, mode); 7656 if (!adjusted_mode) 7657 return ERR_PTR(-ENOMEM); 7658 7659 /* Pass our mode to the connectors and the CRTC to give them a chance to 7660 * adjust it according to limitations or connector properties, and also 7661 * a chance to reject the mode entirely. 7662 */ 7663 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7664 base.head) { 7665 7666 if (&encoder->new_crtc->base != crtc) 7667 continue; 7668 encoder_funcs = encoder->base.helper_private; 7669 if (!(encoder_funcs->mode_fixup(&encoder->base, mode, 7670 adjusted_mode))) { 7671 DRM_DEBUG_KMS("Encoder fixup failed\n"); 7672 goto fail; 7673 } 7674 } 7675 7676 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) { 7677 DRM_DEBUG_KMS("CRTC fixup failed\n"); 7678 goto fail; 7679 } 7680 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 7681 7682 return adjusted_mode; 7683 fail: 7684 drm_mode_destroy(dev, adjusted_mode); 7685 return ERR_PTR(-EINVAL); 7686 } 7687 7688 /* Computes which crtcs are affected and sets the relevant bits in the mask. For 7689 * simplicity we use the crtc's pipe number (because it's easier to obtain). */ 7690 static void 7691 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, 7692 unsigned *prepare_pipes, unsigned *disable_pipes) 7693 { 7694 struct intel_crtc *intel_crtc; 7695 struct drm_device *dev = crtc->dev; 7696 struct intel_encoder *encoder; 7697 struct intel_connector *connector; 7698 struct drm_crtc *tmp_crtc; 7699 7700 *disable_pipes = *modeset_pipes = *prepare_pipes = 0; 7701 7702 /* Check which crtcs have changed outputs connected to them, these need 7703 * to be part of the prepare_pipes mask. We don't (yet) support global 7704 * modeset across multiple crtcs, so modeset_pipes will only have one 7705 * bit set at most. */ 7706 list_for_each_entry(connector, &dev->mode_config.connector_list, 7707 base.head) { 7708 if (connector->base.encoder == &connector->new_encoder->base) 7709 continue; 7710 7711 if (connector->base.encoder) { 7712 tmp_crtc = connector->base.encoder->crtc; 7713 7714 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; 7715 } 7716 7717 if (connector->new_encoder) 7718 *prepare_pipes |= 7719 1 << connector->new_encoder->new_crtc->pipe; 7720 } 7721 7722 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7723 base.head) { 7724 if (encoder->base.crtc == &encoder->new_crtc->base) 7725 continue; 7726 7727 if (encoder->base.crtc) { 7728 tmp_crtc = encoder->base.crtc; 7729 7730 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; 7731 } 7732 7733 if (encoder->new_crtc) 7734 *prepare_pipes |= 1 << encoder->new_crtc->pipe; 7735 } 7736 7737 /* Check for any pipes that will be fully disabled ... */ 7738 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 7739 base.head) { 7740 bool used = false; 7741 7742 /* Don't try to disable disabled crtcs. */ 7743 if (!intel_crtc->base.enabled) 7744 continue; 7745 7746 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7747 base.head) { 7748 if (encoder->new_crtc == intel_crtc) 7749 used = true; 7750 } 7751 7752 if (!used) 7753 *disable_pipes |= 1 << intel_crtc->pipe; 7754 } 7755 7756 7757 /* set_mode is also used to update properties on life display pipes. */ 7758 intel_crtc = to_intel_crtc(crtc); 7759 if (crtc->enabled) 7760 *prepare_pipes |= 1 << intel_crtc->pipe; 7761 7762 /* 7763 * For simplicity do a full modeset on any pipe where the output routing 7764 * changed. We could be more clever, but that would require us to be 7765 * more careful with calling the relevant encoder->mode_set functions. 7766 */ 7767 if (*prepare_pipes) 7768 *modeset_pipes = *prepare_pipes; 7769 7770 /* ... and mask these out. */ 7771 *modeset_pipes &= ~(*disable_pipes); 7772 *prepare_pipes &= ~(*disable_pipes); 7773 7774 /* 7775 * HACK: We don't (yet) fully support global modesets. intel_set_config 7776 * obies this rule, but the modeset restore mode of 7777 * intel_modeset_setup_hw_state does not. 7778 */ 7779 *modeset_pipes &= 1 << intel_crtc->pipe; 7780 *prepare_pipes &= 1 << intel_crtc->pipe; 7781 } 7782 7783 static bool intel_crtc_in_use(struct drm_crtc *crtc) 7784 { 7785 struct drm_encoder *encoder; 7786 struct drm_device *dev = crtc->dev; 7787 7788 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 7789 if (encoder->crtc == crtc) 7790 return true; 7791 7792 return false; 7793 } 7794 7795 static void 7796 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) 7797 { 7798 struct intel_encoder *intel_encoder; 7799 struct intel_crtc *intel_crtc; 7800 struct drm_connector *connector; 7801 7802 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, 7803 base.head) { 7804 if (!intel_encoder->base.crtc) 7805 continue; 7806 7807 intel_crtc = to_intel_crtc(intel_encoder->base.crtc); 7808 7809 if (prepare_pipes & (1 << intel_crtc->pipe)) 7810 intel_encoder->connectors_active = false; 7811 } 7812 7813 intel_modeset_commit_output_state(dev); 7814 7815 /* Update computed state. */ 7816 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 7817 base.head) { 7818 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); 7819 } 7820 7821 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 7822 if (!connector->encoder || !connector->encoder->crtc) 7823 continue; 7824 7825 intel_crtc = to_intel_crtc(connector->encoder->crtc); 7826 7827 if (prepare_pipes & (1 << intel_crtc->pipe)) { 7828 struct drm_property *dpms_property = 7829 dev->mode_config.dpms_property; 7830 7831 connector->dpms = DRM_MODE_DPMS_ON; 7832 drm_object_property_set_value(&connector->base, 7833 dpms_property, 7834 DRM_MODE_DPMS_ON); 7835 7836 intel_encoder = to_intel_encoder(connector->encoder); 7837 intel_encoder->connectors_active = true; 7838 } 7839 } 7840 7841 } 7842 7843 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 7844 list_for_each_entry((intel_crtc), \ 7845 &(dev)->mode_config.crtc_list, \ 7846 base.head) \ 7847 if (mask & (1 <<(intel_crtc)->pipe)) \ 7848 7849 void 7850 intel_modeset_check_state(struct drm_device *dev) 7851 { 7852 struct intel_crtc *crtc; 7853 struct intel_encoder *encoder; 7854 struct intel_connector *connector; 7855 7856 list_for_each_entry(connector, &dev->mode_config.connector_list, 7857 base.head) { 7858 /* This also checks the encoder/connector hw state with the 7859 * ->get_hw_state callbacks. */ 7860 intel_connector_check_state(connector); 7861 7862 WARN(&connector->new_encoder->base != connector->base.encoder, 7863 "connector's staged encoder doesn't match current encoder\n"); 7864 } 7865 7866 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7867 base.head) { 7868 bool enabled = false; 7869 bool active = false; 7870 enum i915_pipe pipe, tracked_pipe; 7871 7872 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 7873 encoder->base.base.id, 7874 drm_get_encoder_name(&encoder->base)); 7875 7876 WARN(&encoder->new_crtc->base != encoder->base.crtc, 7877 "encoder's stage crtc doesn't match current crtc\n"); 7878 WARN(encoder->connectors_active && !encoder->base.crtc, 7879 "encoder's active_connectors set, but no crtc\n"); 7880 7881 list_for_each_entry(connector, &dev->mode_config.connector_list, 7882 base.head) { 7883 if (connector->base.encoder != &encoder->base) 7884 continue; 7885 enabled = true; 7886 if (connector->base.dpms != DRM_MODE_DPMS_OFF) 7887 active = true; 7888 } 7889 WARN(!!encoder->base.crtc != enabled, 7890 "encoder's enabled state mismatch " 7891 "(expected %i, found %i)\n", 7892 !!encoder->base.crtc, enabled); 7893 WARN(active && !encoder->base.crtc, 7894 "active encoder with no crtc\n"); 7895 7896 WARN(encoder->connectors_active != active, 7897 "encoder's computed active state doesn't match tracked active state " 7898 "(expected %i, found %i)\n", active, encoder->connectors_active); 7899 7900 active = encoder->get_hw_state(encoder, &pipe); 7901 WARN(active != encoder->connectors_active, 7902 "encoder's hw state doesn't match sw tracking " 7903 "(expected %i, found %i)\n", 7904 encoder->connectors_active, active); 7905 7906 if (!encoder->base.crtc) 7907 continue; 7908 7909 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; 7910 WARN(active && pipe != tracked_pipe, 7911 "active encoder's pipe doesn't match" 7912 "(expected %i, found %i)\n", 7913 tracked_pipe, pipe); 7914 7915 } 7916 7917 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 7918 base.head) { 7919 bool enabled = false; 7920 bool active = false; 7921 7922 DRM_DEBUG_KMS("[CRTC:%d]\n", 7923 crtc->base.base.id); 7924 7925 WARN(crtc->active && !crtc->base.enabled, 7926 "active crtc, but not enabled in sw tracking\n"); 7927 7928 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 7929 base.head) { 7930 if (encoder->base.crtc != &crtc->base) 7931 continue; 7932 enabled = true; 7933 if (encoder->connectors_active) 7934 active = true; 7935 } 7936 WARN(active != crtc->active, 7937 "crtc's computed active state doesn't match tracked active state " 7938 "(expected %i, found %i)\n", active, crtc->active); 7939 WARN(enabled != crtc->base.enabled, 7940 "crtc's computed enabled state doesn't match tracked enabled state " 7941 "(expected %i, found %i)\n", enabled, crtc->base.enabled); 7942 7943 assert_pipe(dev->dev_private, crtc->pipe, crtc->active); 7944 } 7945 } 7946 7947 bool intel_set_mode(struct drm_crtc *crtc, 7948 struct drm_display_mode *mode, 7949 int x, int y, struct drm_framebuffer *fb) 7950 { 7951 struct drm_device *dev = crtc->dev; 7952 drm_i915_private_t *dev_priv = dev->dev_private; 7953 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; 7954 struct intel_crtc *intel_crtc; 7955 unsigned disable_pipes, prepare_pipes, modeset_pipes; 7956 bool ret = true; 7957 7958 intel_modeset_affected_pipes(crtc, &modeset_pipes, 7959 &prepare_pipes, &disable_pipes); 7960 7961 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", 7962 modeset_pipes, prepare_pipes, disable_pipes); 7963 7964 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 7965 intel_crtc_disable(&intel_crtc->base); 7966 7967 saved_hwmode = crtc->hwmode; 7968 saved_mode = crtc->mode; 7969 7970 /* Hack: Because we don't (yet) support global modeset on multiple 7971 * crtcs, we don't keep track of the new mode for more than one crtc. 7972 * Hence simply check whether any bit is set in modeset_pipes in all the 7973 * pieces of code that are not yet converted to deal with mutliple crtcs 7974 * changing their mode at the same time. */ 7975 adjusted_mode = NULL; 7976 if (modeset_pipes) { 7977 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); 7978 if (IS_ERR(adjusted_mode)) { 7979 return false; 7980 } 7981 } 7982 7983 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 7984 if (intel_crtc->base.enabled) 7985 dev_priv->display.crtc_disable(&intel_crtc->base); 7986 } 7987 7988 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 7989 * to set it here already despite that we pass it down the callchain. 7990 */ 7991 if (modeset_pipes) 7992 crtc->mode = *mode; 7993 7994 /* Only after disabling all output pipelines that will be changed can we 7995 * update the the output configuration. */ 7996 intel_modeset_update_state(dev, prepare_pipes); 7997 7998 if (dev_priv->display.modeset_global_resources) 7999 dev_priv->display.modeset_global_resources(dev); 8000 8001 /* Set up the DPLL and any encoders state that needs to adjust or depend 8002 * on the DPLL. 8003 */ 8004 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 8005 ret = !intel_crtc_mode_set(&intel_crtc->base, 8006 mode, adjusted_mode, 8007 x, y, fb); 8008 if (!ret) 8009 goto done; 8010 } 8011 8012 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 8013 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) 8014 dev_priv->display.crtc_enable(&intel_crtc->base); 8015 8016 if (modeset_pipes) { 8017 /* Store real post-adjustment hardware mode. */ 8018 crtc->hwmode = *adjusted_mode; 8019 8020 /* Calculate and store various constants which 8021 * are later needed by vblank and swap-completion 8022 * timestamping. They are derived from true hwmode. 8023 */ 8024 drm_calc_timestamping_constants(crtc); 8025 } 8026 8027 /* FIXME: add subpixel order */ 8028 done: 8029 drm_mode_destroy(dev, adjusted_mode); 8030 if (!ret && crtc->enabled) { 8031 crtc->hwmode = saved_hwmode; 8032 crtc->mode = saved_mode; 8033 } else { 8034 intel_modeset_check_state(dev); 8035 } 8036 8037 return ret; 8038 } 8039 8040 #undef for_each_intel_crtc_masked 8041 8042 static void intel_set_config_free(struct intel_set_config *config) 8043 { 8044 if (!config) 8045 return; 8046 8047 drm_free(config->save_connector_encoders, DRM_MEM_KMS); 8048 drm_free(config->save_encoder_crtcs, DRM_MEM_KMS); 8049 drm_free(config, DRM_MEM_KMS); 8050 } 8051 8052 static int intel_set_config_save_state(struct drm_device *dev, 8053 struct intel_set_config *config) 8054 { 8055 struct drm_encoder *encoder; 8056 struct drm_connector *connector; 8057 int count; 8058 8059 config->save_encoder_crtcs = 8060 kmalloc(dev->mode_config.num_encoder * 8061 sizeof(struct drm_crtc *), DRM_MEM_KMS, M_WAITOK | M_ZERO ); 8062 if (!config->save_encoder_crtcs) 8063 return -ENOMEM; 8064 8065 config->save_connector_encoders = 8066 kmalloc(dev->mode_config.num_connector * 8067 sizeof(struct drm_encoder *), DRM_MEM_KMS, M_WAITOK | M_ZERO ); 8068 if (!config->save_connector_encoders) 8069 return -ENOMEM; 8070 8071 /* Copy data. Note that driver private data is not affected. 8072 * Should anything bad happen only the expected state is 8073 * restored, not the drivers personal bookkeeping. 8074 */ 8075 count = 0; 8076 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 8077 config->save_encoder_crtcs[count++] = encoder->crtc; 8078 } 8079 8080 count = 0; 8081 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 8082 config->save_connector_encoders[count++] = connector->encoder; 8083 } 8084 8085 return 0; 8086 } 8087 8088 static void intel_set_config_restore_state(struct drm_device *dev, 8089 struct intel_set_config *config) 8090 { 8091 struct intel_encoder *encoder; 8092 struct intel_connector *connector; 8093 int count; 8094 8095 count = 0; 8096 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 8097 encoder->new_crtc = 8098 to_intel_crtc(config->save_encoder_crtcs[count++]); 8099 } 8100 8101 count = 0; 8102 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { 8103 connector->new_encoder = 8104 to_intel_encoder(config->save_connector_encoders[count++]); 8105 } 8106 } 8107 8108 static void 8109 intel_set_config_compute_mode_changes(struct drm_mode_set *set, 8110 struct intel_set_config *config) 8111 { 8112 8113 /* We should be able to check here if the fb has the same properties 8114 * and then just flip_or_move it */ 8115 if (set->crtc->fb != set->fb) { 8116 /* If we have no fb then treat it as a full mode set */ 8117 if (set->crtc->fb == NULL) { 8118 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); 8119 config->mode_changed = true; 8120 } else if (set->fb == NULL) { 8121 config->mode_changed = true; 8122 } else if (set->fb->depth != set->crtc->fb->depth) { 8123 config->mode_changed = true; 8124 } else if (set->fb->bits_per_pixel != 8125 set->crtc->fb->bits_per_pixel) { 8126 config->mode_changed = true; 8127 } else 8128 config->fb_changed = true; 8129 } 8130 8131 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) 8132 config->fb_changed = true; 8133 8134 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 8135 DRM_DEBUG_KMS("modes are different, full mode set\n"); 8136 drm_mode_debug_printmodeline(&set->crtc->mode); 8137 drm_mode_debug_printmodeline(set->mode); 8138 config->mode_changed = true; 8139 } 8140 } 8141 8142 static int 8143 intel_modeset_stage_output_state(struct drm_device *dev, 8144 struct drm_mode_set *set, 8145 struct intel_set_config *config) 8146 { 8147 struct drm_crtc *new_crtc; 8148 struct intel_connector *connector; 8149 struct intel_encoder *encoder; 8150 int count, ro; 8151 8152 /* The upper layers ensure that we either disabl a crtc or have a list 8153 * of connectors. For paranoia, double-check this. */ 8154 WARN_ON(!set->fb && (set->num_connectors != 0)); 8155 WARN_ON(set->fb && (set->num_connectors == 0)); 8156 8157 count = 0; 8158 list_for_each_entry(connector, &dev->mode_config.connector_list, 8159 base.head) { 8160 /* Otherwise traverse passed in connector list and get encoders 8161 * for them. */ 8162 for (ro = 0; ro < set->num_connectors; ro++) { 8163 if (set->connectors[ro] == &connector->base) { 8164 connector->new_encoder = connector->encoder; 8165 break; 8166 } 8167 } 8168 8169 /* If we disable the crtc, disable all its connectors. Also, if 8170 * the connector is on the changing crtc but not on the new 8171 * connector list, disable it. */ 8172 if ((!set->fb || ro == set->num_connectors) && 8173 connector->base.encoder && 8174 connector->base.encoder->crtc == set->crtc) { 8175 connector->new_encoder = NULL; 8176 8177 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 8178 connector->base.base.id, 8179 drm_get_connector_name(&connector->base)); 8180 } 8181 8182 8183 if (&connector->new_encoder->base != connector->base.encoder) { 8184 DRM_DEBUG_KMS("encoder changed, full mode switch\n"); 8185 config->mode_changed = true; 8186 } 8187 } 8188 /* connector->new_encoder is now updated for all connectors. */ 8189 8190 /* Update crtc of enabled connectors. */ 8191 count = 0; 8192 list_for_each_entry(connector, &dev->mode_config.connector_list, 8193 base.head) { 8194 if (!connector->new_encoder) 8195 continue; 8196 8197 new_crtc = connector->new_encoder->base.crtc; 8198 8199 for (ro = 0; ro < set->num_connectors; ro++) { 8200 if (set->connectors[ro] == &connector->base) 8201 new_crtc = set->crtc; 8202 } 8203 8204 /* Make sure the new CRTC will work with the encoder */ 8205 if (!intel_encoder_crtc_ok(&connector->new_encoder->base, 8206 new_crtc)) { 8207 return -EINVAL; 8208 } 8209 connector->encoder->new_crtc = to_intel_crtc(new_crtc); 8210 8211 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 8212 connector->base.base.id, 8213 drm_get_connector_name(&connector->base), 8214 new_crtc->base.id); 8215 } 8216 8217 /* Check for any encoders that needs to be disabled. */ 8218 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 8219 base.head) { 8220 list_for_each_entry(connector, 8221 &dev->mode_config.connector_list, 8222 base.head) { 8223 if (connector->new_encoder == encoder) { 8224 WARN_ON(!connector->new_encoder->new_crtc); 8225 8226 goto next_encoder; 8227 } 8228 } 8229 encoder->new_crtc = NULL; 8230 next_encoder: 8231 /* Only now check for crtc changes so we don't miss encoders 8232 * that will be disabled. */ 8233 if (&encoder->new_crtc->base != encoder->base.crtc) { 8234 DRM_DEBUG_KMS("crtc changed, full mode switch\n"); 8235 config->mode_changed = true; 8236 } 8237 } 8238 /* Now we've also updated encoder->new_crtc for all encoders. */ 8239 8240 return 0; 8241 } 8242 8243 static int intel_crtc_set_config(struct drm_mode_set *set) 8244 { 8245 struct drm_device *dev; 8246 struct drm_mode_set save_set; 8247 struct intel_set_config *config; 8248 int ret; 8249 8250 BUG_ON(!set); 8251 BUG_ON(!set->crtc); 8252 BUG_ON(!set->crtc->helper_private); 8253 8254 if (!set->mode) 8255 set->fb = NULL; 8256 8257 /* The fb helper likes to play gross jokes with ->mode_set_config. 8258 * Unfortunately the crtc helper doesn't do much at all for this case, 8259 * so we have to cope with this madness until the fb helper is fixed up. */ 8260 if (set->fb && set->num_connectors == 0) 8261 return 0; 8262 8263 if (set->fb) { 8264 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 8265 set->crtc->base.id, set->fb->base.id, 8266 (int)set->num_connectors, set->x, set->y); 8267 } else { 8268 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 8269 } 8270 8271 dev = set->crtc->dev; 8272 8273 ret = -ENOMEM; 8274 config = kmalloc(sizeof(*config), DRM_MEM_KMS, M_WAITOK | M_ZERO); 8275 if (!config) 8276 goto out_config; 8277 8278 ret = intel_set_config_save_state(dev, config); 8279 if (ret) 8280 goto out_config; 8281 8282 save_set.crtc = set->crtc; 8283 save_set.mode = &set->crtc->mode; 8284 save_set.x = set->crtc->x; 8285 save_set.y = set->crtc->y; 8286 save_set.fb = set->crtc->fb; 8287 8288 /* Compute whether we need a full modeset, only an fb base update or no 8289 * change at all. In the future we might also check whether only the 8290 * mode changed, e.g. for LVDS where we only change the panel fitter in 8291 * such cases. */ 8292 intel_set_config_compute_mode_changes(set, config); 8293 8294 ret = intel_modeset_stage_output_state(dev, set, config); 8295 if (ret) 8296 goto fail; 8297 8298 if (config->mode_changed) { 8299 if (set->mode) { 8300 DRM_DEBUG_KMS("attempting to set mode from" 8301 " userspace\n"); 8302 drm_mode_debug_printmodeline(set->mode); 8303 } 8304 8305 if (!intel_set_mode(set->crtc, set->mode, 8306 set->x, set->y, set->fb)) { 8307 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 8308 set->crtc->base.id); 8309 ret = -EINVAL; 8310 goto fail; 8311 } 8312 } else if (config->fb_changed) { 8313 ret = intel_pipe_set_base(set->crtc, 8314 set->x, set->y, set->fb); 8315 } 8316 8317 intel_set_config_free(config); 8318 8319 return 0; 8320 8321 fail: 8322 intel_set_config_restore_state(dev, config); 8323 8324 /* Try to restore the config */ 8325 if (config->mode_changed && 8326 !intel_set_mode(save_set.crtc, save_set.mode, 8327 save_set.x, save_set.y, save_set.fb)) 8328 DRM_ERROR("failed to restore config after modeset failure\n"); 8329 8330 out_config: 8331 intel_set_config_free(config); 8332 return ret; 8333 } 8334 8335 static const struct drm_crtc_funcs intel_crtc_funcs = { 8336 .cursor_set = intel_crtc_cursor_set, 8337 .cursor_move = intel_crtc_cursor_move, 8338 .gamma_set = intel_crtc_gamma_set, 8339 .set_config = intel_crtc_set_config, 8340 .destroy = intel_crtc_destroy, 8341 .page_flip = intel_crtc_page_flip, 8342 }; 8343 8344 static void intel_cpu_pll_init(struct drm_device *dev) 8345 { 8346 if (IS_HASWELL(dev)) 8347 intel_ddi_pll_init(dev); 8348 } 8349 8350 static void intel_pch_pll_init(struct drm_device *dev) 8351 { 8352 drm_i915_private_t *dev_priv = dev->dev_private; 8353 int i; 8354 8355 if (dev_priv->num_pch_pll == 0) { 8356 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n"); 8357 return; 8358 } 8359 8360 for (i = 0; i < dev_priv->num_pch_pll; i++) { 8361 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i); 8362 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i); 8363 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i); 8364 } 8365 } 8366 8367 static void intel_crtc_init(struct drm_device *dev, int pipe) 8368 { 8369 drm_i915_private_t *dev_priv = dev->dev_private; 8370 struct intel_crtc *intel_crtc; 8371 int i; 8372 8373 intel_crtc = kmalloc(sizeof(struct intel_crtc) + 8374 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 8375 DRM_MEM_KMS, M_WAITOK | M_ZERO); 8376 8377 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 8378 8379 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 8380 for (i = 0; i < 256; i++) { 8381 intel_crtc->lut_r[i] = i; 8382 intel_crtc->lut_g[i] = i; 8383 intel_crtc->lut_b[i] = i; 8384 } 8385 8386 /* Swap pipes & planes for FBC on pre-965 */ 8387 intel_crtc->pipe = pipe; 8388 intel_crtc->plane = pipe; 8389 intel_crtc->cpu_transcoder = pipe; 8390 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 8391 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 8392 intel_crtc->plane = !pipe; 8393 } 8394 8395 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 8396 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 8397 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 8398 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 8399 8400 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 8401 8402 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 8403 } 8404 8405 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 8406 struct drm_file *file) 8407 { 8408 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 8409 struct drm_mode_object *drmmode_obj; 8410 struct intel_crtc *crtc; 8411 8412 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 8413 return -ENODEV; 8414 8415 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 8416 DRM_MODE_OBJECT_CRTC); 8417 8418 if (!drmmode_obj) { 8419 DRM_ERROR("no such CRTC id\n"); 8420 return -EINVAL; 8421 } 8422 8423 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 8424 pipe_from_crtc_id->pipe = crtc->pipe; 8425 8426 return 0; 8427 } 8428 8429 static int intel_encoder_clones(struct intel_encoder *encoder) 8430 { 8431 struct drm_device *dev = encoder->base.dev; 8432 struct intel_encoder *source_encoder; 8433 int index_mask = 0; 8434 int entry = 0; 8435 8436 list_for_each_entry(source_encoder, 8437 &dev->mode_config.encoder_list, base.head) { 8438 8439 if (encoder == source_encoder) 8440 index_mask |= (1 << entry); 8441 8442 /* Intel hw has only one MUX where enocoders could be cloned. */ 8443 if (encoder->cloneable && source_encoder->cloneable) 8444 index_mask |= (1 << entry); 8445 8446 entry++; 8447 } 8448 8449 return index_mask; 8450 } 8451 8452 static bool has_edp_a(struct drm_device *dev) 8453 { 8454 struct drm_i915_private *dev_priv = dev->dev_private; 8455 8456 if (!IS_MOBILE(dev)) 8457 return false; 8458 8459 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 8460 return false; 8461 8462 if (IS_GEN5(dev) && 8463 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 8464 return false; 8465 8466 return true; 8467 } 8468 8469 static void intel_setup_outputs(struct drm_device *dev) 8470 { 8471 struct drm_i915_private *dev_priv = dev->dev_private; 8472 struct intel_encoder *encoder; 8473 bool dpd_is_edp = false; 8474 bool has_lvds; 8475 8476 has_lvds = intel_lvds_init(dev); 8477 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 8478 /* disable the panel fitter on everything but LVDS */ 8479 I915_WRITE(PFIT_CONTROL, 0); 8480 } 8481 8482 if (!(IS_HASWELL(dev) && 8483 (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) 8484 intel_crt_init(dev); 8485 8486 if (IS_HASWELL(dev)) { 8487 int found; 8488 8489 /* Haswell uses DDI functions to detect digital outputs */ 8490 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; 8491 /* DDI A only supports eDP */ 8492 if (found) 8493 intel_ddi_init(dev, PORT_A); 8494 8495 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 8496 * register */ 8497 found = I915_READ(SFUSE_STRAP); 8498 8499 if (found & SFUSE_STRAP_DDIB_DETECTED) 8500 intel_ddi_init(dev, PORT_B); 8501 if (found & SFUSE_STRAP_DDIC_DETECTED) 8502 intel_ddi_init(dev, PORT_C); 8503 if (found & SFUSE_STRAP_DDID_DETECTED) 8504 intel_ddi_init(dev, PORT_D); 8505 } else if (HAS_PCH_SPLIT(dev)) { 8506 int found; 8507 dpd_is_edp = intel_dpd_is_edp(dev); 8508 8509 if (has_edp_a(dev)) 8510 intel_dp_init(dev, DP_A, PORT_A); 8511 8512 if (I915_READ(HDMIB) & PORT_DETECTED) { 8513 /* PCH SDVOB multiplex with HDMIB */ 8514 found = intel_sdvo_init(dev, PCH_SDVOB, true); 8515 if (!found) 8516 intel_hdmi_init(dev, HDMIB, PORT_B); 8517 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 8518 intel_dp_init(dev, PCH_DP_B, PORT_B); 8519 } 8520 8521 if (I915_READ(HDMIC) & PORT_DETECTED) 8522 intel_hdmi_init(dev, HDMIC, PORT_C); 8523 8524 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) 8525 intel_hdmi_init(dev, HDMID, PORT_D); 8526 8527 if (I915_READ(PCH_DP_C) & DP_DETECTED) 8528 intel_dp_init(dev, PCH_DP_C, PORT_C); 8529 8530 if (I915_READ(PCH_DP_D) & DP_DETECTED) 8531 intel_dp_init(dev, PCH_DP_D, PORT_D); 8532 } else if (IS_VALLEYVIEW(dev)) { 8533 int found; 8534 8535 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 8536 if (I915_READ(DP_C) & DP_DETECTED) 8537 intel_dp_init(dev, DP_C, PORT_C); 8538 8539 if (I915_READ(SDVOB) & PORT_DETECTED) { 8540 /* SDVOB multiplex with HDMIB */ 8541 found = intel_sdvo_init(dev, SDVOB, true); 8542 if (!found) 8543 intel_hdmi_init(dev, SDVOB, PORT_B); 8544 if (!found && (I915_READ(DP_B) & DP_DETECTED)) 8545 intel_dp_init(dev, DP_B, PORT_B); 8546 } 8547 8548 if (I915_READ(SDVOC) & PORT_DETECTED) 8549 intel_hdmi_init(dev, SDVOC, PORT_C); 8550 8551 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8552 bool found = false; 8553 8554 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8555 DRM_DEBUG_KMS("probing SDVOB\n"); 8556 found = intel_sdvo_init(dev, SDVOB, true); 8557 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 8558 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 8559 intel_hdmi_init(dev, SDVOB, PORT_B); 8560 } 8561 8562 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 8563 DRM_DEBUG_KMS("probing DP_B\n"); 8564 intel_dp_init(dev, DP_B, PORT_B); 8565 } 8566 } 8567 8568 /* Before G4X SDVOC doesn't have its own detect register */ 8569 8570 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8571 DRM_DEBUG_KMS("probing SDVOC\n"); 8572 found = intel_sdvo_init(dev, SDVOC, false); 8573 } 8574 8575 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 8576 8577 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 8578 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 8579 intel_hdmi_init(dev, SDVOC, PORT_C); 8580 } 8581 if (SUPPORTS_INTEGRATED_DP(dev)) { 8582 DRM_DEBUG_KMS("probing DP_C\n"); 8583 intel_dp_init(dev, DP_C, PORT_C); 8584 } 8585 } 8586 8587 if (SUPPORTS_INTEGRATED_DP(dev) && 8588 (I915_READ(DP_D) & DP_DETECTED)) { 8589 DRM_DEBUG_KMS("probing DP_D\n"); 8590 intel_dp_init(dev, DP_D, PORT_D); 8591 } 8592 } else if (IS_GEN2(dev)) { 8593 #if 0 8594 intel_dvo_init(dev); 8595 #endif 8596 } 8597 8598 if (SUPPORTS_TV(dev)) 8599 intel_tv_init(dev); 8600 8601 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 8602 encoder->base.possible_crtcs = encoder->crtc_mask; 8603 encoder->base.possible_clones = 8604 intel_encoder_clones(encoder); 8605 } 8606 8607 intel_init_pch_refclk(dev); 8608 8609 drm_helper_move_panel_connectors_to_head(dev); 8610 } 8611 8612 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 8613 { 8614 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 8615 8616 drm_framebuffer_cleanup(fb); 8617 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 8618 8619 drm_free(intel_fb, DRM_MEM_KMS); 8620 } 8621 8622 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 8623 struct drm_file *file, 8624 unsigned int *handle) 8625 { 8626 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 8627 struct drm_i915_gem_object *obj = intel_fb->obj; 8628 8629 return drm_gem_handle_create(file, &obj->base, handle); 8630 } 8631 8632 static const struct drm_framebuffer_funcs intel_fb_funcs = { 8633 .destroy = intel_user_framebuffer_destroy, 8634 .create_handle = intel_user_framebuffer_create_handle, 8635 }; 8636 8637 int intel_framebuffer_init(struct drm_device *dev, 8638 struct intel_framebuffer *intel_fb, 8639 struct drm_mode_fb_cmd2 *mode_cmd, 8640 struct drm_i915_gem_object *obj) 8641 { 8642 int ret; 8643 8644 if (obj->tiling_mode == I915_TILING_Y) { 8645 DRM_DEBUG("hardware does not support tiling Y\n"); 8646 return -EINVAL; 8647 } 8648 8649 if (mode_cmd->pitches[0] & 63) { 8650 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", 8651 mode_cmd->pitches[0]); 8652 return -EINVAL; 8653 } 8654 8655 /* FIXME <= Gen4 stride limits are bit unclear */ 8656 if (mode_cmd->pitches[0] > 32768) { 8657 DRM_DEBUG("pitch (%d) must be at less than 32768\n", 8658 mode_cmd->pitches[0]); 8659 return -EINVAL; 8660 } 8661 8662 if (obj->tiling_mode != I915_TILING_NONE && 8663 mode_cmd->pitches[0] != obj->stride) { 8664 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 8665 mode_cmd->pitches[0], obj->stride); 8666 return -EINVAL; 8667 } 8668 8669 /* Reject formats not supported by any plane early. */ 8670 switch (mode_cmd->pixel_format) { 8671 case DRM_FORMAT_C8: 8672 case DRM_FORMAT_RGB565: 8673 case DRM_FORMAT_XRGB8888: 8674 case DRM_FORMAT_ARGB8888: 8675 break; 8676 case DRM_FORMAT_XRGB1555: 8677 case DRM_FORMAT_ARGB1555: 8678 if (INTEL_INFO(dev)->gen > 3) { 8679 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 8680 return -EINVAL; 8681 } 8682 break; 8683 case DRM_FORMAT_XBGR8888: 8684 case DRM_FORMAT_ABGR8888: 8685 case DRM_FORMAT_XRGB2101010: 8686 case DRM_FORMAT_ARGB2101010: 8687 case DRM_FORMAT_XBGR2101010: 8688 case DRM_FORMAT_ABGR2101010: 8689 if (INTEL_INFO(dev)->gen < 4) { 8690 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 8691 return -EINVAL; 8692 } 8693 break; 8694 case DRM_FORMAT_YUYV: 8695 case DRM_FORMAT_UYVY: 8696 case DRM_FORMAT_YVYU: 8697 case DRM_FORMAT_VYUY: 8698 if (INTEL_INFO(dev)->gen < 5) { 8699 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); 8700 return -EINVAL; 8701 } 8702 break; 8703 default: 8704 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); 8705 return -EINVAL; 8706 } 8707 8708 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 8709 if (mode_cmd->offsets[0] != 0) 8710 return -EINVAL; 8711 8712 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 8713 if (ret) { 8714 DRM_ERROR("framebuffer init failed %d\n", ret); 8715 return ret; 8716 } 8717 8718 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 8719 intel_fb->obj = obj; 8720 return 0; 8721 } 8722 8723 static struct drm_framebuffer * 8724 intel_user_framebuffer_create(struct drm_device *dev, 8725 struct drm_file *filp, 8726 struct drm_mode_fb_cmd2 *mode_cmd) 8727 { 8728 struct drm_i915_gem_object *obj; 8729 8730 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 8731 mode_cmd->handles[0])); 8732 if (&obj->base == NULL) 8733 return ERR_PTR(-ENOENT); 8734 8735 return intel_framebuffer_create(dev, mode_cmd, obj); 8736 } 8737 8738 static const struct drm_mode_config_funcs intel_mode_funcs = { 8739 .fb_create = intel_user_framebuffer_create, 8740 .output_poll_changed = intel_fb_output_poll_changed, 8741 }; 8742 8743 /* Set up chip specific display functions */ 8744 static void intel_init_display(struct drm_device *dev) 8745 { 8746 struct drm_i915_private *dev_priv = dev->dev_private; 8747 8748 /* We always want a DPMS function */ 8749 if (IS_HASWELL(dev)) { 8750 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8751 dev_priv->display.crtc_enable = haswell_crtc_enable; 8752 dev_priv->display.crtc_disable = haswell_crtc_disable; 8753 dev_priv->display.off = haswell_crtc_off; 8754 dev_priv->display.update_plane = ironlake_update_plane; 8755 } else if (HAS_PCH_SPLIT(dev)) { 8756 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 8757 dev_priv->display.crtc_enable = ironlake_crtc_enable; 8758 dev_priv->display.crtc_disable = ironlake_crtc_disable; 8759 dev_priv->display.off = ironlake_crtc_off; 8760 dev_priv->display.update_plane = ironlake_update_plane; 8761 } else { 8762 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 8763 dev_priv->display.crtc_enable = i9xx_crtc_enable; 8764 dev_priv->display.crtc_disable = i9xx_crtc_disable; 8765 dev_priv->display.off = i9xx_crtc_off; 8766 dev_priv->display.update_plane = i9xx_update_plane; 8767 } 8768 8769 /* Returns the core display clock speed */ 8770 if (IS_VALLEYVIEW(dev)) 8771 dev_priv->display.get_display_clock_speed = 8772 valleyview_get_display_clock_speed; 8773 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 8774 dev_priv->display.get_display_clock_speed = 8775 i945_get_display_clock_speed; 8776 else if (IS_I915G(dev)) 8777 dev_priv->display.get_display_clock_speed = 8778 i915_get_display_clock_speed; 8779 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 8780 dev_priv->display.get_display_clock_speed = 8781 i9xx_misc_get_display_clock_speed; 8782 else if (IS_I915GM(dev)) 8783 dev_priv->display.get_display_clock_speed = 8784 i915gm_get_display_clock_speed; 8785 else if (IS_I865G(dev)) 8786 dev_priv->display.get_display_clock_speed = 8787 i865_get_display_clock_speed; 8788 else if (IS_I85X(dev)) 8789 dev_priv->display.get_display_clock_speed = 8790 i855_get_display_clock_speed; 8791 else /* 852, 830 */ 8792 dev_priv->display.get_display_clock_speed = 8793 i830_get_display_clock_speed; 8794 8795 if (HAS_PCH_SPLIT(dev)) { 8796 if (IS_GEN5(dev)) { 8797 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 8798 dev_priv->display.write_eld = ironlake_write_eld; 8799 } else if (IS_GEN6(dev)) { 8800 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 8801 dev_priv->display.write_eld = ironlake_write_eld; 8802 } else if (IS_IVYBRIDGE(dev)) { 8803 /* FIXME: detect B0+ stepping and use auto training */ 8804 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 8805 dev_priv->display.write_eld = ironlake_write_eld; 8806 dev_priv->display.modeset_global_resources = 8807 ivb_modeset_global_resources; 8808 } else if (IS_HASWELL(dev)) { 8809 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8810 dev_priv->display.write_eld = haswell_write_eld; 8811 } else 8812 dev_priv->display.update_wm = NULL; 8813 } else if (IS_G4X(dev)) { 8814 dev_priv->display.write_eld = g4x_write_eld; 8815 } 8816 8817 /* Default just returns -ENODEV to indicate unsupported */ 8818 dev_priv->display.queue_flip = intel_default_queue_flip; 8819 8820 switch (INTEL_INFO(dev)->gen) { 8821 case 2: 8822 dev_priv->display.queue_flip = intel_gen2_queue_flip; 8823 break; 8824 8825 case 3: 8826 dev_priv->display.queue_flip = intel_gen3_queue_flip; 8827 break; 8828 8829 case 4: 8830 case 5: 8831 dev_priv->display.queue_flip = intel_gen4_queue_flip; 8832 break; 8833 8834 case 6: 8835 dev_priv->display.queue_flip = intel_gen6_queue_flip; 8836 break; 8837 case 7: 8838 dev_priv->display.queue_flip = intel_gen7_queue_flip; 8839 break; 8840 } 8841 } 8842 8843 /* 8844 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 8845 * resume, or other times. This quirk makes sure that's the case for 8846 * affected systems. 8847 */ 8848 static void quirk_pipea_force(struct drm_device *dev) 8849 { 8850 struct drm_i915_private *dev_priv = dev->dev_private; 8851 8852 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 8853 DRM_INFO("applying pipe a force quirk\n"); 8854 } 8855 8856 /* 8857 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 8858 */ 8859 static void quirk_ssc_force_disable(struct drm_device *dev) 8860 { 8861 struct drm_i915_private *dev_priv = dev->dev_private; 8862 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 8863 DRM_INFO("applying lvds SSC disable quirk\n"); 8864 } 8865 8866 /* 8867 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 8868 * brightness value 8869 */ 8870 static void quirk_invert_brightness(struct drm_device *dev) 8871 { 8872 struct drm_i915_private *dev_priv = dev->dev_private; 8873 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 8874 DRM_INFO("applying inverted panel brightness quirk\n"); 8875 } 8876 8877 struct intel_quirk { 8878 int device; 8879 int subsystem_vendor; 8880 int subsystem_device; 8881 void (*hook)(struct drm_device *dev); 8882 }; 8883 8884 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 8885 struct intel_dmi_quirk { 8886 void (*hook)(struct drm_device *dev); 8887 const struct dmi_system_id (*dmi_id_list)[]; 8888 }; 8889 8890 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 8891 { 8892 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 8893 return 1; 8894 } 8895 8896 static const struct intel_dmi_quirk intel_dmi_quirks[] = { 8897 { 8898 .dmi_id_list = &(const struct dmi_system_id[]) { 8899 { 8900 .callback = intel_dmi_reverse_brightness, 8901 .ident = "NCR Corporation", 8902 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 8903 DMI_MATCH(DMI_PRODUCT_NAME, ""), 8904 }, 8905 }, 8906 { } /* terminating entry */ 8907 }, 8908 .hook = quirk_invert_brightness, 8909 }, 8910 }; 8911 8912 #define PCI_ANY_ID (~0u) 8913 8914 static struct intel_quirk intel_quirks[] = { 8915 /* HP Mini needs pipe A force quirk (LP: #322104) */ 8916 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 8917 8918 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 8919 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 8920 8921 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 8922 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 8923 8924 /* 830/845 need to leave pipe A & dpll A up */ 8925 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 8926 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 8927 8928 /* Lenovo U160 cannot use SSC on LVDS */ 8929 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 8930 8931 /* Sony Vaio Y cannot use SSC on LVDS */ 8932 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 8933 8934 /* Acer Aspire 5734Z must invert backlight brightness */ 8935 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 8936 8937 /* Acer Aspire 4736Z */ 8938 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 8939 8940 /* Acer/eMachines G725 */ 8941 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 8942 8943 /* Acer/eMachines e725 */ 8944 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 8945 8946 /* Acer/Packard Bell NCL20 */ 8947 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 8948 }; 8949 8950 static void intel_init_quirks(struct drm_device *dev) 8951 { 8952 device_t d; 8953 int i; 8954 8955 d = dev->dev; 8956 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 8957 struct intel_quirk *q = &intel_quirks[i]; 8958 if (pci_get_device(d) == q->device && 8959 (pci_get_subvendor(d) == q->subsystem_vendor || 8960 q->subsystem_vendor == PCI_ANY_ID) && 8961 (pci_get_subdevice(d) == q->subsystem_device || 8962 q->subsystem_device == PCI_ANY_ID)) 8963 q->hook(dev); 8964 } 8965 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 8966 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 8967 intel_dmi_quirks[i].hook(dev); 8968 } 8969 } 8970 8971 /* Disable the VGA plane that we never use */ 8972 static void i915_disable_vga(struct drm_device *dev) 8973 { 8974 struct drm_i915_private *dev_priv = dev->dev_private; 8975 u8 sr1; 8976 u32 vga_reg; 8977 8978 if (HAS_PCH_SPLIT(dev)) 8979 vga_reg = CPU_VGACNTRL; 8980 else 8981 vga_reg = VGACNTRL; 8982 8983 #if 0 8984 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 8985 #endif 8986 outb(VGA_SR_INDEX, 1); 8987 sr1 = inb(VGA_SR_DATA); 8988 outb(VGA_SR_DATA, sr1 | 1 << 5); 8989 #if 0 8990 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 8991 #endif 8992 udelay(300); 8993 8994 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 8995 POSTING_READ(vga_reg); 8996 } 8997 8998 void intel_modeset_init_hw(struct drm_device *dev) 8999 { 9000 /* We attempt to init the necessary power wells early in the initialization 9001 * time, so the subsystems that expect power to be enabled can work. 9002 */ 9003 intel_init_power_wells(dev); 9004 9005 intel_prepare_ddi(dev); 9006 9007 intel_init_clock_gating(dev); 9008 9009 DRM_LOCK(dev); 9010 intel_enable_gt_powersave(dev); 9011 DRM_UNLOCK(dev); 9012 } 9013 9014 void intel_modeset_init(struct drm_device *dev) 9015 { 9016 struct drm_i915_private *dev_priv = dev->dev_private; 9017 int i, ret; 9018 9019 drm_mode_config_init(dev); 9020 9021 dev->mode_config.min_width = 0; 9022 dev->mode_config.min_height = 0; 9023 9024 dev->mode_config.preferred_depth = 24; 9025 dev->mode_config.prefer_shadow = 1; 9026 9027 dev->mode_config.funcs = &intel_mode_funcs; 9028 9029 intel_init_quirks(dev); 9030 9031 intel_init_pm(dev); 9032 9033 intel_init_display(dev); 9034 9035 if (IS_GEN2(dev)) { 9036 dev->mode_config.max_width = 2048; 9037 dev->mode_config.max_height = 2048; 9038 } else if (IS_GEN3(dev)) { 9039 dev->mode_config.max_width = 4096; 9040 dev->mode_config.max_height = 4096; 9041 } else { 9042 dev->mode_config.max_width = 8192; 9043 dev->mode_config.max_height = 8192; 9044 } 9045 dev->mode_config.fb_base = dev->agp->base; 9046 9047 DRM_DEBUG_KMS("%d display pipe%s available.\n", 9048 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 9049 9050 for (i = 0; i < dev_priv->num_pipe; i++) { 9051 intel_crtc_init(dev, i); 9052 ret = intel_plane_init(dev, i); 9053 if (ret) 9054 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 9055 } 9056 9057 intel_cpu_pll_init(dev); 9058 intel_pch_pll_init(dev); 9059 9060 /* Just disable it once at startup */ 9061 i915_disable_vga(dev); 9062 intel_setup_outputs(dev); 9063 } 9064 9065 static void 9066 intel_connector_break_all_links(struct intel_connector *connector) 9067 { 9068 connector->base.dpms = DRM_MODE_DPMS_OFF; 9069 connector->base.encoder = NULL; 9070 connector->encoder->connectors_active = false; 9071 connector->encoder->base.crtc = NULL; 9072 } 9073 9074 static void intel_enable_pipe_a(struct drm_device *dev) 9075 { 9076 struct intel_connector *connector; 9077 struct drm_connector *crt = NULL; 9078 struct intel_load_detect_pipe load_detect_temp; 9079 9080 /* We can't just switch on the pipe A, we need to set things up with a 9081 * proper mode and output configuration. As a gross hack, enable pipe A 9082 * by enabling the load detect pipe once. */ 9083 list_for_each_entry(connector, 9084 &dev->mode_config.connector_list, 9085 base.head) { 9086 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 9087 crt = &connector->base; 9088 break; 9089 } 9090 } 9091 9092 if (!crt) 9093 return; 9094 9095 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) 9096 intel_release_load_detect_pipe(crt, &load_detect_temp); 9097 9098 9099 } 9100 9101 static bool 9102 intel_check_plane_mapping(struct intel_crtc *crtc) 9103 { 9104 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 9105 u32 reg, val; 9106 9107 if (dev_priv->num_pipe == 1) 9108 return true; 9109 9110 reg = DSPCNTR(!crtc->plane); 9111 val = I915_READ(reg); 9112 9113 if ((val & DISPLAY_PLANE_ENABLE) && 9114 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 9115 return false; 9116 9117 return true; 9118 } 9119 9120 static void intel_sanitize_crtc(struct intel_crtc *crtc) 9121 { 9122 struct drm_device *dev = crtc->base.dev; 9123 struct drm_i915_private *dev_priv = dev->dev_private; 9124 u32 reg; 9125 9126 /* Clear any frame start delays used for debugging left by the BIOS */ 9127 reg = PIPECONF(crtc->cpu_transcoder); 9128 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 9129 9130 /* We need to sanitize the plane -> pipe mapping first because this will 9131 * disable the crtc (and hence change the state) if it is wrong. Note 9132 * that gen4+ has a fixed plane -> pipe mapping. */ 9133 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 9134 struct intel_connector *connector; 9135 bool plane; 9136 9137 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 9138 crtc->base.base.id); 9139 9140 /* Pipe has the wrong plane attached and the plane is active. 9141 * Temporarily change the plane mapping and disable everything 9142 * ... */ 9143 plane = crtc->plane; 9144 crtc->plane = !plane; 9145 dev_priv->display.crtc_disable(&crtc->base); 9146 crtc->plane = plane; 9147 9148 /* ... and break all links. */ 9149 list_for_each_entry(connector, &dev->mode_config.connector_list, 9150 base.head) { 9151 if (connector->encoder->base.crtc != &crtc->base) 9152 continue; 9153 9154 intel_connector_break_all_links(connector); 9155 } 9156 9157 WARN_ON(crtc->active); 9158 crtc->base.enabled = false; 9159 } 9160 9161 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 9162 crtc->pipe == PIPE_A && !crtc->active) { 9163 /* BIOS forgot to enable pipe A, this mostly happens after 9164 * resume. Force-enable the pipe to fix this, the update_dpms 9165 * call below we restore the pipe to the right state, but leave 9166 * the required bits on. */ 9167 intel_enable_pipe_a(dev); 9168 } 9169 9170 /* Adjust the state of the output pipe according to whether we 9171 * have active connectors/encoders. */ 9172 intel_crtc_update_dpms(&crtc->base); 9173 9174 if (crtc->active != crtc->base.enabled) { 9175 struct intel_encoder *encoder; 9176 9177 /* This can happen either due to bugs in the get_hw_state 9178 * functions or because the pipe is force-enabled due to the 9179 * pipe A quirk. */ 9180 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 9181 crtc->base.base.id, 9182 crtc->base.enabled ? "enabled" : "disabled", 9183 crtc->active ? "enabled" : "disabled"); 9184 9185 crtc->base.enabled = crtc->active; 9186 9187 /* Because we only establish the connector -> encoder -> 9188 * crtc links if something is active, this means the 9189 * crtc is now deactivated. Break the links. connector 9190 * -> encoder links are only establish when things are 9191 * actually up, hence no need to break them. */ 9192 WARN_ON(crtc->active); 9193 9194 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 9195 WARN_ON(encoder->connectors_active); 9196 encoder->base.crtc = NULL; 9197 } 9198 } 9199 } 9200 9201 static void intel_sanitize_encoder(struct intel_encoder *encoder) 9202 { 9203 struct intel_connector *connector; 9204 struct drm_device *dev = encoder->base.dev; 9205 9206 /* We need to check both for a crtc link (meaning that the 9207 * encoder is active and trying to read from a pipe) and the 9208 * pipe itself being active. */ 9209 bool has_active_crtc = encoder->base.crtc && 9210 to_intel_crtc(encoder->base.crtc)->active; 9211 9212 if (encoder->connectors_active && !has_active_crtc) { 9213 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 9214 encoder->base.base.id, 9215 drm_get_encoder_name(&encoder->base)); 9216 9217 /* Connector is active, but has no active pipe. This is 9218 * fallout from our resume register restoring. Disable 9219 * the encoder manually again. */ 9220 if (encoder->base.crtc) { 9221 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 9222 encoder->base.base.id, 9223 drm_get_encoder_name(&encoder->base)); 9224 encoder->disable(encoder); 9225 } 9226 9227 /* Inconsistent output/port/pipe state happens presumably due to 9228 * a bug in one of the get_hw_state functions. Or someplace else 9229 * in our code, like the register restore mess on resume. Clamp 9230 * things to off as a safer default. */ 9231 list_for_each_entry(connector, 9232 &dev->mode_config.connector_list, 9233 base.head) { 9234 if (connector->encoder != encoder) 9235 continue; 9236 9237 intel_connector_break_all_links(connector); 9238 } 9239 } 9240 /* Enabled encoders without active connectors will be fixed in 9241 * the crtc fixup. */ 9242 } 9243 9244 static void i915_redisable_vga(struct drm_device *dev) 9245 { 9246 struct drm_i915_private *dev_priv = dev->dev_private; 9247 u32 vga_reg; 9248 9249 if (HAS_PCH_SPLIT(dev)) 9250 vga_reg = CPU_VGACNTRL; 9251 else 9252 vga_reg = VGACNTRL; 9253 9254 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 9255 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 9256 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 9257 POSTING_READ(vga_reg); 9258 } 9259 } 9260 9261 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9262 * and i915 state tracking structures. */ 9263 void intel_modeset_setup_hw_state(struct drm_device *dev, 9264 bool force_restore) 9265 { 9266 struct drm_i915_private *dev_priv = dev->dev_private; 9267 enum i915_pipe pipe; 9268 u32 tmp; 9269 struct intel_crtc *crtc; 9270 struct intel_encoder *encoder; 9271 struct intel_connector *connector; 9272 9273 if (IS_HASWELL(dev)) { 9274 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 9275 9276 if (tmp & TRANS_DDI_FUNC_ENABLE) { 9277 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 9278 case TRANS_DDI_EDP_INPUT_A_ON: 9279 case TRANS_DDI_EDP_INPUT_A_ONOFF: 9280 pipe = PIPE_A; 9281 break; 9282 case TRANS_DDI_EDP_INPUT_B_ONOFF: 9283 pipe = PIPE_B; 9284 break; 9285 case TRANS_DDI_EDP_INPUT_C_ONOFF: 9286 pipe = PIPE_C; 9287 break; 9288 } 9289 9290 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9291 crtc->cpu_transcoder = TRANSCODER_EDP; 9292 9293 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", 9294 pipe_name(pipe)); 9295 } 9296 } 9297 9298 for_each_pipe(pipe) { 9299 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9300 9301 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); 9302 if (tmp & PIPECONF_ENABLE) 9303 crtc->active = true; 9304 else 9305 crtc->active = false; 9306 9307 crtc->base.enabled = crtc->active; 9308 9309 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 9310 crtc->base.base.id, 9311 crtc->active ? "enabled" : "disabled"); 9312 } 9313 9314 if (IS_HASWELL(dev)) 9315 intel_ddi_setup_hw_pll_state(dev); 9316 9317 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9318 base.head) { 9319 pipe = 0; 9320 9321 if (encoder->get_hw_state(encoder, &pipe)) { 9322 encoder->base.crtc = 9323 dev_priv->pipe_to_crtc_mapping[pipe]; 9324 } else { 9325 encoder->base.crtc = NULL; 9326 } 9327 9328 encoder->connectors_active = false; 9329 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", 9330 encoder->base.base.id, 9331 drm_get_encoder_name(&encoder->base), 9332 encoder->base.crtc ? "enabled" : "disabled", 9333 pipe); 9334 } 9335 9336 list_for_each_entry(connector, &dev->mode_config.connector_list, 9337 base.head) { 9338 if (connector->get_hw_state(connector)) { 9339 connector->base.dpms = DRM_MODE_DPMS_ON; 9340 connector->encoder->connectors_active = true; 9341 connector->base.encoder = &connector->encoder->base; 9342 } else { 9343 connector->base.dpms = DRM_MODE_DPMS_OFF; 9344 connector->base.encoder = NULL; 9345 } 9346 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 9347 connector->base.base.id, 9348 drm_get_connector_name(&connector->base), 9349 connector->base.encoder ? "enabled" : "disabled"); 9350 } 9351 9352 /* HW state is read out, now we need to sanitize this mess. */ 9353 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9354 base.head) { 9355 intel_sanitize_encoder(encoder); 9356 } 9357 9358 for_each_pipe(pipe) { 9359 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9360 intel_sanitize_crtc(crtc); 9361 } 9362 9363 if (force_restore) { 9364 for_each_pipe(pipe) { 9365 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9366 intel_set_mode(&crtc->base, &crtc->base.mode, 9367 crtc->base.x, crtc->base.y, crtc->base.fb); 9368 } 9369 9370 i915_redisable_vga(dev); 9371 } else { 9372 intel_modeset_update_staged_output_state(dev); 9373 } 9374 9375 intel_modeset_check_state(dev); 9376 9377 drm_mode_config_reset(dev); 9378 } 9379 9380 void intel_modeset_gem_init(struct drm_device *dev) 9381 { 9382 intel_modeset_init_hw(dev); 9383 9384 intel_setup_overlay(dev); 9385 9386 lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE); 9387 intel_modeset_setup_hw_state(dev, false); 9388 lockmgr(&dev->mode_config.mutex, LK_RELEASE); 9389 } 9390 9391 void intel_modeset_cleanup(struct drm_device *dev) 9392 { 9393 struct drm_i915_private *dev_priv = dev->dev_private; 9394 struct drm_crtc *crtc; 9395 struct intel_crtc *intel_crtc; 9396 9397 drm_kms_helper_poll_fini(dev); 9398 DRM_LOCK(dev); 9399 9400 #if 0 9401 intel_unregister_dsm_handler(); 9402 #endif 9403 9404 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 9405 /* Skip inactive CRTCs */ 9406 if (!crtc->fb) 9407 continue; 9408 9409 intel_crtc = to_intel_crtc(crtc); 9410 intel_increase_pllclock(crtc); 9411 } 9412 9413 intel_disable_fbc(dev); 9414 9415 intel_disable_gt_powersave(dev); 9416 9417 ironlake_teardown_rc6(dev); 9418 9419 if (IS_VALLEYVIEW(dev)) 9420 vlv_init_dpio(dev); 9421 9422 DRM_UNLOCK(dev); 9423 9424 /* Disable the irq before mode object teardown, for the irq might 9425 * enqueue unpin/hotplug work. */ 9426 drm_irq_uninstall(dev); 9427 cancel_work_sync(&dev_priv->hotplug_work); 9428 cancel_work_sync(&dev_priv->rps.work); 9429 9430 /* flush any delayed tasks or pending work */ 9431 flush_scheduled_work(); 9432 9433 /* destroy backlight, if any, before the connectors */ 9434 intel_panel_destroy_backlight(dev); 9435 9436 drm_mode_config_cleanup(dev); 9437 } 9438 9439 /* 9440 * Return which encoder is currently attached for connector. 9441 */ 9442 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 9443 { 9444 return &intel_attached_encoder(connector)->base; 9445 } 9446 9447 void intel_connector_attach_encoder(struct intel_connector *connector, 9448 struct intel_encoder *encoder) 9449 { 9450 connector->encoder = encoder; 9451 drm_mode_connector_attach_encoder(&connector->base, 9452 &encoder->base); 9453 } 9454 9455 /* 9456 * set vga decode state - true == enable VGA decode 9457 */ 9458 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 9459 { 9460 struct drm_i915_private *dev_priv = dev->dev_private; 9461 u16 gmch_ctrl; 9462 9463 gmch_ctrl = pci_read_config(dev_priv->bridge_dev, INTEL_GMCH_CTRL, 2); 9464 if (state) 9465 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 9466 else 9467 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 9468 pci_write_config(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2); 9469 return 0; 9470 } 9471 9472 #ifdef CONFIG_DEBUG_FS 9473 #include <linux/seq_file.h> 9474 9475 struct intel_display_error_state { 9476 struct intel_cursor_error_state { 9477 u32 control; 9478 u32 position; 9479 u32 base; 9480 u32 size; 9481 } cursor[I915_MAX_PIPES]; 9482 9483 struct intel_pipe_error_state { 9484 u32 conf; 9485 u32 source; 9486 9487 u32 htotal; 9488 u32 hblank; 9489 u32 hsync; 9490 u32 vtotal; 9491 u32 vblank; 9492 u32 vsync; 9493 } pipe[I915_MAX_PIPES]; 9494 9495 struct intel_plane_error_state { 9496 u32 control; 9497 u32 stride; 9498 u32 size; 9499 u32 pos; 9500 u32 addr; 9501 u32 surface; 9502 u32 tile_offset; 9503 } plane[I915_MAX_PIPES]; 9504 }; 9505 9506 struct intel_display_error_state * 9507 intel_display_capture_error_state(struct drm_device *dev) 9508 { 9509 drm_i915_private_t *dev_priv = dev->dev_private; 9510 struct intel_display_error_state *error; 9511 enum transcoder cpu_transcoder; 9512 int i; 9513 9514 error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT); 9515 if (error == NULL) 9516 return NULL; 9517 9518 for_each_pipe(i) { 9519 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); 9520 9521 error->cursor[i].control = I915_READ(CURCNTR(i)); 9522 error->cursor[i].position = I915_READ(CURPOS(i)); 9523 error->cursor[i].base = I915_READ(CURBASE(i)); 9524 9525 error->plane[i].control = I915_READ(DSPCNTR(i)); 9526 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 9527 error->plane[i].size = I915_READ(DSPSIZE(i)); 9528 error->plane[i].pos = I915_READ(DSPPOS(i)); 9529 error->plane[i].addr = I915_READ(DSPADDR(i)); 9530 if (INTEL_INFO(dev)->gen >= 4) { 9531 error->plane[i].surface = I915_READ(DSPSURF(i)); 9532 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 9533 } 9534 9535 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 9536 error->pipe[i].source = I915_READ(PIPESRC(i)); 9537 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 9538 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 9539 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 9540 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 9541 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 9542 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 9543 } 9544 9545 return error; 9546 } 9547 9548 void 9549 intel_display_print_error_state(struct sbuf *m, 9550 struct drm_device *dev, 9551 struct intel_display_error_state *error) 9552 { 9553 int i; 9554 9555 for (i = 0; i < 2; i++) { 9556 sbuf_printf(m, "Pipe [%d]:\n", i); 9557 sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf); 9558 sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source); 9559 sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 9560 sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 9561 sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 9562 sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 9563 sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 9564 sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 9565 9566 sbuf_printf(m, "Plane [%d]:\n", i); 9567 sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control); 9568 sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 9569 sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size); 9570 sbuf_printf(m, " POS: %08x\n", error->plane[i].pos); 9571 sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr); 9572 if (INTEL_INFO(dev)->gen >= 4) { 9573 sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface); 9574 sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 9575 } 9576 9577 sbuf_printf(m, "Cursor [%d]:\n", i); 9578 sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control); 9579 sbuf_printf(m, " POS: %08x\n", error->cursor[i].position); 9580 sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base); 9581 } 9582 } 9583 #endif 9584