1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $
 */

#include <ddb/ddb.h>
#include <sys/limits.h>

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>

/* True when any encoder on the crtc in scope is eDP (expects a local `crtc`). */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

/* One candidate DPLL configuration: raw divisors plus the clocks derived
 * from them by intel_clock()/pineview_clock(). */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

/* Inclusive [min, max] range for a single divisor. */
typedef struct {
	int min, max;
} intel_range_t;

/* p2 post-divider selection: p2_slow below dot_limit, p2_fast otherwise. */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output DPLL divisor limits plus the search routine
 * used to find divisors for a target dot clock. */
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int
			   target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

/* FDI link frequency in units of 100MHz: read back from the
 * BIOS-programmed FDI PLL on gen5, fixed 2.7GHz otherwise. */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min =
	5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits.
 */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

/* Pick the Ironlake/Sandybridge divisor-limit table matching the output
 * type on @crtc (and, for LVDS, the current channel mode and refclk). */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Pick the G4x divisor-limit table matching the output type on @crtc. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Top-level dispatch: choose the divisor-limit table for @crtc's hardware
 * generation and attached output type. */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit =
			&intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

/* Derive m, p, vco and dot clock from the raw divisors in @clock. */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

/* Bail out of the enclosing validity check; debug print is compiled out. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/* Exhaustive divisor search (pre-G4x): minimize |dot - target|. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only shrinks below target once a valid candidate is found */
	return (err != target);
}

/* Divisor search for G4x/ILK: prefer the smallest n, accepting any
 * candidate within roughly 0.585% of the target dot clock. */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/* Fixed divisor sets for the two Ironlake DP/eDP link rates. */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 =
		8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (_intel_wait_for(dev,
	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
	    50, 1, "915vbl"))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (_intel_wait_for(dev,
		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
		    1, "915pip"))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			DELAY(5000);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

/* Human-readable form of an enable bit, for assertion messages. */
static const char *state_string(bool enabled)
{
	return enabled ?
	    "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
		    ("transcoder %d PLL not enabled\n", pipe));

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PCH PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	if (cur_state != state)
		kprintf("FDI TX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	if (cur_state != state)
		kprintf("FDI RX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_TX_PLL_ENABLE))
		kprintf("FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_RX_PLL_ENABLE))
		kprintf("FDI RX PLL assertion failure, should be active but is disabled\n");
}

/* Warn if @pipe's panel-power-sequencer registers are write-protected
 * while the panel drives this pipe (would block DPLL writes). */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	if (panel_pipe == pipe && locked)
		kprintf("panel assertion failure, pipe %c regs locked\n",
		    pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	if (cur_state != state)
		kprintf("pipe %c assertion failure (expected %s, current %s)\n",
		    pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	if (cur_state != state)
		kprintf("plane %c assertion failure, (expected %s, current %s)\n",
		    plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		if ((val & DISPLAY_PLANE_ENABLE) != 0)
			kprintf("plane %c assertion failure, should be disabled but not\n",
			    plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
			kprintf("plane %c assertion failure, \
should be off on pipe %c but is still active\n", 992 plane_name(i), pipe_name(pipe)); 993 } 994 } 995 996 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 997 { 998 u32 val; 999 bool enabled; 1000 1001 val = I915_READ(PCH_DREF_CONTROL); 1002 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1003 DREF_SUPERSPREAD_SOURCE_MASK)); 1004 if (!enabled) 1005 kprintf("PCH refclk assertion failure, should be active but is disabled\n"); 1006 } 1007 1008 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, 1009 enum i915_pipe pipe) 1010 { 1011 int reg; 1012 u32 val; 1013 bool enabled; 1014 1015 reg = TRANSCONF(pipe); 1016 val = I915_READ(reg); 1017 enabled = !!(val & TRANS_ENABLE); 1018 if (enabled) 1019 kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n", 1020 pipe_name(pipe)); 1021 } 1022 1023 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1024 enum i915_pipe pipe, u32 val) 1025 { 1026 if ((val & PORT_ENABLE) == 0) 1027 return false; 1028 1029 if (HAS_PCH_CPT(dev_priv->dev)) { 1030 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1031 return false; 1032 } else { 1033 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) 1034 return false; 1035 } 1036 return true; 1037 } 1038 1039 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1040 enum i915_pipe pipe, u32 val) 1041 { 1042 if ((val & LVDS_PORT_EN) == 0) 1043 return false; 1044 1045 if (HAS_PCH_CPT(dev_priv->dev)) { 1046 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1047 return false; 1048 } else { 1049 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1050 return false; 1051 } 1052 return true; 1053 } 1054 1055 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1056 enum i915_pipe pipe, u32 val) 1057 { 1058 if ((val & ADPA_DAC_ENABLE) == 0) 1059 return false; 1060 if (HAS_PCH_CPT(dev_priv->dev)) { 1061 if ((val & PORT_TRANS_SEL_MASK) != 
PORT_TRANS_SEL_CPT(pipe)) 1062 return false; 1063 } else { 1064 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1065 return false; 1066 } 1067 return true; 1068 } 1069 1070 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1071 enum i915_pipe pipe, u32 port_sel, u32 val) 1072 { 1073 if ((val & DP_PORT_EN) == 0) 1074 return false; 1075 1076 if (HAS_PCH_CPT(dev_priv->dev)) { 1077 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1078 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1079 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1080 return false; 1081 } else { 1082 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1083 return false; 1084 } 1085 return true; 1086 } 1087 1088 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1089 enum i915_pipe pipe, int reg, u32 port_sel) 1090 { 1091 u32 val = I915_READ(reg); 1092 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) 1093 kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1094 reg, pipe_name(pipe)); 1095 } 1096 1097 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1098 enum i915_pipe pipe, int reg) 1099 { 1100 u32 val = I915_READ(reg); 1101 if (hdmi_pipe_enabled(dev_priv, val, pipe)) 1102 kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1103 reg, pipe_name(pipe)); 1104 } 1105 1106 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1107 enum i915_pipe pipe) 1108 { 1109 int reg; 1110 u32 val; 1111 1112 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1113 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1114 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1115 1116 reg = PCH_ADPA; 1117 val = I915_READ(reg); 1118 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1119 kprintf("PCH VGA enabled on transcoder %c, should be disabled\n", 1120 pipe_name(pipe)); 1121 1122 reg = PCH_LVDS; 1123 val = I915_READ(reg); 1124 if 
(lvds_pipe_enabled(dev_priv, val, pipe)) 1125 kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n", 1126 pipe_name(pipe)); 1127 1128 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1129 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1130 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1131 } 1132 1133 /** 1134 * intel_enable_pll - enable a PLL 1135 * @dev_priv: i915 private structure 1136 * @pipe: pipe PLL to enable 1137 * 1138 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to 1139 * make sure the PLL reg is writable first though, since the panel write 1140 * protect mechanism may be enabled. 1141 * 1142 * Note! This is for pre-ILK only. 1143 */ 1144 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1145 { 1146 int reg; 1147 u32 val; 1148 1149 /* No really, not for ILK+ */ 1150 KASSERT(dev_priv->info->gen < 5, ("Wrong device gen")); 1151 1152 /* PLL is protected by panel, make sure we can write it */ 1153 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1154 assert_panel_unlocked(dev_priv, pipe); 1155 1156 reg = DPLL(pipe); 1157 val = I915_READ(reg); 1158 val |= DPLL_VCO_ENABLE; 1159 1160 /* We do this three times for luck */ 1161 I915_WRITE(reg, val); 1162 POSTING_READ(reg); 1163 DELAY(150); /* wait for warmup */ 1164 I915_WRITE(reg, val); 1165 POSTING_READ(reg); 1166 DELAY(150); /* wait for warmup */ 1167 I915_WRITE(reg, val); 1168 POSTING_READ(reg); 1169 DELAY(150); /* wait for warmup */ 1170 } 1171 1172 /** 1173 * intel_disable_pll - disable a PLL 1174 * @dev_priv: i915 private structure 1175 * @pipe: pipe PLL to disable 1176 * 1177 * Disable the PLL for @pipe, making sure the pipe is off first. 1178 * 1179 * Note! This is for pre-ILK only. 
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* only pipes 0 and 1 have a PCH PLL here */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);	/* PLL lock time */
}

/*
 * Disable the PCH PLL for @pipe, but only if PCH_DPLL_SEL shows no
 * transcoder is still selecting this PLL.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
	    pll_sel = TRANSC_DPLL_ENABLE;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* still selected by a transcoder: leave the PLL running */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}

/*
 * Enable the PCH transcoder for @pipe.  Requires the PCH PLL and both
 * FDI TX/RX to already be running, since the transcoder is clocked by
 * the PLL and fed by FDI.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);


	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	/*
	 * NOTE(review): the nested if/else below is brace-less; the
	 * inner else pairs with the inner if, the outer else with the
	 * outer if — which is the intended pairing here.
	 */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
	    100, 1, "915trc"))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

/*
 * Disable the PCH transcoder for @pipe after verifying FDI and all PCH
 * ports have stopped using it, then wait for the state bit to clear.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (_intel_wait_for(dev_priv->dev,
	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
	    1, "915trd"))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
    bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
    enum plane plane)
{
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
1435 */ 1436 static void intel_enable_plane(struct drm_i915_private *dev_priv, 1437 enum plane plane, enum i915_pipe pipe) 1438 { 1439 int reg; 1440 u32 val; 1441 1442 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1443 assert_pipe_enabled(dev_priv, pipe); 1444 1445 reg = DSPCNTR(plane); 1446 val = I915_READ(reg); 1447 if (val & DISPLAY_PLANE_ENABLE) 1448 return; 1449 1450 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1451 intel_flush_display_plane(dev_priv, plane); 1452 intel_wait_for_vblank(dev_priv->dev, pipe); 1453 } 1454 1455 /** 1456 * intel_disable_plane - disable a display plane 1457 * @dev_priv: i915 private structure 1458 * @plane: plane to disable 1459 * @pipe: pipe consuming the data 1460 * 1461 * Disable @plane; should be an independent operation. 1462 */ 1463 static void intel_disable_plane(struct drm_i915_private *dev_priv, 1464 enum plane plane, enum i915_pipe pipe) 1465 { 1466 int reg; 1467 u32 val; 1468 1469 reg = DSPCNTR(plane); 1470 val = I915_READ(reg); 1471 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1472 return; 1473 1474 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1475 intel_flush_display_plane(dev_priv, plane); 1476 intel_wait_for_vblank(dev_priv->dev, pipe); 1477 } 1478 1479 static void disable_pch_dp(struct drm_i915_private *dev_priv, 1480 enum i915_pipe pipe, int reg, u32 port_sel) 1481 { 1482 u32 val = I915_READ(reg); 1483 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { 1484 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); 1485 I915_WRITE(reg, val & ~DP_PORT_EN); 1486 } 1487 } 1488 1489 static void disable_pch_hdmi(struct drm_i915_private *dev_priv, 1490 enum i915_pipe pipe, int reg) 1491 { 1492 u32 val = I915_READ(reg); 1493 if (hdmi_pipe_enabled(dev_priv, val, pipe)) { 1494 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1495 reg, pipe); 1496 I915_WRITE(reg, val & ~PORT_ENABLE); 1497 } 1498 } 1499 1500 /* Disable any ports connected to this transcoder */ 1501 static void 
intel_disable_pch_ports(struct drm_i915_private *dev_priv, 1502 enum i915_pipe pipe) 1503 { 1504 u32 reg, val; 1505 1506 val = I915_READ(PCH_PP_CONTROL); 1507 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); 1508 1509 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1510 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1511 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1512 1513 reg = PCH_ADPA; 1514 val = I915_READ(reg); 1515 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1516 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1517 1518 reg = PCH_LVDS; 1519 val = I915_READ(reg); 1520 if (lvds_pipe_enabled(dev_priv, val, pipe)) { 1521 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); 1522 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1523 POSTING_READ(reg); 1524 DELAY(100); 1525 } 1526 1527 disable_pch_hdmi(dev_priv, pipe, HDMIB); 1528 disable_pch_hdmi(dev_priv, pipe, HDMIC); 1529 disable_pch_hdmi(dev_priv, pipe, HDMID); 1530 } 1531 1532 int 1533 intel_pin_and_fence_fb_obj(struct drm_device *dev, 1534 struct drm_i915_gem_object *obj, 1535 struct intel_ring_buffer *pipelined) 1536 { 1537 struct drm_i915_private *dev_priv = dev->dev_private; 1538 u32 alignment; 1539 int ret; 1540 1541 alignment = 0; /* shut gcc */ 1542 switch (obj->tiling_mode) { 1543 case I915_TILING_NONE: 1544 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1545 alignment = 128 * 1024; 1546 else if (INTEL_INFO(dev)->gen >= 4) 1547 alignment = 4 * 1024; 1548 else 1549 alignment = 64 * 1024; 1550 break; 1551 case I915_TILING_X: 1552 /* pin() will align the object as required by fence */ 1553 alignment = 0; 1554 break; 1555 case I915_TILING_Y: 1556 /* FIXME: Is this true? 
 */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		KASSERT(0, ("Wrong tiling for fb obj"));
	}

	/* pin must not be interruptible: unwinding a half-done pin is unsafe */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out.  Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}

/* Undo intel_pin_and_fence_fb_obj(): drop the fence pin, then the GTT pin. */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}

/*
 * Program the i9xx-style display plane registers (format, stride,
 * base/surface address, pan offset) for @fb at pan position (@x, @y).
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
	    Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: surface base plus separate tile/linear pan offsets */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return (0);
}

/*
 * Program the Ironlake+ display plane registers for @fb at pan
 * position (@x, @y).  Stricter than i9xx: depth must match bpp, and
 * trickle feed must be disabled.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
    struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth != 16) {
			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
			return -EINVAL;
		}

		dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		else {
			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
			    fb->depth);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
	    Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	return dev_priv->display.update_plane(crtc, fb, x, y);
}

/*
 * Wait until @old_fb's backing object has no pending page flips and no
 * outstanding GPU rendering, so it is safe to unpin it.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* sleep until all queued flips on this object have completed */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
	    atomic_load_acq_int(&obj->pending_flip) != 0) {
		lksleep(&obj->pending_flip, &dev->event_lock,
		    0, "915flp", 0);
	}
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;
	return ret;
}

/*
 * Switch the CRTC's scan-out to crtc->fb at pan (@x, @y): pin the new
 * framebuffer, flip the plane registers, then wait out and unpin the
 * old one.  Also mirrors the pan position into the SAREA.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
#if 0
	struct drm_i915_master_private *master_priv;
#else
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	DRM_LOCK(dev);
	ret = intel_pin_and_fence_fb_obj(dev,
	    to_intel_framebuffer(crtc->fb)->obj,
	    NULL);
	if (ret != 0) {
		DRM_UNLOCK(dev);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb)
		intel_finish_fb(old_fb);

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
	    LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		DRM_UNLOCK(dev);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		/* let the new surface latch before unpinning the old one */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	DRM_UNLOCK(dev);

#if 0
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}
#else

	if (!dev_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		dev_priv->sarea_priv->planeB_x = x;
		dev_priv->sarea_priv->planeB_y = y;
	} else {
		dev_priv->sarea_priv->planeA_x = x;
		dev_priv->sarea_priv->planeA_y = y;
	}
#endif

	return 0;
}

/*
 * Select the eDP PLL frequency (160 vs 270 MHz) for @clock on
 * Ironlake, applying the documented 160 MHz register workaround.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
I915_WRITE(0x46034, temp | (1 << 24)); 1920 } else { 1921 dpa_ctl |= DP_PLL_FREQ_270MHZ; 1922 } 1923 I915_WRITE(DP_A, dpa_ctl); 1924 1925 POSTING_READ(DP_A); 1926 DELAY(500); 1927 } 1928 1929 static void intel_fdi_normal_train(struct drm_crtc *crtc) 1930 { 1931 struct drm_device *dev = crtc->dev; 1932 struct drm_i915_private *dev_priv = dev->dev_private; 1933 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1934 int pipe = intel_crtc->pipe; 1935 u32 reg, temp; 1936 1937 /* enable normal train */ 1938 reg = FDI_TX_CTL(pipe); 1939 temp = I915_READ(reg); 1940 if (IS_IVYBRIDGE(dev)) { 1941 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 1942 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 1943 } else { 1944 temp &= ~FDI_LINK_TRAIN_NONE; 1945 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 1946 } 1947 I915_WRITE(reg, temp); 1948 1949 reg = FDI_RX_CTL(pipe); 1950 temp = I915_READ(reg); 1951 if (HAS_PCH_CPT(dev)) { 1952 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 1953 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 1954 } else { 1955 temp &= ~FDI_LINK_TRAIN_NONE; 1956 temp |= FDI_LINK_TRAIN_NONE; 1957 } 1958 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 1959 1960 /* wait one idle pattern time */ 1961 POSTING_READ(reg); 1962 DELAY(1000); 1963 1964 /* IVB wants error correction enabled */ 1965 if (IS_IVYBRIDGE(dev)) 1966 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 1967 FDI_FE_ERRC_ENABLE); 1968 } 1969 1970 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) 1971 { 1972 struct drm_i915_private *dev_priv = dev->dev_private; 1973 u32 flags = I915_READ(SOUTH_CHICKEN1); 1974 1975 flags |= FDI_PHASE_SYNC_OVR(pipe); 1976 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ 1977 flags |= FDI_PHASE_SYNC_EN(pipe); 1978 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ 1979 POSTING_READ(SOUTH_CHICKEN1); 1980 } 1981 1982 /* The FDI link training functions for ILK/Ibexpeak. 
*/ 1983 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 1984 { 1985 struct drm_device *dev = crtc->dev; 1986 struct drm_i915_private *dev_priv = dev->dev_private; 1987 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1988 int pipe = intel_crtc->pipe; 1989 int plane = intel_crtc->plane; 1990 u32 reg, temp, tries; 1991 1992 /* FDI needs bits from pipe & plane first */ 1993 assert_pipe_enabled(dev_priv, pipe); 1994 assert_plane_enabled(dev_priv, plane); 1995 1996 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 1997 for train result */ 1998 reg = FDI_RX_IMR(pipe); 1999 temp = I915_READ(reg); 2000 temp &= ~FDI_RX_SYMBOL_LOCK; 2001 temp &= ~FDI_RX_BIT_LOCK; 2002 I915_WRITE(reg, temp); 2003 I915_READ(reg); 2004 DELAY(150); 2005 2006 /* enable CPU FDI TX and PCH FDI RX */ 2007 reg = FDI_TX_CTL(pipe); 2008 temp = I915_READ(reg); 2009 temp &= ~(7 << 19); 2010 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2011 temp &= ~FDI_LINK_TRAIN_NONE; 2012 temp |= FDI_LINK_TRAIN_PATTERN_1; 2013 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2014 2015 reg = FDI_RX_CTL(pipe); 2016 temp = I915_READ(reg); 2017 temp &= ~FDI_LINK_TRAIN_NONE; 2018 temp |= FDI_LINK_TRAIN_PATTERN_1; 2019 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2020 2021 POSTING_READ(reg); 2022 DELAY(150); 2023 2024 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2025 if (HAS_PCH_IBX(dev)) { 2026 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2027 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2028 FDI_RX_PHASE_SYNC_POINTER_EN); 2029 } 2030 2031 reg = FDI_RX_IIR(pipe); 2032 for (tries = 0; tries < 5; tries++) { 2033 temp = I915_READ(reg); 2034 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2035 2036 if ((temp & FDI_RX_BIT_LOCK)) { 2037 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2038 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2039 break; 2040 } 2041 } 2042 if (tries == 5) 2043 DRM_ERROR("FDI train 1 fail!\n"); 2044 2045 /* Train 2 */ 2046 reg = FDI_TX_CTL(pipe); 2047 temp = 
I915_READ(reg); 2048 temp &= ~FDI_LINK_TRAIN_NONE; 2049 temp |= FDI_LINK_TRAIN_PATTERN_2; 2050 I915_WRITE(reg, temp); 2051 2052 reg = FDI_RX_CTL(pipe); 2053 temp = I915_READ(reg); 2054 temp &= ~FDI_LINK_TRAIN_NONE; 2055 temp |= FDI_LINK_TRAIN_PATTERN_2; 2056 I915_WRITE(reg, temp); 2057 2058 POSTING_READ(reg); 2059 DELAY(150); 2060 2061 reg = FDI_RX_IIR(pipe); 2062 for (tries = 0; tries < 5; tries++) { 2063 temp = I915_READ(reg); 2064 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2065 2066 if (temp & FDI_RX_SYMBOL_LOCK) { 2067 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2068 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2069 break; 2070 } 2071 } 2072 if (tries == 5) 2073 DRM_ERROR("FDI train 2 fail!\n"); 2074 2075 DRM_DEBUG_KMS("FDI train done\n"); 2076 2077 } 2078 2079 static const int snb_b_fdi_train_param[] = { 2080 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2081 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2082 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2083 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 2084 }; 2085 2086 /* The FDI link training functions for SNB/Cougarpoint. 
*/ 2087 static void gen6_fdi_link_train(struct drm_crtc *crtc) 2088 { 2089 struct drm_device *dev = crtc->dev; 2090 struct drm_i915_private *dev_priv = dev->dev_private; 2091 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2092 int pipe = intel_crtc->pipe; 2093 u32 reg, temp, i; 2094 2095 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2096 for train result */ 2097 reg = FDI_RX_IMR(pipe); 2098 temp = I915_READ(reg); 2099 temp &= ~FDI_RX_SYMBOL_LOCK; 2100 temp &= ~FDI_RX_BIT_LOCK; 2101 I915_WRITE(reg, temp); 2102 2103 POSTING_READ(reg); 2104 DELAY(150); 2105 2106 /* enable CPU FDI TX and PCH FDI RX */ 2107 reg = FDI_TX_CTL(pipe); 2108 temp = I915_READ(reg); 2109 temp &= ~(7 << 19); 2110 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2111 temp &= ~FDI_LINK_TRAIN_NONE; 2112 temp |= FDI_LINK_TRAIN_PATTERN_1; 2113 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2114 /* SNB-B */ 2115 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2116 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2117 2118 reg = FDI_RX_CTL(pipe); 2119 temp = I915_READ(reg); 2120 if (HAS_PCH_CPT(dev)) { 2121 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2122 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2123 } else { 2124 temp &= ~FDI_LINK_TRAIN_NONE; 2125 temp |= FDI_LINK_TRAIN_PATTERN_1; 2126 } 2127 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2128 2129 POSTING_READ(reg); 2130 DELAY(150); 2131 2132 if (HAS_PCH_CPT(dev)) 2133 cpt_phase_pointer_enable(dev, pipe); 2134 2135 for (i = 0; i < 4; i++) { 2136 reg = FDI_TX_CTL(pipe); 2137 temp = I915_READ(reg); 2138 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2139 temp |= snb_b_fdi_train_param[i]; 2140 I915_WRITE(reg, temp); 2141 2142 POSTING_READ(reg); 2143 DELAY(500); 2144 2145 reg = FDI_RX_IIR(pipe); 2146 temp = I915_READ(reg); 2147 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2148 2149 if (temp & FDI_RX_BIT_LOCK) { 2150 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2151 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2152 break; 2153 } 2154 } 2155 if (i == 4) 2156 DRM_ERROR("FDI train 1 fail!\n"); 2157 
    /* Train 2: switch to training pattern 2 and poll for symbol lock. */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    if (IS_GEN6(dev)) {
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        /* SNB-B */
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    }
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    } else {
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    }
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    DELAY(150);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        DELAY(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            /* Ack the symbol-lock interrupt bit. */
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i;

    /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
       for train result */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    DELAY(150);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~(7 << 19);                         /* lane-count field at bits 21:19 */
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    POSTING_READ(reg);
    DELAY(150);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        DELAY(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        /* Re-read the IIR once in case bit lock landed after the first read. */
        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 1 fail!\n");

    /* Train 2 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    POSTING_READ(reg);
    DELAY(150);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        POSTING_READ(reg);
        DELAY(500);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
            break;
        }
    }
    if (i == 4)
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");
}

/*
 * Turn on the PCH FDI RX PLL and the (always-on for Ironlake) CPU FDI
 * TX PLL for @crtc's pipe, switching the RX side from Rawclk to PCDclk.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp;

    /* Write the TU size bits so error detection works */
    I915_WRITE(FDI_RX_TUSIZE1(pipe),
               I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

    /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~((0x7 << 19) | (0x7 << 16));       /* clear lane-count and BPC fields */
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    /* mirror the pipe's BPC setting into FDI RX */
    temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
    I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

    POSTING_READ(reg);
    DELAY(200);

    /* Switch from Rawclk to PCDclk */
    temp = I915_READ(reg);
    I915_WRITE(reg, temp | FDI_PCDCLK);

    POSTING_READ(reg);
    DELAY(200);

    /* Enable CPU FDI TX PLL, always on for Ironlake */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    if ((temp & FDI_TX_PLL_ENABLE) == 0) {
        I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

        POSTING_READ(reg);
        DELAY(100);
    }
}

/*
 * Disable the CPT FDI phase-sync pointer for @pipe via SOUTH_CHICKEN1;
 * the enable bit and the override bit are cleared in two separate writes.
 */
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 flags = I915_READ(SOUTH_CHICKEN1);

    flags &= ~(FDI_PHASE_SYNC_EN(pipe));
    I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable...
*/ 2369 flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); 2370 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ 2371 POSTING_READ(SOUTH_CHICKEN1); 2372 } 2373 2374 static void ironlake_fdi_disable(struct drm_crtc *crtc) 2375 { 2376 struct drm_device *dev = crtc->dev; 2377 struct drm_i915_private *dev_priv = dev->dev_private; 2378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2379 int pipe = intel_crtc->pipe; 2380 u32 reg, temp; 2381 2382 /* disable CPU FDI tx and PCH FDI rx */ 2383 reg = FDI_TX_CTL(pipe); 2384 temp = I915_READ(reg); 2385 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 2386 POSTING_READ(reg); 2387 2388 reg = FDI_RX_CTL(pipe); 2389 temp = I915_READ(reg); 2390 temp &= ~(0x7 << 16); 2391 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2392 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2393 2394 POSTING_READ(reg); 2395 DELAY(100); 2396 2397 /* Ironlake workaround, disable clock pointer after downing FDI */ 2398 if (HAS_PCH_IBX(dev)) { 2399 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2400 I915_WRITE(FDI_RX_CHICKEN(pipe), 2401 I915_READ(FDI_RX_CHICKEN(pipe) & 2402 ~FDI_RX_PHASE_SYNC_POINTER_EN)); 2403 } else if (HAS_PCH_CPT(dev)) { 2404 cpt_phase_pointer_disable(dev, pipe); 2405 } 2406 2407 /* still set train pattern 1 */ 2408 reg = FDI_TX_CTL(pipe); 2409 temp = I915_READ(reg); 2410 temp &= ~FDI_LINK_TRAIN_NONE; 2411 temp |= FDI_LINK_TRAIN_PATTERN_1; 2412 I915_WRITE(reg, temp); 2413 2414 reg = FDI_RX_CTL(pipe); 2415 temp = I915_READ(reg); 2416 if (HAS_PCH_CPT(dev)) { 2417 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2418 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2419 } else { 2420 temp &= ~FDI_LINK_TRAIN_NONE; 2421 temp |= FDI_LINK_TRAIN_PATTERN_1; 2422 } 2423 /* BPC in FDI rx is consistent with that in PIPECONF */ 2424 temp &= ~(0x07 << 16); 2425 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2426 I915_WRITE(reg, temp); 2427 2428 POSTING_READ(reg); 2429 DELAY(100); 2430 } 2431 2432 /* 2433 * When we disable a pipe, we need to 
clear any pending scanline wait events
 * to avoid hanging the ring, which we assume we are waiting on.
 */
static void intel_clear_scanline_wait(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    u32 tmp;

    if (IS_GEN2(dev))
        /* Can't break the hang on i8xx */
        return;

    ring = LP_RING(dev_priv);
    tmp = I915_READ_CTL(ring);
    if (tmp & RING_WAIT)
        /* Writing the control value back clears the pending wait. */
        I915_WRITE_CTL(ring, tmp);
}

/*
 * Sleep until all page flips pending against @crtc's framebuffer object
 * have completed (pending_flip drops to zero), under the device event lock.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
    struct drm_i915_gem_object *obj;
    struct drm_i915_private *dev_priv;
    struct drm_device *dev;

    if (crtc->fb == NULL)
        return;

    obj = to_intel_framebuffer(crtc->fb)->obj;
    dev = crtc->dev;
    dev_priv = dev->dev_private;
    lockmgr(&dev->event_lock, LK_EXCLUSIVE);
    while (atomic_load_acq_int(&obj->pending_flip) != 0)
        lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
    lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * Return true when every encoder on @crtc drives a PCH port (i.e. the
 * pipe needs the PCH transcoder/FDI path); a non-PCH eDP encoder means
 * the pipe is driven directly from the CPU.
 */
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_mode_config *mode_config = &dev->mode_config;
    struct intel_encoder *encoder;

    /*
     * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
     * must be driven by its own crtc; no sharing is possible.
     */
    list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
        if (encoder->base.crtc != crtc)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_EDP:
            if (!intel_encoder_is_pch_edp(&encoder->base))
                return false;
            continue;
        }
    }

    return true;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, transc_sel;

    /* For PCH output, training FDI link */
    dev_priv->display.fdi_link_train(crtc);

    intel_enable_pch_pll(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* Transcoder C can be fed by either PLL A or PLL B. */
        transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
            TRANSC_DPLLB_SEL;

        /* Be sure PCH DPLL SEL is set */
        temp = I915_READ(PCH_DPLL_SEL);
        if (pipe == 0) {
            temp &= ~(TRANSA_DPLLB_SEL);
            temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
        } else if (pipe == 1) {
            temp &= ~(TRANSB_DPLLB_SEL);
            temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
        } else if (pipe == 2) {
            temp &= ~(TRANSC_DPLLB_SEL);
            temp |= (TRANSC_DPLL_ENABLE | transc_sel);
        }
        I915_WRITE(PCH_DPLL_SEL, temp);
    }

    /* set transcoder timing, panel must allow it */
    assert_panel_unlocked(dev_priv, pipe);
    I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
    I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
    I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));

    I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
    I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
    I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
    I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));

    intel_fdi_normal_train(crtc);

    /* For PCH DP, enable TRANS_DP_CTL */
    if (HAS_PCH_CPT(dev) &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
         intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
        u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_PORT_SEL_MASK |
                  TRANS_DP_SYNC_MASK |
                  TRANS_DP_BPC_MASK);
        temp |= (TRANS_DP_OUTPUT_ENABLE |
                 TRANS_DP_ENH_FRAMING);
        temp |= bpc << 9; /* same format but at 11:9 */

        if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
            temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
        if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
            temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

        switch (intel_trans_dp_port_sel(crtc)) {
        case PCH_DP_B:
            temp |= TRANS_DP_PORT_SEL_B;
            break;
        case PCH_DP_C:
            temp |= TRANS_DP_PORT_SEL_C;
            break;
        case PCH_DP_D:
            temp |= TRANS_DP_PORT_SEL_D;
            break;
        default:
            DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
            temp |= TRANS_DP_PORT_SEL_B;
            break;
        }

        I915_WRITE(reg, temp);
    }

    intel_enable_transcoder(dev_priv, pipe);
}

/*
 * Verify a CPT mode set took by watching the pipe's scanline counter
 * (PIPEDSL) move; if it is stuck, pulse TRANS_AUTOTRAIN_GEN_STALL_DIS in
 * TRANS_CHICKEN2 and re-check.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
    u32 temp;

    temp = I915_READ(dslreg);
    DELAY(500);
    if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
        /* Without this, mode sets may fail silently on FDI */
        I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
        DELAY(250);
        I915_WRITE(tc2reg, 0);
        if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
            "915cp2"))
            DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
    }
}

/*
 * Bring up @crtc on an Ironlake-class (PCH split) device: FDI, panel
 * fitter, pipe, plane, and — for PCH ports — the PCH side.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 temp;
    bool is_pch_port;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
    }

    is_pch_port = intel_crtc_driving_pch(crtc);

    if (is_pch_port)
        ironlake_fdi_pll_enable(crtc);
    else
        ironlake_fdi_disable(crtc);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are
broken, 2642 * e.g. x201. 2643 */ 2644 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 2645 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 2646 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2647 } 2648 2649 intel_enable_pipe(dev_priv, pipe, is_pch_port); 2650 intel_enable_plane(dev_priv, plane, pipe); 2651 2652 if (is_pch_port) 2653 ironlake_pch_enable(crtc); 2654 2655 intel_crtc_load_lut(crtc); 2656 2657 DRM_LOCK(dev); 2658 intel_update_fbc(dev); 2659 DRM_UNLOCK(dev); 2660 2661 intel_crtc_update_cursor(crtc, true); 2662 } 2663 2664 static void ironlake_crtc_disable(struct drm_crtc *crtc) 2665 { 2666 struct drm_device *dev = crtc->dev; 2667 struct drm_i915_private *dev_priv = dev->dev_private; 2668 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2669 int pipe = intel_crtc->pipe; 2670 int plane = intel_crtc->plane; 2671 u32 reg, temp; 2672 2673 if (!intel_crtc->active) 2674 return; 2675 2676 intel_crtc_wait_for_pending_flips(crtc); 2677 drm_vblank_off(dev, pipe); 2678 intel_crtc_update_cursor(crtc, false); 2679 2680 intel_disable_plane(dev_priv, plane, pipe); 2681 2682 if (dev_priv->cfb_plane == plane) 2683 intel_disable_fbc(dev); 2684 2685 intel_disable_pipe(dev_priv, pipe); 2686 2687 /* Disable PF */ 2688 I915_WRITE(PF_CTL(pipe), 0); 2689 I915_WRITE(PF_WIN_SZ(pipe), 0); 2690 2691 ironlake_fdi_disable(crtc); 2692 2693 /* This is a horrible layering violation; we should be doing this in 2694 * the connector/encoder ->prepare instead, but we don't always have 2695 * enough information there about the config to know whether it will 2696 * actually be necessary or just cause undesired flicker. 
2697 */ 2698 intel_disable_pch_ports(dev_priv, pipe); 2699 2700 intel_disable_transcoder(dev_priv, pipe); 2701 2702 if (HAS_PCH_CPT(dev)) { 2703 /* disable TRANS_DP_CTL */ 2704 reg = TRANS_DP_CTL(pipe); 2705 temp = I915_READ(reg); 2706 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2707 temp |= TRANS_DP_PORT_SEL_NONE; 2708 I915_WRITE(reg, temp); 2709 2710 /* disable DPLL_SEL */ 2711 temp = I915_READ(PCH_DPLL_SEL); 2712 switch (pipe) { 2713 case 0: 2714 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2715 break; 2716 case 1: 2717 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2718 break; 2719 case 2: 2720 /* C shares PLL A or B */ 2721 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 2722 break; 2723 default: 2724 KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */ 2725 } 2726 I915_WRITE(PCH_DPLL_SEL, temp); 2727 } 2728 2729 /* disable PCH DPLL */ 2730 if (!intel_crtc->no_pll) 2731 intel_disable_pch_pll(dev_priv, pipe); 2732 2733 /* Switch from PCDclk to Rawclk */ 2734 reg = FDI_RX_CTL(pipe); 2735 temp = I915_READ(reg); 2736 I915_WRITE(reg, temp & ~FDI_PCDCLK); 2737 2738 /* Disable CPU FDI TX PLL */ 2739 reg = FDI_TX_CTL(pipe); 2740 temp = I915_READ(reg); 2741 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 2742 2743 POSTING_READ(reg); 2744 DELAY(100); 2745 2746 reg = FDI_RX_CTL(pipe); 2747 temp = I915_READ(reg); 2748 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2749 2750 /* Wait for the clocks to turn off. 
*/
    POSTING_READ(reg);
    DELAY(100);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    DRM_LOCK(dev);
    intel_update_fbc(dev);
    intel_clear_scanline_wait(dev);
    DRM_UNLOCK(dev);
}

/*
 * Map a DPMS mode onto the Ironlake CRTC enable/disable paths: anything
 * but OFF powers the pipe up.
 */
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    /* XXX: When our outputs are all unaware of DPMS modes other than off
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
     */
    switch (mode) {
    case DRM_MODE_DPMS_ON:
    case DRM_MODE_DPMS_STANDBY:
    case DRM_MODE_DPMS_SUSPEND:
        DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
        ironlake_crtc_enable(crtc);
        break;

    case DRM_MODE_DPMS_OFF:
        DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
        ironlake_crtc_disable(crtc);
        break;
    }
}

/*
 * Switch the overlay off when the CRTC goes down; switching it back on
 * is left to userspace (non-interruptible while the overlay is torn down).
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
    if (!enable && intel_crtc->overlay) {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        DRM_LOCK(dev);
        dev_priv->mm.interruptible = false;
        (void) intel_overlay_switch_off(intel_crtc->overlay);
        dev_priv->mm.interruptible = true;
        DRM_UNLOCK(dev);
    }

    /* Let userspace switch the overlay on again. In most cases userspace
     * has to recompute where to put it anyway.
     */
}

/* Bring up @crtc on a pre-PCH (gen2-4) device: PLL, pipe, plane. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (intel_crtc->active)
        return;

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);
    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
    intel_crtc_update_cursor(crtc, true);
}

/* Shut down @crtc on a pre-PCH device, in reverse order of enable. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)
        return;

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
    intel_crtc_update_cursor(crtc, false);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);
    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);
    intel_clear_scanline_wait(dev);
}

/* Map a DPMS mode onto the i9xx CRTC enable/disable paths. */
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
    /* XXX: When our outputs are all unaware of DPMS modes other than off
     * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
     */
    switch (mode) {
    case DRM_MODE_DPMS_ON:
    case DRM_MODE_DPMS_STANDBY:
    case DRM_MODE_DPMS_SUSPEND:
        i9xx_crtc_enable(crtc);
        break;
    case DRM_MODE_DPMS_OFF:
        i9xx_crtc_disable(crtc);
        break;
    }
}

/**
 * Sets the power management mode of the pipe and plane.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
    struct drm_i915_master_private *master_priv;
#endif
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    bool enabled;

    if (intel_crtc->dpms_mode == mode)
        return;

    intel_crtc->dpms_mode = mode;

    /* Dispatch to the platform-specific dpms hook (ironlake/i9xx). */
    dev_priv->display.dpms(crtc, mode);

    /* The #if 0 paths are the Linux master_priv SAREA code; this port
     * keeps the SAREA on dev_priv instead. */
#if 0
    if (!dev->primary->master)
        return;

    master_priv = dev->primary->master->driver_priv;
    if (!master_priv->sarea_priv)
        return;
#else
    if (!dev_priv->sarea_priv)
        return;
#endif

    enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

    switch (pipe) {
    case 0:
#if 0
        master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
        master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
#else
        dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
        dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
#endif
        break;
    case 1:
#if 0
        master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
        master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
#else
        dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
        dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
#endif
        break;
    default:
        DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
        break;
    }
}

/* Fully disable @crtc, flushing pending GPU work against its fb first. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
    struct drm_device *dev = crtc->dev;

    /* Flush any pending WAITs before we disable the pipe. Note that
     * we need to drop the struct_mutex in order to acquire it again
     * during the lowlevel dpms routines around a couple of the
     * operations. It does not look trivial nor desirable to move
     * that locking higher. So instead we leave a window for the
     * submission of further commands on the fb before we can actually
     * disable it. This race with userspace exists anyway, and we can
     * only rely on the pipe being disabled by userspace after it
     * receives the hotplug notification and has flushed any pending
     * batches.
     */
    if (crtc->fb) {
        DRM_LOCK(dev);
        intel_finish_fb(crtc->fb);
        DRM_UNLOCK(dev);
    }

    crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
    assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
    assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

    if (crtc->fb) {
        DRM_LOCK(dev);
        intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
        DRM_UNLOCK(dev);
    }
}

/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
    i9xx_crtc_disable(crtc);
}

static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
    i9xx_crtc_enable(crtc);
}

static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
    ironlake_crtc_disable(crtc);
}

static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
    ironlake_crtc_enable(crtc);
}

void intel_encoder_prepare(struct drm_encoder *encoder)
{
    struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
    /* lvds has its own version of prepare see intel_lvds_prepare */
    encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void intel_encoder_commit(struct drm_encoder *encoder)
{
    struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
    struct drm_device *dev = encoder->dev;
    struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
    struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

    /* lvds has its own version of commit see intel_lvds_commit */
    encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

    if (HAS_PCH_CPT(dev))
        intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}

/* Tear down an encoder and free its intel wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
    struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

    drm_encoder_cleanup(encoder);
    drm_free(intel_encoder, DRM_MEM_KMS);
}

/*
 * Validate/adjust @mode for the hardware: reject modes that exceed the
 * fixed 2.7 GHz FDI link budget on PCH-split parts, and fill in the crtc
 * timing fields unless LVDS already set them.
 */
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
                                  const struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
{
    struct drm_device *dev = crtc->dev;

    if (HAS_PCH_SPLIT(dev)) {
        /* FDI link clock is fixed at 2.7G */
        if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
            return false;
    }

    /* All interlaced capable intel hw wants timings in frames.
Note though
     * that intel_lvds_mode_fixup does some funny tricks with the crtc
     * timings, so we need to be careful not to clobber these.*/
    if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
        drm_mode_set_crtcinfo(adjusted_mode, 0);

    return true;
}

/* Per-platform core display clock, in kHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
    return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
    return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
    return 200000;
}

/* i915GM: read the display clock selection out of the GCFGC PCI config reg. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
    u16 gcfgc = 0;

    gcfgc = pci_read_config(dev->dev, GCFGC, 2);

    if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
        return 133000;
    else {
        switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
        case GC_DISPLAY_CLOCK_333_MHZ:
            return 333000;
        default:
        case GC_DISPLAY_CLOCK_190_200_MHZ:
            return 190000;
        }
    }
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
    return 266000;
}

/*
 * i855: hpllcc is never read from hardware here, so the switch always
 * takes the GC_CLOCK_133_200/GC_CLOCK_100_200 (== 0) case.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
    u16 hpllcc = 0;
    /* Assume that the hardware is in the high speed state.  This
     * should be the default.
     */
    switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
    case GC_CLOCK_133_200:
    case GC_CLOCK_100_200:
        return 200000;
    case GC_CLOCK_166_250:
        return 250000;
    case GC_CLOCK_100_133:
        return 133000;
    }

    /* Shouldn't happen */
    return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
    return 133000;
}

/* FDI M/N divider values programmed into the data/link M-N registers. */
struct fdi_m_n {
    u32 tu;      /* transfer unit size */
    u32 gmch_m;  /* data M */
    u32 gmch_n;  /* data N */
    u32 link_m;  /* link M */
    u32 link_n;  /* link N */
};

/*
 * Halve num/den together until both fit in the 24-bit M/N register
 * fields (<= 0xffffff), preserving the ratio approximately.
 */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
    while (*num > 0xffffff || *den > 0xffffff) {
        *num >>= 1;
        *den >>= 1;
    }
}

/*
 * Compute FDI data (gmch) and link M/N ratios for the given pixel format
 * and link configuration; each lane carries 8 bits per clock.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
                     int link_clock, struct fdi_m_n *m_n)
{
    m_n->tu = 64; /* default size */

    /* BUG_ON(pixel_clock > INT_MAX / 36); */
    m_n->gmch_m = bits_per_pixel * pixel_clock;
    m_n->gmch_n = link_clock * nlanes * 8;
    fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

    m_n->link_m = pixel_clock;
    m_n->link_n = link_clock;
    fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

/*
 * Decide whether to use spread-spectrum clocking for LVDS: the module
 * parameter i915_panel_use_ssc overrides (when >= 0); otherwise follow
 * the VBT value unless the SSC-disable quirk is set.
 */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
    if (i915_panel_use_ssc >= 0)
        return i915_panel_use_ssc != 0;
    return dev_priv->lvds_use_ssc
        && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.
Resolve that here:
 *   LVDS typically supports only 6bpc, so clamp down in that case
 *   HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *   Displays may support a restricted set as well, check EDID and clamp as
 *     appropriate.
 *   DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                                         unsigned int *pipe_bpp,
                                         struct drm_display_mode *mode)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_encoder *encoder;
    struct drm_connector *connector;
    unsigned int display_bpc = UINT_MAX, bpc;

    /* Walk the encoders & connectors on this crtc, get min bpc */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

        if (encoder->crtc != crtc)
            continue;

        if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
            unsigned int lvds_bpc;

            /* A3 power state of the LVDS port tells 8 vs 6 bpc. */
            if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
                LVDS_A3_POWER_UP)
                lvds_bpc = 8;
            else
                lvds_bpc = 6;

            if (lvds_bpc < display_bpc) {
                DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
                display_bpc = lvds_bpc;
            }
            continue;
        }

        if (intel_encoder->type == INTEL_OUTPUT_EDP) {
            /* Use VBT settings if we have an eDP panel */
            unsigned int edp_bpc = dev_priv->edp.bpp / 3;

            if (edp_bpc < display_bpc) {
                DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
                display_bpc = edp_bpc;
            }
            continue;
        }

        /* Not one of the known troublemakers, check the EDID */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            head) {
            if (connector->encoder != encoder)
                continue;

            /* Don't use an invalid EDID bpc value */
            if (connector->display_info.bpc &&
                connector->display_info.bpc < display_bpc) {
                DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
                display_bpc = connector->display_info.bpc;
            }
        }

        /*
         * HDMI is either 12 or 8, so if the display lets 10bpc sneak
         * through, clamp it down. (Note: >12bpc will be caught below.)
         */
        if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
            if (display_bpc > 8 && display_bpc < 12) {
                DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
                display_bpc = 12;
            } else {
                DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
                display_bpc = 8;
            }
        }
    }

    if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
        DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
        display_bpc = 6;
    }

    /*
     * We could just drive the pipe at the highest bpc all the time and
     * enable dithering as needed, but that costs bandwidth.  So choose
     * the minimum value that expresses the full color range of the fb but
     * also stays within the max display bpc discovered above.
     */

    switch (crtc->fb->depth) {
    case 8:
        bpc = 8; /* since we go through a colormap */
        break;
    case 15:
    case 16:
        bpc = 6; /* min is 18bpp */
        break;
    case 24:
        bpc = 8;
        break;
    case 30:
        bpc = 10;
        break;
    case 48:
        bpc = 12;
        break;
    default:
        DRM_DEBUG("unsupported depth, assuming 24 bits\n");
        bpc = min((unsigned int)8, display_bpc);
        break;
    }

    display_bpc = min(display_bpc, bpc);

    DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
                  bpc, display_bpc);

    *pipe_bpp = display_bpc * 3;

    return display_bpc != bpc;
}

/*
 * Pick the PLL reference clock for a pre-PCH pipe: the VBT SSC frequency
 * for a sole LVDS output with SSC enabled, else 96 MHz (gen3+) or 48 MHz
 * (gen2).  Returned in kHz.
 */
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int refclk;

    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
        intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
                      refclk / 1000);
    } else if (!IS_GEN2(dev)) {
        refclk = 96000;
    } else {
        refclk = 48000;
    }

    return refclk;
}

static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
                                      intel_clock_t *clock)
{
    /* SDVO TV has fixed PLL values depend on its clock range,
       this mirrors vbios setting.
*/ 3318 if (adjusted_mode->clock >= 100000 3319 && adjusted_mode->clock < 140500) { 3320 clock->p1 = 2; 3321 clock->p2 = 10; 3322 clock->n = 3; 3323 clock->m1 = 16; 3324 clock->m2 = 8; 3325 } else if (adjusted_mode->clock >= 140500 3326 && adjusted_mode->clock <= 200000) { 3327 clock->p1 = 1; 3328 clock->p2 = 10; 3329 clock->n = 6; 3330 clock->m1 = 12; 3331 clock->m2 = 8; 3332 } 3333 } 3334 3335 static void i9xx_update_pll_dividers(struct drm_crtc *crtc, 3336 intel_clock_t *clock, 3337 intel_clock_t *reduced_clock) 3338 { 3339 struct drm_device *dev = crtc->dev; 3340 struct drm_i915_private *dev_priv = dev->dev_private; 3341 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3342 int pipe = intel_crtc->pipe; 3343 u32 fp, fp2 = 0; 3344 3345 if (IS_PINEVIEW(dev)) { 3346 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2; 3347 if (reduced_clock) 3348 fp2 = (1 << reduced_clock->n) << 16 | 3349 reduced_clock->m1 << 8 | reduced_clock->m2; 3350 } else { 3351 fp = clock->n << 16 | clock->m1 << 8 | clock->m2; 3352 if (reduced_clock) 3353 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 | 3354 reduced_clock->m2; 3355 } 3356 3357 I915_WRITE(FP0(pipe), fp); 3358 3359 intel_crtc->lowfreq_avail = false; 3360 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 3361 reduced_clock && i915_powersave) { 3362 I915_WRITE(FP1(pipe), fp2); 3363 intel_crtc->lowfreq_avail = true; 3364 } else { 3365 I915_WRITE(FP1(pipe), fp); 3366 } 3367 } 3368 3369 static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 3370 struct drm_display_mode *mode, 3371 struct drm_display_mode *adjusted_mode, 3372 int x, int y, 3373 struct drm_framebuffer *old_fb) 3374 { 3375 struct drm_device *dev = crtc->dev; 3376 struct drm_i915_private *dev_priv = dev->dev_private; 3377 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3378 int pipe = intel_crtc->pipe; 3379 int plane = intel_crtc->plane; 3380 int refclk, num_connectors = 0; 3381 intel_clock_t clock, reduced_clock; 3382 u32 dpll, dspcntr, pipeconf, 
	    vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify every encoder currently driven by this CRTC */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		/* Gen2 has a different P1/P2 encoding */
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Program the DPLL with the VCO still disabled first */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	/* Now enable the DPLL with the VCO on */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Timing registers hold (value - 1); high word is the end/total */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * Initialize reference clocks when the driver loads
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX, SSC is only usable when the CK505 clock chip
		 * is present (display_clock_mode from VBT). */
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc =
		    has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	}
}

/*
 * Pick the PLL reference clock for an Ironlake pipe, in kHz: the panel
 * SSC clock for a sole LVDS output using spread-spectrum clocking,
 * otherwise the fixed 120 MHz PCH reference.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *edp_encoder = NULL;
	int num_connectors = 0;
	bool is_lvds = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			edp_encoder = encoder;
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      dev_priv->lvds_ssc_freq);
		return dev_priv->lvds_ssc_freq * 1000;
	}

	return 120000;
}

/*
 * Full mode-set for an Ironlake (PCH) pipe: classify encoders, choose
 * PLL dividers, compute the FDI lane count and data/link M/N values,
 * then program the PCH DPLL, LVDS, pipe timing and M/N registers.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct
	       drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify every encoder currently driven by this CRTC */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}
	/* SDVO TV has fixed PLL values that depend on its clock range;
	   this mirrors the VBIOS setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		kprintf("intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): if neither DP nor eDP is present, has_edp_encoder
	 * is NULL here and &has_edp_encoder->base is evaluated; relies on
	 * intel_encoder_is_pch_edp() tolerating that — verify. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!intel_crtc->no_pll) {
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			I915_WRITE(PCH_FP0(pipe), fp);
			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

			POSTING_READ(PCH_DPLL(pipe));
			DELAY(150);
		}
	} else {
		/* No dedicated PLL for this CRTC: reuse pipe A's or pipe
		 * B's PLL if its current configuration matches exactly. */
		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
		    fp == I915_READ(PCH_FP0(0))) {
			intel_crtc->use_pll_a = true;
			DRM_DEBUG_KMS("using pipe a dpll\n");
		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
			   fp == I915_READ(PCH_FP0(1))) {
			intel_crtc->use_pll_a = false;
			DRM_DEBUG_KMS("using pipe b dpll\n");
		} else {
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
			return -EINVAL;
		}
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (HAS_PCH_CPT(dev)) {
			temp &= ~PORT_TRANS_SEL_MASK;
			temp |= PORT_TRANS_SEL_CPT(pipe);
		} else {
			if (pipe == 1)
				temp |= LVDS_PIPEB_SELECT;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}

		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(PCH_DPLL(pipe));
		DELAY(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACED_ILK;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		I915_WRITE(VSYNCSHIFT(pipe),
			   adjusted_mode->crtc_hsync_start
			   - adjusted_mode->crtc_htotal/2);
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		I915_WRITE(VSYNCSHIFT(pipe), 0);
	}

	/* Timing registers hold (value - 1); high word is the end/total */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * Common mode-set entry point: dispatch to the per-platform
 * crtc_mode_set hook with vblank pre/post bracketing, and record the
 * resulting DPMS state on the CRTC.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);
	drm_vblank_post_modeset(dev, pipe);

	if (ret)
		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
	else
		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;

	return ret;
}

/*
 * Check whether the hardware ELD buffer already holds the connector's
 * ELD.  Returns true when no rewrite is needed (ELD valid bit and
 * buffer contents both match), false otherwise.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* Empty ELD: up to date only if the valid bit is clear too */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i =
	    I915_READ(reg_elda);
	/* Reset the ELD read/write address before comparing the buffer */
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

/*
 * Write the connector's ELD into the G4X audio hardware buffer, unless
 * the buffer already matches.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the ELD and reset the write address */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the new ELD valid */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}

/*
 * Write the connector's ELD into the IBX/CPT (PCH) audio hardware
 * buffer for the pipe driving this CRTC, unless it already matches.
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register instances are 0x100 apart */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_KMS("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting the buffer */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* 84 bytes of hw ELD buffer */
	len = 21;
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

/*
 * Update the hardware ELD for the connector currently driven by this
 * encoder, dispatching to the platform-specific write_eld hook.
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	/* No audio-capable connector on this encoder: nothing to write. */
	if (!connector)
		return;

	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	/* ELD byte 6 carries the audio/video sync delay in 2ms units. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	/* write_eld is only set on platforms with audio support. */
	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	/* Each palette entry is one 32-bit register: 8 bits each of R/G/B. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}

/*
 * Update the hardware cursor on 845G/865G. base == 0 means "hide".
 * These chips require the cursor to be disabled while the base address
 * is changed, hence the enable/disable dance below.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: no register writes needed. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}

/*
 * Update the hardware cursor on gen3+ (pre-Ivybridge) pipes.
 * The CURBASE write is what latches all cursor state on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}

/*
 * Update the hardware cursor on Ivybridge. Same scheme as i9xx but with
 * the IVB register instances and no pipe-select bits in the control reg.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes
on next vblank */ 4631 I915_WRITE(CURBASE_IVB(pipe), base); 4632 } 4633 4634 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 4635 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 4636 bool on) 4637 { 4638 struct drm_device *dev = crtc->dev; 4639 struct drm_i915_private *dev_priv = dev->dev_private; 4640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4641 int pipe = intel_crtc->pipe; 4642 int x = intel_crtc->cursor_x; 4643 int y = intel_crtc->cursor_y; 4644 u32 base, pos; 4645 bool visible; 4646 4647 pos = 0; 4648 4649 if (on && crtc->enabled && crtc->fb) { 4650 base = intel_crtc->cursor_addr; 4651 if (x > (int) crtc->fb->width) 4652 base = 0; 4653 4654 if (y > (int) crtc->fb->height) 4655 base = 0; 4656 } else 4657 base = 0; 4658 4659 if (x < 0) { 4660 if (x + intel_crtc->cursor_width < 0) 4661 base = 0; 4662 4663 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 4664 x = -x; 4665 } 4666 pos |= x << CURSOR_X_SHIFT; 4667 4668 if (y < 0) { 4669 if (y + intel_crtc->cursor_height < 0) 4670 base = 0; 4671 4672 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 4673 y = -y; 4674 } 4675 pos |= y << CURSOR_Y_SHIFT; 4676 4677 visible = base != 0; 4678 if (!visible && !intel_crtc->cursor_visible) 4679 return; 4680 4681 if (IS_IVYBRIDGE(dev)) { 4682 I915_WRITE(CURPOS_IVB(pipe), pos); 4683 ivb_update_cursor(crtc, base); 4684 } else { 4685 I915_WRITE(CURPOS(pipe), pos); 4686 if (IS_845G(dev) || IS_I865G(dev)) 4687 i845_update_cursor(crtc, base); 4688 else 4689 i9xx_update_cursor(crtc, base); 4690 } 4691 4692 if (visible) 4693 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); 4694 } 4695 4696 static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4697 struct drm_file *file, 4698 uint32_t handle, 4699 uint32_t width, uint32_t height) 4700 { 4701 struct drm_device *dev = crtc->dev; 4702 struct drm_i915_private *dev_priv = dev->dev_private; 4703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4704 struct drm_i915_gem_object 
*obj; 4705 uint32_t addr; 4706 int ret; 4707 4708 DRM_DEBUG_KMS("\n"); 4709 4710 /* if we want to turn off the cursor ignore width and height */ 4711 if (!handle) { 4712 DRM_DEBUG_KMS("cursor off\n"); 4713 addr = 0; 4714 obj = NULL; 4715 DRM_LOCK(dev); 4716 goto finish; 4717 } 4718 4719 /* Currently we only support 64x64 cursors */ 4720 if (width != 64 || height != 64) { 4721 DRM_ERROR("we currently only support 64x64 cursors\n"); 4722 return -EINVAL; 4723 } 4724 4725 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 4726 if (&obj->base == NULL) 4727 return -ENOENT; 4728 4729 if (obj->base.size < width * height * 4) { 4730 DRM_ERROR("buffer is to small\n"); 4731 ret = -ENOMEM; 4732 goto fail; 4733 } 4734 4735 /* we only need to pin inside GTT if cursor is non-phy */ 4736 DRM_LOCK(dev); 4737 if (!dev_priv->info->cursor_needs_physical) { 4738 if (obj->tiling_mode) { 4739 DRM_ERROR("cursor cannot be tiled\n"); 4740 ret = -EINVAL; 4741 goto fail_locked; 4742 } 4743 4744 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); 4745 if (ret) { 4746 DRM_ERROR("failed to move cursor bo into the GTT\n"); 4747 goto fail_locked; 4748 } 4749 4750 ret = i915_gem_object_put_fence(obj); 4751 if (ret) { 4752 DRM_ERROR("failed to release fence for cursor\n"); 4753 goto fail_unpin; 4754 } 4755 4756 addr = obj->gtt_offset; 4757 } else { 4758 int align = IS_I830(dev) ? 16 * 1024 : 256; 4759 ret = i915_gem_attach_phys_object(dev, obj, 4760 (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 4761 align); 4762 if (ret) { 4763 DRM_ERROR("failed to attach phys object\n"); 4764 goto fail_locked; 4765 } 4766 addr = obj->phys_obj->handle->busaddr; 4767 } 4768 4769 if (IS_GEN2(dev)) 4770 I915_WRITE(CURSIZE, (height << 12) | width); 4771 4772 finish: 4773 if (intel_crtc->cursor_bo) { 4774 if (dev_priv->info->cursor_needs_physical) { 4775 if (intel_crtc->cursor_bo != obj) 4776 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 4777 } else 4778 i915_gem_object_unpin(intel_crtc->cursor_bo); 4779 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 4780 } 4781 4782 DRM_UNLOCK(dev); 4783 4784 intel_crtc->cursor_addr = addr; 4785 intel_crtc->cursor_bo = obj; 4786 intel_crtc->cursor_width = width; 4787 intel_crtc->cursor_height = height; 4788 4789 intel_crtc_update_cursor(crtc, true); 4790 4791 return 0; 4792 fail_unpin: 4793 i915_gem_object_unpin(obj); 4794 fail_locked: 4795 DRM_UNLOCK(dev); 4796 fail: 4797 drm_gem_object_unreference_unlocked(&obj->base); 4798 return ret; 4799 } 4800 4801 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 4802 { 4803 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4804 4805 intel_crtc->cursor_x = x; 4806 intel_crtc->cursor_y = y; 4807 4808 intel_crtc_update_cursor(crtc, true); 4809 4810 return 0; 4811 } 4812 4813 /** Sets the color ramps on behalf of RandR */ 4814 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 4815 u16 blue, int regno) 4816 { 4817 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4818 4819 intel_crtc->lut_r[regno] = red >> 8; 4820 intel_crtc->lut_g[regno] = green >> 8; 4821 intel_crtc->lut_b[regno] = blue >> 8; 4822 } 4823 4824 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 4825 u16 *blue, int regno) 4826 { 4827 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4828 4829 *red = intel_crtc->lut_r[regno] << 8; 4830 *green = intel_crtc->lut_g[regno] << 8; 4831 *blue = 
intel_crtc->lut_b[regno] << 8; 4832 } 4833 4834 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 4835 u16 *blue, uint32_t start, uint32_t size) 4836 { 4837 int end = (start + size > 256) ? 256 : start + size, i; 4838 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4839 4840 for (i = start; i < end; i++) { 4841 intel_crtc->lut_r[i] = red[i] >> 8; 4842 intel_crtc->lut_g[i] = green[i] >> 8; 4843 intel_crtc->lut_b[i] = blue[i] >> 8; 4844 } 4845 4846 intel_crtc_load_lut(crtc); 4847 } 4848 4849 /** 4850 * Get a pipe with a simple mode set on it for doing load-based monitor 4851 * detection. 4852 * 4853 * It will be up to the load-detect code to adjust the pipe as appropriate for 4854 * its requirements. The pipe will be connected to no other encoders. 4855 * 4856 * Currently this code will only succeed if there is a pipe with no encoders 4857 * configured for it. In the future, it could choose to temporarily disable 4858 * some outputs to free up a pipe for its use. 4859 * 4860 * \return crtc, or NULL if no pipes are available. 
 */

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * On success *res points at the new drm_framebuffer and 0 is returned;
 * on failure the GEM object reference is dropped and the error returned.
 */
static int
intel_framebuffer_create(struct drm_device *dev,
    struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
    struct drm_framebuffer **res)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		/* init failed: we own the obj reference, release it. */
		drm_gem_object_unreference_unlocked(&obj->base);
		drm_free(intel_fb, DRM_MEM_KMS);
		return (ret);
	}

	*res = &intel_fb->base;
	return (0);
}

/* Bytes per scanline for @width pixels at @bpp, rounded up to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = howmany(width * bpp, 8);
	return roundup2(pitch, 64);
}

/* Buffer size needed to back @mode at @bpp, rounded up to a whole page. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
}

/*
 * Allocate a fresh GEM object big enough for @mode and wrap it in a
 * framebuffer (used when the fbdev framebuffer can't be reused for
 * load detection).
 */
static int
intel_framebuffer_create_for_mode(struct drm_device *dev,
    struct drm_display_mode *mode, int depth, int bpp,
    struct drm_framebuffer **res)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd;

	obj = i915_gem_alloc_object(dev,
	    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return (-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
	    bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
}

/*
 * Check whether the existing fbdev framebuffer is large enough to back
 * @mode for load detection. *res is set to the fbdev framebuffer when it
 * fits, or NULL otherwise; the return value is always 0 here.
 */
static int
mode_fits_in_fbdev(struct drm_device *dev,
    struct drm_display_mode *mode, struct drm_framebuffer **res)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	/* No fbdev at all (e.g. called during early bring-up). */
	if (dev_priv->fbdev == NULL) {
		*res = NULL;
		return (0);
	}

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL) {
		*res = NULL;
		return (0);
	}

	/* The fbdev pitch must cover a full scanline of @mode... */
	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
	    fb->bits_per_pixel)) {
		*res = NULL;
		return (0);
	}

	/* ...and the backing object must cover all scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
		*res = NULL;
		return (0);
	}

	*res = fb;
	return (0);
}

/*
 * Acquire a CRTC for load-based monitor detection and light it up with
 * @mode (or a default VESA mode). State needed to undo everything is
 * saved in @old for intel_release_load_detect_pipe(). Returns true when
 * a pipe was successfully set up.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1, r;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
		    &crtc->fb);
		/* Remember the temporary fb so release can destroy it. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (r != 0) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}

/*
 * Undo intel_get_load_detect_pipe(): detach a temporarily attached
 * encoder (destroying any temporary framebuffer), or restore the saved
 * DPMS state on a CRTC that was already bound to the connector.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick the FP register pair currently selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode the divider fields; Pineview packs N and M2 differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divider value. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: LVDS is only possible on pipe B. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}

/** Returns the currently programmed mode of the given pipe.
*/ 5196 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 5197 struct drm_crtc *crtc) 5198 { 5199 struct drm_i915_private *dev_priv = dev->dev_private; 5200 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5201 int pipe = intel_crtc->pipe; 5202 struct drm_display_mode *mode; 5203 int htot = I915_READ(HTOTAL(pipe)); 5204 int hsync = I915_READ(HSYNC(pipe)); 5205 int vtot = I915_READ(VTOTAL(pipe)); 5206 int vsync = I915_READ(VSYNC(pipe)); 5207 5208 mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); 5209 5210 mode->clock = intel_crtc_clock_get(dev, crtc); 5211 mode->hdisplay = (htot & 0xffff) + 1; 5212 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 5213 mode->hsync_start = (hsync & 0xffff) + 1; 5214 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 5215 mode->vdisplay = (vtot & 0xffff) + 1; 5216 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 5217 mode->vsync_start = (vsync & 0xffff) + 1; 5218 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 5219 5220 drm_mode_set_name(mode); 5221 drm_mode_set_crtcinfo(mode, 0); 5222 5223 return mode; 5224 } 5225 5226 #define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz) 5227 5228 /* When this timer fires, we've been idle for awhile */ 5229 static void intel_gpu_idle_timer(void *arg) 5230 { 5231 struct drm_device *dev = arg; 5232 drm_i915_private_t *dev_priv = dev->dev_private; 5233 5234 if (!list_empty(&dev_priv->mm.active_list)) { 5235 /* Still processing requests, so just re-arm the timer. 
*/ 5236 callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT, 5237 i915_hangcheck_elapsed, dev); 5238 return; 5239 } 5240 5241 dev_priv->busy = false; 5242 taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); 5243 } 5244 5245 #define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz) 5246 5247 static void intel_crtc_idle_timer(void *arg) 5248 { 5249 struct intel_crtc *intel_crtc = arg; 5250 struct drm_crtc *crtc = &intel_crtc->base; 5251 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 5252 struct intel_framebuffer *intel_fb; 5253 5254 intel_fb = to_intel_framebuffer(crtc->fb); 5255 if (intel_fb && intel_fb->obj->active) { 5256 /* The framebuffer is still being accessed by the GPU. */ 5257 callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, 5258 i915_hangcheck_elapsed, crtc->dev); 5259 return; 5260 } 5261 5262 intel_crtc->busy = false; 5263 taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); 5264 } 5265 5266 static void intel_increase_pllclock(struct drm_crtc *crtc) 5267 { 5268 struct drm_device *dev = crtc->dev; 5269 drm_i915_private_t *dev_priv = dev->dev_private; 5270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5271 int pipe = intel_crtc->pipe; 5272 int dpll_reg = DPLL(pipe); 5273 int dpll; 5274 5275 if (HAS_PCH_SPLIT(dev)) 5276 return; 5277 5278 if (!dev_priv->lvds_downclock_avail) 5279 return; 5280 5281 dpll = I915_READ(dpll_reg); 5282 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 5283 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 5284 5285 assert_panel_unlocked(dev_priv, pipe); 5286 5287 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 5288 I915_WRITE(dpll_reg, dpll); 5289 intel_wait_for_vblank(dev, pipe); 5290 5291 dpll = I915_READ(dpll_reg); 5292 if (dpll & DISPLAY_RATE_SELECT_FPA1) 5293 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 5294 } 5295 5296 /* Schedule downclock */ 5297 callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, 5298 intel_crtc_idle_timer, intel_crtc); 5299 } 5300 5301 static void 
intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Downclocking is only implemented for non-PCH LVDS panels. */
	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		/* Select the FPA1 (reduced-rate) dividers. */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}

/**
 * intel_idle_update - adjust clocks for idleness
 * @work: work struct
 *
 * Either the GPU or display (or both) went idle.  Check the busy status
 * here and adjust the CRTC and GPU clocks as necessary.
 */
static void intel_idle_update(void *arg, int pending)
{
	drm_i915_private_t *dev_priv = arg;
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	/* Power-saving disabled via module parameter: do nothing. */
	if (!i915_powersave)
		return;

	DRM_LOCK(dev);

	i915_update_gfx_val(dev_priv);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		if (!intel_crtc->busy)
			intel_decrease_pllclock(crtc);
	}

	DRM_UNLOCK(dev);
}

/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
 */
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct intel_framebuffer *intel_fb;
	struct intel_crtc *intel_crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* First busy transition sets the flag; subsequent calls just push
	 * the GPU idle timer further out. */
	if (!dev_priv->busy)
		dev_priv->busy = true;
	else
		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
		    intel_gpu_idle_timer, dev);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_fb = to_intel_framebuffer(crtc->fb);
		if (intel_fb->obj == obj) {
			if (!intel_crtc->busy) {
				/* Non-busy -> busy, upclock */
				intel_increase_pllclock(crtc);
				intel_crtc->busy = true;
			} else {
				/* Busy -> busy, put off timer */
				callout_reset(&intel_crtc->idle_callout,
				    CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
				    intel_crtc);
			}
		}
	}
}

/*
 * DRM destroy hook for an intel_crtc: cancel and drain any pending
 * page-flip work under the event lock before freeing the CRTC.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_unpin_work *work;

	/* Detach any pending unpin work under the event lock... */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* ...then cancel/drain it outside the lock before freeing. */
	if (work) {
		taskqueue_cancel(dev_priv->tq, &work->task, NULL);
		taskqueue_drain(dev_priv->tq, &work->task);
		drm_free(work, DRM_MEM_KMS);
	}

	drm_crtc_cleanup(crtc);

	drm_free(intel_crtc, DRM_MEM_KMS);
}

/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop the
 * references taken when the flip was queued, and refresh FBC state.
 * Runs from the driver taskqueue, queued by do_intel_finish_page_flip().
 */
static void intel_unpin_work_fn(void *arg, int pending)
{
	struct intel_unpin_work *work = arg;
	struct drm_device *dev;

	dev = work->dev;
	DRM_LOCK(dev);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	DRM_UNLOCK(dev);
	drm_free(work, DRM_MEM_KMS);
}

/*
 * Vblank-time completion of a pending page flip on @crtc: deliver the
 * completion event, release the vblank reference, wake anyone waiting on
 * the old object's pending_flip bits, and queue the unpin work.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	/* work->pending is set by intel_prepare_page_flip(); a flip that
	 * hasn't been prepared yet must not be completed. */
	if (work == NULL || !work->pending) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	lockmgr(&dev->event_lock, LK_RELEASE);

	obj = work->old_fb_obj;

	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
	wakeup(&obj->pending_flip);

	taskqueue_enqueue(dev_priv->tq, &work->task);
}

/* IRQ helper: finish the pending flip on the CRTC driving @pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* IRQ helper: finish the pending flip on the CRTC driving @plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/*
 * Called from the flip-pending interrupt: mark the queued unpin work as
 * "prepared" so a subsequent vblank may complete it.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);

lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5515 if (intel_crtc->unpin_work) { 5516 if ((++intel_crtc->unpin_work->pending) > 1) 5517 DRM_ERROR("Prepared flip multiple times\n"); 5518 } else { 5519 DRM_DEBUG("preparing flip with no unpin work?\n"); 5520 } 5521 lockmgr(&dev->event_lock, LK_RELEASE); 5522 } 5523 5524 static int intel_gen2_queue_flip(struct drm_device *dev, 5525 struct drm_crtc *crtc, 5526 struct drm_framebuffer *fb, 5527 struct drm_i915_gem_object *obj) 5528 { 5529 struct drm_i915_private *dev_priv = dev->dev_private; 5530 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5531 unsigned long offset; 5532 u32 flip_mask; 5533 int ret; 5534 5535 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5536 if (ret) 5537 goto out; 5538 5539 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5540 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5541 5542 ret = BEGIN_LP_RING(6); 5543 if (ret) 5544 goto out; 5545 5546 /* Can't queue multiple flips, so wait for the previous 5547 * one to finish before executing the next. 
5548 */ 5549 if (intel_crtc->plane) 5550 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5551 else 5552 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5553 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5554 OUT_RING(MI_NOOP); 5555 OUT_RING(MI_DISPLAY_FLIP | 5556 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5557 OUT_RING(fb->pitches[0]); 5558 OUT_RING(obj->gtt_offset + offset); 5559 OUT_RING(0); /* aux display base address, unused */ 5560 ADVANCE_LP_RING(); 5561 out: 5562 return ret; 5563 } 5564 5565 static int intel_gen3_queue_flip(struct drm_device *dev, 5566 struct drm_crtc *crtc, 5567 struct drm_framebuffer *fb, 5568 struct drm_i915_gem_object *obj) 5569 { 5570 struct drm_i915_private *dev_priv = dev->dev_private; 5571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5572 unsigned long offset; 5573 u32 flip_mask; 5574 int ret; 5575 5576 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5577 if (ret) 5578 goto out; 5579 5580 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5581 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5582 5583 ret = BEGIN_LP_RING(6); 5584 if (ret) 5585 goto out; 5586 5587 if (intel_crtc->plane) 5588 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5589 else 5590 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5591 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5592 OUT_RING(MI_NOOP); 5593 OUT_RING(MI_DISPLAY_FLIP_I915 | 5594 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5595 OUT_RING(fb->pitches[0]); 5596 OUT_RING(obj->gtt_offset + offset); 5597 OUT_RING(MI_NOOP); 5598 5599 ADVANCE_LP_RING(); 5600 out: 5601 return ret; 5602 } 5603 5604 static int intel_gen4_queue_flip(struct drm_device *dev, 5605 struct drm_crtc *crtc, 5606 struct drm_framebuffer *fb, 5607 struct drm_i915_gem_object *obj) 5608 { 5609 struct drm_i915_private *dev_priv = dev->dev_private; 5610 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5611 uint32_t pf, pipesrc; 5612 int ret; 5613 5614 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5615 if (ret) 
5616 goto out; 5617 5618 ret = BEGIN_LP_RING(4); 5619 if (ret) 5620 goto out; 5621 5622 /* i965+ uses the linear or tiled offsets from the 5623 * Display Registers (which do not change across a page-flip) 5624 * so we need only reprogram the base address. 5625 */ 5626 OUT_RING(MI_DISPLAY_FLIP | 5627 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5628 OUT_RING(fb->pitches[0]); 5629 OUT_RING(obj->gtt_offset | obj->tiling_mode); 5630 5631 /* XXX Enabling the panel-fitter across page-flip is so far 5632 * untested on non-native modes, so ignore it for now. 5633 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 5634 */ 5635 pf = 0; 5636 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5637 OUT_RING(pf | pipesrc); 5638 ADVANCE_LP_RING(); 5639 out: 5640 return ret; 5641 } 5642 5643 static int intel_gen6_queue_flip(struct drm_device *dev, 5644 struct drm_crtc *crtc, 5645 struct drm_framebuffer *fb, 5646 struct drm_i915_gem_object *obj) 5647 { 5648 struct drm_i915_private *dev_priv = dev->dev_private; 5649 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5650 uint32_t pf, pipesrc; 5651 int ret; 5652 5653 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5654 if (ret) 5655 goto out; 5656 5657 ret = BEGIN_LP_RING(4); 5658 if (ret) 5659 goto out; 5660 5661 OUT_RING(MI_DISPLAY_FLIP | 5662 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5663 OUT_RING(fb->pitches[0] | obj->tiling_mode); 5664 OUT_RING(obj->gtt_offset); 5665 5666 /* Contrary to the suggestions in the documentation, 5667 * "Enable Panel Fitter" does not seem to be required when page 5668 * flipping with a non-native mode, and worse causes a normal 5669 * modeset to fail. 
5670 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 5671 */ 5672 pf = 0; 5673 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5674 OUT_RING(pf | pipesrc); 5675 ADVANCE_LP_RING(); 5676 out: 5677 return ret; 5678 } 5679 5680 /* 5681 * On gen7 we currently use the blit ring because (in early silicon at least) 5682 * the render ring doesn't give us interrpts for page flip completion, which 5683 * means clients will hang after the first flip is queued. Fortunately the 5684 * blit ring generates interrupts properly, so use it instead. 5685 */ 5686 static int intel_gen7_queue_flip(struct drm_device *dev, 5687 struct drm_crtc *crtc, 5688 struct drm_framebuffer *fb, 5689 struct drm_i915_gem_object *obj) 5690 { 5691 struct drm_i915_private *dev_priv = dev->dev_private; 5692 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5693 struct intel_ring_buffer *ring = &dev_priv->rings[BCS]; 5694 int ret; 5695 5696 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 5697 if (ret) 5698 goto out; 5699 5700 ret = intel_ring_begin(ring, 4); 5701 if (ret) 5702 goto out; 5703 5704 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 5705 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 5706 intel_ring_emit(ring, (obj->gtt_offset)); 5707 intel_ring_emit(ring, (MI_NOOP)); 5708 intel_ring_advance(ring); 5709 out: 5710 return ret; 5711 } 5712 5713 static int intel_default_queue_flip(struct drm_device *dev, 5714 struct drm_crtc *crtc, 5715 struct drm_framebuffer *fb, 5716 struct drm_i915_gem_object *obj) 5717 { 5718 return -ENODEV; 5719 } 5720 5721 static int intel_crtc_page_flip(struct drm_crtc *crtc, 5722 struct drm_framebuffer *fb, 5723 struct drm_pending_vblank_event *event) 5724 { 5725 struct drm_device *dev = crtc->dev; 5726 struct drm_i915_private *dev_priv = dev->dev_private; 5727 struct intel_framebuffer *intel_fb; 5728 struct drm_i915_gem_object *obj; 5729 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5730 struct 
intel_unpin_work *work; 5731 int ret; 5732 5733 work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO); 5734 5735 work->event = event; 5736 work->dev = crtc->dev; 5737 intel_fb = to_intel_framebuffer(crtc->fb); 5738 work->old_fb_obj = intel_fb->obj; 5739 TASK_INIT(&work->task, 0, intel_unpin_work_fn, work); 5740 5741 ret = drm_vblank_get(dev, intel_crtc->pipe); 5742 if (ret) 5743 goto free_work; 5744 5745 /* We borrow the event spin lock for protecting unpin_work */ 5746 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5747 if (intel_crtc->unpin_work) { 5748 lockmgr(&dev->event_lock, LK_RELEASE); 5749 drm_free(work, DRM_MEM_KMS); 5750 drm_vblank_put(dev, intel_crtc->pipe); 5751 5752 DRM_DEBUG("flip queue: crtc already busy\n"); 5753 return -EBUSY; 5754 } 5755 intel_crtc->unpin_work = work; 5756 lockmgr(&dev->event_lock, LK_RELEASE); 5757 5758 intel_fb = to_intel_framebuffer(fb); 5759 obj = intel_fb->obj; 5760 5761 DRM_LOCK(dev); 5762 5763 /* Reference the objects for the scheduled work. */ 5764 drm_gem_object_reference(&work->old_fb_obj->base); 5765 drm_gem_object_reference(&obj->base); 5766 5767 crtc->fb = fb; 5768 5769 work->pending_flip_obj = obj; 5770 5771 work->enable_stall_check = true; 5772 5773 /* Block clients from rendering to the new back buffer until 5774 * the flip occurs and the object is no longer visible. 
5775 */ 5776 atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane); 5777 5778 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 5779 if (ret) 5780 goto cleanup_pending; 5781 intel_disable_fbc(dev); 5782 DRM_UNLOCK(dev); 5783 5784 return 0; 5785 5786 cleanup_pending: 5787 atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane); 5788 drm_gem_object_unreference(&work->old_fb_obj->base); 5789 drm_gem_object_unreference(&obj->base); 5790 DRM_UNLOCK(dev); 5791 5792 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5793 intel_crtc->unpin_work = NULL; 5794 lockmgr(&dev->event_lock, LK_RELEASE); 5795 5796 drm_vblank_put(dev, intel_crtc->pipe); 5797 free_work: 5798 drm_free(work, DRM_MEM_KMS); 5799 5800 return ret; 5801 } 5802 5803 static void intel_sanitize_modesetting(struct drm_device *dev, 5804 int pipe, int plane) 5805 { 5806 struct drm_i915_private *dev_priv = dev->dev_private; 5807 u32 reg, val; 5808 5809 /* Clear any frame start delays used for debugging left by the BIOS */ 5810 for_each_pipe(pipe) { 5811 reg = PIPECONF(pipe); 5812 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 5813 } 5814 5815 if (HAS_PCH_SPLIT(dev)) 5816 return; 5817 5818 /* Who knows what state these registers were left in by the BIOS or 5819 * grub? 5820 * 5821 * If we leave the registers in a conflicting state (e.g. with the 5822 * display plane reading from the other pipe than the one we intend 5823 * to use) then when we attempt to teardown the active mode, we will 5824 * not disable the pipes and planes in the correct order -- leaving 5825 * a plane reading from a disabled pipe and possibly leading to 5826 * undefined behaviour. 5827 */ 5828 5829 reg = DSPCNTR(plane); 5830 val = I915_READ(reg); 5831 5832 if ((val & DISPLAY_PLANE_ENABLE) == 0) 5833 return; 5834 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) 5835 return; 5836 5837 /* This display plane is active and attached to the other CPU pipe. 
*/ 5838 pipe = !pipe; 5839 5840 /* Disable the plane and wait for it to stop reading from the pipe. */ 5841 intel_disable_plane(dev_priv, plane, pipe); 5842 intel_disable_pipe(dev_priv, pipe); 5843 } 5844 5845 static void intel_crtc_reset(struct drm_crtc *crtc) 5846 { 5847 struct drm_device *dev = crtc->dev; 5848 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5849 5850 /* Reset flags back to the 'unknown' status so that they 5851 * will be correctly set on the initial modeset. 5852 */ 5853 intel_crtc->dpms_mode = -1; 5854 5855 /* We need to fix up any BIOS configuration that conflicts with 5856 * our expectations. 5857 */ 5858 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 5859 } 5860 5861 static struct drm_crtc_helper_funcs intel_helper_funcs = { 5862 .dpms = intel_crtc_dpms, 5863 .mode_fixup = intel_crtc_mode_fixup, 5864 .mode_set = intel_crtc_mode_set, 5865 .mode_set_base = intel_pipe_set_base, 5866 .mode_set_base_atomic = intel_pipe_set_base_atomic, 5867 .load_lut = intel_crtc_load_lut, 5868 .disable = intel_crtc_disable, 5869 }; 5870 5871 static const struct drm_crtc_funcs intel_crtc_funcs = { 5872 .reset = intel_crtc_reset, 5873 .cursor_set = intel_crtc_cursor_set, 5874 .cursor_move = intel_crtc_cursor_move, 5875 .gamma_set = intel_crtc_gamma_set, 5876 .set_config = drm_crtc_helper_set_config, 5877 .destroy = intel_crtc_destroy, 5878 .page_flip = intel_crtc_page_flip, 5879 }; 5880 5881 static void intel_crtc_init(struct drm_device *dev, int pipe) 5882 { 5883 drm_i915_private_t *dev_priv = dev->dev_private; 5884 struct intel_crtc *intel_crtc; 5885 int i; 5886 5887 intel_crtc = kmalloc(sizeof(struct intel_crtc) + 5888 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 5889 DRM_MEM_KMS, M_WAITOK | M_ZERO); 5890 5891 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 5892 5893 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 5894 for (i = 0; i < 256; i++) { 5895 intel_crtc->lut_r[i] = i; 5896 intel_crtc->lut_g[i] = i; 
5897 intel_crtc->lut_b[i] = i; 5898 } 5899 5900 /* Swap pipes & planes for FBC on pre-965 */ 5901 intel_crtc->pipe = pipe; 5902 intel_crtc->plane = pipe; 5903 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 5904 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 5905 intel_crtc->plane = !pipe; 5906 } 5907 5908 KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) && 5909 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL, 5910 ("plane_to_crtc is already initialized")); 5911 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 5912 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5913 5914 intel_crtc_reset(&intel_crtc->base); 5915 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 5916 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 5917 5918 if (HAS_PCH_SPLIT(dev)) { 5919 if (pipe == 2 && IS_IVYBRIDGE(dev)) 5920 intel_crtc->no_pll = true; 5921 intel_helper_funcs.prepare = ironlake_crtc_prepare; 5922 intel_helper_funcs.commit = ironlake_crtc_commit; 5923 } else { 5924 intel_helper_funcs.prepare = i9xx_crtc_prepare; 5925 intel_helper_funcs.commit = i9xx_crtc_commit; 5926 } 5927 5928 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5929 5930 intel_crtc->busy = false; 5931 5932 callout_init_mp(&intel_crtc->idle_callout); 5933 } 5934 5935 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 5936 struct drm_file *file) 5937 { 5938 drm_i915_private_t *dev_priv = dev->dev_private; 5939 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 5940 struct drm_mode_object *drmmode_obj; 5941 struct intel_crtc *crtc; 5942 5943 if (!dev_priv) { 5944 DRM_ERROR("called with no initialization\n"); 5945 return -EINVAL; 5946 } 5947 5948 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 5949 DRM_MODE_OBJECT_CRTC); 5950 5951 if (!drmmode_obj) { 5952 DRM_ERROR("no such CRTC id\n"); 5953 return -EINVAL; 5954 } 5955 5956 crtc = 
to_intel_crtc(obj_to_crtc(drmmode_obj)); 5957 pipe_from_crtc_id->pipe = crtc->pipe; 5958 5959 return 0; 5960 } 5961 5962 static int intel_encoder_clones(struct drm_device *dev, int type_mask) 5963 { 5964 struct intel_encoder *encoder; 5965 int index_mask = 0; 5966 int entry = 0; 5967 5968 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 5969 if (type_mask & encoder->clone_mask) 5970 index_mask |= (1 << entry); 5971 entry++; 5972 } 5973 5974 return index_mask; 5975 } 5976 5977 static bool has_edp_a(struct drm_device *dev) 5978 { 5979 struct drm_i915_private *dev_priv = dev->dev_private; 5980 5981 if (!IS_MOBILE(dev)) 5982 return false; 5983 5984 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 5985 return false; 5986 5987 if (IS_GEN5(dev) && 5988 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 5989 return false; 5990 5991 return true; 5992 } 5993 5994 static void intel_setup_outputs(struct drm_device *dev) 5995 { 5996 struct drm_i915_private *dev_priv = dev->dev_private; 5997 struct intel_encoder *encoder; 5998 bool dpd_is_edp = false; 5999 bool has_lvds; 6000 6001 has_lvds = intel_lvds_init(dev); 6002 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 6003 /* disable the panel fitter on everything but LVDS */ 6004 I915_WRITE(PFIT_CONTROL, 0); 6005 } 6006 6007 if (HAS_PCH_SPLIT(dev)) { 6008 dpd_is_edp = intel_dpd_is_edp(dev); 6009 6010 if (has_edp_a(dev)) 6011 intel_dp_init(dev, DP_A); 6012 6013 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 6014 intel_dp_init(dev, PCH_DP_D); 6015 } 6016 6017 intel_crt_init(dev); 6018 6019 if (HAS_PCH_SPLIT(dev)) { 6020 int found; 6021 6022 DRM_DEBUG_KMS( 6023 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n", 6024 (I915_READ(HDMIB) & PORT_DETECTED) != 0, 6025 (I915_READ(PCH_DP_B) & DP_DETECTED) != 0, 6026 (I915_READ(HDMIC) & PORT_DETECTED) != 0, 6027 (I915_READ(HDMID) & PORT_DETECTED) != 0, 6028 (I915_READ(PCH_DP_C) & DP_DETECTED) != 0, 6029 (I915_READ(PCH_DP_D) & DP_DETECTED) != 
0, 6030 (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0); 6031 6032 if (I915_READ(HDMIB) & PORT_DETECTED) { 6033 /* PCH SDVOB multiplex with HDMIB */ 6034 found = intel_sdvo_init(dev, PCH_SDVOB); 6035 if (!found) 6036 intel_hdmi_init(dev, HDMIB); 6037 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 6038 intel_dp_init(dev, PCH_DP_B); 6039 } 6040 6041 if (I915_READ(HDMIC) & PORT_DETECTED) 6042 intel_hdmi_init(dev, HDMIC); 6043 6044 if (I915_READ(HDMID) & PORT_DETECTED) 6045 intel_hdmi_init(dev, HDMID); 6046 6047 if (I915_READ(PCH_DP_C) & DP_DETECTED) 6048 intel_dp_init(dev, PCH_DP_C); 6049 6050 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 6051 intel_dp_init(dev, PCH_DP_D); 6052 6053 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 6054 bool found = false; 6055 6056 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6057 DRM_DEBUG_KMS("probing SDVOB\n"); 6058 found = intel_sdvo_init(dev, SDVOB); 6059 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 6060 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 6061 intel_hdmi_init(dev, SDVOB); 6062 } 6063 6064 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 6065 DRM_DEBUG_KMS("probing DP_B\n"); 6066 intel_dp_init(dev, DP_B); 6067 } 6068 } 6069 6070 /* Before G4X SDVOC doesn't have its own detect register */ 6071 6072 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6073 DRM_DEBUG_KMS("probing SDVOC\n"); 6074 found = intel_sdvo_init(dev, SDVOC); 6075 } 6076 6077 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 6078 6079 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 6080 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 6081 intel_hdmi_init(dev, SDVOC); 6082 } 6083 if (SUPPORTS_INTEGRATED_DP(dev)) { 6084 DRM_DEBUG_KMS("probing DP_C\n"); 6085 intel_dp_init(dev, DP_C); 6086 } 6087 } 6088 6089 if (SUPPORTS_INTEGRATED_DP(dev) && 6090 (I915_READ(DP_D) & DP_DETECTED)) { 6091 DRM_DEBUG_KMS("probing DP_D\n"); 6092 intel_dp_init(dev, DP_D); 6093 } 6094 } else if (IS_GEN2(dev)) { 6095 #if 1 6096 KIB_NOTYET(); 6097 #else 6098 intel_dvo_init(dev); 6099 #endif 6100 } 6101 6102 if 
(SUPPORTS_TV(dev)) 6103 intel_tv_init(dev); 6104 6105 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 6106 encoder->base.possible_crtcs = encoder->crtc_mask; 6107 encoder->base.possible_clones = 6108 intel_encoder_clones(dev, encoder->clone_mask); 6109 } 6110 6111 /* disable all the possible outputs/crtcs before entering KMS mode */ 6112 drm_helper_disable_unused_functions(dev); 6113 6114 if (HAS_PCH_SPLIT(dev)) 6115 ironlake_init_pch_refclk(dev); 6116 } 6117 6118 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 6119 { 6120 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6121 6122 drm_framebuffer_cleanup(fb); 6123 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 6124 6125 drm_free(intel_fb, DRM_MEM_KMS); 6126 } 6127 6128 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 6129 struct drm_file *file, 6130 unsigned int *handle) 6131 { 6132 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6133 struct drm_i915_gem_object *obj = intel_fb->obj; 6134 6135 return drm_gem_handle_create(file, &obj->base, handle); 6136 } 6137 6138 static const struct drm_framebuffer_funcs intel_fb_funcs = { 6139 .destroy = intel_user_framebuffer_destroy, 6140 .create_handle = intel_user_framebuffer_create_handle, 6141 }; 6142 6143 int intel_framebuffer_init(struct drm_device *dev, 6144 struct intel_framebuffer *intel_fb, 6145 struct drm_mode_fb_cmd2 *mode_cmd, 6146 struct drm_i915_gem_object *obj) 6147 { 6148 int ret; 6149 6150 if (obj->tiling_mode == I915_TILING_Y) 6151 return -EINVAL; 6152 6153 if (mode_cmd->pitches[0] & 63) 6154 return -EINVAL; 6155 6156 switch (mode_cmd->pixel_format) { 6157 case DRM_FORMAT_RGB332: 6158 case DRM_FORMAT_RGB565: 6159 case DRM_FORMAT_XRGB8888: 6160 case DRM_FORMAT_XBGR8888: 6161 case DRM_FORMAT_ARGB8888: 6162 case DRM_FORMAT_XRGB2101010: 6163 case DRM_FORMAT_ARGB2101010: 6164 /* RGB formats are common across chipsets */ 6165 break; 6166 case 
DRM_FORMAT_YUYV: 6167 case DRM_FORMAT_UYVY: 6168 case DRM_FORMAT_YVYU: 6169 case DRM_FORMAT_VYUY: 6170 break; 6171 default: 6172 DRM_DEBUG_KMS("unsupported pixel format %u\n", 6173 mode_cmd->pixel_format); 6174 return -EINVAL; 6175 } 6176 6177 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 6178 if (ret) { 6179 DRM_ERROR("framebuffer init failed %d\n", ret); 6180 return ret; 6181 } 6182 6183 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 6184 intel_fb->obj = obj; 6185 return 0; 6186 } 6187 6188 static int 6189 intel_user_framebuffer_create(struct drm_device *dev, 6190 struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd, 6191 struct drm_framebuffer **res) 6192 { 6193 struct drm_i915_gem_object *obj; 6194 6195 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 6196 mode_cmd->handles[0])); 6197 if (&obj->base == NULL) 6198 return (-ENOENT); 6199 6200 return (intel_framebuffer_create(dev, mode_cmd, obj, res)); 6201 } 6202 6203 static const struct drm_mode_config_funcs intel_mode_funcs = { 6204 .fb_create = intel_user_framebuffer_create, 6205 .output_poll_changed = intel_fb_output_poll_changed, 6206 }; 6207 6208 /* Set up chip specific display functions */ 6209 static void intel_init_display(struct drm_device *dev) 6210 { 6211 struct drm_i915_private *dev_priv = dev->dev_private; 6212 6213 /* We always want a DPMS function */ 6214 if (HAS_PCH_SPLIT(dev)) { 6215 dev_priv->display.dpms = ironlake_crtc_dpms; 6216 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 6217 dev_priv->display.update_plane = ironlake_update_plane; 6218 } else { 6219 dev_priv->display.dpms = i9xx_crtc_dpms; 6220 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 6221 dev_priv->display.update_plane = i9xx_update_plane; 6222 } 6223 6224 if (I915_HAS_FBC(dev)) { 6225 if (HAS_PCH_SPLIT(dev)) { 6226 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 6227 dev_priv->display.enable_fbc = ironlake_enable_fbc; 6228 dev_priv->display.disable_fbc = 
ironlake_disable_fbc; 6229 } else if (IS_GM45(dev)) { 6230 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 6231 dev_priv->display.enable_fbc = g4x_enable_fbc; 6232 dev_priv->display.disable_fbc = g4x_disable_fbc; 6233 } else if (IS_CRESTLINE(dev)) { 6234 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 6235 dev_priv->display.enable_fbc = i8xx_enable_fbc; 6236 dev_priv->display.disable_fbc = i8xx_disable_fbc; 6237 } 6238 /* 855GM needs testing */ 6239 } 6240 6241 /* Returns the core display clock speed */ 6242 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 6243 dev_priv->display.get_display_clock_speed = 6244 i945_get_display_clock_speed; 6245 else if (IS_I915G(dev)) 6246 dev_priv->display.get_display_clock_speed = 6247 i915_get_display_clock_speed; 6248 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 6249 dev_priv->display.get_display_clock_speed = 6250 i9xx_misc_get_display_clock_speed; 6251 else if (IS_I915GM(dev)) 6252 dev_priv->display.get_display_clock_speed = 6253 i915gm_get_display_clock_speed; 6254 else if (IS_I865G(dev)) 6255 dev_priv->display.get_display_clock_speed = 6256 i865_get_display_clock_speed; 6257 else if (IS_I85X(dev)) 6258 dev_priv->display.get_display_clock_speed = 6259 i855_get_display_clock_speed; 6260 else /* 852, 830 */ 6261 dev_priv->display.get_display_clock_speed = 6262 i830_get_display_clock_speed; 6263 6264 /* For FIFO watermark updates */ 6265 if (HAS_PCH_SPLIT(dev)) { 6266 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; 6267 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; 6268 6269 /* IVB configs may use multi-threaded forcewake */ 6270 if (IS_IVYBRIDGE(dev)) { 6271 u32 ecobus; 6272 6273 /* A small trick here - if the bios hasn't configured MT forcewake, 6274 * and if the device is in RC6, then force_wake_mt_get will not wake 6275 * the device and the ECOBUS read will return zero. 
Which will be 6276 * (correctly) interpreted by the test below as MT forcewake being 6277 * disabled. 6278 */ 6279 DRM_LOCK(dev); 6280 __gen6_gt_force_wake_mt_get(dev_priv); 6281 ecobus = I915_READ_NOTRACE(ECOBUS); 6282 __gen6_gt_force_wake_mt_put(dev_priv); 6283 DRM_UNLOCK(dev); 6284 6285 if (ecobus & FORCEWAKE_MT_ENABLE) { 6286 DRM_DEBUG_KMS("Using MT version of forcewake\n"); 6287 dev_priv->display.force_wake_get = 6288 __gen6_gt_force_wake_mt_get; 6289 dev_priv->display.force_wake_put = 6290 __gen6_gt_force_wake_mt_put; 6291 } 6292 } 6293 6294 if (HAS_PCH_IBX(dev)) 6295 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 6296 else if (HAS_PCH_CPT(dev)) 6297 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; 6298 6299 if (IS_GEN5(dev)) { 6300 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 6301 dev_priv->display.update_wm = ironlake_update_wm; 6302 else { 6303 DRM_DEBUG_KMS("Failed to get proper latency. " 6304 "Disable CxSR\n"); 6305 dev_priv->display.update_wm = NULL; 6306 } 6307 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 6308 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 6309 dev_priv->display.write_eld = ironlake_write_eld; 6310 } else if (IS_GEN6(dev)) { 6311 if (SNB_READ_WM0_LATENCY()) { 6312 dev_priv->display.update_wm = sandybridge_update_wm; 6313 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6314 } else { 6315 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 6316 "Disable CxSR\n"); 6317 dev_priv->display.update_wm = NULL; 6318 } 6319 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 6320 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 6321 dev_priv->display.write_eld = ironlake_write_eld; 6322 } else if (IS_IVYBRIDGE(dev)) { 6323 /* FIXME: detect B0+ stepping and use auto training */ 6324 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 6325 if (SNB_READ_WM0_LATENCY()) { 6326 dev_priv->display.update_wm = sandybridge_update_wm; 6327 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6328 } else { 6329 DRM_DEBUG_KMS("Failed to read display plane latency. " 6330 "Disable CxSR\n"); 6331 dev_priv->display.update_wm = NULL; 6332 } 6333 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 6334 dev_priv->display.write_eld = ironlake_write_eld; 6335 } else 6336 dev_priv->display.update_wm = NULL; 6337 } else if (IS_PINEVIEW(dev)) { 6338 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 6339 dev_priv->is_ddr3, 6340 dev_priv->fsb_freq, 6341 dev_priv->mem_freq)) { 6342 DRM_INFO("failed to find known CxSR latency " 6343 "(found ddr%s fsb freq %d, mem freq %d), " 6344 "disabling CxSR\n", 6345 (dev_priv->is_ddr3 == 1) ? 
"3" : "2", 6346 dev_priv->fsb_freq, dev_priv->mem_freq); 6347 /* Disable CxSR and never update its watermark again */ 6348 pineview_disable_cxsr(dev); 6349 dev_priv->display.update_wm = NULL; 6350 } else 6351 dev_priv->display.update_wm = pineview_update_wm; 6352 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6353 } else if (IS_G4X(dev)) { 6354 dev_priv->display.write_eld = g4x_write_eld; 6355 dev_priv->display.update_wm = g4x_update_wm; 6356 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 6357 } else if (IS_GEN4(dev)) { 6358 dev_priv->display.update_wm = i965_update_wm; 6359 if (IS_CRESTLINE(dev)) 6360 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 6361 else if (IS_BROADWATER(dev)) 6362 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 6363 } else if (IS_GEN3(dev)) { 6364 dev_priv->display.update_wm = i9xx_update_wm; 6365 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 6366 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6367 } else if (IS_I865G(dev)) { 6368 dev_priv->display.update_wm = i830_update_wm; 6369 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6370 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6371 } else if (IS_I85X(dev)) { 6372 dev_priv->display.update_wm = i9xx_update_wm; 6373 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 6374 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6375 } else { 6376 dev_priv->display.update_wm = i830_update_wm; 6377 dev_priv->display.init_clock_gating = i830_init_clock_gating; 6378 if (IS_845G(dev)) 6379 dev_priv->display.get_fifo_size = i845_get_fifo_size; 6380 else 6381 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6382 } 6383 6384 /* Default just returns -ENODEV to indicate unsupported */ 6385 dev_priv->display.queue_flip = intel_default_queue_flip; 6386 6387 switch (INTEL_INFO(dev)->gen) { 6388 case 2: 6389 dev_priv->display.queue_flip = intel_gen2_queue_flip; 6390 break; 6391 6392 
case 3: 6393 dev_priv->display.queue_flip = intel_gen3_queue_flip; 6394 break; 6395 6396 case 4: 6397 case 5: 6398 dev_priv->display.queue_flip = intel_gen4_queue_flip; 6399 break; 6400 6401 case 6: 6402 dev_priv->display.queue_flip = intel_gen6_queue_flip; 6403 break; 6404 case 7: 6405 dev_priv->display.queue_flip = intel_gen7_queue_flip; 6406 break; 6407 } 6408 } 6409 6410 /* 6411 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 6412 * resume, or other times. This quirk makes sure that's the case for 6413 * affected systems. 6414 */ 6415 static void quirk_pipea_force(struct drm_device *dev) 6416 { 6417 struct drm_i915_private *dev_priv = dev->dev_private; 6418 6419 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 6420 DRM_DEBUG("applying pipe a force quirk\n"); 6421 } 6422 6423 /* 6424 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 6425 */ 6426 static void quirk_ssc_force_disable(struct drm_device *dev) 6427 { 6428 struct drm_i915_private *dev_priv = dev->dev_private; 6429 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 6430 } 6431 6432 struct intel_quirk { 6433 int device; 6434 int subsystem_vendor; 6435 int subsystem_device; 6436 void (*hook)(struct drm_device *dev); 6437 }; 6438 6439 #define PCI_ANY_ID (~0u) 6440 6441 struct intel_quirk intel_quirks[] = { 6442 /* HP Mini needs pipe A force quirk (LP: #322104) */ 6443 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, 6444 6445 /* Thinkpad R31 needs pipe A force quirk */ 6446 { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, 6447 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 6448 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 6449 6450 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ 6451 { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, 6452 /* ThinkPad X40 needs pipe A force quirk */ 6453 6454 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 6455 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 6456 6457 /* 855 & before need to leave pipe A & dpll A up 
*/ 6458 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 6459 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 6460 6461 /* Lenovo U160 cannot use SSC on LVDS */ 6462 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 6463 6464 /* Sony Vaio Y cannot use SSC on LVDS */ 6465 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 6466 }; 6467 6468 static void intel_init_quirks(struct drm_device *dev) 6469 { 6470 struct intel_quirk *q; 6471 device_t d; 6472 int i; 6473 6474 d = dev->dev; 6475 for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) { 6476 q = &intel_quirks[i]; 6477 if (pci_get_device(d) == q->device && 6478 (pci_get_subvendor(d) == q->subsystem_vendor || 6479 q->subsystem_vendor == PCI_ANY_ID) && 6480 (pci_get_subdevice(d) == q->subsystem_device || 6481 q->subsystem_device == PCI_ANY_ID)) 6482 q->hook(dev); 6483 } 6484 } 6485 6486 /* Disable the VGA plane that we never use */ 6487 static void i915_disable_vga(struct drm_device *dev) 6488 { 6489 struct drm_i915_private *dev_priv = dev->dev_private; 6490 u8 sr1; 6491 u32 vga_reg; 6492 6493 if (HAS_PCH_SPLIT(dev)) 6494 vga_reg = CPU_VGACNTRL; 6495 else 6496 vga_reg = VGACNTRL; 6497 6498 #if 0 6499 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 6500 #endif 6501 outb(VGA_SR_INDEX, 1); 6502 sr1 = inb(VGA_SR_DATA); 6503 outb(VGA_SR_DATA, sr1 | 1 << 5); 6504 #if 0 6505 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 6506 #endif 6507 DELAY(300); 6508 6509 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 6510 POSTING_READ(vga_reg); 6511 } 6512 6513 void intel_modeset_init(struct drm_device *dev) 6514 { 6515 struct drm_i915_private *dev_priv = dev->dev_private; 6516 int i, ret; 6517 6518 drm_mode_config_init(dev); 6519 6520 dev->mode_config.min_width = 0; 6521 dev->mode_config.min_height = 0; 6522 6523 dev->mode_config.preferred_depth = 24; 6524 dev->mode_config.prefer_shadow = 1; 6525 6526 dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *, 6527 &intel_mode_funcs); 6528 6529 intel_init_quirks(dev); 
6530 6531 intel_init_display(dev); 6532 6533 if (IS_GEN2(dev)) { 6534 dev->mode_config.max_width = 2048; 6535 dev->mode_config.max_height = 2048; 6536 } else if (IS_GEN3(dev)) { 6537 dev->mode_config.max_width = 4096; 6538 dev->mode_config.max_height = 4096; 6539 } else { 6540 dev->mode_config.max_width = 8192; 6541 dev->mode_config.max_height = 8192; 6542 } 6543 dev->mode_config.fb_base = dev->agp->base; 6544 6545 DRM_DEBUG_KMS("%d display pipe%s available.\n", 6546 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 6547 6548 for (i = 0; i < dev_priv->num_pipe; i++) { 6549 intel_crtc_init(dev, i); 6550 ret = intel_plane_init(dev, i); 6551 if (ret) 6552 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 6553 } 6554 6555 /* Just disable it once at startup */ 6556 i915_disable_vga(dev); 6557 intel_setup_outputs(dev); 6558 6559 intel_init_clock_gating(dev); 6560 6561 if (IS_IRONLAKE_M(dev)) { 6562 ironlake_enable_drps(dev); 6563 intel_init_emon(dev); 6564 } 6565 6566 if (IS_GEN6(dev)) { 6567 gen6_enable_rps(dev_priv); 6568 gen6_update_ring_freq(dev_priv); 6569 } 6570 6571 TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv); 6572 callout_init_mp(&dev_priv->idle_callout); 6573 } 6574 6575 void intel_modeset_gem_init(struct drm_device *dev) 6576 { 6577 if (IS_IRONLAKE_M(dev)) 6578 ironlake_enable_rc6(dev); 6579 6580 intel_setup_overlay(dev); 6581 } 6582 6583 void intel_modeset_cleanup(struct drm_device *dev) 6584 { 6585 struct drm_i915_private *dev_priv = dev->dev_private; 6586 struct drm_crtc *crtc; 6587 struct intel_crtc *intel_crtc; 6588 6589 drm_kms_helper_poll_fini(dev); 6590 DRM_LOCK(dev); 6591 6592 #if 0 6593 intel_unregister_dsm_handler(); 6594 #endif 6595 6596 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6597 /* Skip inactive CRTCs */ 6598 if (!crtc->fb) 6599 continue; 6600 6601 intel_crtc = to_intel_crtc(crtc); 6602 intel_increase_pllclock(crtc); 6603 } 6604 6605 intel_disable_fbc(dev); 6606 6607 if (IS_IRONLAKE_M(dev)) 6608 
ironlake_disable_drps(dev); 6609 if (IS_GEN6(dev)) 6610 gen6_disable_rps(dev); 6611 6612 if (IS_IRONLAKE_M(dev)) 6613 ironlake_disable_rc6(dev); 6614 6615 /* Disable the irq before mode object teardown, for the irq might 6616 * enqueue unpin/hotplug work. */ 6617 drm_irq_uninstall(dev); 6618 DRM_UNLOCK(dev); 6619 6620 if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL)) 6621 taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task); 6622 if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL)) 6623 taskqueue_drain(dev_priv->tq, &dev_priv->rps_task); 6624 6625 /* Shut off idle work before the crtcs get freed. */ 6626 if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL)) 6627 taskqueue_drain(dev_priv->tq, &dev_priv->idle_task); 6628 6629 drm_mode_config_cleanup(dev); 6630 } 6631 6632 /* 6633 * Return which encoder is currently attached for connector. 6634 */ 6635 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 6636 { 6637 return &intel_attached_encoder(connector)->base; 6638 } 6639 6640 void intel_connector_attach_encoder(struct intel_connector *connector, 6641 struct intel_encoder *encoder) 6642 { 6643 connector->encoder = encoder; 6644 drm_mode_connector_attach_encoder(&connector->base, 6645 &encoder->base); 6646 } 6647 6648 /* 6649 * set vga decode state - true == enable VGA decode 6650 */ 6651 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 6652 { 6653 struct drm_i915_private *dev_priv; 6654 device_t bridge_dev; 6655 u16 gmch_ctrl; 6656 6657 dev_priv = dev->dev_private; 6658 bridge_dev = intel_gtt_get_bridge_device(); 6659 gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2); 6660 if (state) 6661 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 6662 else 6663 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 6664 pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2); 6665 return (0); 6666 } 6667 6668 struct intel_display_error_state { 6669 struct intel_cursor_error_state { 6670 u32 control; 6671 u32 
position; 6672 u32 base; 6673 u32 size; 6674 } cursor[2]; 6675 6676 struct intel_pipe_error_state { 6677 u32 conf; 6678 u32 source; 6679 6680 u32 htotal; 6681 u32 hblank; 6682 u32 hsync; 6683 u32 vtotal; 6684 u32 vblank; 6685 u32 vsync; 6686 } pipe[2]; 6687 6688 struct intel_plane_error_state { 6689 u32 control; 6690 u32 stride; 6691 u32 size; 6692 u32 pos; 6693 u32 addr; 6694 u32 surface; 6695 u32 tile_offset; 6696 } plane[2]; 6697 }; 6698 6699 struct intel_display_error_state * 6700 intel_display_capture_error_state(struct drm_device *dev) 6701 { 6702 drm_i915_private_t *dev_priv = dev->dev_private; 6703 struct intel_display_error_state *error; 6704 int i; 6705 6706 error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT); 6707 if (error == NULL) 6708 return NULL; 6709 6710 for (i = 0; i < 2; i++) { 6711 error->cursor[i].control = I915_READ(CURCNTR(i)); 6712 error->cursor[i].position = I915_READ(CURPOS(i)); 6713 error->cursor[i].base = I915_READ(CURBASE(i)); 6714 6715 error->plane[i].control = I915_READ(DSPCNTR(i)); 6716 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 6717 error->plane[i].size = I915_READ(DSPSIZE(i)); 6718 error->plane[i].pos = I915_READ(DSPPOS(i)); 6719 error->plane[i].addr = I915_READ(DSPADDR(i)); 6720 if (INTEL_INFO(dev)->gen >= 4) { 6721 error->plane[i].surface = I915_READ(DSPSURF(i)); 6722 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 6723 } 6724 6725 error->pipe[i].conf = I915_READ(PIPECONF(i)); 6726 error->pipe[i].source = I915_READ(PIPESRC(i)); 6727 error->pipe[i].htotal = I915_READ(HTOTAL(i)); 6728 error->pipe[i].hblank = I915_READ(HBLANK(i)); 6729 error->pipe[i].hsync = I915_READ(HSYNC(i)); 6730 error->pipe[i].vtotal = I915_READ(VTOTAL(i)); 6731 error->pipe[i].vblank = I915_READ(VBLANK(i)); 6732 error->pipe[i].vsync = I915_READ(VSYNC(i)); 6733 } 6734 6735 return error; 6736 } 6737 6738 void 6739 intel_display_print_error_state(struct sbuf *m, 6740 struct drm_device *dev, 6741 struct intel_display_error_state *error) 6742 { 
6743 int i; 6744 6745 for (i = 0; i < 2; i++) { 6746 sbuf_printf(m, "Pipe [%d]:\n", i); 6747 sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf); 6748 sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source); 6749 sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 6750 sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 6751 sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 6752 sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 6753 sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 6754 sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 6755 6756 sbuf_printf(m, "Plane [%d]:\n", i); 6757 sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control); 6758 sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 6759 sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size); 6760 sbuf_printf(m, " POS: %08x\n", error->plane[i].pos); 6761 sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr); 6762 if (INTEL_INFO(dev)->gen >= 4) { 6763 sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface); 6764 sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 6765 } 6766 6767 sbuf_printf(m, "Cursor [%d]:\n", i); 6768 sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control); 6769 sbuf_printf(m, " POS: %08x\n", error->cursor[i].position); 6770 sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base); 6771 } 6772 } 6773