1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $ 27 */ 28 29 #include <ddb/ddb.h> 30 #include <sys/limits.h> 31 32 #include <drm/drmP.h> 33 #include <drm/drm_edid.h> 34 #include "intel_drv.h" 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include <drm/drm_dp_helper.h> 38 #include <drm/drm_crtc_helper.h> 39 40 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 41 42 bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 43 static void intel_increase_pllclock(struct drm_crtc *crtc); 44 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 45 46 typedef struct { 47 /* given values */ 48 int n; 49 int m1, m2; 50 int p1, p2; 51 /* derived values */ 52 int dot; 53 int vco; 54 int m; 55 int p; 56 } intel_clock_t; 57 58 typedef struct { 59 int min, max; 60 } intel_range_t; 61 62 typedef struct { 63 int dot_limit; 64 int p2_slow, p2_fast; 65 } intel_p2_t; 66 67 #define INTEL_P2_NUM 2 68 typedef struct intel_limit intel_limit_t; 69 struct intel_limit { 70 intel_range_t dot, vco, n, m, m1, m2, p, p1; 71 intel_p2_t p2; 72 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 73 int, int, intel_clock_t *, intel_clock_t *); 74 }; 75 76 /* FDI */ 77 #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 78 79 static bool 80 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 81 int target, int refclk, intel_clock_t *match_clock, 82 intel_clock_t *best_clock); 83 static bool 84 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 85 int target, int refclk, intel_clock_t *match_clock, 86 intel_clock_t *best_clock); 87 88 static bool 89 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 90 int target, int refclk, intel_clock_t *match_clock, 91 intel_clock_t *best_clock); 92 static bool 93 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, 94 int 
target, int refclk, intel_clock_t *match_clock, 95 intel_clock_t *best_clock); 96 97 static inline u32 /* units of 100MHz */ 98 intel_fdi_link_freq(struct drm_device *dev) 99 { 100 if (IS_GEN5(dev)) { 101 struct drm_i915_private *dev_priv = dev->dev_private; 102 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 103 } else 104 return 27; 105 } 106 107 static const intel_limit_t intel_limits_i8xx_dvo = { 108 .dot = { .min = 25000, .max = 350000 }, 109 .vco = { .min = 930000, .max = 1400000 }, 110 .n = { .min = 3, .max = 16 }, 111 .m = { .min = 96, .max = 140 }, 112 .m1 = { .min = 18, .max = 26 }, 113 .m2 = { .min = 6, .max = 16 }, 114 .p = { .min = 4, .max = 128 }, 115 .p1 = { .min = 2, .max = 33 }, 116 .p2 = { .dot_limit = 165000, 117 .p2_slow = 4, .p2_fast = 2 }, 118 .find_pll = intel_find_best_PLL, 119 }; 120 121 static const intel_limit_t intel_limits_i8xx_lvds = { 122 .dot = { .min = 25000, .max = 350000 }, 123 .vco = { .min = 930000, .max = 1400000 }, 124 .n = { .min = 3, .max = 16 }, 125 .m = { .min = 96, .max = 140 }, 126 .m1 = { .min = 18, .max = 26 }, 127 .m2 = { .min = 6, .max = 16 }, 128 .p = { .min = 4, .max = 128 }, 129 .p1 = { .min = 1, .max = 6 }, 130 .p2 = { .dot_limit = 165000, 131 .p2_slow = 14, .p2_fast = 7 }, 132 .find_pll = intel_find_best_PLL, 133 }; 134 135 static const intel_limit_t intel_limits_i9xx_sdvo = { 136 .dot = { .min = 20000, .max = 400000 }, 137 .vco = { .min = 1400000, .max = 2800000 }, 138 .n = { .min = 1, .max = 6 }, 139 .m = { .min = 70, .max = 120 }, 140 .m1 = { .min = 10, .max = 22 }, 141 .m2 = { .min = 5, .max = 9 }, 142 .p = { .min = 5, .max = 80 }, 143 .p1 = { .min = 1, .max = 8 }, 144 .p2 = { .dot_limit = 200000, 145 .p2_slow = 10, .p2_fast = 5 }, 146 .find_pll = intel_find_best_PLL, 147 }; 148 149 static const intel_limit_t intel_limits_i9xx_lvds = { 150 .dot = { .min = 20000, .max = 400000 }, 151 .vco = { .min = 1400000, .max = 2800000 }, 152 .n = { .min = 1, .max = 6 }, 153 .m = { .min = 70, .max = 120 }, 
154 .m1 = { .min = 10, .max = 22 }, 155 .m2 = { .min = 5, .max = 9 }, 156 .p = { .min = 7, .max = 98 }, 157 .p1 = { .min = 1, .max = 8 }, 158 .p2 = { .dot_limit = 112000, 159 .p2_slow = 14, .p2_fast = 7 }, 160 .find_pll = intel_find_best_PLL, 161 }; 162 163 164 static const intel_limit_t intel_limits_g4x_sdvo = { 165 .dot = { .min = 25000, .max = 270000 }, 166 .vco = { .min = 1750000, .max = 3500000}, 167 .n = { .min = 1, .max = 4 }, 168 .m = { .min = 104, .max = 138 }, 169 .m1 = { .min = 17, .max = 23 }, 170 .m2 = { .min = 5, .max = 11 }, 171 .p = { .min = 10, .max = 30 }, 172 .p1 = { .min = 1, .max = 3}, 173 .p2 = { .dot_limit = 270000, 174 .p2_slow = 10, 175 .p2_fast = 10 176 }, 177 .find_pll = intel_g4x_find_best_PLL, 178 }; 179 180 static const intel_limit_t intel_limits_g4x_hdmi = { 181 .dot = { .min = 22000, .max = 400000 }, 182 .vco = { .min = 1750000, .max = 3500000}, 183 .n = { .min = 1, .max = 4 }, 184 .m = { .min = 104, .max = 138 }, 185 .m1 = { .min = 16, .max = 23 }, 186 .m2 = { .min = 5, .max = 11 }, 187 .p = { .min = 5, .max = 80 }, 188 .p1 = { .min = 1, .max = 8}, 189 .p2 = { .dot_limit = 165000, 190 .p2_slow = 10, .p2_fast = 5 }, 191 .find_pll = intel_g4x_find_best_PLL, 192 }; 193 194 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 195 .dot = { .min = 20000, .max = 115000 }, 196 .vco = { .min = 1750000, .max = 3500000 }, 197 .n = { .min = 1, .max = 3 }, 198 .m = { .min = 104, .max = 138 }, 199 .m1 = { .min = 17, .max = 23 }, 200 .m2 = { .min = 5, .max = 11 }, 201 .p = { .min = 28, .max = 112 }, 202 .p1 = { .min = 2, .max = 8 }, 203 .p2 = { .dot_limit = 0, 204 .p2_slow = 14, .p2_fast = 14 205 }, 206 .find_pll = intel_g4x_find_best_PLL, 207 }; 208 209 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 210 .dot = { .min = 80000, .max = 224000 }, 211 .vco = { .min = 1750000, .max = 3500000 }, 212 .n = { .min = 1, .max = 3 }, 213 .m = { .min = 104, .max = 138 }, 214 .m1 = { .min = 17, .max = 23 }, 215 .m2 = { .min = 
5, .max = 11 }, 216 .p = { .min = 14, .max = 42 }, 217 .p1 = { .min = 2, .max = 6 }, 218 .p2 = { .dot_limit = 0, 219 .p2_slow = 7, .p2_fast = 7 220 }, 221 .find_pll = intel_g4x_find_best_PLL, 222 }; 223 224 static const intel_limit_t intel_limits_g4x_display_port = { 225 .dot = { .min = 161670, .max = 227000 }, 226 .vco = { .min = 1750000, .max = 3500000}, 227 .n = { .min = 1, .max = 2 }, 228 .m = { .min = 97, .max = 108 }, 229 .m1 = { .min = 0x10, .max = 0x12 }, 230 .m2 = { .min = 0x05, .max = 0x06 }, 231 .p = { .min = 10, .max = 20 }, 232 .p1 = { .min = 1, .max = 2}, 233 .p2 = { .dot_limit = 0, 234 .p2_slow = 10, .p2_fast = 10 }, 235 .find_pll = intel_find_pll_g4x_dp, 236 }; 237 238 static const intel_limit_t intel_limits_pineview_sdvo = { 239 .dot = { .min = 20000, .max = 400000}, 240 .vco = { .min = 1700000, .max = 3500000 }, 241 /* Pineview's Ncounter is a ring counter */ 242 .n = { .min = 3, .max = 6 }, 243 .m = { .min = 2, .max = 256 }, 244 /* Pineview only has one combined m divider, which we treat as m2. */ 245 .m1 = { .min = 0, .max = 0 }, 246 .m2 = { .min = 0, .max = 254 }, 247 .p = { .min = 5, .max = 80 }, 248 .p1 = { .min = 1, .max = 8 }, 249 .p2 = { .dot_limit = 200000, 250 .p2_slow = 10, .p2_fast = 5 }, 251 .find_pll = intel_find_best_PLL, 252 }; 253 254 static const intel_limit_t intel_limits_pineview_lvds = { 255 .dot = { .min = 20000, .max = 400000 }, 256 .vco = { .min = 1700000, .max = 3500000 }, 257 .n = { .min = 3, .max = 6 }, 258 .m = { .min = 2, .max = 256 }, 259 .m1 = { .min = 0, .max = 0 }, 260 .m2 = { .min = 0, .max = 254 }, 261 .p = { .min = 7, .max = 112 }, 262 .p1 = { .min = 1, .max = 8 }, 263 .p2 = { .dot_limit = 112000, 264 .p2_slow = 14, .p2_fast = 14 }, 265 .find_pll = intel_find_best_PLL, 266 }; 267 268 /* Ironlake / Sandybridge 269 * 270 * We calculate clock using (register_value + 2) for N/M1/M2, so here 271 * the range value for them is (actual_value - 2). 
272 */ 273 static const intel_limit_t intel_limits_ironlake_dac = { 274 .dot = { .min = 25000, .max = 350000 }, 275 .vco = { .min = 1760000, .max = 3510000 }, 276 .n = { .min = 1, .max = 5 }, 277 .m = { .min = 79, .max = 127 }, 278 .m1 = { .min = 12, .max = 22 }, 279 .m2 = { .min = 5, .max = 9 }, 280 .p = { .min = 5, .max = 80 }, 281 .p1 = { .min = 1, .max = 8 }, 282 .p2 = { .dot_limit = 225000, 283 .p2_slow = 10, .p2_fast = 5 }, 284 .find_pll = intel_g4x_find_best_PLL, 285 }; 286 287 static const intel_limit_t intel_limits_ironlake_single_lvds = { 288 .dot = { .min = 25000, .max = 350000 }, 289 .vco = { .min = 1760000, .max = 3510000 }, 290 .n = { .min = 1, .max = 3 }, 291 .m = { .min = 79, .max = 118 }, 292 .m1 = { .min = 12, .max = 22 }, 293 .m2 = { .min = 5, .max = 9 }, 294 .p = { .min = 28, .max = 112 }, 295 .p1 = { .min = 2, .max = 8 }, 296 .p2 = { .dot_limit = 225000, 297 .p2_slow = 14, .p2_fast = 14 }, 298 .find_pll = intel_g4x_find_best_PLL, 299 }; 300 301 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 302 .dot = { .min = 25000, .max = 350000 }, 303 .vco = { .min = 1760000, .max = 3510000 }, 304 .n = { .min = 1, .max = 3 }, 305 .m = { .min = 79, .max = 127 }, 306 .m1 = { .min = 12, .max = 22 }, 307 .m2 = { .min = 5, .max = 9 }, 308 .p = { .min = 14, .max = 56 }, 309 .p1 = { .min = 2, .max = 8 }, 310 .p2 = { .dot_limit = 225000, 311 .p2_slow = 7, .p2_fast = 7 }, 312 .find_pll = intel_g4x_find_best_PLL, 313 }; 314 315 /* LVDS 100mhz refclk limits. 
*/ 316 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 317 .dot = { .min = 25000, .max = 350000 }, 318 .vco = { .min = 1760000, .max = 3510000 }, 319 .n = { .min = 1, .max = 2 }, 320 .m = { .min = 79, .max = 126 }, 321 .m1 = { .min = 12, .max = 22 }, 322 .m2 = { .min = 5, .max = 9 }, 323 .p = { .min = 28, .max = 112 }, 324 .p1 = { .min = 2, .max = 8 }, 325 .p2 = { .dot_limit = 225000, 326 .p2_slow = 14, .p2_fast = 14 }, 327 .find_pll = intel_g4x_find_best_PLL, 328 }; 329 330 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 331 .dot = { .min = 25000, .max = 350000 }, 332 .vco = { .min = 1760000, .max = 3510000 }, 333 .n = { .min = 1, .max = 3 }, 334 .m = { .min = 79, .max = 126 }, 335 .m1 = { .min = 12, .max = 22 }, 336 .m2 = { .min = 5, .max = 9 }, 337 .p = { .min = 14, .max = 42 }, 338 .p1 = { .min = 2, .max = 6 }, 339 .p2 = { .dot_limit = 225000, 340 .p2_slow = 7, .p2_fast = 7 }, 341 .find_pll = intel_g4x_find_best_PLL, 342 }; 343 344 static const intel_limit_t intel_limits_ironlake_display_port = { 345 .dot = { .min = 25000, .max = 350000 }, 346 .vco = { .min = 1760000, .max = 3510000}, 347 .n = { .min = 1, .max = 2 }, 348 .m = { .min = 81, .max = 90 }, 349 .m1 = { .min = 12, .max = 22 }, 350 .m2 = { .min = 5, .max = 9 }, 351 .p = { .min = 10, .max = 20 }, 352 .p1 = { .min = 1, .max = 2}, 353 .p2 = { .dot_limit = 0, 354 .p2_slow = 10, .p2_fast = 10 }, 355 .find_pll = intel_find_pll_ironlake_dp, 356 }; 357 358 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 359 int refclk) 360 { 361 struct drm_device *dev = crtc->dev; 362 struct drm_i915_private *dev_priv = dev->dev_private; 363 const intel_limit_t *limit; 364 365 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 366 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 367 LVDS_CLKB_POWER_UP) { 368 /* LVDS dual channel */ 369 if (refclk == 100000) 370 limit = &intel_limits_ironlake_dual_lvds_100m; 371 else 372 limit = 
&intel_limits_ironlake_dual_lvds; 373 } else { 374 if (refclk == 100000) 375 limit = &intel_limits_ironlake_single_lvds_100m; 376 else 377 limit = &intel_limits_ironlake_single_lvds; 378 } 379 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 380 HAS_eDP) 381 limit = &intel_limits_ironlake_display_port; 382 else 383 limit = &intel_limits_ironlake_dac; 384 385 return limit; 386 } 387 388 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 389 { 390 struct drm_device *dev = crtc->dev; 391 struct drm_i915_private *dev_priv = dev->dev_private; 392 const intel_limit_t *limit; 393 394 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 395 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 396 LVDS_CLKB_POWER_UP) 397 /* LVDS with dual channel */ 398 limit = &intel_limits_g4x_dual_channel_lvds; 399 else 400 /* LVDS with dual channel */ 401 limit = &intel_limits_g4x_single_channel_lvds; 402 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 403 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 404 limit = &intel_limits_g4x_hdmi; 405 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 406 limit = &intel_limits_g4x_sdvo; 407 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 408 limit = &intel_limits_g4x_display_port; 409 } else /* The option is for other outputs */ 410 limit = &intel_limits_i9xx_sdvo; 411 412 return limit; 413 } 414 415 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 416 { 417 struct drm_device *dev = crtc->dev; 418 const intel_limit_t *limit; 419 420 if (HAS_PCH_SPLIT(dev)) 421 limit = intel_ironlake_limit(crtc, refclk); 422 else if (IS_G4X(dev)) { 423 limit = intel_g4x_limit(crtc); 424 } else if (IS_PINEVIEW(dev)) { 425 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 426 limit = &intel_limits_pineview_lvds; 427 else 428 limit = &intel_limits_pineview_sdvo; 429 } else if (!IS_GEN2(dev)) { 430 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 431 limit = 
&intel_limits_i9xx_lvds; 432 else 433 limit = &intel_limits_i9xx_sdvo; 434 } else { 435 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 436 limit = &intel_limits_i8xx_lvds; 437 else 438 limit = &intel_limits_i8xx_dvo; 439 } 440 return limit; 441 } 442 443 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 444 static void pineview_clock(int refclk, intel_clock_t *clock) 445 { 446 clock->m = clock->m2 + 2; 447 clock->p = clock->p1 * clock->p2; 448 clock->vco = refclk * clock->m / clock->n; 449 clock->dot = clock->vco / clock->p; 450 } 451 452 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 453 { 454 if (IS_PINEVIEW(dev)) { 455 pineview_clock(refclk, clock); 456 return; 457 } 458 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 459 clock->p = clock->p1 * clock->p2; 460 clock->vco = refclk * clock->m / (clock->n + 2); 461 clock->dot = clock->vco / clock->p; 462 } 463 464 /** 465 * Returns whether any output on the specified pipe is of the specified type 466 */ 467 bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 468 { 469 struct drm_device *dev = crtc->dev; 470 struct drm_mode_config *mode_config = &dev->mode_config; 471 struct intel_encoder *encoder; 472 473 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 474 if (encoder->base.crtc == crtc && encoder->type == type) 475 return true; 476 477 return false; 478 } 479 480 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 481 /** 482 * Returns whether the given set of divisors are valid for a given refclk with 483 * the given connectors. 
484 */ 485 486 static bool intel_PLL_is_valid(struct drm_device *dev, 487 const intel_limit_t *limit, 488 const intel_clock_t *clock) 489 { 490 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 491 INTELPllInvalid("p1 out of range\n"); 492 if (clock->p < limit->p.min || limit->p.max < clock->p) 493 INTELPllInvalid("p out of range\n"); 494 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 495 INTELPllInvalid("m2 out of range\n"); 496 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 497 INTELPllInvalid("m1 out of range\n"); 498 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) 499 INTELPllInvalid("m1 <= m2\n"); 500 if (clock->m < limit->m.min || limit->m.max < clock->m) 501 INTELPllInvalid("m out of range\n"); 502 if (clock->n < limit->n.min || limit->n.max < clock->n) 503 INTELPllInvalid("n out of range\n"); 504 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 505 INTELPllInvalid("vco out of range\n"); 506 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 507 * connector, etc., rather than just a single range. 508 */ 509 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 510 INTELPllInvalid("dot out of range\n"); 511 512 return true; 513 } 514 515 static bool 516 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 517 int target, int refclk, intel_clock_t *match_clock, 518 intel_clock_t *best_clock) 519 520 { 521 struct drm_device *dev = crtc->dev; 522 struct drm_i915_private *dev_priv = dev->dev_private; 523 intel_clock_t clock; 524 int err = target; 525 526 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 527 (I915_READ(LVDS)) != 0) { 528 /* 529 * For LVDS, if the panel is on, just rely on its current 530 * settings for dual-channel. We haven't figured out how to 531 * reliably set up different single/dual channel state, if we 532 * even can. 
533 */ 534 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 535 LVDS_CLKB_POWER_UP) 536 clock.p2 = limit->p2.p2_fast; 537 else 538 clock.p2 = limit->p2.p2_slow; 539 } else { 540 if (target < limit->p2.dot_limit) 541 clock.p2 = limit->p2.p2_slow; 542 else 543 clock.p2 = limit->p2.p2_fast; 544 } 545 546 memset(best_clock, 0, sizeof(*best_clock)); 547 548 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 549 clock.m1++) { 550 for (clock.m2 = limit->m2.min; 551 clock.m2 <= limit->m2.max; clock.m2++) { 552 /* m1 is always 0 in Pineview */ 553 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) 554 break; 555 for (clock.n = limit->n.min; 556 clock.n <= limit->n.max; clock.n++) { 557 for (clock.p1 = limit->p1.min; 558 clock.p1 <= limit->p1.max; clock.p1++) { 559 int this_err; 560 561 intel_clock(dev, refclk, &clock); 562 if (!intel_PLL_is_valid(dev, limit, 563 &clock)) 564 continue; 565 if (match_clock && 566 clock.p != match_clock->p) 567 continue; 568 569 this_err = abs(clock.dot - target); 570 if (this_err < err) { 571 *best_clock = clock; 572 err = this_err; 573 } 574 } 575 } 576 } 577 } 578 579 return (err != target); 580 } 581 582 static bool 583 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 584 int target, int refclk, intel_clock_t *match_clock, 585 intel_clock_t *best_clock) 586 { 587 struct drm_device *dev = crtc->dev; 588 struct drm_i915_private *dev_priv = dev->dev_private; 589 intel_clock_t clock; 590 int max_n; 591 bool found; 592 /* approximately equals target * 0.00585 */ 593 int err_most = (target >> 8) + (target >> 9); 594 found = false; 595 596 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 597 int lvds_reg; 598 599 if (HAS_PCH_SPLIT(dev)) 600 lvds_reg = PCH_LVDS; 601 else 602 lvds_reg = LVDS; 603 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 604 LVDS_CLKB_POWER_UP) 605 clock.p2 = limit->p2.p2_fast; 606 else 607 clock.p2 = limit->p2.p2_slow; 608 } else { 609 if (target < limit->p2.dot_limit) 610 clock.p2 = 
limit->p2.p2_slow; 611 else 612 clock.p2 = limit->p2.p2_fast; 613 } 614 615 memset(best_clock, 0, sizeof(*best_clock)); 616 max_n = limit->n.max; 617 /* based on hardware requirement, prefer smaller n to precision */ 618 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 619 /* based on hardware requirement, prefere larger m1,m2 */ 620 for (clock.m1 = limit->m1.max; 621 clock.m1 >= limit->m1.min; clock.m1--) { 622 for (clock.m2 = limit->m2.max; 623 clock.m2 >= limit->m2.min; clock.m2--) { 624 for (clock.p1 = limit->p1.max; 625 clock.p1 >= limit->p1.min; clock.p1--) { 626 int this_err; 627 628 intel_clock(dev, refclk, &clock); 629 if (!intel_PLL_is_valid(dev, limit, 630 &clock)) 631 continue; 632 if (match_clock && 633 clock.p != match_clock->p) 634 continue; 635 636 this_err = abs(clock.dot - target); 637 if (this_err < err_most) { 638 *best_clock = clock; 639 err_most = this_err; 640 max_n = clock.n; 641 found = true; 642 } 643 } 644 } 645 } 646 } 647 return found; 648 } 649 650 static bool 651 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 652 int target, int refclk, intel_clock_t *match_clock, 653 intel_clock_t *best_clock) 654 { 655 struct drm_device *dev = crtc->dev; 656 intel_clock_t clock; 657 658 if (target < 200000) { 659 clock.n = 1; 660 clock.p1 = 2; 661 clock.p2 = 10; 662 clock.m1 = 12; 663 clock.m2 = 9; 664 } else { 665 clock.n = 2; 666 clock.p1 = 1; 667 clock.p2 = 10; 668 clock.m1 = 14; 669 clock.m2 = 8; 670 } 671 intel_clock(dev, refclk, &clock); 672 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 673 return true; 674 } 675 676 /* DisplayPort has only two frequencies, 162MHz and 270MHz */ 677 static bool 678 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 679 int target, int refclk, intel_clock_t *match_clock, 680 intel_clock_t *best_clock) 681 { 682 intel_clock_t clock; 683 if (target < 200000) { 684 clock.p1 = 2; 685 clock.p2 = 10; 686 clock.n = 2; 687 clock.m1 = 23; 688 clock.m2 = 
8; 689 } else { 690 clock.p1 = 1; 691 clock.p2 = 10; 692 clock.n = 1; 693 clock.m1 = 14; 694 clock.m2 = 2; 695 } 696 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); 697 clock.p = (clock.p1 * clock.p2); 698 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; 699 clock.vco = 0; 700 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 701 return true; 702 } 703 704 /** 705 * intel_wait_for_vblank - wait for vblank on a given pipe 706 * @dev: drm device 707 * @pipe: pipe to wait for 708 * 709 * Wait for vblank to occur on a given pipe. Needed for various bits of 710 * mode setting code. 711 */ 712 void intel_wait_for_vblank(struct drm_device *dev, int pipe) 713 { 714 struct drm_i915_private *dev_priv = dev->dev_private; 715 int pipestat_reg = PIPESTAT(pipe); 716 717 /* Clear existing vblank status. Note this will clear any other 718 * sticky status fields as well. 719 * 720 * This races with i915_driver_irq_handler() with the result 721 * that either function could miss a vblank event. Here it is not 722 * fatal, as we will either wait upon the next vblank interrupt or 723 * timeout. Generally speaking intel_wait_for_vblank() is only 724 * called during modeset at which time the GPU should be idle and 725 * should *not* be performing page flips and thus not waiting on 726 * vblanks... 727 * Currently, the result of us stealing a vblank from the irq 728 * handler is that a single frame will be skipped during swapbuffers. 
729 */ 730 I915_WRITE(pipestat_reg, 731 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); 732 733 /* Wait for vblank interrupt bit to set */ 734 if (_intel_wait_for(dev, 735 I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS, 736 50, 1, "915vbl")) 737 DRM_DEBUG_KMS("vblank wait timed out\n"); 738 } 739 740 /* 741 * intel_wait_for_pipe_off - wait for pipe to turn off 742 * @dev: drm device 743 * @pipe: pipe to wait for 744 * 745 * After disabling a pipe, we can't wait for vblank in the usual way, 746 * spinning on the vblank interrupt status bit, since we won't actually 747 * see an interrupt when the pipe is disabled. 748 * 749 * On Gen4 and above: 750 * wait for the pipe register state bit to turn off 751 * 752 * Otherwise: 753 * wait for the display line value to settle (it usually 754 * ends up stopping at the start of the next frame). 755 * 756 */ 757 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 758 { 759 struct drm_i915_private *dev_priv = dev->dev_private; 760 761 if (INTEL_INFO(dev)->gen >= 4) { 762 int reg = PIPECONF(pipe); 763 764 /* Wait for the Pipe State to go off */ 765 if (_intel_wait_for(dev, 766 (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100, 767 1, "915pip")) 768 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 769 } else { 770 u32 last_line, line_mask; 771 int reg = PIPEDSL(pipe); 772 unsigned long timeout = jiffies + msecs_to_jiffies(100); 773 774 if (IS_GEN2(dev)) 775 line_mask = DSL_LINEMASK_GEN2; 776 else 777 line_mask = DSL_LINEMASK_GEN3; 778 779 /* Wait for the display line to settle */ 780 do { 781 last_line = I915_READ(reg) & line_mask; 782 DELAY(5000); 783 } while (((I915_READ(reg) & line_mask) != last_line) && 784 time_after(timeout, jiffies)); 785 if (time_after(jiffies, timeout)) 786 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 787 } 788 } 789 790 static const char *state_string(bool enabled) 791 { 792 return enabled ? 
"on" : "off"; 793 } 794 795 /* Only for pre-ILK configs */ 796 static void assert_pll(struct drm_i915_private *dev_priv, 797 enum i915_pipe pipe, bool state) 798 { 799 int reg; 800 u32 val; 801 bool cur_state; 802 803 reg = DPLL(pipe); 804 val = I915_READ(reg); 805 cur_state = !!(val & DPLL_VCO_ENABLE); 806 if (cur_state != state) 807 kprintf("PLL state assertion failure (expected %s, current %s)\n", 808 state_string(state), state_string(cur_state)); 809 } 810 #define assert_pll_enabled(d, p) assert_pll(d, p, true) 811 #define assert_pll_disabled(d, p) assert_pll(d, p, false) 812 813 /* For ILK+ */ 814 static void assert_pch_pll(struct drm_i915_private *dev_priv, 815 enum i915_pipe pipe, bool state) 816 { 817 int reg; 818 u32 val; 819 bool cur_state; 820 821 if (HAS_PCH_CPT(dev_priv->dev)) { 822 u32 pch_dpll; 823 824 pch_dpll = I915_READ(PCH_DPLL_SEL); 825 826 /* Make sure the selected PLL is enabled to the transcoder */ 827 KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0, 828 ("transcoder %d PLL not enabled\n", pipe)); 829 830 /* Convert the transcoder pipe number to a pll pipe number */ 831 pipe = (pch_dpll >> (4 * pipe)) & 1; 832 } 833 834 reg = _PCH_DPLL(pipe); 835 val = I915_READ(reg); 836 cur_state = !!(val & DPLL_VCO_ENABLE); 837 if (cur_state != state) 838 kprintf("PCH PLL state assertion failure (expected %s, current %s)\n", 839 state_string(state), state_string(cur_state)); 840 } 841 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) 842 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) 843 844 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 845 enum i915_pipe pipe, bool state) 846 { 847 int reg; 848 u32 val; 849 bool cur_state; 850 851 reg = FDI_TX_CTL(pipe); 852 val = I915_READ(reg); 853 cur_state = !!(val & FDI_TX_ENABLE); 854 if (cur_state != state) 855 kprintf("FDI TX state assertion failure (expected %s, current %s)\n", 856 state_string(state), state_string(cur_state)); 857 } 858 #define assert_fdi_tx_enabled(d, 
p) assert_fdi_tx(d, p, true) 859 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 860 861 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 862 enum i915_pipe pipe, bool state) 863 { 864 int reg; 865 u32 val; 866 bool cur_state; 867 868 reg = FDI_RX_CTL(pipe); 869 val = I915_READ(reg); 870 cur_state = !!(val & FDI_RX_ENABLE); 871 if (cur_state != state) 872 kprintf("FDI RX state assertion failure (expected %s, current %s)\n", 873 state_string(state), state_string(cur_state)); 874 } 875 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 876 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 877 878 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 879 enum i915_pipe pipe) 880 { 881 int reg; 882 u32 val; 883 884 /* ILK FDI PLL is always enabled */ 885 if (dev_priv->info->gen == 5) 886 return; 887 888 reg = FDI_TX_CTL(pipe); 889 val = I915_READ(reg); 890 if (!(val & FDI_TX_PLL_ENABLE)) 891 kprintf("FDI TX PLL assertion failure, should be active but is disabled\n"); 892 } 893 894 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, 895 enum i915_pipe pipe) 896 { 897 int reg; 898 u32 val; 899 900 reg = FDI_RX_CTL(pipe); 901 val = I915_READ(reg); 902 if (!(val & FDI_RX_PLL_ENABLE)) 903 kprintf("FDI RX PLL assertion failure, should be active but is disabled\n"); 904 } 905 906 static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 907 enum i915_pipe pipe) 908 { 909 int pp_reg, lvds_reg; 910 u32 val; 911 enum i915_pipe panel_pipe = PIPE_A; 912 bool locked = true; 913 914 if (HAS_PCH_SPLIT(dev_priv->dev)) { 915 pp_reg = PCH_PP_CONTROL; 916 lvds_reg = PCH_LVDS; 917 } else { 918 pp_reg = PP_CONTROL; 919 lvds_reg = LVDS; 920 } 921 922 val = I915_READ(pp_reg); 923 if (!(val & PANEL_POWER_ON) || 924 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 925 locked = false; 926 927 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) 928 panel_pipe = PIPE_B; 929 930 if (panel_pipe == pipe 
&& locked) 931 kprintf("panel assertion failure, pipe %c regs locked\n", 932 pipe_name(pipe)); 933 } 934 935 void assert_pipe(struct drm_i915_private *dev_priv, 936 enum i915_pipe pipe, bool state) 937 { 938 int reg; 939 u32 val; 940 bool cur_state; 941 942 /* if we need the pipe A quirk it must be always on */ 943 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 944 state = true; 945 946 reg = PIPECONF(pipe); 947 val = I915_READ(reg); 948 cur_state = !!(val & PIPECONF_ENABLE); 949 if (cur_state != state) 950 kprintf("pipe %c assertion failure (expected %s, current %s)\n", 951 pipe_name(pipe), state_string(state), state_string(cur_state)); 952 } 953 954 static void assert_plane(struct drm_i915_private *dev_priv, 955 enum plane plane, bool state) 956 { 957 int reg; 958 u32 val; 959 bool cur_state; 960 961 reg = DSPCNTR(plane); 962 val = I915_READ(reg); 963 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 964 if (cur_state != state) 965 kprintf("plane %c assertion failure, (expected %s, current %s)\n", 966 plane_name(plane), state_string(state), state_string(cur_state)); 967 } 968 969 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 970 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 971 972 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 973 enum i915_pipe pipe) 974 { 975 int reg, i; 976 u32 val; 977 int cur_pipe; 978 979 /* Planes are fixed to pipes on ILK+ */ 980 if (HAS_PCH_SPLIT(dev_priv->dev)) { 981 reg = DSPCNTR(pipe); 982 val = I915_READ(reg); 983 if ((val & DISPLAY_PLANE_ENABLE) != 0) 984 kprintf("plane %c assertion failure, should be disabled but not\n", 985 plane_name(pipe)); 986 return; 987 } 988 989 /* Need to check both planes against the pipe */ 990 for (i = 0; i < 2; i++) { 991 reg = DSPCNTR(i); 992 val = I915_READ(reg); 993 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 994 DISPPLANE_SEL_PIPE_SHIFT; 995 if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe) 996 kprintf("plane %c assertion failure, 
should be off on pipe %c but is still active\n", 997 plane_name(i), pipe_name(pipe)); 998 } 999 } 1000 1001 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1002 { 1003 u32 val; 1004 bool enabled; 1005 1006 val = I915_READ(PCH_DREF_CONTROL); 1007 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1008 DREF_SUPERSPREAD_SOURCE_MASK)); 1009 if (!enabled) 1010 kprintf("PCH refclk assertion failure, should be active but is disabled\n"); 1011 } 1012 1013 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, 1014 enum i915_pipe pipe) 1015 { 1016 int reg; 1017 u32 val; 1018 bool enabled; 1019 1020 reg = TRANSCONF(pipe); 1021 val = I915_READ(reg); 1022 enabled = !!(val & TRANS_ENABLE); 1023 if (enabled) 1024 kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n", 1025 pipe_name(pipe)); 1026 } 1027 1028 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1029 enum i915_pipe pipe, u32 val) 1030 { 1031 if ((val & PORT_ENABLE) == 0) 1032 return false; 1033 1034 if (HAS_PCH_CPT(dev_priv->dev)) { 1035 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1036 return false; 1037 } else { 1038 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) 1039 return false; 1040 } 1041 return true; 1042 } 1043 1044 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1045 enum i915_pipe pipe, u32 val) 1046 { 1047 if ((val & LVDS_PORT_EN) == 0) 1048 return false; 1049 1050 if (HAS_PCH_CPT(dev_priv->dev)) { 1051 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1052 return false; 1053 } else { 1054 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1055 return false; 1056 } 1057 return true; 1058 } 1059 1060 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1061 enum i915_pipe pipe, u32 val) 1062 { 1063 if ((val & ADPA_DAC_ENABLE) == 0) 1064 return false; 1065 if (HAS_PCH_CPT(dev_priv->dev)) { 1066 if ((val & PORT_TRANS_SEL_MASK) != 
PORT_TRANS_SEL_CPT(pipe)) 1067 return false; 1068 } else { 1069 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1070 return false; 1071 } 1072 return true; 1073 } 1074 1075 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1076 enum i915_pipe pipe, u32 port_sel, u32 val) 1077 { 1078 if ((val & DP_PORT_EN) == 0) 1079 return false; 1080 1081 if (HAS_PCH_CPT(dev_priv->dev)) { 1082 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1083 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1084 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1085 return false; 1086 } else { 1087 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1088 return false; 1089 } 1090 return true; 1091 } 1092 1093 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1094 enum i915_pipe pipe, int reg, u32 port_sel) 1095 { 1096 u32 val = I915_READ(reg); 1097 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) 1098 kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1099 reg, pipe_name(pipe)); 1100 } 1101 1102 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1103 enum i915_pipe pipe, int reg) 1104 { 1105 u32 val = I915_READ(reg); 1106 if (hdmi_pipe_enabled(dev_priv, val, pipe)) 1107 kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1108 reg, pipe_name(pipe)); 1109 } 1110 1111 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1112 enum i915_pipe pipe) 1113 { 1114 int reg; 1115 u32 val; 1116 1117 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1118 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1119 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1120 1121 reg = PCH_ADPA; 1122 val = I915_READ(reg); 1123 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1124 kprintf("PCH VGA enabled on transcoder %c, should be disabled\n", 1125 pipe_name(pipe)); 1126 1127 reg = PCH_LVDS; 1128 val = I915_READ(reg); 1129 if 
(lvds_pipe_enabled(dev_priv, val, pipe)) 1130 kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n", 1131 pipe_name(pipe)); 1132 1133 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1134 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1135 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1136 } 1137 1138 /** 1139 * intel_enable_pll - enable a PLL 1140 * @dev_priv: i915 private structure 1141 * @pipe: pipe PLL to enable 1142 * 1143 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to 1144 * make sure the PLL reg is writable first though, since the panel write 1145 * protect mechanism may be enabled. 1146 * 1147 * Note! This is for pre-ILK only. 1148 */ 1149 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1150 { 1151 int reg; 1152 u32 val; 1153 1154 /* No really, not for ILK+ */ 1155 KASSERT(dev_priv->info->gen < 5, ("Wrong device gen")); 1156 1157 /* PLL is protected by panel, make sure we can write it */ 1158 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1159 assert_panel_unlocked(dev_priv, pipe); 1160 1161 reg = DPLL(pipe); 1162 val = I915_READ(reg); 1163 val |= DPLL_VCO_ENABLE; 1164 1165 /* We do this three times for luck */ 1166 I915_WRITE(reg, val); 1167 POSTING_READ(reg); 1168 DELAY(150); /* wait for warmup */ 1169 I915_WRITE(reg, val); 1170 POSTING_READ(reg); 1171 DELAY(150); /* wait for warmup */ 1172 I915_WRITE(reg, val); 1173 POSTING_READ(reg); 1174 DELAY(150); /* wait for warmup */ 1175 } 1176 1177 /** 1178 * intel_disable_pll - disable a PLL 1179 * @dev_priv: i915 private structure 1180 * @pipe: pipe PLL to disable 1181 * 1182 * Disable the PLL for @pipe, making sure the pipe is off first. 1183 * 1184 * Note! This is for pre-ILK only. 
1185 */ 1186 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1187 { 1188 int reg; 1189 u32 val; 1190 1191 /* Don't disable pipe A or pipe A PLLs if needed */ 1192 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1193 return; 1194 1195 /* Make sure the pipe isn't still relying on us */ 1196 assert_pipe_disabled(dev_priv, pipe); 1197 1198 reg = DPLL(pipe); 1199 val = I915_READ(reg); 1200 val &= ~DPLL_VCO_ENABLE; 1201 I915_WRITE(reg, val); 1202 POSTING_READ(reg); 1203 } 1204 1205 /** 1206 * intel_enable_pch_pll - enable PCH PLL 1207 * @dev_priv: i915 private structure 1208 * @pipe: pipe PLL to enable 1209 * 1210 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1211 * drives the transcoder clock. 1212 */ 1213 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, 1214 enum i915_pipe pipe) 1215 { 1216 int reg; 1217 u32 val; 1218 1219 if (pipe > 1) 1220 return; 1221 1222 /* PCH only available on ILK+ */ 1223 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); 1224 1225 /* PCH refclock must be enabled first */ 1226 assert_pch_refclk_enabled(dev_priv); 1227 1228 reg = _PCH_DPLL(pipe); 1229 val = I915_READ(reg); 1230 val |= DPLL_VCO_ENABLE; 1231 I915_WRITE(reg, val); 1232 POSTING_READ(reg); 1233 DELAY(200); 1234 } 1235 1236 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, 1237 enum i915_pipe pipe) 1238 { 1239 int reg; 1240 u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL, 1241 pll_sel = TRANSC_DPLL_ENABLE; 1242 1243 if (pipe > 1) 1244 return; 1245 1246 /* PCH only available on ILK+ */ 1247 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); 1248 1249 /* Make sure transcoder isn't still depending on us */ 1250 assert_transcoder_disabled(dev_priv, pipe); 1251 1252 if (pipe == 0) 1253 pll_sel |= TRANSC_DPLLA_SEL; 1254 else if (pipe == 1) 1255 pll_sel |= TRANSC_DPLLB_SEL; 1256 1257 1258 if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel) 1259 return; 1260 
1261 reg = _PCH_DPLL(pipe); 1262 val = I915_READ(reg); 1263 val &= ~DPLL_VCO_ENABLE; 1264 I915_WRITE(reg, val); 1265 POSTING_READ(reg); 1266 DELAY(200); 1267 } 1268 1269 static void intel_enable_transcoder(struct drm_i915_private *dev_priv, 1270 enum i915_pipe pipe) 1271 { 1272 int reg; 1273 u32 val, pipeconf_val; 1274 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1275 1276 /* PCH only available on ILK+ */ 1277 KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen")); 1278 1279 /* Make sure PCH DPLL is enabled */ 1280 assert_pch_pll_enabled(dev_priv, pipe); 1281 1282 /* FDI must be feeding us bits for PCH ports */ 1283 assert_fdi_tx_enabled(dev_priv, pipe); 1284 assert_fdi_rx_enabled(dev_priv, pipe); 1285 1286 1287 reg = TRANSCONF(pipe); 1288 val = I915_READ(reg); 1289 pipeconf_val = I915_READ(PIPECONF(pipe)); 1290 1291 if (HAS_PCH_IBX(dev_priv->dev)) { 1292 /* 1293 * make the BPC in transcoder be consistent with 1294 * that in pipeconf reg. 1295 */ 1296 val &= ~PIPE_BPC_MASK; 1297 val |= pipeconf_val & PIPE_BPC_MASK; 1298 } 1299 1300 val &= ~TRANS_INTERLACE_MASK; 1301 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1302 if (HAS_PCH_IBX(dev_priv->dev) && 1303 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1304 val |= TRANS_LEGACY_INTERLACED_ILK; 1305 else 1306 val |= TRANS_INTERLACED; 1307 else 1308 val |= TRANS_PROGRESSIVE; 1309 1310 I915_WRITE(reg, val | TRANS_ENABLE); 1311 if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE, 1312 100, 1, "915trc")) 1313 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1314 } 1315 1316 static void intel_disable_transcoder(struct drm_i915_private *dev_priv, 1317 enum i915_pipe pipe) 1318 { 1319 int reg; 1320 u32 val; 1321 1322 /* FDI relies on the transcoder */ 1323 assert_fdi_tx_disabled(dev_priv, pipe); 1324 assert_fdi_rx_disabled(dev_priv, pipe); 1325 1326 /* Ports must be off as well */ 1327 assert_pch_ports_disabled(dev_priv, pipe); 1328 1329 reg = TRANSCONF(pipe); 
1330 val = I915_READ(reg); 1331 val &= ~TRANS_ENABLE; 1332 I915_WRITE(reg, val); 1333 /* wait for PCH transcoder off, transcoder state */ 1334 if (_intel_wait_for(dev_priv->dev, 1335 (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50, 1336 1, "915trd")) 1337 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1338 } 1339 1340 /** 1341 * intel_enable_pipe - enable a pipe, asserting requirements 1342 * @dev_priv: i915 private structure 1343 * @pipe: pipe to enable 1344 * @pch_port: on ILK+, is this pipe driving a PCH port or not 1345 * 1346 * Enable @pipe, making sure that various hardware specific requirements 1347 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1348 * 1349 * @pipe should be %PIPE_A or %PIPE_B. 1350 * 1351 * Will wait until the pipe is actually running (i.e. first vblank) before 1352 * returning. 1353 */ 1354 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 1355 bool pch_port) 1356 { 1357 int reg; 1358 u32 val; 1359 1360 /* 1361 * A pipe without a PLL won't actually be able to drive bits from 1362 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1363 * need the check. 1364 */ 1365 if (!HAS_PCH_SPLIT(dev_priv->dev)) 1366 assert_pll_enabled(dev_priv, pipe); 1367 else { 1368 if (pch_port) { 1369 /* if driving the PCH, we need FDI enabled */ 1370 assert_fdi_rx_pll_enabled(dev_priv, pipe); 1371 assert_fdi_tx_pll_enabled(dev_priv, pipe); 1372 } 1373 /* FIXME: assert CPU port conditions for SNB+ */ 1374 } 1375 1376 reg = PIPECONF(pipe); 1377 val = I915_READ(reg); 1378 if (val & PIPECONF_ENABLE) 1379 return; 1380 1381 I915_WRITE(reg, val | PIPECONF_ENABLE); 1382 intel_wait_for_vblank(dev_priv->dev, pipe); 1383 } 1384 1385 /** 1386 * intel_disable_pipe - disable a pipe, asserting requirements 1387 * @dev_priv: i915 private structure 1388 * @pipe: pipe to disable 1389 * 1390 * Disable @pipe, making sure that various hardware specific requirements 1391 * are met, if applicable, e.g. 
plane disabled, panel fitter off, etc. 1392 * 1393 * @pipe should be %PIPE_A or %PIPE_B. 1394 * 1395 * Will wait until the pipe has shut down before returning. 1396 */ 1397 static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1398 enum i915_pipe pipe) 1399 { 1400 int reg; 1401 u32 val; 1402 1403 /* 1404 * Make sure planes won't keep trying to pump pixels to us, 1405 * or we might hang the display. 1406 */ 1407 assert_planes_disabled(dev_priv, pipe); 1408 1409 /* Don't disable pipe A or pipe A PLLs if needed */ 1410 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1411 return; 1412 1413 reg = PIPECONF(pipe); 1414 val = I915_READ(reg); 1415 if ((val & PIPECONF_ENABLE) == 0) 1416 return; 1417 1418 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 1419 intel_wait_for_pipe_off(dev_priv->dev, pipe); 1420 } 1421 1422 /* 1423 * Plane regs are double buffered, going from enabled->disabled needs a 1424 * trigger in order to latch. The display address reg provides this. 1425 */ 1426 void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1427 enum plane plane) 1428 { 1429 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1430 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1431 } 1432 1433 /** 1434 * intel_enable_plane - enable a display plane on a given pipe 1435 * @dev_priv: i915 private structure 1436 * @plane: plane to enable 1437 * @pipe: pipe being fed 1438 * 1439 * Enable @plane on @pipe, making sure that @pipe is running first. 
1440 */ 1441 static void intel_enable_plane(struct drm_i915_private *dev_priv, 1442 enum plane plane, enum i915_pipe pipe) 1443 { 1444 int reg; 1445 u32 val; 1446 1447 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1448 assert_pipe_enabled(dev_priv, pipe); 1449 1450 reg = DSPCNTR(plane); 1451 val = I915_READ(reg); 1452 if (val & DISPLAY_PLANE_ENABLE) 1453 return; 1454 1455 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1456 intel_flush_display_plane(dev_priv, plane); 1457 intel_wait_for_vblank(dev_priv->dev, pipe); 1458 } 1459 1460 /** 1461 * intel_disable_plane - disable a display plane 1462 * @dev_priv: i915 private structure 1463 * @plane: plane to disable 1464 * @pipe: pipe consuming the data 1465 * 1466 * Disable @plane; should be an independent operation. 1467 */ 1468 static void intel_disable_plane(struct drm_i915_private *dev_priv, 1469 enum plane plane, enum i915_pipe pipe) 1470 { 1471 int reg; 1472 u32 val; 1473 1474 reg = DSPCNTR(plane); 1475 val = I915_READ(reg); 1476 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1477 return; 1478 1479 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1480 intel_flush_display_plane(dev_priv, plane); 1481 intel_wait_for_vblank(dev_priv->dev, pipe); 1482 } 1483 1484 static void disable_pch_dp(struct drm_i915_private *dev_priv, 1485 enum i915_pipe pipe, int reg, u32 port_sel) 1486 { 1487 u32 val = I915_READ(reg); 1488 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { 1489 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); 1490 I915_WRITE(reg, val & ~DP_PORT_EN); 1491 } 1492 } 1493 1494 static void disable_pch_hdmi(struct drm_i915_private *dev_priv, 1495 enum i915_pipe pipe, int reg) 1496 { 1497 u32 val = I915_READ(reg); 1498 if (hdmi_pipe_enabled(dev_priv, val, pipe)) { 1499 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1500 reg, pipe); 1501 I915_WRITE(reg, val & ~PORT_ENABLE); 1502 } 1503 } 1504 1505 /* Disable any ports connected to this transcoder */ 1506 static void 
intel_disable_pch_ports(struct drm_i915_private *dev_priv, 1507 enum i915_pipe pipe) 1508 { 1509 u32 reg, val; 1510 1511 val = I915_READ(PCH_PP_CONTROL); 1512 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); 1513 1514 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1515 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1516 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1517 1518 reg = PCH_ADPA; 1519 val = I915_READ(reg); 1520 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1521 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1522 1523 reg = PCH_LVDS; 1524 val = I915_READ(reg); 1525 if (lvds_pipe_enabled(dev_priv, val, pipe)) { 1526 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); 1527 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1528 POSTING_READ(reg); 1529 DELAY(100); 1530 } 1531 1532 disable_pch_hdmi(dev_priv, pipe, HDMIB); 1533 disable_pch_hdmi(dev_priv, pipe, HDMIC); 1534 disable_pch_hdmi(dev_priv, pipe, HDMID); 1535 } 1536 1537 int 1538 intel_pin_and_fence_fb_obj(struct drm_device *dev, 1539 struct drm_i915_gem_object *obj, 1540 struct intel_ring_buffer *pipelined) 1541 { 1542 struct drm_i915_private *dev_priv = dev->dev_private; 1543 u32 alignment; 1544 int ret; 1545 1546 alignment = 0; /* shut gcc */ 1547 switch (obj->tiling_mode) { 1548 case I915_TILING_NONE: 1549 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1550 alignment = 128 * 1024; 1551 else if (INTEL_INFO(dev)->gen >= 4) 1552 alignment = 4 * 1024; 1553 else 1554 alignment = 64 * 1024; 1555 break; 1556 case I915_TILING_X: 1557 /* pin() will align the object as required by fence */ 1558 alignment = 0; 1559 break; 1560 case I915_TILING_Y: 1561 /* FIXME: Is this true? 
*/ 1562 DRM_ERROR("Y tiled not allowed for scan out buffers\n"); 1563 return -EINVAL; 1564 default: 1565 KASSERT(0, ("Wrong tiling for fb obj")); 1566 } 1567 1568 dev_priv->mm.interruptible = false; 1569 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); 1570 if (ret) 1571 goto err_interruptible; 1572 1573 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1574 * fence, whereas 965+ only requires a fence if using 1575 * framebuffer compression. For simplicity, we always install 1576 * a fence as the cost is not that onerous. 1577 */ 1578 if (obj->tiling_mode != I915_TILING_NONE) { 1579 ret = i915_gem_object_get_fence(obj, pipelined); 1580 if (ret) 1581 goto err_unpin; 1582 1583 i915_gem_object_pin_fence(obj); 1584 } 1585 1586 dev_priv->mm.interruptible = true; 1587 return 0; 1588 1589 err_unpin: 1590 i915_gem_object_unpin(obj); 1591 err_interruptible: 1592 dev_priv->mm.interruptible = true; 1593 return ret; 1594 } 1595 1596 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) 1597 { 1598 i915_gem_object_unpin_fence(obj); 1599 i915_gem_object_unpin(obj); 1600 } 1601 1602 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 1603 * is assumed to be a power-of-two. 
*/ 1604 unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, 1605 unsigned int bpp, 1606 unsigned int pitch) 1607 { 1608 int tile_rows, tiles; 1609 1610 tile_rows = *y / 8; 1611 *y %= 8; 1612 tiles = *x / (512/bpp); 1613 *x %= 512/bpp; 1614 1615 return tile_rows * pitch * 8 + tiles * 4096; 1616 } 1617 1618 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, 1619 int x, int y) 1620 { 1621 struct drm_device *dev = crtc->dev; 1622 struct drm_i915_private *dev_priv = dev->dev_private; 1623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1624 struct intel_framebuffer *intel_fb; 1625 struct drm_i915_gem_object *obj; 1626 int plane = intel_crtc->plane; 1627 unsigned long Start, Offset; 1628 u32 dspcntr; 1629 u32 reg; 1630 1631 switch (plane) { 1632 case 0: 1633 case 1: 1634 break; 1635 default: 1636 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 1637 return -EINVAL; 1638 } 1639 1640 intel_fb = to_intel_framebuffer(fb); 1641 obj = intel_fb->obj; 1642 1643 reg = DSPCNTR(plane); 1644 dspcntr = I915_READ(reg); 1645 /* Mask out pixel format bits in case we change it */ 1646 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1647 switch (fb->bits_per_pixel) { 1648 case 8: 1649 dspcntr |= DISPPLANE_8BPP; 1650 break; 1651 case 16: 1652 if (fb->depth == 15) 1653 dspcntr |= DISPPLANE_BGRX555; 1654 else 1655 dspcntr |= DISPPLANE_BGRX565; 1656 break; 1657 case 24: 1658 case 32: 1659 dspcntr |= DISPPLANE_BGRX888; 1660 break; 1661 default: 1662 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); 1663 return -EINVAL; 1664 } 1665 if (INTEL_INFO(dev)->gen >= 4) { 1666 if (obj->tiling_mode != I915_TILING_NONE) 1667 dspcntr |= DISPPLANE_TILED; 1668 else 1669 dspcntr &= ~DISPPLANE_TILED; 1670 } 1671 1672 I915_WRITE(reg, dspcntr); 1673 1674 Start = obj->gtt_offset; 1675 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 1676 1677 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1678 Start, Offset, x, y, fb->pitches[0]); 1679 
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 1680 if (INTEL_INFO(dev)->gen >= 4) { 1681 I915_WRITE(DSPSURF(plane), Start); 1682 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 1683 I915_WRITE(DSPADDR(plane), Offset); 1684 } else 1685 I915_WRITE(DSPADDR(plane), Start + Offset); 1686 POSTING_READ(reg); 1687 1688 return (0); 1689 } 1690 1691 static int ironlake_update_plane(struct drm_crtc *crtc, 1692 struct drm_framebuffer *fb, int x, int y) 1693 { 1694 struct drm_device *dev = crtc->dev; 1695 struct drm_i915_private *dev_priv = dev->dev_private; 1696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1697 struct intel_framebuffer *intel_fb; 1698 struct drm_i915_gem_object *obj; 1699 int plane = intel_crtc->plane; 1700 unsigned long Start, Offset; 1701 u32 dspcntr; 1702 u32 reg; 1703 1704 switch (plane) { 1705 case 0: 1706 case 1: 1707 case 2: 1708 break; 1709 default: 1710 DRM_ERROR("Can't update plane %d in SAREA\n", plane); 1711 return -EINVAL; 1712 } 1713 1714 intel_fb = to_intel_framebuffer(fb); 1715 obj = intel_fb->obj; 1716 1717 reg = DSPCNTR(plane); 1718 dspcntr = I915_READ(reg); 1719 /* Mask out pixel format bits in case we change it */ 1720 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1721 switch (fb->bits_per_pixel) { 1722 case 8: 1723 dspcntr |= DISPPLANE_8BPP; 1724 break; 1725 case 16: 1726 if (fb->depth != 16) { 1727 DRM_ERROR("bpp 16, depth %d\n", fb->depth); 1728 return -EINVAL; 1729 } 1730 1731 dspcntr |= DISPPLANE_BGRX565; 1732 break; 1733 case 24: 1734 case 32: 1735 if (fb->depth == 24) 1736 dspcntr |= DISPPLANE_BGRX888; 1737 else if (fb->depth == 30) 1738 dspcntr |= DISPPLANE_BGRX101010; 1739 else { 1740 DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel, 1741 fb->depth); 1742 return -EINVAL; 1743 } 1744 break; 1745 default: 1746 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); 1747 return -EINVAL; 1748 } 1749 1750 if (obj->tiling_mode != I915_TILING_NONE) 1751 dspcntr |= DISPPLANE_TILED; 1752 else 1753 dspcntr &= ~DISPPLANE_TILED; 1754 1755 /* 
must disable */ 1756 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1757 1758 I915_WRITE(reg, dspcntr); 1759 1760 Start = obj->gtt_offset; 1761 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 1762 1763 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1764 Start, Offset, x, y, fb->pitches[0]); 1765 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 1766 I915_WRITE(DSPSURF(plane), Start); 1767 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 1768 I915_WRITE(DSPADDR(plane), Offset); 1769 POSTING_READ(reg); 1770 1771 return 0; 1772 } 1773 1774 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 1775 static int 1776 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 1777 int x, int y, enum mode_set_atomic state) 1778 { 1779 struct drm_device *dev = crtc->dev; 1780 struct drm_i915_private *dev_priv = dev->dev_private; 1781 1782 if (dev_priv->display.disable_fbc) 1783 dev_priv->display.disable_fbc(dev); 1784 intel_increase_pllclock(crtc); 1785 1786 return dev_priv->display.update_plane(crtc, fb, x, y); 1787 } 1788 1789 static int 1790 intel_finish_fb(struct drm_framebuffer *old_fb) 1791 { 1792 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1793 struct drm_device *dev = obj->base.dev; 1794 struct drm_i915_private *dev_priv = dev->dev_private; 1795 bool was_interruptible = dev_priv->mm.interruptible; 1796 int ret; 1797 1798 /* XXX */ lockmgr(&dev->event_lock, LK_EXCLUSIVE); 1799 while (!atomic_read(&dev_priv->mm.wedged) && 1800 atomic_read(&obj->pending_flip) != 0) { 1801 lksleep(&obj->pending_flip, &dev->event_lock, 1802 0, "915flp", 0); 1803 } 1804 /* XXX */ lockmgr(&dev->event_lock, LK_RELEASE); 1805 1806 /* Big Hammer, we also need to ensure that any pending 1807 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1808 * current scanout is retired before unpinning the old 1809 * framebuffer. 1810 * 1811 * This should only fail upon a hung GPU, in which case we 1812 * can safely continue. 
1813 */ 1814 dev_priv->mm.interruptible = false; 1815 ret = i915_gem_object_finish_gpu(obj); 1816 dev_priv->mm.interruptible = was_interruptible; 1817 return ret; 1818 } 1819 1820 static int 1821 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1822 struct drm_framebuffer *old_fb) 1823 { 1824 struct drm_device *dev = crtc->dev; 1825 #if 0 1826 struct drm_i915_master_private *master_priv; 1827 #else 1828 drm_i915_private_t *dev_priv = dev->dev_private; 1829 #endif 1830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1831 int ret; 1832 1833 /* no fb bound */ 1834 if (!crtc->fb) { 1835 DRM_ERROR("No FB bound\n"); 1836 return 0; 1837 } 1838 1839 switch (intel_crtc->plane) { 1840 case 0: 1841 case 1: 1842 break; 1843 case 2: 1844 if (IS_IVYBRIDGE(dev)) 1845 break; 1846 /* fall through otherwise */ 1847 default: 1848 DRM_ERROR("no plane for crtc\n"); 1849 return -EINVAL; 1850 } 1851 1852 DRM_LOCK(dev); 1853 ret = intel_pin_and_fence_fb_obj(dev, 1854 to_intel_framebuffer(crtc->fb)->obj, 1855 NULL); 1856 if (ret != 0) { 1857 DRM_UNLOCK(dev); 1858 DRM_ERROR("pin & fence failed\n"); 1859 return ret; 1860 } 1861 1862 if (old_fb) 1863 intel_finish_fb(old_fb); 1864 1865 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 1866 LEAVE_ATOMIC_MODE_SET); 1867 if (ret) { 1868 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 1869 DRM_UNLOCK(dev); 1870 DRM_ERROR("failed to update base address\n"); 1871 return ret; 1872 } 1873 1874 if (old_fb) { 1875 intel_wait_for_vblank(dev, intel_crtc->pipe); 1876 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 1877 } 1878 1879 DRM_UNLOCK(dev); 1880 1881 #if 0 1882 if (!dev->primary->master) 1883 return 0; 1884 1885 master_priv = dev->primary->master->driver_priv; 1886 if (!master_priv->sarea_priv) 1887 return 0; 1888 1889 if (intel_crtc->pipe) { 1890 master_priv->sarea_priv->pipeB_x = x; 1891 master_priv->sarea_priv->pipeB_y = y; 1892 } else { 1893 master_priv->sarea_priv->pipeA_x = x; 1894 
master_priv->sarea_priv->pipeA_y = y; 1895 } 1896 #else 1897 1898 if (!dev_priv->sarea_priv) 1899 return 0; 1900 1901 if (intel_crtc->pipe) { 1902 dev_priv->sarea_priv->planeB_x = x; 1903 dev_priv->sarea_priv->planeB_y = y; 1904 } else { 1905 dev_priv->sarea_priv->planeA_x = x; 1906 dev_priv->sarea_priv->planeA_y = y; 1907 } 1908 #endif 1909 1910 return 0; 1911 } 1912 1913 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) 1914 { 1915 struct drm_device *dev = crtc->dev; 1916 struct drm_i915_private *dev_priv = dev->dev_private; 1917 u32 dpa_ctl; 1918 1919 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); 1920 dpa_ctl = I915_READ(DP_A); 1921 dpa_ctl &= ~DP_PLL_FREQ_MASK; 1922 1923 if (clock < 200000) { 1924 u32 temp; 1925 dpa_ctl |= DP_PLL_FREQ_160MHZ; 1926 /* workaround for 160Mhz: 1927 1) program 0x4600c bits 15:0 = 0x8124 1928 2) program 0x46010 bit 0 = 1 1929 3) program 0x46034 bit 24 = 1 1930 4) program 0x64000 bit 14 = 1 1931 */ 1932 temp = I915_READ(0x4600c); 1933 temp &= 0xffff0000; 1934 I915_WRITE(0x4600c, temp | 0x8124); 1935 1936 temp = I915_READ(0x46010); 1937 I915_WRITE(0x46010, temp | 1); 1938 1939 temp = I915_READ(0x46034); 1940 I915_WRITE(0x46034, temp | (1 << 24)); 1941 } else { 1942 dpa_ctl |= DP_PLL_FREQ_270MHZ; 1943 } 1944 I915_WRITE(DP_A, dpa_ctl); 1945 1946 POSTING_READ(DP_A); 1947 DELAY(500); 1948 } 1949 1950 static void intel_fdi_normal_train(struct drm_crtc *crtc) 1951 { 1952 struct drm_device *dev = crtc->dev; 1953 struct drm_i915_private *dev_priv = dev->dev_private; 1954 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1955 int pipe = intel_crtc->pipe; 1956 u32 reg, temp; 1957 1958 /* enable normal train */ 1959 reg = FDI_TX_CTL(pipe); 1960 temp = I915_READ(reg); 1961 if (IS_IVYBRIDGE(dev)) { 1962 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 1963 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 1964 } else { 1965 temp &= ~FDI_LINK_TRAIN_NONE; 1966 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 1967 
} 1968 I915_WRITE(reg, temp); 1969 1970 reg = FDI_RX_CTL(pipe); 1971 temp = I915_READ(reg); 1972 if (HAS_PCH_CPT(dev)) { 1973 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 1974 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 1975 } else { 1976 temp &= ~FDI_LINK_TRAIN_NONE; 1977 temp |= FDI_LINK_TRAIN_NONE; 1978 } 1979 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 1980 1981 /* wait one idle pattern time */ 1982 POSTING_READ(reg); 1983 DELAY(1000); 1984 1985 /* IVB wants error correction enabled */ 1986 if (IS_IVYBRIDGE(dev)) 1987 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 1988 FDI_FE_ERRC_ENABLE); 1989 } 1990 1991 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) 1992 { 1993 struct drm_i915_private *dev_priv = dev->dev_private; 1994 u32 flags = I915_READ(SOUTH_CHICKEN1); 1995 1996 flags |= FDI_PHASE_SYNC_OVR(pipe); 1997 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ 1998 flags |= FDI_PHASE_SYNC_EN(pipe); 1999 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ 2000 POSTING_READ(SOUTH_CHICKEN1); 2001 } 2002 2003 /* The FDI link training functions for ILK/Ibexpeak. 
*/ 2004 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 2005 { 2006 struct drm_device *dev = crtc->dev; 2007 struct drm_i915_private *dev_priv = dev->dev_private; 2008 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2009 int pipe = intel_crtc->pipe; 2010 int plane = intel_crtc->plane; 2011 u32 reg, temp, tries; 2012 2013 /* FDI needs bits from pipe & plane first */ 2014 assert_pipe_enabled(dev_priv, pipe); 2015 assert_plane_enabled(dev_priv, plane); 2016 2017 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2018 for train result */ 2019 reg = FDI_RX_IMR(pipe); 2020 temp = I915_READ(reg); 2021 temp &= ~FDI_RX_SYMBOL_LOCK; 2022 temp &= ~FDI_RX_BIT_LOCK; 2023 I915_WRITE(reg, temp); 2024 I915_READ(reg); 2025 DELAY(150); 2026 2027 /* enable CPU FDI TX and PCH FDI RX */ 2028 reg = FDI_TX_CTL(pipe); 2029 temp = I915_READ(reg); 2030 temp &= ~(7 << 19); 2031 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2032 temp &= ~FDI_LINK_TRAIN_NONE; 2033 temp |= FDI_LINK_TRAIN_PATTERN_1; 2034 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2035 2036 reg = FDI_RX_CTL(pipe); 2037 temp = I915_READ(reg); 2038 temp &= ~FDI_LINK_TRAIN_NONE; 2039 temp |= FDI_LINK_TRAIN_PATTERN_1; 2040 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2041 2042 POSTING_READ(reg); 2043 DELAY(150); 2044 2045 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2046 if (HAS_PCH_IBX(dev)) { 2047 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2048 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2049 FDI_RX_PHASE_SYNC_POINTER_EN); 2050 } 2051 2052 reg = FDI_RX_IIR(pipe); 2053 for (tries = 0; tries < 5; tries++) { 2054 temp = I915_READ(reg); 2055 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2056 2057 if ((temp & FDI_RX_BIT_LOCK)) { 2058 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2059 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2060 break; 2061 } 2062 } 2063 if (tries == 5) 2064 DRM_ERROR("FDI train 1 fail!\n"); 2065 2066 /* Train 2 */ 2067 reg = FDI_TX_CTL(pipe); 2068 temp = 
I915_READ(reg); 2069 temp &= ~FDI_LINK_TRAIN_NONE; 2070 temp |= FDI_LINK_TRAIN_PATTERN_2; 2071 I915_WRITE(reg, temp); 2072 2073 reg = FDI_RX_CTL(pipe); 2074 temp = I915_READ(reg); 2075 temp &= ~FDI_LINK_TRAIN_NONE; 2076 temp |= FDI_LINK_TRAIN_PATTERN_2; 2077 I915_WRITE(reg, temp); 2078 2079 POSTING_READ(reg); 2080 DELAY(150); 2081 2082 reg = FDI_RX_IIR(pipe); 2083 for (tries = 0; tries < 5; tries++) { 2084 temp = I915_READ(reg); 2085 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2086 2087 if (temp & FDI_RX_SYMBOL_LOCK) { 2088 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2089 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2090 break; 2091 } 2092 } 2093 if (tries == 5) 2094 DRM_ERROR("FDI train 2 fail!\n"); 2095 2096 DRM_DEBUG_KMS("FDI train done\n"); 2097 2098 } 2099 2100 static const int snb_b_fdi_train_param[] = { 2101 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 2102 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 2103 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 2104 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 2105 }; 2106 2107 /* The FDI link training functions for SNB/Cougarpoint. 
*/ 2108 static void gen6_fdi_link_train(struct drm_crtc *crtc) 2109 { 2110 struct drm_device *dev = crtc->dev; 2111 struct drm_i915_private *dev_priv = dev->dev_private; 2112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2113 int pipe = intel_crtc->pipe; 2114 u32 reg, temp, i; 2115 2116 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2117 for train result */ 2118 reg = FDI_RX_IMR(pipe); 2119 temp = I915_READ(reg); 2120 temp &= ~FDI_RX_SYMBOL_LOCK; 2121 temp &= ~FDI_RX_BIT_LOCK; 2122 I915_WRITE(reg, temp); 2123 2124 POSTING_READ(reg); 2125 DELAY(150); 2126 2127 /* enable CPU FDI TX and PCH FDI RX */ 2128 reg = FDI_TX_CTL(pipe); 2129 temp = I915_READ(reg); 2130 temp &= ~(7 << 19); 2131 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2132 temp &= ~FDI_LINK_TRAIN_NONE; 2133 temp |= FDI_LINK_TRAIN_PATTERN_1; 2134 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2135 /* SNB-B */ 2136 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2137 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2138 2139 reg = FDI_RX_CTL(pipe); 2140 temp = I915_READ(reg); 2141 if (HAS_PCH_CPT(dev)) { 2142 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2143 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2144 } else { 2145 temp &= ~FDI_LINK_TRAIN_NONE; 2146 temp |= FDI_LINK_TRAIN_PATTERN_1; 2147 } 2148 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2149 2150 POSTING_READ(reg); 2151 DELAY(150); 2152 2153 if (HAS_PCH_CPT(dev)) 2154 cpt_phase_pointer_enable(dev, pipe); 2155 2156 for (i = 0; i < 4; i++) { 2157 reg = FDI_TX_CTL(pipe); 2158 temp = I915_READ(reg); 2159 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2160 temp |= snb_b_fdi_train_param[i]; 2161 I915_WRITE(reg, temp); 2162 2163 POSTING_READ(reg); 2164 DELAY(500); 2165 2166 reg = FDI_RX_IIR(pipe); 2167 temp = I915_READ(reg); 2168 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2169 2170 if (temp & FDI_RX_BIT_LOCK) { 2171 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2172 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2173 break; 2174 } 2175 } 2176 if (i == 4) 2177 DRM_ERROR("FDI train 1 fail!\n"); 2178 
2179 /* Train 2 */ 2180 reg = FDI_TX_CTL(pipe); 2181 temp = I915_READ(reg); 2182 temp &= ~FDI_LINK_TRAIN_NONE; 2183 temp |= FDI_LINK_TRAIN_PATTERN_2; 2184 if (IS_GEN6(dev)) { 2185 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2186 /* SNB-B */ 2187 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2188 } 2189 I915_WRITE(reg, temp); 2190 2191 reg = FDI_RX_CTL(pipe); 2192 temp = I915_READ(reg); 2193 if (HAS_PCH_CPT(dev)) { 2194 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2195 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2196 } else { 2197 temp &= ~FDI_LINK_TRAIN_NONE; 2198 temp |= FDI_LINK_TRAIN_PATTERN_2; 2199 } 2200 I915_WRITE(reg, temp); 2201 2202 POSTING_READ(reg); 2203 DELAY(150); 2204 2205 for (i = 0; i < 4; i++) { 2206 reg = FDI_TX_CTL(pipe); 2207 temp = I915_READ(reg); 2208 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2209 temp |= snb_b_fdi_train_param[i]; 2210 I915_WRITE(reg, temp); 2211 2212 POSTING_READ(reg); 2213 DELAY(500); 2214 2215 reg = FDI_RX_IIR(pipe); 2216 temp = I915_READ(reg); 2217 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2218 2219 if (temp & FDI_RX_SYMBOL_LOCK) { 2220 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2221 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2222 break; 2223 } 2224 } 2225 if (i == 4) 2226 DRM_ERROR("FDI train 2 fail!\n"); 2227 2228 DRM_DEBUG_KMS("FDI train done.\n"); 2229 } 2230 2231 /* Manual link training for Ivy Bridge A0 parts */ 2232 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 2233 { 2234 struct drm_device *dev = crtc->dev; 2235 struct drm_i915_private *dev_priv = dev->dev_private; 2236 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2237 int pipe = intel_crtc->pipe; 2238 u32 reg, temp, i; 2239 2240 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2241 for train result */ 2242 reg = FDI_RX_IMR(pipe); 2243 temp = I915_READ(reg); 2244 temp &= ~FDI_RX_SYMBOL_LOCK; 2245 temp &= ~FDI_RX_BIT_LOCK; 2246 I915_WRITE(reg, temp); 2247 2248 POSTING_READ(reg); 2249 DELAY(150); 2250 2251 /* enable CPU FDI TX and PCH FDI RX */ 2252 reg = 
FDI_TX_CTL(pipe); 2253 temp = I915_READ(reg); 2254 temp &= ~(7 << 19); 2255 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2256 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2257 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2258 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2259 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2260 temp |= FDI_COMPOSITE_SYNC; 2261 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2262 2263 reg = FDI_RX_CTL(pipe); 2264 temp = I915_READ(reg); 2265 temp &= ~FDI_LINK_TRAIN_AUTO; 2266 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2267 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2268 temp |= FDI_COMPOSITE_SYNC; 2269 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2270 2271 POSTING_READ(reg); 2272 DELAY(150); 2273 2274 for (i = 0; i < 4; i++) { 2275 reg = FDI_TX_CTL(pipe); 2276 temp = I915_READ(reg); 2277 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2278 temp |= snb_b_fdi_train_param[i]; 2279 I915_WRITE(reg, temp); 2280 2281 POSTING_READ(reg); 2282 DELAY(500); 2283 2284 reg = FDI_RX_IIR(pipe); 2285 temp = I915_READ(reg); 2286 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2287 2288 if (temp & FDI_RX_BIT_LOCK || 2289 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2290 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2291 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2292 break; 2293 } 2294 } 2295 if (i == 4) 2296 DRM_ERROR("FDI train 1 fail!\n"); 2297 2298 /* Train 2 */ 2299 reg = FDI_TX_CTL(pipe); 2300 temp = I915_READ(reg); 2301 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2302 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 2303 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2304 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2305 I915_WRITE(reg, temp); 2306 2307 reg = FDI_RX_CTL(pipe); 2308 temp = I915_READ(reg); 2309 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2310 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2311 I915_WRITE(reg, temp); 2312 2313 POSTING_READ(reg); 2314 DELAY(150); 2315 2316 for (i = 0; i < 4; i++ ) { 2317 reg = FDI_TX_CTL(pipe); 2318 temp = I915_READ(reg); 2319 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2320 temp |= snb_b_fdi_train_param[i]; 2321 
I915_WRITE(reg, temp); 2322 2323 POSTING_READ(reg); 2324 DELAY(500); 2325 2326 reg = FDI_RX_IIR(pipe); 2327 temp = I915_READ(reg); 2328 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2329 2330 if (temp & FDI_RX_SYMBOL_LOCK) { 2331 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2332 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2333 break; 2334 } 2335 } 2336 if (i == 4) 2337 DRM_ERROR("FDI train 2 fail!\n"); 2338 2339 DRM_DEBUG_KMS("FDI train done.\n"); 2340 } 2341 2342 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) 2343 { 2344 struct drm_device *dev = crtc->dev; 2345 struct drm_i915_private *dev_priv = dev->dev_private; 2346 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2347 int pipe = intel_crtc->pipe; 2348 u32 reg, temp; 2349 2350 /* Write the TU size bits so error detection works */ 2351 I915_WRITE(FDI_RX_TUSIZE1(pipe), 2352 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 2353 2354 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2355 reg = FDI_RX_CTL(pipe); 2356 temp = I915_READ(reg); 2357 temp &= ~((0x7 << 19) | (0x7 << 16)); 2358 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2359 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2360 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2361 2362 POSTING_READ(reg); 2363 DELAY(200); 2364 2365 /* Switch from Rawclk to PCDclk */ 2366 temp = I915_READ(reg); 2367 I915_WRITE(reg, temp | FDI_PCDCLK); 2368 2369 POSTING_READ(reg); 2370 DELAY(200); 2371 2372 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2373 reg = FDI_TX_CTL(pipe); 2374 temp = I915_READ(reg); 2375 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2376 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2377 2378 POSTING_READ(reg); 2379 DELAY(100); 2380 } 2381 } 2382 2383 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) 2384 { 2385 struct drm_i915_private *dev_priv = dev->dev_private; 2386 u32 flags = I915_READ(SOUTH_CHICKEN1); 2387 2388 flags &= ~(FDI_PHASE_SYNC_EN(pipe)); 2389 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... 
*/ 2390 flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); 2391 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ 2392 POSTING_READ(SOUTH_CHICKEN1); 2393 } 2394 2395 static void ironlake_fdi_disable(struct drm_crtc *crtc) 2396 { 2397 struct drm_device *dev = crtc->dev; 2398 struct drm_i915_private *dev_priv = dev->dev_private; 2399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2400 int pipe = intel_crtc->pipe; 2401 u32 reg, temp; 2402 2403 /* disable CPU FDI tx and PCH FDI rx */ 2404 reg = FDI_TX_CTL(pipe); 2405 temp = I915_READ(reg); 2406 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 2407 POSTING_READ(reg); 2408 2409 reg = FDI_RX_CTL(pipe); 2410 temp = I915_READ(reg); 2411 temp &= ~(0x7 << 16); 2412 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2413 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2414 2415 POSTING_READ(reg); 2416 DELAY(100); 2417 2418 /* Ironlake workaround, disable clock pointer after downing FDI */ 2419 if (HAS_PCH_IBX(dev)) { 2420 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2421 I915_WRITE(FDI_RX_CHICKEN(pipe), 2422 I915_READ(FDI_RX_CHICKEN(pipe) & 2423 ~FDI_RX_PHASE_SYNC_POINTER_EN)); 2424 } else if (HAS_PCH_CPT(dev)) { 2425 cpt_phase_pointer_disable(dev, pipe); 2426 } 2427 2428 /* still set train pattern 1 */ 2429 reg = FDI_TX_CTL(pipe); 2430 temp = I915_READ(reg); 2431 temp &= ~FDI_LINK_TRAIN_NONE; 2432 temp |= FDI_LINK_TRAIN_PATTERN_1; 2433 I915_WRITE(reg, temp); 2434 2435 reg = FDI_RX_CTL(pipe); 2436 temp = I915_READ(reg); 2437 if (HAS_PCH_CPT(dev)) { 2438 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2439 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2440 } else { 2441 temp &= ~FDI_LINK_TRAIN_NONE; 2442 temp |= FDI_LINK_TRAIN_PATTERN_1; 2443 } 2444 /* BPC in FDI rx is consistent with that in PIPECONF */ 2445 temp &= ~(0x07 << 16); 2446 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2447 I915_WRITE(reg, temp); 2448 2449 POSTING_READ(reg); 2450 DELAY(100); 2451 } 2452 2453 /* 2454 * When we disable a pipe, we need to 
clear any pending scanline wait events 2455 * to avoid hanging the ring, which we assume we are waiting on. 2456 */ 2457 static void intel_clear_scanline_wait(struct drm_device *dev) 2458 { 2459 struct drm_i915_private *dev_priv = dev->dev_private; 2460 struct intel_ring_buffer *ring; 2461 u32 tmp; 2462 2463 if (IS_GEN2(dev)) 2464 /* Can't break the hang on i8xx */ 2465 return; 2466 2467 ring = LP_RING(dev_priv); 2468 tmp = I915_READ_CTL(ring); 2469 if (tmp & RING_WAIT) 2470 I915_WRITE_CTL(ring, tmp); 2471 } 2472 2473 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2474 { 2475 struct drm_i915_gem_object *obj; 2476 struct drm_i915_private *dev_priv; 2477 struct drm_device *dev; 2478 2479 if (crtc->fb == NULL) 2480 return; 2481 2482 obj = to_intel_framebuffer(crtc->fb)->obj; 2483 dev = crtc->dev; 2484 dev_priv = dev->dev_private; 2485 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 2486 while (atomic_read(&obj->pending_flip) != 0) 2487 lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); 2488 lockmgr(&dev->event_lock, LK_RELEASE); 2489 } 2490 2491 static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2492 { 2493 struct drm_device *dev = crtc->dev; 2494 struct drm_mode_config *mode_config = &dev->mode_config; 2495 struct intel_encoder *encoder; 2496 2497 /* 2498 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2499 * must be driven by its own crtc; no sharing is possible. 
2500 */ 2501 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 2502 if (encoder->base.crtc != crtc) 2503 continue; 2504 2505 switch (encoder->type) { 2506 case INTEL_OUTPUT_EDP: 2507 if (!intel_encoder_is_pch_edp(&encoder->base)) 2508 return false; 2509 continue; 2510 } 2511 } 2512 2513 return true; 2514 } 2515 2516 /* 2517 * Enable PCH resources required for PCH ports: 2518 * - PCH PLLs 2519 * - FDI training & RX/TX 2520 * - update transcoder timings 2521 * - DP transcoding bits 2522 * - transcoder 2523 */ 2524 static void ironlake_pch_enable(struct drm_crtc *crtc) 2525 { 2526 struct drm_device *dev = crtc->dev; 2527 struct drm_i915_private *dev_priv = dev->dev_private; 2528 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2529 int pipe = intel_crtc->pipe; 2530 u32 reg, temp, transc_sel; 2531 2532 /* For PCH output, training FDI link */ 2533 dev_priv->display.fdi_link_train(crtc); 2534 2535 intel_enable_pch_pll(dev_priv, pipe); 2536 2537 if (HAS_PCH_CPT(dev)) { 2538 transc_sel = intel_crtc->use_pll_a ? 
TRANSC_DPLLA_SEL : 2539 TRANSC_DPLLB_SEL; 2540 2541 /* Be sure PCH DPLL SEL is set */ 2542 temp = I915_READ(PCH_DPLL_SEL); 2543 if (pipe == 0) { 2544 temp &= ~(TRANSA_DPLLB_SEL); 2545 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2546 } else if (pipe == 1) { 2547 temp &= ~(TRANSB_DPLLB_SEL); 2548 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2549 } else if (pipe == 2) { 2550 temp &= ~(TRANSC_DPLLB_SEL); 2551 temp |= (TRANSC_DPLL_ENABLE | transc_sel); 2552 } 2553 I915_WRITE(PCH_DPLL_SEL, temp); 2554 } 2555 2556 /* set transcoder timing, panel must allow it */ 2557 assert_panel_unlocked(dev_priv, pipe); 2558 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 2559 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); 2560 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); 2561 2562 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); 2563 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2564 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2565 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 2566 2567 intel_fdi_normal_train(crtc); 2568 2569 /* For PCH DP, enable TRANS_DP_CTL */ 2570 if (HAS_PCH_CPT(dev) && 2571 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 2572 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2573 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2574 reg = TRANS_DP_CTL(pipe); 2575 temp = I915_READ(reg); 2576 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2577 TRANS_DP_SYNC_MASK | 2578 TRANS_DP_BPC_MASK); 2579 temp |= (TRANS_DP_OUTPUT_ENABLE | 2580 TRANS_DP_ENH_FRAMING); 2581 temp |= bpc << 9; /* same format but at 11:9 */ 2582 2583 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2584 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2585 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 2586 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 2587 2588 switch (intel_trans_dp_port_sel(crtc)) { 2589 case PCH_DP_B: 2590 temp |= TRANS_DP_PORT_SEL_B; 2591 break; 2592 case PCH_DP_C: 2593 temp |= TRANS_DP_PORT_SEL_C; 2594 break; 2595 
case PCH_DP_D: 2596 temp |= TRANS_DP_PORT_SEL_D; 2597 break; 2598 default: 2599 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); 2600 temp |= TRANS_DP_PORT_SEL_B; 2601 break; 2602 } 2603 2604 I915_WRITE(reg, temp); 2605 } 2606 2607 intel_enable_transcoder(dev_priv, pipe); 2608 } 2609 2610 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 2611 { 2612 struct drm_i915_private *dev_priv = dev->dev_private; 2613 int dslreg = PIPEDSL(pipe); 2614 u32 temp; 2615 2616 temp = I915_READ(dslreg); 2617 udelay(500); 2618 if (wait_for(I915_READ(dslreg) != temp, 5)) { 2619 if (wait_for(I915_READ(dslreg) != temp, 5)) 2620 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); 2621 } 2622 } 2623 2624 static void ironlake_crtc_enable(struct drm_crtc *crtc) 2625 { 2626 struct drm_device *dev = crtc->dev; 2627 struct drm_i915_private *dev_priv = dev->dev_private; 2628 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2629 int pipe = intel_crtc->pipe; 2630 int plane = intel_crtc->plane; 2631 u32 temp; 2632 bool is_pch_port; 2633 2634 if (intel_crtc->active) 2635 return; 2636 2637 intel_crtc->active = true; 2638 intel_update_watermarks(dev); 2639 2640 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2641 temp = I915_READ(PCH_LVDS); 2642 if ((temp & LVDS_PORT_EN) == 0) 2643 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2644 } 2645 2646 is_pch_port = intel_crtc_driving_pch(crtc); 2647 2648 if (is_pch_port) 2649 ironlake_fdi_pll_enable(crtc); 2650 else 2651 ironlake_fdi_disable(crtc); 2652 2653 /* Enable panel fitting for LVDS */ 2654 if (dev_priv->pch_pf_size && 2655 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { 2656 /* Force use of hard-coded filter coefficients 2657 * as some pre-programmed values are broken, 2658 * e.g. x201. 
2659 */ 2660 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 2661 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 2662 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2663 } 2664 2665 intel_enable_pipe(dev_priv, pipe, is_pch_port); 2666 intel_enable_plane(dev_priv, plane, pipe); 2667 2668 if (is_pch_port) 2669 ironlake_pch_enable(crtc); 2670 2671 intel_crtc_load_lut(crtc); 2672 2673 DRM_LOCK(dev); 2674 intel_update_fbc(dev); 2675 DRM_UNLOCK(dev); 2676 2677 intel_crtc_update_cursor(crtc, true); 2678 } 2679 2680 static void ironlake_crtc_disable(struct drm_crtc *crtc) 2681 { 2682 struct drm_device *dev = crtc->dev; 2683 struct drm_i915_private *dev_priv = dev->dev_private; 2684 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2685 int pipe = intel_crtc->pipe; 2686 int plane = intel_crtc->plane; 2687 u32 reg, temp; 2688 2689 if (!intel_crtc->active) 2690 return; 2691 2692 intel_crtc_wait_for_pending_flips(crtc); 2693 drm_vblank_off(dev, pipe); 2694 intel_crtc_update_cursor(crtc, false); 2695 2696 intel_disable_plane(dev_priv, plane, pipe); 2697 2698 if (dev_priv->cfb_plane == plane) 2699 intel_disable_fbc(dev); 2700 2701 intel_disable_pipe(dev_priv, pipe); 2702 2703 /* Disable PF */ 2704 I915_WRITE(PF_CTL(pipe), 0); 2705 I915_WRITE(PF_WIN_SZ(pipe), 0); 2706 2707 ironlake_fdi_disable(crtc); 2708 2709 /* This is a horrible layering violation; we should be doing this in 2710 * the connector/encoder ->prepare instead, but we don't always have 2711 * enough information there about the config to know whether it will 2712 * actually be necessary or just cause undesired flicker. 
2713 */ 2714 intel_disable_pch_ports(dev_priv, pipe); 2715 2716 intel_disable_transcoder(dev_priv, pipe); 2717 2718 if (HAS_PCH_CPT(dev)) { 2719 /* disable TRANS_DP_CTL */ 2720 reg = TRANS_DP_CTL(pipe); 2721 temp = I915_READ(reg); 2722 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2723 temp |= TRANS_DP_PORT_SEL_NONE; 2724 I915_WRITE(reg, temp); 2725 2726 /* disable DPLL_SEL */ 2727 temp = I915_READ(PCH_DPLL_SEL); 2728 switch (pipe) { 2729 case 0: 2730 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2731 break; 2732 case 1: 2733 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2734 break; 2735 case 2: 2736 /* C shares PLL A or B */ 2737 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 2738 break; 2739 default: 2740 KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */ 2741 } 2742 I915_WRITE(PCH_DPLL_SEL, temp); 2743 } 2744 2745 /* disable PCH DPLL */ 2746 if (!intel_crtc->no_pll) 2747 intel_disable_pch_pll(dev_priv, pipe); 2748 2749 /* Switch from PCDclk to Rawclk */ 2750 reg = FDI_RX_CTL(pipe); 2751 temp = I915_READ(reg); 2752 I915_WRITE(reg, temp & ~FDI_PCDCLK); 2753 2754 /* Disable CPU FDI TX PLL */ 2755 reg = FDI_TX_CTL(pipe); 2756 temp = I915_READ(reg); 2757 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 2758 2759 POSTING_READ(reg); 2760 DELAY(100); 2761 2762 reg = FDI_RX_CTL(pipe); 2763 temp = I915_READ(reg); 2764 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2765 2766 /* Wait for the clocks to turn off. 
*/ 2767 POSTING_READ(reg); 2768 DELAY(100); 2769 2770 intel_crtc->active = false; 2771 intel_update_watermarks(dev); 2772 2773 DRM_LOCK(dev); 2774 intel_update_fbc(dev); 2775 intel_clear_scanline_wait(dev); 2776 DRM_UNLOCK(dev); 2777 } 2778 2779 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 2780 { 2781 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2782 int pipe = intel_crtc->pipe; 2783 int plane = intel_crtc->plane; 2784 2785 /* XXX: When our outputs are all unaware of DPMS modes other than off 2786 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 2787 */ 2788 switch (mode) { 2789 case DRM_MODE_DPMS_ON: 2790 case DRM_MODE_DPMS_STANDBY: 2791 case DRM_MODE_DPMS_SUSPEND: 2792 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); 2793 ironlake_crtc_enable(crtc); 2794 break; 2795 2796 case DRM_MODE_DPMS_OFF: 2797 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); 2798 ironlake_crtc_disable(crtc); 2799 break; 2800 } 2801 } 2802 2803 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 2804 { 2805 if (!enable && intel_crtc->overlay) { 2806 struct drm_device *dev = intel_crtc->base.dev; 2807 struct drm_i915_private *dev_priv = dev->dev_private; 2808 2809 DRM_LOCK(dev); 2810 dev_priv->mm.interruptible = false; 2811 (void) intel_overlay_switch_off(intel_crtc->overlay); 2812 dev_priv->mm.interruptible = true; 2813 DRM_UNLOCK(dev); 2814 } 2815 2816 /* Let userspace switch the overlay on again. In most cases userspace 2817 * has to recompute where to put it anyway. 
2818 */ 2819 } 2820 2821 static void i9xx_crtc_enable(struct drm_crtc *crtc) 2822 { 2823 struct drm_device *dev = crtc->dev; 2824 struct drm_i915_private *dev_priv = dev->dev_private; 2825 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2826 int pipe = intel_crtc->pipe; 2827 int plane = intel_crtc->plane; 2828 2829 if (intel_crtc->active) 2830 return; 2831 2832 intel_crtc->active = true; 2833 intel_update_watermarks(dev); 2834 2835 intel_enable_pll(dev_priv, pipe); 2836 intel_enable_pipe(dev_priv, pipe, false); 2837 intel_enable_plane(dev_priv, plane, pipe); 2838 2839 intel_crtc_load_lut(crtc); 2840 intel_update_fbc(dev); 2841 2842 /* Give the overlay scaler a chance to enable if it's on this pipe */ 2843 intel_crtc_dpms_overlay(intel_crtc, true); 2844 intel_crtc_update_cursor(crtc, true); 2845 } 2846 2847 static void i9xx_crtc_disable(struct drm_crtc *crtc) 2848 { 2849 struct drm_device *dev = crtc->dev; 2850 struct drm_i915_private *dev_priv = dev->dev_private; 2851 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2852 int pipe = intel_crtc->pipe; 2853 int plane = intel_crtc->plane; 2854 2855 if (!intel_crtc->active) 2856 return; 2857 2858 /* Give the overlay scaler a chance to disable if it's on this pipe */ 2859 intel_crtc_wait_for_pending_flips(crtc); 2860 drm_vblank_off(dev, pipe); 2861 intel_crtc_dpms_overlay(intel_crtc, false); 2862 intel_crtc_update_cursor(crtc, false); 2863 2864 if (dev_priv->cfb_plane == plane) 2865 intel_disable_fbc(dev); 2866 2867 intel_disable_plane(dev_priv, plane, pipe); 2868 intel_disable_pipe(dev_priv, pipe); 2869 intel_disable_pll(dev_priv, pipe); 2870 2871 intel_crtc->active = false; 2872 intel_update_fbc(dev); 2873 intel_update_watermarks(dev); 2874 intel_clear_scanline_wait(dev); 2875 } 2876 2877 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 2878 { 2879 /* XXX: When our outputs are all unaware of DPMS modes other than off 2880 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
2881 */ 2882 switch (mode) { 2883 case DRM_MODE_DPMS_ON: 2884 case DRM_MODE_DPMS_STANDBY: 2885 case DRM_MODE_DPMS_SUSPEND: 2886 i9xx_crtc_enable(crtc); 2887 break; 2888 case DRM_MODE_DPMS_OFF: 2889 i9xx_crtc_disable(crtc); 2890 break; 2891 } 2892 } 2893 2894 /** 2895 * Sets the power management mode of the pipe and plane. 2896 */ 2897 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 2898 { 2899 struct drm_device *dev = crtc->dev; 2900 struct drm_i915_private *dev_priv = dev->dev_private; 2901 #if 0 2902 struct drm_i915_master_private *master_priv; 2903 #endif 2904 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2905 int pipe = intel_crtc->pipe; 2906 bool enabled; 2907 2908 if (intel_crtc->dpms_mode == mode) 2909 return; 2910 2911 intel_crtc->dpms_mode = mode; 2912 2913 dev_priv->display.dpms(crtc, mode); 2914 2915 #if 0 2916 if (!dev->primary->master) 2917 return; 2918 2919 master_priv = dev->primary->master->driver_priv; 2920 if (!master_priv->sarea_priv) 2921 return; 2922 #else 2923 if (!dev_priv->sarea_priv) 2924 return; 2925 #endif 2926 2927 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; 2928 2929 switch (pipe) { 2930 case 0: 2931 #if 0 2932 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 2933 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; 2934 #else 2935 dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; 2936 dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; 2937 #endif 2938 break; 2939 case 1: 2940 #if 0 2941 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; 2942 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; 2943 #else 2944 dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; 2945 dev_priv->sarea_priv->planeB_h = enabled ? 
crtc->mode.vdisplay : 0; 2946 #endif 2947 break; 2948 default: 2949 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); 2950 break; 2951 } 2952 } 2953 2954 static void intel_crtc_disable(struct drm_crtc *crtc) 2955 { 2956 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 2957 struct drm_device *dev = crtc->dev; 2958 2959 /* Flush any pending WAITs before we disable the pipe. Note that 2960 * we need to drop the struct_mutex in order to acquire it again 2961 * during the lowlevel dpms routines around a couple of the 2962 * operations. It does not look trivial nor desirable to move 2963 * that locking higher. So instead we leave a window for the 2964 * submission of further commands on the fb before we can actually 2965 * disable it. This race with userspace exists anyway, and we can 2966 * only rely on the pipe being disabled by userspace after it 2967 * receives the hotplug notification and has flushed any pending 2968 * batches. 2969 */ 2970 if (crtc->fb) { 2971 DRM_LOCK(dev); 2972 intel_finish_fb(crtc->fb); 2973 DRM_UNLOCK(dev); 2974 } 2975 2976 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 2977 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 2978 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 2979 2980 if (crtc->fb) { 2981 DRM_LOCK(dev); 2982 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 2983 DRM_UNLOCK(dev); 2984 } 2985 } 2986 2987 /* Prepare for a mode set. 2988 * 2989 * Note we could be a lot smarter here. We need to figure out which outputs 2990 * will be enabled, which disabled (in short, how the config will changes) 2991 * and perform the minimum necessary steps to accomplish that, e.g. updating 2992 * watermarks, FBC configuration, making sure PLLs are programmed correctly, 2993 * panel fitting is in the proper state, etc. 
2994 */ 2995 static void i9xx_crtc_prepare(struct drm_crtc *crtc) 2996 { 2997 i9xx_crtc_disable(crtc); 2998 } 2999 3000 static void i9xx_crtc_commit(struct drm_crtc *crtc) 3001 { 3002 i9xx_crtc_enable(crtc); 3003 } 3004 3005 static void ironlake_crtc_prepare(struct drm_crtc *crtc) 3006 { 3007 ironlake_crtc_disable(crtc); 3008 } 3009 3010 static void ironlake_crtc_commit(struct drm_crtc *crtc) 3011 { 3012 ironlake_crtc_enable(crtc); 3013 } 3014 3015 void intel_encoder_prepare(struct drm_encoder *encoder) 3016 { 3017 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3018 /* lvds has its own version of prepare see intel_lvds_prepare */ 3019 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); 3020 } 3021 3022 void intel_encoder_commit(struct drm_encoder *encoder) 3023 { 3024 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3025 struct drm_device *dev = encoder->dev; 3026 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3027 struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); 3028 3029 /* lvds has its own version of commit see intel_lvds_commit */ 3030 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); 3031 3032 if (HAS_PCH_CPT(dev)) 3033 intel_cpt_verify_modeset(dev, intel_crtc->pipe); 3034 } 3035 3036 void intel_encoder_destroy(struct drm_encoder *encoder) 3037 { 3038 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3039 3040 drm_encoder_cleanup(encoder); 3041 drm_free(intel_encoder, DRM_MEM_KMS); 3042 } 3043 3044 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, 3045 const struct drm_display_mode *mode, 3046 struct drm_display_mode *adjusted_mode) 3047 { 3048 struct drm_device *dev = crtc->dev; 3049 3050 if (HAS_PCH_SPLIT(dev)) { 3051 /* FDI link clock is fixed at 2.7G */ 3052 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 3053 return false; 3054 } 3055 3056 /* All interlaced capable intel hw wants timings in frames. 
Note though
 * that intel_lvds_mode_fixup does some funny tricks with the crtc
 * timings, so we need to be careful not to clobber these.*/
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}

/* Fixed core display clock for 945-class hardware.  Value presumably in
 * kHz, matching the other *_get_display_clock_speed() helpers below --
 * TODO confirm against callers. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

/* Fixed core display clock for 915-class hardware. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

/* Fallback fixed core display clock for the remaining i9xx variants. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* 915GM: derive the core display clock from the GCFGC PCI config word. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	gcfgc = pci_read_config(dev->dev, GCFGC, 2);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

/* Fixed core display clock for 865-class hardware. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is initialized to 0 and never read from the
	 * actual HPLLCC register, so the switch below always takes the
	 * GC_CLOCK_133_200/GC_CLOCK_100_200 arm -- looks intentional per the
	 * comment that follows, but verify against the original driver. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
3109 */ 3110 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 3111 case GC_CLOCK_133_200: 3112 case GC_CLOCK_100_200: 3113 return 200000; 3114 case GC_CLOCK_166_250: 3115 return 250000; 3116 case GC_CLOCK_100_133: 3117 return 133000; 3118 } 3119 3120 /* Shouldn't happen */ 3121 return 0; 3122 } 3123 3124 static int i830_get_display_clock_speed(struct drm_device *dev) 3125 { 3126 return 133000; 3127 } 3128 3129 struct fdi_m_n { 3130 u32 tu; 3131 u32 gmch_m; 3132 u32 gmch_n; 3133 u32 link_m; 3134 u32 link_n; 3135 }; 3136 3137 static void 3138 fdi_reduce_ratio(u32 *num, u32 *den) 3139 { 3140 while (*num > 0xffffff || *den > 0xffffff) { 3141 *num >>= 1; 3142 *den >>= 1; 3143 } 3144 } 3145 3146 static void 3147 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 3148 int link_clock, struct fdi_m_n *m_n) 3149 { 3150 m_n->tu = 64; /* default size */ 3151 3152 /* BUG_ON(pixel_clock > INT_MAX / 36); */ 3153 m_n->gmch_m = bits_per_pixel * pixel_clock; 3154 m_n->gmch_n = link_clock * nlanes * 8; 3155 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 3156 3157 m_n->link_m = pixel_clock; 3158 m_n->link_n = link_clock; 3159 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 3160 } 3161 3162 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 3163 { 3164 if (i915_panel_use_ssc >= 0) 3165 return i915_panel_use_ssc != 0; 3166 return dev_priv->lvds_use_ssc 3167 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 3168 } 3169 3170 /** 3171 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send 3172 * @crtc: CRTC structure 3173 * @mode: requested mode 3174 * 3175 * A pipe may be connected to one or more outputs. Based on the depth of the 3176 * attached framebuffer, choose a good color depth to use on the pipe. 3177 * 3178 * If possible, match the pipe depth to the fb depth. In some cases, this 3179 * isn't ideal, because the connected output supports a lesser or restricted 3180 * set of depths. 
Resolve that here: 3181 * LVDS typically supports only 6bpc, so clamp down in that case 3182 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc 3183 * Displays may support a restricted set as well, check EDID and clamp as 3184 * appropriate. 3185 * DP may want to dither down to 6bpc to fit larger modes 3186 * 3187 * RETURNS: 3188 * Dithering requirement (i.e. false if display bpc and pipe bpc match, 3189 * true if they don't match). 3190 */ 3191 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, 3192 unsigned int *pipe_bpp, 3193 struct drm_display_mode *mode) 3194 { 3195 struct drm_device *dev = crtc->dev; 3196 struct drm_i915_private *dev_priv = dev->dev_private; 3197 struct drm_encoder *encoder; 3198 struct drm_connector *connector; 3199 unsigned int display_bpc = UINT_MAX, bpc; 3200 3201 /* Walk the encoders & connectors on this crtc, get min bpc */ 3202 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3203 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3204 3205 if (encoder->crtc != crtc) 3206 continue; 3207 3208 if (intel_encoder->type == INTEL_OUTPUT_LVDS) { 3209 unsigned int lvds_bpc; 3210 3211 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == 3212 LVDS_A3_POWER_UP) 3213 lvds_bpc = 8; 3214 else 3215 lvds_bpc = 6; 3216 3217 if (lvds_bpc < display_bpc) { 3218 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 3219 display_bpc = lvds_bpc; 3220 } 3221 continue; 3222 } 3223 3224 if (intel_encoder->type == INTEL_OUTPUT_EDP) { 3225 /* Use VBT settings if we have an eDP panel */ 3226 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 3227 3228 if (edp_bpc < display_bpc) { 3229 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 3230 display_bpc = edp_bpc; 3231 } 3232 continue; 3233 } 3234 3235 /* Not one of the known troublemakers, check the EDID */ 3236 list_for_each_entry(connector, &dev->mode_config.connector_list, 3237 head) { 
3238 if (connector->encoder != encoder) 3239 continue; 3240 3241 /* Don't use an invalid EDID bpc value */ 3242 if (connector->display_info.bpc && 3243 connector->display_info.bpc < display_bpc) { 3244 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 3245 display_bpc = connector->display_info.bpc; 3246 } 3247 } 3248 3249 /* 3250 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 3251 * through, clamp it down. (Note: >12bpc will be caught below.) 3252 */ 3253 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 3254 if (display_bpc > 8 && display_bpc < 12) { 3255 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); 3256 display_bpc = 12; 3257 } else { 3258 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); 3259 display_bpc = 8; 3260 } 3261 } 3262 } 3263 3264 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 3265 DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); 3266 display_bpc = 6; 3267 } 3268 3269 /* 3270 * We could just drive the pipe at the highest bpc all the time and 3271 * enable dithering as needed, but that costs bandwidth. So choose 3272 * the minimum value that expresses the full color range of the fb but 3273 * also stays within the max display bpc discovered above. 
3274 */ 3275 3276 switch (crtc->fb->depth) { 3277 case 8: 3278 bpc = 8; /* since we go through a colormap */ 3279 break; 3280 case 15: 3281 case 16: 3282 bpc = 6; /* min is 18bpp */ 3283 break; 3284 case 24: 3285 bpc = 8; 3286 break; 3287 case 30: 3288 bpc = 10; 3289 break; 3290 case 48: 3291 bpc = 12; 3292 break; 3293 default: 3294 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); 3295 bpc = min((unsigned int)8, display_bpc); 3296 break; 3297 } 3298 3299 display_bpc = min(display_bpc, bpc); 3300 3301 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", 3302 bpc, display_bpc); 3303 3304 *pipe_bpp = display_bpc * 3; 3305 3306 return display_bpc != bpc; 3307 } 3308 3309 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 3310 { 3311 struct drm_device *dev = crtc->dev; 3312 struct drm_i915_private *dev_priv = dev->dev_private; 3313 int refclk; 3314 3315 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 3316 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 3317 refclk = dev_priv->lvds_ssc_freq * 1000; 3318 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 3319 refclk / 1000); 3320 } else if (!IS_GEN2(dev)) { 3321 refclk = 96000; 3322 } else { 3323 refclk = 48000; 3324 } 3325 3326 return refclk; 3327 } 3328 3329 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, 3330 intel_clock_t *clock) 3331 { 3332 /* SDVO TV has fixed PLL values depend on its clock range, 3333 this mirrors vbios setting. 
/*
 * Program the FP0/FP1 divider registers for @crtc from the given clock(s).
 *
 * Pineview encodes N as a power of two in the register, other gens store it
 * directly. FP1 receives the reduced (downclocked) dividers only for LVDS
 * with powersave enabled; otherwise it mirrors FP0. Sets
 * intel_crtc->lowfreq_avail accordingly.
 */
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		/* Pineview wants (1 << n) in the N field */
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}

/*
 * Full mode-set for a pre-Ironlake (gen2-gen4) CRTC: find PLL dividers,
 * program DPLL/LVDS/timing/plane registers and enable pipe and plane.
 * Register write ordering (LVDS pins before DPLL enable, the settle delays,
 * the DPLL double-write on gen<4) follows hardware requirements — do not
 * reorder.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * configuration fits the requested clock).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify the output types driven by this crtc */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false. The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				/* only these gens take the multiplier in DPLL */
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		/* gen2 has a different P1/P2 encoding */
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Write DPLL with VCO still disabled; it is enabled further down
	 * after the LVDS pins are powered */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	/* Now enable the VCO */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* All timing registers pack (start - 1) | ((end - 1) << 16) */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
/*
 * Initialize reference clocks when the driver loads
 */
/*
 * Program PCH_DREF_CONTROL according to the global output configuration
 * (LVDS/eDP presence, CK505 clock source, SSC policy). Each staged write
 * is posted and followed by a 200 µs settle delay; the staging order
 * (SSC source before CPU output, or CPU output off before SSC off) must
 * be preserved.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	/* On IBX, SSC is only usable when the external CK505 clock chip is
	 * present (per VBT); later PCHs can always do SSC */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	}
}
/*
 * Select the PLL reference clock (in kHz) for an Ironlake-class CRTC:
 * the VBT SSC frequency for a single-output SSC LVDS panel, otherwise
 * the fixed 120 MHz PCH reference.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *edp_encoder = NULL;
	int num_connectors = 0;
	bool is_lvds = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			edp_encoder = encoder;
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      dev_priv->lvds_ssc_freq);
		return dev_priv->lvds_ssc_freq * 1000;
	}

	return 120000;
}

/*
 * Full mode-set for an Ironlake-class CRTC: PLL search, FDI link/lane and
 * M/N computation, pipe bpc selection (with dithering), and programming of
 * the PCH DPLL, LVDS, timing and data/link M/N registers. Register write
 * ordering and the settle delays follow hardware requirements — do not
 * reorder.
 *
 * NOTE(review): intel_encoder_is_pch_edp(&has_edp_encoder->base) is
 * evaluated on several paths (target clock selection, DPLL_DVO_HIGH_SPEED,
 * DP M/N) that are reachable with has_edp_encoder still NULL; whether that
 * dereferences NULL depends on the callee — confirm against the helper
 * before relying on those paths with no eDP encoder attached.
 *
 * Returns 0 on success or a negative errno.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify the output types driven by this crtc */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false. The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting.
	   NOTE(review): duplicates i9xx_adjust_sdvo_tv_clock() above —
	   candidate for sharing the helper. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		/* fall back to a safe 8bpc rather than programming garbage */
		kprintf("intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!intel_crtc->no_pll) {
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			/* Write DPLL with VCO disabled; it is enabled after
			 * the LVDS pins are powered up below */
			I915_WRITE(_PCH_FP0(pipe), fp);
			I915_WRITE(_PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

			POSTING_READ(_PCH_DPLL(pipe));
			DELAY(150);
		}
	} else {
		/* No dedicated PLL: reuse pipe A's or B's if its current
		 * configuration matches exactly */
		if (dpll == (I915_READ(_PCH_DPLL(0)) & 0x7fffffff) &&
		    fp == I915_READ(_PCH_FP0(0))) {
			intel_crtc->use_pll_a = true;
			DRM_DEBUG_KMS("using pipe a dpll\n");
		} else if (dpll == (I915_READ(_PCH_DPLL(1)) & 0x7fffffff) &&
			   fp == I915_READ(_PCH_FP0(1))) {
			intel_crtc->use_pll_a = false;
			DRM_DEBUG_KMS("using pipe b dpll\n");
		} else {
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
			return -EINVAL;
		}
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (HAS_PCH_CPT(dev)) {
			temp &= ~PORT_TRANS_SEL_MASK;
			temp |= PORT_TRANS_SEL_CPT(pipe);
		} else {
			if (pipe == 1)
				temp |= LVDS_PIPEB_SELECT;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}

		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(_PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(_PCH_DPLL(pipe));
		DELAY(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(_PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(_PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(_PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACED_ILK;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		I915_WRITE(VSYNCSHIFT(pipe),
			   adjusted_mode->crtc_hsync_start
			   - adjusted_mode->crtc_htotal/2);
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		I915_WRITE(VSYNCSHIFT(pipe), 0);
	}

	/* All timing registers pack (start - 1) | ((end - 1) << 16) */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
 */
	/*
	 * NOTE(review): from here to the first closing brace is the tail of
	 * a PCH/Ironlake CRTC mode-set function whose head lies above this
	 * chunk; statements are kept exactly as found.
	 */
	I915_WRITE(PIPESRC(pipe),
	    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	/* Program the transcoder data/link M/N ratios for the FDI/DP link. */
	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	/* CPU eDP (not routed through the PCH) gets its PLL set directly. */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));	/* flush the write before waiting */

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * Generic mode-set entry point: brackets the per-generation
 * display.crtc_mode_set() hook with vblank pre/post bookkeeping and
 * records the resulting DPMS state on the intel_crtc.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);
	drm_vblank_post_modeset(dev, pipe);

	/* A failed mode set leaves the pipe effectively off. */
	if (ret)
		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
	else
		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;

	return ret;
}

/*
 * Returns true when the ELD (EDID-Like Data) already programmed into the
 * audio hardware matches connector->eld, so a rewrite can be skipped.
 * reg_eldv/bits_eldv select the "ELD valid" bit(s); reg_elda/bits_elda
 * address the ELD access pointer; reg_edid is the ELD data window.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff hardware has none marked valid. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	/* Reset the ELD read pointer, then compare word by word. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD payload length in 4-byte units. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

/*
 * Write the connector's ELD to the G4x-era audio controller so the HDMI
 * audio driver can pick up the monitor's audio capabilities.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	/* Pick the ELD-valid bit matching the audio device ID. */
	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the old ELD and reset the write address. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	/* Clamp to the hardware buffer size; eld[2] is length in dwords. */
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}

/*
 * Write the connector's ELD on Ironlake-class hardware (IBX or CPT PCH).
 * Register bases are per-PCH and stride 0x100 per pipe.
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Offset the per-pipe registers; `i` is reused as the pipe here. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_KMS("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* 84 bytes of hw ELD buffer */
	len = 21;
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

/*
 * Public entry point: pick the connector carrying audio for this encoder,
 * patch in the A/V sync delay, and hand off to the per-generation
 * write_eld hook (if the platform has one).
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      connector->encoder->base.id,
		      drm_get_encoder_name(connector->encoder));

	/* ELD byte 6 holds the A/V sync delay in 2-ms units. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	/* 256 entries, packed 8:8:8 R:G:B per dword. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}

/*
 * Show/hide the cursor on 845G/865G, where the base address may only be
 * changed while the cursor is disabled. base == 0 means "hide".
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}

/*
 * Show/hide the cursor on i9xx-class hardware; the base write commits
 * all changes on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}

/*
 * Ivybridge variant: no pipe-select bits in the control register.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Hide the cursor (base = 0) when it lies entirely off-screen. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Negative coordinates are encoded sign+magnitude in CURPOS. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}

/*
 * drm_crtc_funcs.cursor_set implementation: install (or, with handle == 0,
 * remove) a 64x64 ARGB cursor backed by the given GEM handle. Pins the
 * object into the GTT, or attaches a phys object on hardware that needs a
 * physical cursor. Releases any previously installed cursor bo.
 */
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
				 struct drm_file *file,
				 uint32_t handle,
				 uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	uint32_t addr;
	int ret;

	DRM_DEBUG_KMS("\n");

	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		DRM_LOCK(dev);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	/*
	 * NOTE(review): `&obj->base == NULL` only catches a NULL lookup
	 * because `base` is the first member of the object; a plain
	 * `obj == NULL` test would be clearer.
	 */
	if (&obj->base == NULL)
		return -ENOENT;

	/* XXX: error string below should read "too small" */
	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is to small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	DRM_LOCK(dev);
	if (!dev_priv->info->cursor_needs_physical) {
		if (obj->tiling_mode) {
			DRM_ERROR("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_ERROR("failed to release fence for cursor\n");
			goto fail_unpin;
		}

		addr = obj->gtt_offset;
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_attach_phys_object(dev, obj,
						  (intel_crtc->pipe == 0) ?
						  I915_GEM_PHYS_CURSOR_0 :
						  I915_GEM_PHYS_CURSOR_1,
						  align);
		if (ret) {
			DRM_ERROR("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_obj->handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	/* Drop the reference (and pin/phys attachment) of the old cursor. */
	if (intel_crtc->cursor_bo) {
		if (dev_priv->info->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != obj)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
			i915_gem_object_unpin(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}

	DRM_UNLOCK(dev);

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	intel_crtc_update_cursor(crtc, true);

	return 0;
fail_unpin:
	i915_gem_object_unpin(obj);
fail_locked:
	DRM_UNLOCK(dev);
fail:
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}

/* drm_crtc_funcs.cursor_move: record the position and reprogram. */
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_x = x;
	intel_crtc->cursor_y = y;

	intel_crtc_update_cursor(crtc, true);

	return 0;
}

/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Hardware LUT is 8 bits per channel; drop the low byte. */
	intel_crtc->lut_r[regno] = red >> 8;
	intel_crtc->lut_g[regno] = green >> 8;
	intel_crtc->lut_b[regno] = blue >> 8;
}

/* Reads back one gamma LUT entry, widened to 16 bits per channel. */
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}

/* drm_crtc_funcs.gamma_set: update LUT entries [start, start+size). */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/**
 * Get a pipe with a simple mode set on it for doing load-based monitor
 * detection.
 *
 * It will be up to the load-detect code to adjust the pipe as appropriate for
 * its requirements.  The pipe will be connected to no other encoders.
 *
 * Currently this code will only succeed if there is a pipe with no encoders
 * configured for it.  In the future, it could choose to temporarily disable
 * some outputs to free up a pipe for its use.
 *
 * \return crtc, or NULL if no pipes are available.
 */

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Wrap a GEM object in an intel_framebuffer. On success *res holds the
 * new fb (which takes over the obj reference); on failure the obj
 * reference is dropped and the error is returned.
 */
static int
intel_framebuffer_create(struct drm_device *dev,
    struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
    struct drm_framebuffer **res)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->base);
		drm_free(intel_fb, DRM_MEM_KMS);
		return (ret);
	}

	*res = &intel_fb->base;
	return (0);
}

/* Bytes per scanline for the given width/bpp, rounded up to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = howmany(width * bpp, 8);
	return roundup2(pitch, 64);
}

/* Buffer size needed for a whole mode at bpp, rounded up to page size. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
}

/*
 * Allocate a GEM object sized for `mode` and wrap it in a framebuffer;
 * used to conjure a temporary fb for load detection.
 */
static int
intel_framebuffer_create_for_mode(struct drm_device *dev,
    struct drm_display_mode *mode, int depth, int bpp,
    struct drm_framebuffer **res)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd;

	obj = i915_gem_alloc_object(dev,
	    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return (-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
	    bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
}

/*
 * If the fbdev framebuffer is large enough (pitch and size) to display
 * `mode`, return it in *res so load detection can reuse it; otherwise
 * *res is set to NULL. Always returns 0 (errors are signalled by a NULL
 * *res, letting the caller fall back to allocating a temporary fb).
 */
static int
mode_fits_in_fbdev(struct drm_device *dev,
    struct drm_display_mode *mode, struct drm_framebuffer **res)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (dev_priv->fbdev == NULL) {
		*res = NULL;
		return (0);
	}

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL) {
		*res = NULL;
		return (0);
	}

	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
	    fb->bits_per_pixel)) {
		*res = NULL;
		return (0);
	}

	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
		*res = NULL;
		return (0);
	}

	*res = fb;
	return (0);
}

/*
 * Acquire a CRTC for load-based monitor detection (see the block comment
 * above load_detect_mode). Fills `old` with the state needed by
 * intel_release_load_detect_pipe() to undo everything. Returns true when
 * a pipe was successfully set up.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1, r;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
		    &crtc->fb);
		/* Remember to destroy this temporary fb on release. */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (r != 0) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}

/*
 * Undo intel_get_load_detect_pipe(): tear down a temporarily configured
 * pipe (and its temporary fb), or restore the previous DPMS state when an
 * already-assigned CRTC was borrowed.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Pick the FP register selected by the PLL's rate select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot bit; ffs() decodes it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot in the DPLL register. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: pipe B driving an enabled LVDS port implies LVDS. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}

/** Returns the currently programmed mode of the given pipe.
 * The returned mode is allocated with M_WAITOK and owned by the caller.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);

	/* Timing registers hold (value - 1): low word active, high word total. */
	mode->clock = intel_crtc_clock_get(dev, crtc);
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}

/*
 * Switch an LVDS panel PLL back to its full (non-downclocked) rate;
 * no-op on PCH-split hardware or when downclocking isn't available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* The panel registers must be unlocked to touch the PLL. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

/*
 * Drop an idle LVDS panel to its downclocked rate to save power; the
 * inverse of intel_increase_pllclock(). Called from the idle path.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}

/* GPU-busy notification: refresh the power/frequency accounting. */
void intel_mark_busy(struct drm_device *dev)
{
	i915_update_gfx_val(dev->dev_private);
}

/* GPU-idle notification: downclock every CRTC that has a framebuffer. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	if (!i915_powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}
}

/*
 * drm_crtc_funcs.destroy: cancel any pending page-flip unpin work under
 * the event lock, then free the CRTC.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach the pending work under the lock, finish it outside. */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work, DRM_MEM_KMS);
	}

	drm_crtc_cleanup(crtc);

	drm_free(intel_crtc, DRM_MEM_KMS);
}

/*
 * Deferred work after a completed page flip: unpin the old framebuffer
 * object and drop the references taken when the flip was queued.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev;

	dev = work->dev;
	DRM_LOCK(dev);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	DRM_UNLOCK(dev);
	drm_free(work, DRM_MEM_KMS);
}

/*
 * Common flip-completion path (from the vblank interrupt): send the
 * userspace event, clear the pending-flip bit on the old object, wake
 * any waiters, and queue the unpin work.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	if (work == NULL || !atomic_read(&work->pending)) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	lockmgr(&dev->event_lock, LK_RELEASE);

	obj = work->old_fb_obj;

	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	/* Wake sleepers blocked on the pending flip (DragonFly wakeup). */
	wakeup(&obj->pending_flip);

	queue_work(dev_priv->wq, &work->work);
}

/* IRQ entry point keyed by pipe number. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* IRQ entry point keyed by plane number. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/*
 * Called when the flip command has actually been issued to the hardware:
 * mark the queued unpin work as pending so the vblank handler will
 * complete it. atomic_inc_not_zero guards against a work item that is
 * already being torn down.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
	    to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * Gen2 flip: emit MI_DISPLAY_FLIP on the legacy (LP) ring, preceded by a
 * wait on the previous flip for the same plane.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP |
	    MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(0); /* aux display base address, unused */
	ADVANCE_LP_RING();
out:
	return ret;
}

/*
 * Gen3 flip: same shape as gen2 but uses the i915 MI_DISPLAY_FLIP
 * opcode variant and pads with MI_NOOP instead of the aux base dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long offset;
	u32 flip_mask;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	/* Offset into the new buffer for cases of shared fbs between CRTCs */
	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;

	ret = BEGIN_LP_RING(6);
	if (ret)
		goto out;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_DISPLAY_FLIP_I915 |
	    MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset + offset);
	OUT_RING(MI_NOOP);

	ADVANCE_LP_RING();
out:
	return ret;
}

/*
 * Gen4/5 flip: the display registers hold the x/y offsets, so only the
 * base address (with tiling bit) needs reprogramming.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	OUT_RING(MI_DISPLAY_FLIP |
	    MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0]);
	OUT_RING(obj->gtt_offset | obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}

/*
 * Gen6 flip: like gen4/5, but the tiling bit rides with the pitch dword
 * rather than the base address.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
	if (ret)
		goto out;

	ret = BEGIN_LP_RING(4);
	if (ret)
		goto out;

	OUT_RING(MI_DISPLAY_FLIP |
	    MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	OUT_RING(fb->pitches[0] | obj->tiling_mode);
	OUT_RING(obj->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	OUT_RING(pf | pipesrc);
	ADVANCE_LP_RING();
out:
	return ret;
}

/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrpts for page flip completion, which
 * means clients will hang after the first flip is queued. Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto out;

	/* Plane select lives at bit 19 of the flip command on this gen. */
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, (obj->gtt_offset));
	intel_ring_emit(ring, (MI_NOOP));
	intel_ring_advance(ring);
out:
	return ret;
}

/* Fallback for gens with no flip support wired up: report unsupported. */
static int intel_default_queue_flip(struct drm_device *dev,
    struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}

/*
 * drm_crtc_funcs.page_flip entry point: validate, queue the per-gen flip
 * command, and arrange for completion via the vblank/flip interrupt.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
    struct drm_framebuffer *fb,
    struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct
intel_unpin_work *work;
	int ret;

	/* M_WAITOK | M_ZERO: allocation sleeps rather than failing. */
	work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per CRTC. */
		lockmgr(&dev->event_lock, LK_RELEASE);
		drm_free(work, DRM_MEM_KMS);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	DRM_LOCK(dev);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	/* Per-generation flip emission, set up in intel_init_display(). */
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;
	intel_disable_fbc(dev);
	DRM_UNLOCK(dev);

	return 0;

cleanup_pending:
	/* Undo the pending-flip bit and the references taken above. */
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	drm_free(work, DRM_MEM_KMS);

	return ret;
}

/*
 * Repair plane/pipe routing left behind by the BIOS or the bootloader so
 * that the subsequent teardown/modeset disables things in a safe order.
 * Note: the loop below reuses the 'pipe' parameter as its iterator before
 * the parameter's incoming value is consulted.
 */
static void intel_sanitize_modesetting(struct drm_device *dev,
    int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	/* Clear any frame start delays used for debugging left by the BIOS */
	for_each_pipe(pipe) {
		reg = PIPECONF(pipe);
		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	/* !! folds the pipe-select bit to 0/1 for comparison with 'pipe'. */
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe.
	 */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}

/* drm_crtc_funcs.reset: forget cached state and re-sanitize BIOS setup. */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}

/* Non-const: .prepare/.commit are patched per-platform in intel_crtc_init. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};

/*
 * Allocate and register the intel_crtc for 'pipe': identity gamma LUT,
 * pipe/plane mapping (swapped on mobile gen3 for FBC), helper hookup and
 * the idle callout.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing space for INTELFB_CONN_LIMIT connector pointers. */
	intel_crtc = kmalloc(sizeof(struct intel_crtc) +
	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
	    DRM_MEM_KMS, M_WAITOK | M_ZERO);

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Identity gamma ramp as the initial LUT. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
	    ("plane_to_crtc is already initialized"));
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	if (HAS_PCH_SPLIT(dev)) {
		/* Ivybridge pipe C has no PLL of its own. */
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	callout_init_mp(&intel_crtc->idle_callout);
}

/*
 * DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: translate a CRTC object id into
 * the hardware pipe index. Returns 0 on success, -EINVAL on a bad id or
 * uninitialized driver state.
 */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
    struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
	    DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc =
to_intel_crtc(obj_to_crtc(drmmode_obj)); 5861 pipe_from_crtc_id->pipe = crtc->pipe; 5862 5863 return 0; 5864 } 5865 5866 static int intel_encoder_clones(struct drm_device *dev, int type_mask) 5867 { 5868 struct intel_encoder *encoder; 5869 int index_mask = 0; 5870 int entry = 0; 5871 5872 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 5873 if (type_mask & encoder->clone_mask) 5874 index_mask |= (1 << entry); 5875 entry++; 5876 } 5877 5878 return index_mask; 5879 } 5880 5881 static bool has_edp_a(struct drm_device *dev) 5882 { 5883 struct drm_i915_private *dev_priv = dev->dev_private; 5884 5885 if (!IS_MOBILE(dev)) 5886 return false; 5887 5888 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 5889 return false; 5890 5891 if (IS_GEN5(dev) && 5892 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 5893 return false; 5894 5895 return true; 5896 } 5897 5898 static void intel_setup_outputs(struct drm_device *dev) 5899 { 5900 struct drm_i915_private *dev_priv = dev->dev_private; 5901 struct intel_encoder *encoder; 5902 bool dpd_is_edp = false; 5903 bool has_lvds; 5904 5905 has_lvds = intel_lvds_init(dev); 5906 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 5907 /* disable the panel fitter on everything but LVDS */ 5908 I915_WRITE(PFIT_CONTROL, 0); 5909 } 5910 5911 if (HAS_PCH_SPLIT(dev)) { 5912 dpd_is_edp = intel_dpd_is_edp(dev); 5913 5914 if (has_edp_a(dev)) 5915 intel_dp_init(dev, DP_A); 5916 5917 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 5918 intel_dp_init(dev, PCH_DP_D); 5919 } 5920 5921 intel_crt_init(dev); 5922 5923 if (HAS_PCH_SPLIT(dev)) { 5924 int found; 5925 5926 DRM_DEBUG_KMS( 5927 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n", 5928 (I915_READ(HDMIB) & PORT_DETECTED) != 0, 5929 (I915_READ(PCH_DP_B) & DP_DETECTED) != 0, 5930 (I915_READ(HDMIC) & PORT_DETECTED) != 0, 5931 (I915_READ(HDMID) & PORT_DETECTED) != 0, 5932 (I915_READ(PCH_DP_C) & DP_DETECTED) != 0, 5933 (I915_READ(PCH_DP_D) & DP_DETECTED) != 
0, 5934 (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0); 5935 5936 if (I915_READ(HDMIB) & PORT_DETECTED) { 5937 /* PCH SDVOB multiplex with HDMIB */ 5938 found = intel_sdvo_init(dev, PCH_SDVOB); 5939 if (!found) 5940 intel_hdmi_init(dev, HDMIB); 5941 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 5942 intel_dp_init(dev, PCH_DP_B); 5943 } 5944 5945 if (I915_READ(HDMIC) & PORT_DETECTED) 5946 intel_hdmi_init(dev, HDMIC); 5947 5948 if (I915_READ(HDMID) & PORT_DETECTED) 5949 intel_hdmi_init(dev, HDMID); 5950 5951 if (I915_READ(PCH_DP_C) & DP_DETECTED) 5952 intel_dp_init(dev, PCH_DP_C); 5953 5954 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 5955 intel_dp_init(dev, PCH_DP_D); 5956 5957 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 5958 bool found = false; 5959 5960 if (I915_READ(SDVOB) & SDVO_DETECTED) { 5961 DRM_DEBUG_KMS("probing SDVOB\n"); 5962 found = intel_sdvo_init(dev, SDVOB); 5963 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 5964 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 5965 intel_hdmi_init(dev, SDVOB); 5966 } 5967 5968 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 5969 DRM_DEBUG_KMS("probing DP_B\n"); 5970 intel_dp_init(dev, DP_B); 5971 } 5972 } 5973 5974 /* Before G4X SDVOC doesn't have its own detect register */ 5975 5976 if (I915_READ(SDVOB) & SDVO_DETECTED) { 5977 DRM_DEBUG_KMS("probing SDVOC\n"); 5978 found = intel_sdvo_init(dev, SDVOC); 5979 } 5980 5981 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 5982 5983 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 5984 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 5985 intel_hdmi_init(dev, SDVOC); 5986 } 5987 if (SUPPORTS_INTEGRATED_DP(dev)) { 5988 DRM_DEBUG_KMS("probing DP_C\n"); 5989 intel_dp_init(dev, DP_C); 5990 } 5991 } 5992 5993 if (SUPPORTS_INTEGRATED_DP(dev) && 5994 (I915_READ(DP_D) & DP_DETECTED)) { 5995 DRM_DEBUG_KMS("probing DP_D\n"); 5996 intel_dp_init(dev, DP_D); 5997 } 5998 } else if (IS_GEN2(dev)) { 5999 #if 1 6000 KIB_NOTYET(); 6001 #else 6002 intel_dvo_init(dev); 6003 #endif 6004 } 6005 6006 if 
(SUPPORTS_TV(dev)) 6007 intel_tv_init(dev); 6008 6009 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 6010 encoder->base.possible_crtcs = encoder->crtc_mask; 6011 encoder->base.possible_clones = 6012 intel_encoder_clones(dev, encoder->clone_mask); 6013 } 6014 6015 /* disable all the possible outputs/crtcs before entering KMS mode */ 6016 drm_helper_disable_unused_functions(dev); 6017 6018 if (HAS_PCH_SPLIT(dev)) 6019 ironlake_init_pch_refclk(dev); 6020 } 6021 6022 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 6023 { 6024 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6025 6026 drm_framebuffer_cleanup(fb); 6027 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 6028 6029 drm_free(intel_fb, DRM_MEM_KMS); 6030 } 6031 6032 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 6033 struct drm_file *file, 6034 unsigned int *handle) 6035 { 6036 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6037 struct drm_i915_gem_object *obj = intel_fb->obj; 6038 6039 return drm_gem_handle_create(file, &obj->base, handle); 6040 } 6041 6042 static const struct drm_framebuffer_funcs intel_fb_funcs = { 6043 .destroy = intel_user_framebuffer_destroy, 6044 .create_handle = intel_user_framebuffer_create_handle, 6045 }; 6046 6047 int intel_framebuffer_init(struct drm_device *dev, 6048 struct intel_framebuffer *intel_fb, 6049 struct drm_mode_fb_cmd2 *mode_cmd, 6050 struct drm_i915_gem_object *obj) 6051 { 6052 int ret; 6053 6054 if (obj->tiling_mode == I915_TILING_Y) 6055 return -EINVAL; 6056 6057 if (mode_cmd->pitches[0] & 63) 6058 return -EINVAL; 6059 6060 switch (mode_cmd->pixel_format) { 6061 case DRM_FORMAT_RGB332: 6062 case DRM_FORMAT_RGB565: 6063 case DRM_FORMAT_XRGB8888: 6064 case DRM_FORMAT_XBGR8888: 6065 case DRM_FORMAT_ARGB8888: 6066 case DRM_FORMAT_XRGB2101010: 6067 case DRM_FORMAT_ARGB2101010: 6068 /* RGB formats are common across chipsets */ 6069 break; 6070 case 
DRM_FORMAT_YUYV: 6071 case DRM_FORMAT_UYVY: 6072 case DRM_FORMAT_YVYU: 6073 case DRM_FORMAT_VYUY: 6074 break; 6075 default: 6076 DRM_DEBUG_KMS("unsupported pixel format %u\n", 6077 mode_cmd->pixel_format); 6078 return -EINVAL; 6079 } 6080 6081 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 6082 if (ret) { 6083 DRM_ERROR("framebuffer init failed %d\n", ret); 6084 return ret; 6085 } 6086 6087 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 6088 intel_fb->obj = obj; 6089 return 0; 6090 } 6091 6092 static int 6093 intel_user_framebuffer_create(struct drm_device *dev, 6094 struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd, 6095 struct drm_framebuffer **res) 6096 { 6097 struct drm_i915_gem_object *obj; 6098 6099 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 6100 mode_cmd->handles[0])); 6101 if (&obj->base == NULL) 6102 return (-ENOENT); 6103 6104 return (intel_framebuffer_create(dev, mode_cmd, obj, res)); 6105 } 6106 6107 static const struct drm_mode_config_funcs intel_mode_funcs = { 6108 .fb_create = intel_user_framebuffer_create, 6109 .output_poll_changed = intel_fb_output_poll_changed, 6110 }; 6111 6112 /* Set up chip specific display functions */ 6113 static void intel_init_display(struct drm_device *dev) 6114 { 6115 struct drm_i915_private *dev_priv = dev->dev_private; 6116 6117 /* We always want a DPMS function */ 6118 if (HAS_PCH_SPLIT(dev)) { 6119 dev_priv->display.dpms = ironlake_crtc_dpms; 6120 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 6121 dev_priv->display.update_plane = ironlake_update_plane; 6122 } else { 6123 dev_priv->display.dpms = i9xx_crtc_dpms; 6124 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 6125 dev_priv->display.update_plane = i9xx_update_plane; 6126 } 6127 6128 if (I915_HAS_FBC(dev)) { 6129 if (HAS_PCH_SPLIT(dev)) { 6130 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 6131 dev_priv->display.enable_fbc = ironlake_enable_fbc; 6132 dev_priv->display.disable_fbc = 
ironlake_disable_fbc; 6133 } else if (IS_GM45(dev)) { 6134 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 6135 dev_priv->display.enable_fbc = g4x_enable_fbc; 6136 dev_priv->display.disable_fbc = g4x_disable_fbc; 6137 } else if (IS_CRESTLINE(dev)) { 6138 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 6139 dev_priv->display.enable_fbc = i8xx_enable_fbc; 6140 dev_priv->display.disable_fbc = i8xx_disable_fbc; 6141 } 6142 /* 855GM needs testing */ 6143 } 6144 6145 /* Returns the core display clock speed */ 6146 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 6147 dev_priv->display.get_display_clock_speed = 6148 i945_get_display_clock_speed; 6149 else if (IS_I915G(dev)) 6150 dev_priv->display.get_display_clock_speed = 6151 i915_get_display_clock_speed; 6152 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 6153 dev_priv->display.get_display_clock_speed = 6154 i9xx_misc_get_display_clock_speed; 6155 else if (IS_I915GM(dev)) 6156 dev_priv->display.get_display_clock_speed = 6157 i915gm_get_display_clock_speed; 6158 else if (IS_I865G(dev)) 6159 dev_priv->display.get_display_clock_speed = 6160 i865_get_display_clock_speed; 6161 else if (IS_I85X(dev)) 6162 dev_priv->display.get_display_clock_speed = 6163 i855_get_display_clock_speed; 6164 else /* 852, 830 */ 6165 dev_priv->display.get_display_clock_speed = 6166 i830_get_display_clock_speed; 6167 6168 /* For FIFO watermark updates */ 6169 if (HAS_PCH_SPLIT(dev)) { 6170 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; 6171 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; 6172 6173 /* IVB configs may use multi-threaded forcewake */ 6174 if (IS_IVYBRIDGE(dev)) { 6175 u32 ecobus; 6176 6177 /* A small trick here - if the bios hasn't configured MT forcewake, 6178 * and if the device is in RC6, then force_wake_mt_get will not wake 6179 * the device and the ECOBUS read will return zero. 
Which will be 6180 * (correctly) interpreted by the test below as MT forcewake being 6181 * disabled. 6182 */ 6183 DRM_LOCK(dev); 6184 __gen6_gt_force_wake_mt_get(dev_priv); 6185 ecobus = I915_READ_NOTRACE(ECOBUS); 6186 __gen6_gt_force_wake_mt_put(dev_priv); 6187 DRM_UNLOCK(dev); 6188 6189 if (ecobus & FORCEWAKE_MT_ENABLE) { 6190 DRM_DEBUG_KMS("Using MT version of forcewake\n"); 6191 dev_priv->display.force_wake_get = 6192 __gen6_gt_force_wake_mt_get; 6193 dev_priv->display.force_wake_put = 6194 __gen6_gt_force_wake_mt_put; 6195 } 6196 } 6197 6198 if (HAS_PCH_IBX(dev)) 6199 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 6200 else if (HAS_PCH_CPT(dev)) 6201 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; 6202 6203 if (IS_GEN5(dev)) { 6204 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 6205 dev_priv->display.update_wm = ironlake_update_wm; 6206 else { 6207 DRM_DEBUG_KMS("Failed to get proper latency. " 6208 "Disable CxSR\n"); 6209 dev_priv->display.update_wm = NULL; 6210 } 6211 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 6212 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 6213 dev_priv->display.write_eld = ironlake_write_eld; 6214 } else if (IS_GEN6(dev)) { 6215 if (SNB_READ_WM0_LATENCY()) { 6216 dev_priv->display.update_wm = sandybridge_update_wm; 6217 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6218 } else { 6219 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 6220 "Disable CxSR\n"); 6221 dev_priv->display.update_wm = NULL; 6222 } 6223 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 6224 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 6225 dev_priv->display.write_eld = ironlake_write_eld; 6226 } else if (IS_IVYBRIDGE(dev)) { 6227 /* FIXME: detect B0+ stepping and use auto training */ 6228 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 6229 if (SNB_READ_WM0_LATENCY()) { 6230 dev_priv->display.update_wm = sandybridge_update_wm; 6231 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6232 } else { 6233 DRM_DEBUG_KMS("Failed to read display plane latency. " 6234 "Disable CxSR\n"); 6235 dev_priv->display.update_wm = NULL; 6236 } 6237 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 6238 dev_priv->display.write_eld = ironlake_write_eld; 6239 } else 6240 dev_priv->display.update_wm = NULL; 6241 } else if (IS_PINEVIEW(dev)) { 6242 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 6243 dev_priv->is_ddr3, 6244 dev_priv->fsb_freq, 6245 dev_priv->mem_freq)) { 6246 DRM_INFO("failed to find known CxSR latency " 6247 "(found ddr%s fsb freq %d, mem freq %d), " 6248 "disabling CxSR\n", 6249 (dev_priv->is_ddr3 == 1) ? 
"3" : "2", 6250 dev_priv->fsb_freq, dev_priv->mem_freq); 6251 /* Disable CxSR and never update its watermark again */ 6252 pineview_disable_cxsr(dev); 6253 dev_priv->display.update_wm = NULL; 6254 } else 6255 dev_priv->display.update_wm = pineview_update_wm; 6256 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6257 } else if (IS_G4X(dev)) { 6258 dev_priv->display.write_eld = g4x_write_eld; 6259 dev_priv->display.update_wm = g4x_update_wm; 6260 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 6261 } else if (IS_GEN4(dev)) { 6262 dev_priv->display.update_wm = i965_update_wm; 6263 if (IS_CRESTLINE(dev)) 6264 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 6265 else if (IS_BROADWATER(dev)) 6266 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 6267 } else if (IS_GEN3(dev)) { 6268 dev_priv->display.update_wm = i9xx_update_wm; 6269 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 6270 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6271 } else if (IS_I865G(dev)) { 6272 dev_priv->display.update_wm = i830_update_wm; 6273 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6274 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6275 } else if (IS_I85X(dev)) { 6276 dev_priv->display.update_wm = i9xx_update_wm; 6277 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 6278 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6279 } else { 6280 dev_priv->display.update_wm = i830_update_wm; 6281 dev_priv->display.init_clock_gating = i830_init_clock_gating; 6282 if (IS_845G(dev)) 6283 dev_priv->display.get_fifo_size = i845_get_fifo_size; 6284 else 6285 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6286 } 6287 6288 /* Default just returns -ENODEV to indicate unsupported */ 6289 dev_priv->display.queue_flip = intel_default_queue_flip; 6290 6291 switch (INTEL_INFO(dev)->gen) { 6292 case 2: 6293 dev_priv->display.queue_flip = intel_gen2_queue_flip; 6294 break; 6295 6296 
case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		/* Gen5 (Ironlake) shares the gen4 flip path. */
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}

/* One PCI id match -> quirk hook. PCI_ANY_ID wildcards subsystem fields. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

#define PCI_ANY_ID (~0u)

struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* NOTE(review): no table entry follows the X40 comment — the entry
	 * appears to be missing; confirm against upstream. */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};

/* Run every quirk hook whose PCI device/subsystem ids match this device. */
static void intel_init_quirks(struct drm_device *dev)
{
	struct intel_quirk *q;
	device_t d;
	int i;

	d = dev->dev;
	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
		q = &intel_quirks[i];
		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		    q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		    q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set the screen-off bit (SR01 bit 5) via the VGA sequencer. */
	outb(VGA_SR_INDEX, 1);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	DELAY(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/*
 * Top-level KMS bring-up: mode_config limits and funcs, quirks, then
 * (continuing below) per-pipe CRTC/plane init and output probing.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	/* __DECONST: drm_mode_config.funcs is non-const in this DRM port. */
	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
	    &intel_mode_funcs);

	intel_init_quirks(dev);
6434 6435 intel_init_display(dev); 6436 6437 if (IS_GEN2(dev)) { 6438 dev->mode_config.max_width = 2048; 6439 dev->mode_config.max_height = 2048; 6440 } else if (IS_GEN3(dev)) { 6441 dev->mode_config.max_width = 4096; 6442 dev->mode_config.max_height = 4096; 6443 } else { 6444 dev->mode_config.max_width = 8192; 6445 dev->mode_config.max_height = 8192; 6446 } 6447 dev->mode_config.fb_base = dev->agp->base; 6448 6449 DRM_DEBUG_KMS("%d display pipe%s available.\n", 6450 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 6451 6452 for (i = 0; i < dev_priv->num_pipe; i++) { 6453 intel_crtc_init(dev, i); 6454 ret = intel_plane_init(dev, i); 6455 if (ret) 6456 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 6457 } 6458 6459 /* Just disable it once at startup */ 6460 i915_disable_vga(dev); 6461 intel_setup_outputs(dev); 6462 6463 intel_init_clock_gating(dev); 6464 6465 if (IS_IRONLAKE_M(dev)) { 6466 ironlake_enable_drps(dev); 6467 intel_init_emon(dev); 6468 } 6469 6470 if (IS_GEN6(dev)) { 6471 gen6_enable_rps(dev_priv); 6472 gen6_update_ring_freq(dev_priv); 6473 } 6474 6475 callout_init_mp(&dev_priv->idle_callout); 6476 } 6477 6478 void intel_modeset_gem_init(struct drm_device *dev) 6479 { 6480 if (IS_IRONLAKE_M(dev)) 6481 ironlake_enable_rc6(dev); 6482 6483 intel_setup_overlay(dev); 6484 } 6485 6486 void intel_modeset_cleanup(struct drm_device *dev) 6487 { 6488 struct drm_i915_private *dev_priv = dev->dev_private; 6489 struct drm_crtc *crtc; 6490 struct intel_crtc *intel_crtc; 6491 6492 drm_kms_helper_poll_fini(dev); 6493 DRM_LOCK(dev); 6494 6495 #if 0 6496 intel_unregister_dsm_handler(); 6497 #endif 6498 6499 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6500 /* Skip inactive CRTCs */ 6501 if (!crtc->fb) 6502 continue; 6503 6504 intel_crtc = to_intel_crtc(crtc); 6505 intel_increase_pllclock(crtc); 6506 } 6507 6508 intel_disable_fbc(dev); 6509 6510 if (IS_IRONLAKE_M(dev)) 6511 ironlake_disable_drps(dev); 6512 if (IS_GEN6(dev)) 6513 
gen6_disable_rps(dev); 6514 6515 if (IS_IRONLAKE_M(dev)) 6516 ironlake_disable_rc6(dev); 6517 6518 DRM_UNLOCK(dev); 6519 6520 /* Disable the irq before mode object teardown, for the irq might 6521 * enqueue unpin/hotplug work. */ 6522 drm_irq_uninstall(dev); 6523 cancel_work_sync(&dev_priv->hotplug_work); 6524 cancel_work_sync(&dev_priv->rps.work); 6525 6526 /* flush any delayed tasks or pending work */ 6527 flush_scheduled_work(); 6528 6529 drm_mode_config_cleanup(dev); 6530 } 6531 6532 /* 6533 * Return which encoder is currently attached for connector. 6534 */ 6535 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 6536 { 6537 return &intel_attached_encoder(connector)->base; 6538 } 6539 6540 void intel_connector_attach_encoder(struct intel_connector *connector, 6541 struct intel_encoder *encoder) 6542 { 6543 connector->encoder = encoder; 6544 drm_mode_connector_attach_encoder(&connector->base, 6545 &encoder->base); 6546 } 6547 6548 /* 6549 * set vga decode state - true == enable VGA decode 6550 */ 6551 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 6552 { 6553 struct drm_i915_private *dev_priv; 6554 device_t bridge_dev; 6555 u16 gmch_ctrl; 6556 6557 dev_priv = dev->dev_private; 6558 bridge_dev = intel_gtt_get_bridge_device(); 6559 gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2); 6560 if (state) 6561 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 6562 else 6563 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 6564 pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2); 6565 return (0); 6566 } 6567 6568 struct intel_display_error_state { 6569 struct intel_cursor_error_state { 6570 u32 control; 6571 u32 position; 6572 u32 base; 6573 u32 size; 6574 } cursor[2]; 6575 6576 struct intel_pipe_error_state { 6577 u32 conf; 6578 u32 source; 6579 6580 u32 htotal; 6581 u32 hblank; 6582 u32 hsync; 6583 u32 vtotal; 6584 u32 vblank; 6585 u32 vsync; 6586 } pipe[2]; 6587 6588 struct intel_plane_error_state { 6589 u32 control; 6590 u32 stride; 
6591 u32 size; 6592 u32 pos; 6593 u32 addr; 6594 u32 surface; 6595 u32 tile_offset; 6596 } plane[2]; 6597 }; 6598 6599 struct intel_display_error_state * 6600 intel_display_capture_error_state(struct drm_device *dev) 6601 { 6602 drm_i915_private_t *dev_priv = dev->dev_private; 6603 struct intel_display_error_state *error; 6604 int i; 6605 6606 error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT); 6607 if (error == NULL) 6608 return NULL; 6609 6610 for (i = 0; i < 2; i++) { 6611 error->cursor[i].control = I915_READ(CURCNTR(i)); 6612 error->cursor[i].position = I915_READ(CURPOS(i)); 6613 error->cursor[i].base = I915_READ(CURBASE(i)); 6614 6615 error->plane[i].control = I915_READ(DSPCNTR(i)); 6616 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 6617 error->plane[i].size = I915_READ(DSPSIZE(i)); 6618 error->plane[i].pos = I915_READ(DSPPOS(i)); 6619 error->plane[i].addr = I915_READ(DSPADDR(i)); 6620 if (INTEL_INFO(dev)->gen >= 4) { 6621 error->plane[i].surface = I915_READ(DSPSURF(i)); 6622 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 6623 } 6624 6625 error->pipe[i].conf = I915_READ(PIPECONF(i)); 6626 error->pipe[i].source = I915_READ(PIPESRC(i)); 6627 error->pipe[i].htotal = I915_READ(HTOTAL(i)); 6628 error->pipe[i].hblank = I915_READ(HBLANK(i)); 6629 error->pipe[i].hsync = I915_READ(HSYNC(i)); 6630 error->pipe[i].vtotal = I915_READ(VTOTAL(i)); 6631 error->pipe[i].vblank = I915_READ(VBLANK(i)); 6632 error->pipe[i].vsync = I915_READ(VSYNC(i)); 6633 } 6634 6635 return error; 6636 } 6637 6638 void 6639 intel_display_print_error_state(struct sbuf *m, 6640 struct drm_device *dev, 6641 struct intel_display_error_state *error) 6642 { 6643 int i; 6644 6645 for (i = 0; i < 2; i++) { 6646 sbuf_printf(m, "Pipe [%d]:\n", i); 6647 sbuf_printf(m, " CONF: %08x\n", error->pipe[i].conf); 6648 sbuf_printf(m, " SRC: %08x\n", error->pipe[i].source); 6649 sbuf_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); 6650 sbuf_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); 
6651 sbuf_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); 6652 sbuf_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); 6653 sbuf_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); 6654 sbuf_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); 6655 6656 sbuf_printf(m, "Plane [%d]:\n", i); 6657 sbuf_printf(m, " CNTR: %08x\n", error->plane[i].control); 6658 sbuf_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 6659 sbuf_printf(m, " SIZE: %08x\n", error->plane[i].size); 6660 sbuf_printf(m, " POS: %08x\n", error->plane[i].pos); 6661 sbuf_printf(m, " ADDR: %08x\n", error->plane[i].addr); 6662 if (INTEL_INFO(dev)->gen >= 4) { 6663 sbuf_printf(m, " SURF: %08x\n", error->plane[i].surface); 6664 sbuf_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 6665 } 6666 6667 sbuf_printf(m, "Cursor [%d]:\n", i); 6668 sbuf_printf(m, " CNTR: %08x\n", error->cursor[i].control); 6669 sbuf_printf(m, " POS: %08x\n", error->cursor[i].position); 6670 sbuf_printf(m, " BASE: %08x\n", error->cursor[i].base); 6671 } 6672 } 6673