/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>

/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
	DRM_FORMAT_C8, \
	DRM_FORMAT_RGB565, \
	DRM_FORMAT_XRGB8888, \
	DRM_FORMAT_ARGB8888

/* Primary plane formats for gen <= 3 */
static const uint32_t intel_primary_formats_gen2[] = {
	COMMON_PRIMARY_FORMATS,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t intel_primary_formats_gen4[] = {
	COMMON_PRIMARY_FORMATS,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/*
 * Round-to-nearest unsigned 64-bit division built on do_div().
 * NOTE(review): 'd' is evaluated twice (once for the +d/2 rounding term,
 * once inside do_div()) — do not pass an expression with side effects.
 */
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })

static void intel_increase_pllclock(struct drm_device *dev,
				    enum i915_pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);

/* Inclusive [min, max] range for a single PLL divider value. */
typedef struct {
	int min, max;
} intel_range_t;

/*
 * Post divider (p2) selection rule: targets below dot_limit use p2_slow,
 * targets at or above it use p2_fast (see the find_best_dpll functions).
 */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

/*
 * Per-platform/per-output legal ranges for every PLL divider, used by
 * intel_PLL_is_valid() to reject candidate divider combinations.
 */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
};

/* Read the raw PCH reference clock frequency field; PCH split hw only. */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4860000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in a 22.2 fixed-point format on CHV, hence << 22 */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

/* Derive m, p, vco and dot clock from the individual VLV dividers. */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from a bogus candidate */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/*
 * Select the Ironlake PLL limit table for the given CRTC: LVDS outputs
 * pick single/dual-channel (and 100MHz-refclk) variants, everything
 * else uses the DAC limits.
 */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Select the G4x PLL limit table based on the output type on the CRTC. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Top-level limit-table dispatch: route to the platform-specific table. */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	/* Register m2 value is (actual m - 2), hence the +2 */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Effective feedback divider from the (value - 2) encoded m1/m2 fields. */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

/* Derive m/p/vco/dot for i9xx dividers; n is programmed as (actual - 2). */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Derive clocks for CHV; m2 carries 22 fractional bits, hence n << 22. */
static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Bail out of intel_PLL_is_valid() with a (normally silenced) debug tag. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is only required on platforms with both dividers in use */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV limit tables do not define combined m/p ranges */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/*
 * Exhaustive divider search for i9xx-style PLLs: minimize the distance of
 * the resulting dot clock from @target. Returns true if any valid divider
 * combination was found and stored in @best_clock.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/* Same search as i9xx_find_best_dpll() but using Pineview clock math. */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * G4x divider search: accepts any candidate within ~0.585% of @target,
 * preferring smaller n and larger m1/m2/p1 per hardware guidance.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* never consider larger n than
						 * the best hit so far */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * VLV divider search: works on the fast clock (5x dot clock) and picks
 * the candidate with the lowest error in ppm, with a preference for
 * larger p once the error is under 100 ppm.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* solve m2 from the remaining dividers */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}

/*
 * CHV divider search: n and m1 are fixed per hardware doc; solve m2 in
 * 22.2 fixed point for each p1/p2 pair, preferring the biggest p.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* discard candidates that would overflow clock.m2 */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			/* based on hardware requirement, prefer bigger p
			 */
			if (clock.p > best_clock->p) {
				*best_clock = clock;
				found = true;
			}
		}
	}

	return found;
}

/* Whether the CRTC is active with a framebuffer and a readable clock. */
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->primary->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

/* Look up the CPU transcoder currently bound to @pipe. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}

/* Wait for the G45+ frame counter to advance (i.e. a vblank passed). */
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		WARN(1, "vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		g4x_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/* True once the display scanline register has stopped moving. */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	/* IBX and CPT use different hotplug status bit layouts in SDEISR */
	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			/* unknown port: report connected rather than lose it */
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}

/* Human-readable form of an enable bit for assertion messages. */
static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

/* Shared DPLL bound to @crtc, or NULL when no shared DPLL is assigned. */
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config.shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN (!pll,
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void
assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1166 enum i915_pipe pipe, bool state) 1167 { 1168 int reg; 1169 u32 val; 1170 bool cur_state; 1171 1172 reg = FDI_RX_CTL(pipe); 1173 val = I915_READ(reg); 1174 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1175 WARN(cur_state != state, 1176 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1177 state_string(state), state_string(cur_state)); 1178 } 1179 1180 static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1181 enum i915_pipe pipe) 1182 { 1183 int pp_reg, lvds_reg; 1184 u32 val; 1185 enum i915_pipe panel_pipe = PIPE_A; 1186 bool locked = true; 1187 1188 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1189 pp_reg = PCH_PP_CONTROL; 1190 lvds_reg = PCH_LVDS; 1191 } else { 1192 pp_reg = PP_CONTROL; 1193 lvds_reg = LVDS; 1194 } 1195 1196 val = I915_READ(pp_reg); 1197 if (!(val & PANEL_POWER_ON) || 1198 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 1199 locked = false; 1200 1201 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) 1202 panel_pipe = PIPE_B; 1203 1204 WARN(panel_pipe == pipe && locked, 1205 "panel assertion failure, pipe %c regs locked\n", 1206 pipe_name(pipe)); 1207 } 1208 1209 static void assert_cursor(struct drm_i915_private *dev_priv, 1210 enum i915_pipe pipe, bool state) 1211 { 1212 struct drm_device *dev = dev_priv->dev; 1213 bool cur_state; 1214 1215 if (IS_845G(dev) || IS_I865G(dev)) 1216 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1217 else 1218 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1219 1220 WARN(cur_state != state, 1221 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1222 pipe_name(pipe), state_string(state), state_string(cur_state)); 1223 } 1224 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1225 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1226 1227 void assert_pipe(struct drm_i915_private *dev_priv, 1228 enum i915_pipe pipe, bool state) 1229 { 1230 int reg; 1231 u32 val; 1232 bool cur_state; 1233 enum 
transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1234 pipe); 1235 1236 /* if we need the pipe A quirk it must be always on */ 1237 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1238 state = true; 1239 1240 if (!intel_display_power_enabled(dev_priv, 1241 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1242 cur_state = false; 1243 } else { 1244 reg = PIPECONF(cpu_transcoder); 1245 val = I915_READ(reg); 1246 cur_state = !!(val & PIPECONF_ENABLE); 1247 } 1248 1249 WARN(cur_state != state, 1250 "pipe %c assertion failure (expected %s, current %s)\n", 1251 pipe_name(pipe), state_string(state), state_string(cur_state)); 1252 } 1253 1254 static void assert_plane(struct drm_i915_private *dev_priv, 1255 enum plane plane, bool state) 1256 { 1257 int reg; 1258 u32 val; 1259 bool cur_state; 1260 1261 reg = DSPCNTR(plane); 1262 val = I915_READ(reg); 1263 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1264 WARN(cur_state != state, 1265 "plane %c assertion failure (expected %s, current %s)\n", 1266 plane_name(plane), state_string(state), state_string(cur_state)); 1267 } 1268 1269 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1270 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1271 1272 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1273 enum i915_pipe pipe) 1274 { 1275 struct drm_device *dev = dev_priv->dev; 1276 int reg, i; 1277 u32 val; 1278 int cur_pipe; 1279 1280 /* Primary planes are fixed to pipes on gen4+ */ 1281 if (INTEL_INFO(dev)->gen >= 4) { 1282 reg = DSPCNTR(pipe); 1283 val = I915_READ(reg); 1284 WARN(val & DISPLAY_PLANE_ENABLE, 1285 "plane %c assertion failure, should be disabled but not\n", 1286 plane_name(pipe)); 1287 return; 1288 } 1289 1290 /* Need to check both planes against the pipe */ 1291 for_each_pipe(i) { 1292 reg = DSPCNTR(i); 1293 val = I915_READ(reg); 1294 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1295 DISPPLANE_SEL_PIPE_SHIFT; 1296 WARN((val & DISPLAY_PLANE_ENABLE) && pipe 
== cur_pipe, 1297 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1298 plane_name(i), pipe_name(pipe)); 1299 } 1300 } 1301 1302 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1303 enum i915_pipe pipe) 1304 { 1305 struct drm_device *dev = dev_priv->dev; 1306 int reg, sprite; 1307 u32 val; 1308 1309 if (IS_VALLEYVIEW(dev)) { 1310 for_each_sprite(pipe, sprite) { 1311 reg = SPCNTR(pipe, sprite); 1312 val = I915_READ(reg); 1313 WARN(val & SP_ENABLE, 1314 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1315 sprite_name(pipe, sprite), pipe_name(pipe)); 1316 } 1317 } else if (INTEL_INFO(dev)->gen >= 7) { 1318 reg = SPRCTL(pipe); 1319 val = I915_READ(reg); 1320 WARN(val & SPRITE_ENABLE, 1321 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1322 plane_name(pipe), pipe_name(pipe)); 1323 } else if (INTEL_INFO(dev)->gen >= 5) { 1324 reg = DVSCNTR(pipe); 1325 val = I915_READ(reg); 1326 WARN(val & DVS_ENABLE, 1327 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1328 plane_name(pipe), pipe_name(pipe)); 1329 } 1330 } 1331 1332 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1333 { 1334 u32 val; 1335 bool enabled; 1336 1337 WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); 1338 1339 val = I915_READ(PCH_DREF_CONTROL); 1340 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1341 DREF_SUPERSPREAD_SOURCE_MASK)); 1342 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1343 } 1344 1345 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1346 enum i915_pipe pipe) 1347 { 1348 int reg; 1349 u32 val; 1350 bool enabled; 1351 1352 reg = PCH_TRANSCONF(pipe); 1353 val = I915_READ(reg); 1354 enabled = !!(val & TRANS_ENABLE); 1355 WARN(enabled, 1356 "transcoder assertion failed, should be off on pipe %c but is still active\n", 
1357 pipe_name(pipe)); 1358 } 1359 1360 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1361 enum i915_pipe pipe, u32 port_sel, u32 val) 1362 { 1363 if ((val & DP_PORT_EN) == 0) 1364 return false; 1365 1366 if (HAS_PCH_CPT(dev_priv->dev)) { 1367 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1368 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1369 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1370 return false; 1371 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1372 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1373 return false; 1374 } else { 1375 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1376 return false; 1377 } 1378 return true; 1379 } 1380 1381 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1382 enum i915_pipe pipe, u32 val) 1383 { 1384 if ((val & SDVO_ENABLE) == 0) 1385 return false; 1386 1387 if (HAS_PCH_CPT(dev_priv->dev)) { 1388 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1389 return false; 1390 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1391 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1392 return false; 1393 } else { 1394 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1395 return false; 1396 } 1397 return true; 1398 } 1399 1400 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1401 enum i915_pipe pipe, u32 val) 1402 { 1403 if ((val & LVDS_PORT_EN) == 0) 1404 return false; 1405 1406 if (HAS_PCH_CPT(dev_priv->dev)) { 1407 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1408 return false; 1409 } else { 1410 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1411 return false; 1412 } 1413 return true; 1414 } 1415 1416 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1417 enum i915_pipe pipe, u32 val) 1418 { 1419 if ((val & ADPA_DAC_ENABLE) == 0) 1420 return false; 1421 if (HAS_PCH_CPT(dev_priv->dev)) { 1422 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1423 return false; 1424 } else { 1425 if ((val & 
ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1426 return false; 1427 } 1428 return true; 1429 } 1430 1431 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1432 enum i915_pipe pipe, int reg, u32 port_sel) 1433 { 1434 u32 val = I915_READ(reg); 1435 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1436 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1437 reg, pipe_name(pipe)); 1438 1439 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1440 && (val & DP_PIPEB_SELECT), 1441 "IBX PCH dp port still using transcoder B\n"); 1442 } 1443 1444 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1445 enum i915_pipe pipe, int reg) 1446 { 1447 u32 val = I915_READ(reg); 1448 WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1449 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1450 reg, pipe_name(pipe)); 1451 1452 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1453 && (val & SDVO_PIPE_B_SELECT), 1454 "IBX PCH hdmi port still using transcoder B\n"); 1455 } 1456 1457 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1458 enum i915_pipe pipe) 1459 { 1460 int reg; 1461 u32 val; 1462 1463 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1464 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1465 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1466 1467 reg = PCH_ADPA; 1468 val = I915_READ(reg); 1469 WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1470 "PCH VGA enabled on transcoder %c, should be disabled\n", 1471 pipe_name(pipe)); 1472 1473 reg = PCH_LVDS; 1474 val = I915_READ(reg); 1475 WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1476 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1477 pipe_name(pipe)); 1478 1479 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1480 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1481 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1482 
} 1483 1484 static void intel_init_dpio(struct drm_device *dev) 1485 { 1486 struct drm_i915_private *dev_priv = dev->dev_private; 1487 1488 if (!IS_VALLEYVIEW(dev)) 1489 return; 1490 1491 /* 1492 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 1493 * CHV x1 PHY (DP/HDMI D) 1494 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 1495 */ 1496 if (IS_CHERRYVIEW(dev)) { 1497 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 1498 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 1499 } else { 1500 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1501 } 1502 } 1503 1504 static void intel_reset_dpio(struct drm_device *dev) 1505 { 1506 struct drm_i915_private *dev_priv = dev->dev_private; 1507 1508 if (IS_CHERRYVIEW(dev)) { 1509 enum dpio_phy phy; 1510 u32 val; 1511 1512 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) { 1513 /* Poll for phypwrgood signal */ 1514 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & 1515 PHY_POWERGOOD(phy), 1)) 1516 DRM_ERROR("Display PHY %d is not power up\n", phy); 1517 1518 /* 1519 * Deassert common lane reset for PHY. 1520 * 1521 * This should only be done on init and resume from S3 1522 * with both PLLs disabled, or we risk losing DPIO and 1523 * PLL synchronization. 
1524 */ 1525 val = I915_READ(DISPLAY_PHY_CONTROL); 1526 I915_WRITE(DISPLAY_PHY_CONTROL, 1527 PHY_COM_LANE_RESET_DEASSERT(phy, val)); 1528 } 1529 } 1530 } 1531 1532 static void vlv_enable_pll(struct intel_crtc *crtc) 1533 { 1534 struct drm_device *dev = crtc->base.dev; 1535 struct drm_i915_private *dev_priv = dev->dev_private; 1536 int reg = DPLL(crtc->pipe); 1537 u32 dpll = crtc->config.dpll_hw_state.dpll; 1538 1539 assert_pipe_disabled(dev_priv, crtc->pipe); 1540 1541 /* No really, not for ILK+ */ 1542 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1543 1544 /* PLL is protected by panel, make sure we can write it */ 1545 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1546 assert_panel_unlocked(dev_priv, crtc->pipe); 1547 1548 I915_WRITE(reg, dpll); 1549 POSTING_READ(reg); 1550 udelay(150); 1551 1552 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1553 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1554 1555 I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md); 1556 POSTING_READ(DPLL_MD(crtc->pipe)); 1557 1558 /* We do this three times for luck */ 1559 I915_WRITE(reg, dpll); 1560 POSTING_READ(reg); 1561 udelay(150); /* wait for warmup */ 1562 I915_WRITE(reg, dpll); 1563 POSTING_READ(reg); 1564 udelay(150); /* wait for warmup */ 1565 I915_WRITE(reg, dpll); 1566 POSTING_READ(reg); 1567 udelay(150); /* wait for warmup */ 1568 } 1569 1570 static void chv_enable_pll(struct intel_crtc *crtc) 1571 { 1572 struct drm_device *dev = crtc->base.dev; 1573 struct drm_i915_private *dev_priv = dev->dev_private; 1574 int pipe = crtc->pipe; 1575 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1576 u32 tmp; 1577 1578 assert_pipe_disabled(dev_priv, crtc->pipe); 1579 1580 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); 1581 1582 mutex_lock(&dev_priv->dpio_lock); 1583 1584 /* Enable back the 10bit clock to display controller */ 1585 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1586 tmp |= DPIO_DCLKP_EN; 1587 vlv_dpio_write(dev_priv, 
pipe, CHV_CMN_DW14(port), tmp); 1588 1589 /* 1590 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 1591 */ 1592 udelay(1); 1593 1594 /* Enable PLL */ 1595 I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll); 1596 1597 /* Check PLL is locked */ 1598 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1599 DRM_ERROR("PLL %d failed to lock\n", pipe); 1600 1601 /* not sure when this should be written */ 1602 I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md); 1603 POSTING_READ(DPLL_MD(pipe)); 1604 1605 mutex_unlock(&dev_priv->dpio_lock); 1606 } 1607 1608 static void i9xx_enable_pll(struct intel_crtc *crtc) 1609 { 1610 struct drm_device *dev = crtc->base.dev; 1611 struct drm_i915_private *dev_priv = dev->dev_private; 1612 int reg = DPLL(crtc->pipe); 1613 u32 dpll = crtc->config.dpll_hw_state.dpll; 1614 1615 assert_pipe_disabled(dev_priv, crtc->pipe); 1616 1617 /* No really, not for ILK+ */ 1618 BUG_ON(INTEL_INFO(dev)->gen >= 5); 1619 1620 /* PLL is protected by panel, make sure we can write it */ 1621 if (IS_MOBILE(dev) && !IS_I830(dev)) 1622 assert_panel_unlocked(dev_priv, crtc->pipe); 1623 1624 I915_WRITE(reg, dpll); 1625 1626 /* Wait for the clocks to stabilize. */ 1627 POSTING_READ(reg); 1628 udelay(150); 1629 1630 if (INTEL_INFO(dev)->gen >= 4) { 1631 I915_WRITE(DPLL_MD(crtc->pipe), 1632 crtc->config.dpll_hw_state.dpll_md); 1633 } else { 1634 /* The pixel multiplier can only be updated once the 1635 * DPLL is enabled and the clocks are stable. 1636 * 1637 * So write it again. 
1638 */ 1639 I915_WRITE(reg, dpll); 1640 } 1641 1642 /* We do this three times for luck */ 1643 I915_WRITE(reg, dpll); 1644 POSTING_READ(reg); 1645 udelay(150); /* wait for warmup */ 1646 I915_WRITE(reg, dpll); 1647 POSTING_READ(reg); 1648 udelay(150); /* wait for warmup */ 1649 I915_WRITE(reg, dpll); 1650 POSTING_READ(reg); 1651 udelay(150); /* wait for warmup */ 1652 } 1653 1654 /** 1655 * i9xx_disable_pll - disable a PLL 1656 * @dev_priv: i915 private structure 1657 * @pipe: pipe PLL to disable 1658 * 1659 * Disable the PLL for @pipe, making sure the pipe is off first. 1660 * 1661 * Note! This is for pre-ILK only. 1662 */ 1663 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1664 { 1665 /* Don't disable pipe A or pipe A PLLs if needed */ 1666 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1667 return; 1668 1669 /* Make sure the pipe isn't still relying on us */ 1670 assert_pipe_disabled(dev_priv, pipe); 1671 1672 I915_WRITE(DPLL(pipe), 0); 1673 POSTING_READ(DPLL(pipe)); 1674 } 1675 1676 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1677 { 1678 u32 val = 0; 1679 1680 /* Make sure the pipe isn't still relying on us */ 1681 assert_pipe_disabled(dev_priv, pipe); 1682 1683 /* 1684 * Leave integrated clock source and reference clock enabled for pipe B. 1685 * The latter is needed for VGA hotplug / manual detection. 
1686 */ 1687 if (pipe == PIPE_B) 1688 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; 1689 I915_WRITE(DPLL(pipe), val); 1690 POSTING_READ(DPLL(pipe)); 1691 1692 } 1693 1694 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1695 { 1696 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1697 u32 val; 1698 1699 /* Make sure the pipe isn't still relying on us */ 1700 assert_pipe_disabled(dev_priv, pipe); 1701 1702 /* Set PLL en = 0 */ 1703 val = DPLL_SSC_REF_CLOCK_CHV; 1704 if (pipe != PIPE_A) 1705 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1706 I915_WRITE(DPLL(pipe), val); 1707 POSTING_READ(DPLL(pipe)); 1708 1709 mutex_lock(&dev_priv->dpio_lock); 1710 1711 /* Disable 10bit clock to display controller */ 1712 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1713 val &= ~DPIO_DCLKP_EN; 1714 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1715 1716 /* disable left/right clock distribution */ 1717 if (pipe != PIPE_B) { 1718 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1719 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 1720 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 1721 } else { 1722 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 1723 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 1724 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 1725 } 1726 1727 mutex_unlock(&dev_priv->dpio_lock); 1728 } 1729 1730 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1731 struct intel_digital_port *dport) 1732 { 1733 u32 port_mask; 1734 int dpll_reg; 1735 1736 switch (dport->port) { 1737 case PORT_B: 1738 port_mask = DPLL_PORTB_READY_MASK; 1739 dpll_reg = DPLL(0); 1740 break; 1741 case PORT_C: 1742 port_mask = DPLL_PORTC_READY_MASK; 1743 dpll_reg = DPLL(0); 1744 break; 1745 case PORT_D: 1746 port_mask = DPLL_PORTD_READY_MASK; 1747 dpll_reg = DPIO_PHY_STATUS; 1748 break; 1749 default: 1750 BUG(); 1751 } 1752 1753 if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 
1000)) 1754 WARN(1, "timed out waiting for port %c ready: 0x%08x\n", 1755 port_name(dport->port), I915_READ(dpll_reg)); 1756 } 1757 1758 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1759 { 1760 struct drm_device *dev = crtc->base.dev; 1761 struct drm_i915_private *dev_priv = dev->dev_private; 1762 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1763 1764 if (WARN_ON(pll == NULL)) 1765 return; 1766 1767 WARN_ON(!pll->refcount); 1768 if (pll->active == 0) { 1769 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1770 WARN_ON(pll->on); 1771 assert_shared_dpll_disabled(dev_priv, pll); 1772 1773 pll->mode_set(dev_priv, pll); 1774 } 1775 } 1776 1777 /** 1778 * intel_enable_shared_dpll - enable PCH PLL 1779 * @dev_priv: i915 private structure 1780 * @pipe: pipe PLL to enable 1781 * 1782 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1783 * drives the transcoder clock. 1784 */ 1785 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1786 { 1787 struct drm_device *dev = crtc->base.dev; 1788 struct drm_i915_private *dev_priv = dev->dev_private; 1789 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1790 1791 if (WARN_ON(pll == NULL)) 1792 return; 1793 1794 if (WARN_ON(pll->refcount == 0)) 1795 return; 1796 1797 DRM_DEBUG_KMS("enable %s (active %d, on? 
%d)for crtc %d\n", 1798 pll->name, pll->active, pll->on, 1799 crtc->base.base.id); 1800 1801 if (pll->active++) { 1802 WARN_ON(!pll->on); 1803 assert_shared_dpll_enabled(dev_priv, pll); 1804 return; 1805 } 1806 WARN_ON(pll->on); 1807 1808 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1809 1810 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1811 pll->enable(dev_priv, pll); 1812 pll->on = true; 1813 } 1814 1815 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1816 { 1817 struct drm_device *dev = crtc->base.dev; 1818 struct drm_i915_private *dev_priv = dev->dev_private; 1819 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1820 1821 /* PCH only available on ILK+ */ 1822 BUG_ON(INTEL_INFO(dev)->gen < 5); 1823 if (WARN_ON(pll == NULL)) 1824 return; 1825 1826 if (WARN_ON(pll->refcount == 0)) 1827 return; 1828 1829 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1830 pll->name, pll->active, pll->on, 1831 crtc->base.base.id); 1832 1833 if (WARN_ON(pll->active == 0)) { 1834 assert_shared_dpll_disabled(dev_priv, pll); 1835 return; 1836 } 1837 1838 assert_shared_dpll_enabled(dev_priv, pll); 1839 WARN_ON(!pll->on); 1840 if (--pll->active) 1841 return; 1842 1843 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1844 pll->disable(dev_priv, pll); 1845 pll->on = false; 1846 1847 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1848 } 1849 1850 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1851 enum i915_pipe pipe) 1852 { 1853 struct drm_device *dev = dev_priv->dev; 1854 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1855 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1856 uint32_t reg, val, pipeconf_val; 1857 1858 /* PCH only available on ILK+ */ 1859 BUG_ON(INTEL_INFO(dev)->gen < 5); 1860 1861 /* Make sure PCH DPLL is enabled */ 1862 assert_shared_dpll_enabled(dev_priv, 1863 intel_crtc_to_shared_dpll(intel_crtc)); 1864 1865 /* FDI must be feeding us bits for PCH ports */ 1866 
assert_fdi_tx_enabled(dev_priv, pipe); 1867 assert_fdi_rx_enabled(dev_priv, pipe); 1868 1869 if (HAS_PCH_CPT(dev)) { 1870 /* Workaround: Set the timing override bit before enabling the 1871 * pch transcoder. */ 1872 reg = TRANS_CHICKEN2(pipe); 1873 val = I915_READ(reg); 1874 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1875 I915_WRITE(reg, val); 1876 } 1877 1878 reg = PCH_TRANSCONF(pipe); 1879 val = I915_READ(reg); 1880 pipeconf_val = I915_READ(PIPECONF(pipe)); 1881 1882 if (HAS_PCH_IBX(dev_priv->dev)) { 1883 /* 1884 * make the BPC in transcoder be consistent with 1885 * that in pipeconf reg. 1886 */ 1887 val &= ~PIPECONF_BPC_MASK; 1888 val |= pipeconf_val & PIPECONF_BPC_MASK; 1889 } 1890 1891 val &= ~TRANS_INTERLACE_MASK; 1892 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1893 if (HAS_PCH_IBX(dev_priv->dev) && 1894 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1895 val |= TRANS_LEGACY_INTERLACED_ILK; 1896 else 1897 val |= TRANS_INTERLACED; 1898 else 1899 val |= TRANS_PROGRESSIVE; 1900 1901 I915_WRITE(reg, val | TRANS_ENABLE); 1902 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1903 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1904 } 1905 1906 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1907 enum transcoder cpu_transcoder) 1908 { 1909 u32 val, pipeconf_val; 1910 1911 /* PCH only available on ILK+ */ 1912 BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); 1913 1914 /* FDI must be feeding us bits for PCH ports */ 1915 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1916 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1917 1918 /* Workaround: set timing override bit. 
*/ 1919 val = I915_READ(_TRANSA_CHICKEN2); 1920 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1921 I915_WRITE(_TRANSA_CHICKEN2, val); 1922 1923 val = TRANS_ENABLE; 1924 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1925 1926 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1927 PIPECONF_INTERLACED_ILK) 1928 val |= TRANS_INTERLACED; 1929 else 1930 val |= TRANS_PROGRESSIVE; 1931 1932 I915_WRITE(LPT_TRANSCONF, val); 1933 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 1934 DRM_ERROR("Failed to enable PCH transcoder\n"); 1935 } 1936 1937 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1938 enum i915_pipe pipe) 1939 { 1940 struct drm_device *dev = dev_priv->dev; 1941 uint32_t reg, val; 1942 1943 /* FDI relies on the transcoder */ 1944 assert_fdi_tx_disabled(dev_priv, pipe); 1945 assert_fdi_rx_disabled(dev_priv, pipe); 1946 1947 /* Ports must be off as well */ 1948 assert_pch_ports_disabled(dev_priv, pipe); 1949 1950 reg = PCH_TRANSCONF(pipe); 1951 val = I915_READ(reg); 1952 val &= ~TRANS_ENABLE; 1953 I915_WRITE(reg, val); 1954 /* wait for PCH transcoder off, transcoder state */ 1955 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1956 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1957 1958 if (!HAS_PCH_IBX(dev)) { 1959 /* Workaround: Clear the timing override chicken bit again. */ 1960 reg = TRANS_CHICKEN2(pipe); 1961 val = I915_READ(reg); 1962 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1963 I915_WRITE(reg, val); 1964 } 1965 } 1966 1967 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1968 { 1969 u32 val; 1970 1971 val = I915_READ(LPT_TRANSCONF); 1972 val &= ~TRANS_ENABLE; 1973 I915_WRITE(LPT_TRANSCONF, val); 1974 /* wait for PCH transcoder off, transcoder state */ 1975 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 1976 DRM_ERROR("Failed to disable PCH transcoder\n"); 1977 1978 /* Workaround: clear timing override bit. 
*/ 1979 val = I915_READ(_TRANSA_CHICKEN2); 1980 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1981 I915_WRITE(_TRANSA_CHICKEN2, val); 1982 } 1983 1984 /** 1985 * intel_enable_pipe - enable a pipe, asserting requirements 1986 * @crtc: crtc responsible for the pipe 1987 * 1988 * Enable @crtc's pipe, making sure that various hardware specific requirements 1989 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1990 */ 1991 static void intel_enable_pipe(struct intel_crtc *crtc) 1992 { 1993 struct drm_device *dev = crtc->base.dev; 1994 struct drm_i915_private *dev_priv = dev->dev_private; 1995 enum i915_pipe pipe = crtc->pipe; 1996 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1997 pipe); 1998 enum i915_pipe pch_transcoder; 1999 int reg; 2000 u32 val; 2001 2002 assert_planes_disabled(dev_priv, pipe); 2003 assert_cursor_disabled(dev_priv, pipe); 2004 assert_sprites_disabled(dev_priv, pipe); 2005 2006 if (HAS_PCH_LPT(dev_priv->dev)) 2007 pch_transcoder = TRANSCODER_A; 2008 else 2009 pch_transcoder = pipe; 2010 2011 /* 2012 * A pipe without a PLL won't actually be able to drive bits from 2013 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 2014 * need the check. 
2015 */ 2016 if (!HAS_PCH_SPLIT(dev_priv->dev)) 2017 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) 2018 assert_dsi_pll_enabled(dev_priv); 2019 else 2020 assert_pll_enabled(dev_priv, pipe); 2021 else { 2022 if (crtc->config.has_pch_encoder) { 2023 /* if driving the PCH, we need FDI enabled */ 2024 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 2025 assert_fdi_tx_pll_enabled(dev_priv, 2026 (enum i915_pipe) cpu_transcoder); 2027 } 2028 /* FIXME: assert CPU port conditions for SNB+ */ 2029 } 2030 2031 reg = PIPECONF(cpu_transcoder); 2032 val = I915_READ(reg); 2033 if (val & PIPECONF_ENABLE) { 2034 WARN_ON(!(pipe == PIPE_A && 2035 dev_priv->quirks & QUIRK_PIPEA_FORCE)); 2036 return; 2037 } 2038 2039 I915_WRITE(reg, val | PIPECONF_ENABLE); 2040 POSTING_READ(reg); 2041 } 2042 2043 /** 2044 * intel_disable_pipe - disable a pipe, asserting requirements 2045 * @dev_priv: i915 private structure 2046 * @pipe: pipe to disable 2047 * 2048 * Disable @pipe, making sure that various hardware specific requirements 2049 * are met, if applicable, e.g. plane disabled, panel fitter off, etc. 2050 * 2051 * @pipe should be %PIPE_A or %PIPE_B. 2052 * 2053 * Will wait until the pipe has shut down before returning. 2054 */ 2055 static void intel_disable_pipe(struct drm_i915_private *dev_priv, 2056 enum i915_pipe pipe) 2057 { 2058 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2059 pipe); 2060 int reg; 2061 u32 val; 2062 2063 /* 2064 * Make sure planes won't keep trying to pump pixels to us, 2065 * or we might hang the display. 
2066 */ 2067 assert_planes_disabled(dev_priv, pipe); 2068 assert_cursor_disabled(dev_priv, pipe); 2069 assert_sprites_disabled(dev_priv, pipe); 2070 2071 /* Don't disable pipe A or pipe A PLLs if needed */ 2072 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2073 return; 2074 2075 reg = PIPECONF(cpu_transcoder); 2076 val = I915_READ(reg); 2077 if ((val & PIPECONF_ENABLE) == 0) 2078 return; 2079 2080 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 2081 intel_wait_for_pipe_off(dev_priv->dev, pipe); 2082 } 2083 2084 /* 2085 * Plane regs are double buffered, going from enabled->disabled needs a 2086 * trigger in order to latch. The display address reg provides this. 2087 */ 2088 void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 2089 enum plane plane) 2090 { 2091 struct drm_device *dev = dev_priv->dev; 2092 u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); 2093 2094 I915_WRITE(reg, I915_READ(reg)); 2095 POSTING_READ(reg); 2096 } 2097 2098 /** 2099 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe 2100 * @dev_priv: i915 private structure 2101 * @plane: plane to enable 2102 * @pipe: pipe being fed 2103 * 2104 * Enable @plane on @pipe, making sure that @pipe is running first. 
 */
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
					  enum plane plane, enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	/* Software-tracked state short-circuits redundant enables. */
	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Tracking said disabled, so the hardware bit must be clear. */
	WARN_ON(val & DISPLAY_PLANE_ENABLE);

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_primary_plane(dev_priv, plane);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
					   enum plane plane, enum i915_pipe pipe)
{
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	int reg;
	u32 val;

	/* Software-tracked state short-circuits redundant disables. */
	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Tracking said enabled, so the hardware bit must be set. */
	WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_primary_plane(dev_priv, plane);
}

/*
 * True when the VT-d scanout workaround (extra alignment/padding) is
 * needed: gen6+ with the IOMMU actively mapping graphics.
 */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/*
 * Round @height up to the tile height: 16 rows on gen2, 8 on later gens
 * for tiled surfaces, no alignment (1) for linear.
 */
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
{
	int tile_height;

	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
	return ALIGN(height, tile_height);
}

/*
 * Pin @obj into the GGTT for scanout and install a fence register.
 * Caller must hold struct_mutex. Returns 0 on success or a negative
 * errno; on failure nothing remains pinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_engine_cs *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* Y tiling is not supported for scanout here. */
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/* Undo intel_pin_and_fence_fb_obj(); caller must hold struct_mutex. */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
*/ 2271 unsigned long intel_gen4_compute_page_offset(int *x, int *y, 2272 unsigned int tiling_mode, 2273 unsigned int cpp, 2274 unsigned int pitch) 2275 { 2276 if (tiling_mode != I915_TILING_NONE) { 2277 unsigned int tile_rows, tiles; 2278 2279 tile_rows = *y / 8; 2280 *y %= 8; 2281 2282 tiles = *x / (512/cpp); 2283 *x %= 512/cpp; 2284 2285 return tile_rows * pitch * 8 + tiles * 4096; 2286 } else { 2287 unsigned int offset; 2288 2289 offset = *y * pitch + *x * cpp; 2290 *y = 0; 2291 *x = (offset & 4095) / cpp; 2292 return offset & -4096; 2293 } 2294 } 2295 2296 int intel_format_to_fourcc(int format) 2297 { 2298 switch (format) { 2299 case DISPPLANE_8BPP: 2300 return DRM_FORMAT_C8; 2301 case DISPPLANE_BGRX555: 2302 return DRM_FORMAT_XRGB1555; 2303 case DISPPLANE_BGRX565: 2304 return DRM_FORMAT_RGB565; 2305 default: 2306 case DISPPLANE_BGRX888: 2307 return DRM_FORMAT_XRGB8888; 2308 case DISPPLANE_RGBX888: 2309 return DRM_FORMAT_XBGR8888; 2310 case DISPPLANE_BGRX101010: 2311 return DRM_FORMAT_XRGB2101010; 2312 case DISPPLANE_RGBX101010: 2313 return DRM_FORMAT_XBGR2101010; 2314 } 2315 } 2316 2317 static bool intel_alloc_plane_obj(struct intel_crtc *crtc, 2318 struct intel_plane_config *plane_config) 2319 { 2320 struct drm_device *dev = crtc->base.dev; 2321 struct drm_i915_gem_object *obj = NULL; 2322 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2323 u32 base = plane_config->base; 2324 2325 if (plane_config->size == 0) 2326 return false; 2327 2328 obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, 2329 plane_config->size); 2330 if (!obj) 2331 return false; 2332 2333 if (plane_config->tiled) { 2334 obj->tiling_mode = I915_TILING_X; 2335 obj->stride = crtc->base.primary->fb->pitches[0]; 2336 } 2337 2338 mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format; 2339 mode_cmd.width = crtc->base.primary->fb->width; 2340 mode_cmd.height = crtc->base.primary->fb->height; 2341 mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0]; 2342 2343 
	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	/* Drop the stolen object on init failure before reporting it. */
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/*
 * Take over the BIOS-programmed framebuffer for @intel_crtc: try to wrap
 * it in a stolen-memory object, and failing that, share the fb of another
 * active CRTC scanning out from the same base address.
 */
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
				 struct intel_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}
}

/*
 * Program the gen2-gen4 primary plane registers (DSPCNTR, stride, surface
 * and offset regs) for @fb panned to (@x, @y).
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj =
		intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Framebuffer creation should have rejected other formats. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: page-aligned base in DSPSURF, residual in TILEOFF/LINOFF */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/*
 * Program the ILK+ primary plane registers for @fb panned to (@x, @y);
 * PCH-split counterpart of i9xx_update_primary_plane().
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
	else
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset
		=
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		/* HSW/BDW fold the tile x/y offset into a single register. */
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

/*
 * Called after a GPU reset: complete nuked page flips and reprogram every
 * active primary plane so the display shows the correct fb again.
 */
void intel_display_handle_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Flips in the rings have been nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 *
	 * Also update the base address of all primary
	 * planes to the last fb to make sure we're
	 * showing the correct fb after a reset.
	 *
	 * Need to make two loops over the crtcs so that we
	 * don't try to grab a crtc mutex before the
	 * pending_flip_queue really got woken up.
	 */

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

/*
 * Wait (uninterruptibly) for the GPU to finish rendering to @old_fb's
 * object before it gets unpinned. Returns the wait result; failure is
 * only expected on a hung GPU.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}

/*
 * True if @crtc still has an outstanding page flip. Always false while a
 * GPU reset is in progress or has happened since the flip was queued.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}

/*
 * Set @fb as the new scanout source for @crtc at pan position (@x, @y):
 * pin the new fb, program the plane, then unpin the old fb after vblank.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
	int ret;

	if (intel_crtc_has_pending_flip(crtc)) {
		DRM_ERROR("pipe is still busy with an old pageflip\n");
		return -EBUSY;
	}

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* NOTE(review): planes are 0-indexed while num_pipes is a count;
	 * '>=' looks intended here — confirm against upstream before
	 * changing. */
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
			  plane_name(intel_crtc->plane),
			  INTEL_INFO(dev)->num_pipes);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);
	if (ret != 0) {
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */
	if (i915.fastboot) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config.adjusted_mode;

		I915_WRITE(PIPESRC(intel_crtc->pipe),
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
			   (adjusted_mode->crtc_vdisplay - 1));
		if (!intel_crtc->config.pch_pfit.enabled &&
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
		}
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
	}

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	if (intel_crtc->active)
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	crtc->primary->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Wait for the flip to latch before unpinning the old fb. */
		if (intel_crtc->active && old_fb != fb)
			intel_wait_for_vblank(dev, intel_crtc->pipe);
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/* Switch the FDI link out of training into the normal (pixel) pattern. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* True when @crtc is live and actually driving a PCH encoder. */
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
	return crtc->base.enabled && crtc->active &&
	       crtc->config.has_pch_encoder;
}

static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to acknowledge/clear it. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (training pattern 2 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing / pre-emphasis levels tried in order on SNB-B. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through each vswing/pre-emphasis level until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing sweep for symbol lock on training pattern 2. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg =
	      FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each param is tried twice (see loop bound above) */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH-side FDI RX PLL and switch to PCDclk for @intel_crtc. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	/* mirror the pipe's BPC setting into FDI RX */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe); 3222 temp = I915_READ(reg); 3223 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 3224 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 3225 3226 POSTING_READ(reg); 3227 udelay(100); 3228 } 3229 } 3230 3231 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 3232 { 3233 struct drm_device *dev = intel_crtc->base.dev; 3234 struct drm_i915_private *dev_priv = dev->dev_private; 3235 int pipe = intel_crtc->pipe; 3236 u32 reg, temp; 3237 3238 /* Switch from PCDclk to Rawclk */ 3239 reg = FDI_RX_CTL(pipe); 3240 temp = I915_READ(reg); 3241 I915_WRITE(reg, temp & ~FDI_PCDCLK); 3242 3243 /* Disable CPU FDI TX PLL */ 3244 reg = FDI_TX_CTL(pipe); 3245 temp = I915_READ(reg); 3246 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 3247 3248 POSTING_READ(reg); 3249 udelay(100); 3250 3251 reg = FDI_RX_CTL(pipe); 3252 temp = I915_READ(reg); 3253 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 3254 3255 /* Wait for the clocks to turn off. */ 3256 POSTING_READ(reg); 3257 udelay(100); 3258 } 3259 3260 static void ironlake_fdi_disable(struct drm_crtc *crtc) 3261 { 3262 struct drm_device *dev = crtc->dev; 3263 struct drm_i915_private *dev_priv = dev->dev_private; 3264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3265 int pipe = intel_crtc->pipe; 3266 u32 reg, temp; 3267 3268 /* disable CPU FDI tx and PCH FDI rx */ 3269 reg = FDI_TX_CTL(pipe); 3270 temp = I915_READ(reg); 3271 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 3272 POSTING_READ(reg); 3273 3274 reg = FDI_RX_CTL(pipe); 3275 temp = I915_READ(reg); 3276 temp &= ~(0x7 << 16); 3277 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 3278 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 3279 3280 POSTING_READ(reg); 3281 udelay(100); 3282 3283 /* Ironlake workaround, disable clock pointer after downing FDI */ 3284 if (HAS_PCH_IBX(dev)) 3285 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3286 3287 /* still set train pattern 1 */ 3288 reg = FDI_TX_CTL(pipe); 3289 temp = I915_READ(reg); 3290 temp &= 
~FDI_LINK_TRAIN_NONE; 3291 temp |= FDI_LINK_TRAIN_PATTERN_1; 3292 I915_WRITE(reg, temp); 3293 3294 reg = FDI_RX_CTL(pipe); 3295 temp = I915_READ(reg); 3296 if (HAS_PCH_CPT(dev)) { 3297 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3298 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3299 } else { 3300 temp &= ~FDI_LINK_TRAIN_NONE; 3301 temp |= FDI_LINK_TRAIN_PATTERN_1; 3302 } 3303 /* BPC in FDI rx is consistent with that in PIPECONF */ 3304 temp &= ~(0x07 << 16); 3305 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 3306 I915_WRITE(reg, temp); 3307 3308 POSTING_READ(reg); 3309 udelay(100); 3310 } 3311 3312 bool intel_has_pending_fb_unpin(struct drm_device *dev) 3313 { 3314 struct intel_crtc *crtc; 3315 3316 /* Note that we don't need to be called with mode_config.lock here 3317 * as our list of CRTC objects is static for the lifetime of the 3318 * device and so cannot disappear as we iterate. Similarly, we can 3319 * happily treat the predicates as racy, atomic checks as userspace 3320 * cannot claim and pin a new fb without at least acquring the 3321 * struct_mutex and so serialising with us. 
 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/* Wait (up to 60s) for any pending page flip on @crtc to complete, then
 * finish rendering on the current front buffer. No-op without an fb. */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->primary->fb == NULL)
		return;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				   !intel_crtc_has_pending_flip(crtc),
				   60*HZ) == 0);

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->primary->fb);
	mutex_unlock(&dev->struct_mutex);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Copy the CPU transcoder's H/V timing registers into the PCH transcoder. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/* Set FDI B/C lane bifurcation in SOUTH_CHICKEN1; both RX paths must be
 * disabled while the bit is flipped. Idempotent if already selected. */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/* Decide, per pipe, whether FDI B/C bifurcation is needed (pipe C always
 * needs it; pipe B only when it uses 2 lanes or fewer). */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config.fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) &
FDI_BC_BIFURCATION_SELECT); 3497 else 3498 cpt_enable_fdi_bc_bifurcation(dev); 3499 3500 break; 3501 case PIPE_C: 3502 cpt_enable_fdi_bc_bifurcation(dev); 3503 3504 break; 3505 default: 3506 BUG(); 3507 } 3508 } 3509 3510 /* 3511 * Enable PCH resources required for PCH ports: 3512 * - PCH PLLs 3513 * - FDI training & RX/TX 3514 * - update transcoder timings 3515 * - DP transcoding bits 3516 * - transcoder 3517 */ 3518 static void ironlake_pch_enable(struct drm_crtc *crtc) 3519 { 3520 struct drm_device *dev = crtc->dev; 3521 struct drm_i915_private *dev_priv = dev->dev_private; 3522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3523 int pipe = intel_crtc->pipe; 3524 u32 reg, temp; 3525 3526 assert_pch_transcoder_disabled(dev_priv, pipe); 3527 3528 if (IS_IVYBRIDGE(dev)) 3529 ivybridge_update_fdi_bc_bifurcation(intel_crtc); 3530 3531 /* Write the TU size bits before fdi link training, so that error 3532 * detection works. */ 3533 I915_WRITE(FDI_RX_TUSIZE1(pipe), 3534 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 3535 3536 /* For PCH output, training FDI link */ 3537 dev_priv->display.fdi_link_train(crtc); 3538 3539 /* We need to program the right clock selection before writing the pixel 3540 * mutliplier into the DPLL. */ 3541 if (HAS_PCH_CPT(dev)) { 3542 u32 sel; 3543 3544 temp = I915_READ(PCH_DPLL_SEL); 3545 temp |= TRANS_DPLL_ENABLE(pipe); 3546 sel = TRANS_DPLLB_SEL(pipe); 3547 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B) 3548 temp |= sel; 3549 else 3550 temp &= ~sel; 3551 I915_WRITE(PCH_DPLL_SEL, temp); 3552 } 3553 3554 /* XXX: pch pll's can be enabled any time before we enable the PCH 3555 * transcoder, and we actually should do this to not upset any PCH 3556 * transcoder that already use the clock when we share it. 3557 * 3558 * Note that enable_shared_dpll tries to do the right thing, but 3559 * get_shared_dpll unconditionally resets the pll - we need that to have 3560 * the right LVDS enable sequence. 
*/ 3561 intel_enable_shared_dpll(intel_crtc); 3562 3563 /* set transcoder timing, panel must allow it */ 3564 assert_panel_unlocked(dev_priv, pipe); 3565 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 3566 3567 intel_fdi_normal_train(crtc); 3568 3569 /* For PCH DP, enable TRANS_DP_CTL */ 3570 if (HAS_PCH_CPT(dev) && 3571 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3572 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3573 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 3574 reg = TRANS_DP_CTL(pipe); 3575 temp = I915_READ(reg); 3576 temp &= ~(TRANS_DP_PORT_SEL_MASK | 3577 TRANS_DP_SYNC_MASK | 3578 TRANS_DP_BPC_MASK); 3579 temp |= (TRANS_DP_OUTPUT_ENABLE | 3580 TRANS_DP_ENH_FRAMING); 3581 temp |= bpc << 9; /* same format but at 11:9 */ 3582 3583 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 3584 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 3585 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 3586 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 3587 3588 switch (intel_trans_dp_port_sel(crtc)) { 3589 case PCH_DP_B: 3590 temp |= TRANS_DP_PORT_SEL_B; 3591 break; 3592 case PCH_DP_C: 3593 temp |= TRANS_DP_PORT_SEL_C; 3594 break; 3595 case PCH_DP_D: 3596 temp |= TRANS_DP_PORT_SEL_D; 3597 break; 3598 default: 3599 BUG(); 3600 } 3601 3602 I915_WRITE(reg, temp); 3603 } 3604 3605 ironlake_enable_pch_transcoder(dev_priv, pipe); 3606 } 3607 3608 static void lpt_pch_enable(struct drm_crtc *crtc) 3609 { 3610 struct drm_device *dev = crtc->dev; 3611 struct drm_i915_private *dev_priv = dev->dev_private; 3612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3613 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3614 3615 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A); 3616 3617 lpt_program_iclkip(crtc); 3618 3619 /* Set transcoder timing. 
*/ 3620 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 3621 3622 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 3623 } 3624 3625 void intel_put_shared_dpll(struct intel_crtc *crtc) 3626 { 3627 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3628 3629 if (pll == NULL) 3630 return; 3631 3632 if (pll->refcount == 0) { 3633 WARN(1, "bad %s refcount\n", pll->name); 3634 return; 3635 } 3636 3637 if (--pll->refcount == 0) { 3638 WARN_ON(pll->on); 3639 WARN_ON(pll->active); 3640 } 3641 3642 crtc->config.shared_dpll = DPLL_ID_PRIVATE; 3643 } 3644 3645 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) 3646 { 3647 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3648 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3649 enum intel_dpll_id i; 3650 3651 if (pll) { 3652 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n", 3653 crtc->base.base.id, pll->name); 3654 intel_put_shared_dpll(crtc); 3655 } 3656 3657 if (HAS_PCH_IBX(dev_priv->dev)) { 3658 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3659 i = (enum intel_dpll_id) crtc->pipe; 3660 pll = &dev_priv->shared_dplls[i]; 3661 3662 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3663 crtc->base.base.id, pll->name); 3664 3665 WARN_ON(pll->refcount); 3666 3667 goto found; 3668 } 3669 3670 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3671 pll = &dev_priv->shared_dplls[i]; 3672 3673 /* Only want to check enabled timings first */ 3674 if (pll->refcount == 0) 3675 continue; 3676 3677 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, 3678 sizeof(pll->hw_state)) == 0) { 3679 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", 3680 crtc->base.base.id, 3681 pll->name, pll->refcount, pll->active); 3682 3683 goto found; 3684 } 3685 } 3686 3687 /* Ok no matching timings, maybe there's a free one? 
*/ 3688 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3689 pll = &dev_priv->shared_dplls[i]; 3690 if (pll->refcount == 0) { 3691 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 3692 crtc->base.base.id, pll->name); 3693 goto found; 3694 } 3695 } 3696 3697 return NULL; 3698 3699 found: 3700 if (pll->refcount == 0) 3701 pll->hw_state = crtc->config.dpll_hw_state; 3702 3703 crtc->config.shared_dpll = i; 3704 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 3705 pipe_name(crtc->pipe)); 3706 3707 pll->refcount++; 3708 3709 return pll; 3710 } 3711 3712 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 3713 { 3714 struct drm_i915_private *dev_priv = dev->dev_private; 3715 int dslreg = PIPEDSL(pipe); 3716 u32 temp; 3717 3718 temp = I915_READ(dslreg); 3719 udelay(500); 3720 if (wait_for(I915_READ(dslreg) != temp, 5)) { 3721 if (wait_for(I915_READ(dslreg) != temp, 5)) 3722 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 3723 } 3724 } 3725 3726 static void ironlake_pfit_enable(struct intel_crtc *crtc) 3727 { 3728 struct drm_device *dev = crtc->base.dev; 3729 struct drm_i915_private *dev_priv = dev->dev_private; 3730 int pipe = crtc->pipe; 3731 3732 if (crtc->config.pch_pfit.enabled) { 3733 /* Force use of hard-coded filter coefficients 3734 * as some pre-programmed values are broken, 3735 * e.g. x201. 
 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}

/* Restore (re-enable) every legacy sprite plane attached to @crtc's pipe. */
static void intel_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_restore(&intel_plane->base);
	}
}

/* Disable every legacy sprite plane attached to @crtc's pipe. */
static void intel_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_disable(&intel_plane->base);
	}
}

/* Enable IPS (Intermediate Pixel Storage) for @crtc if configured; via the
 * pcode mailbox on Broadwell, via IPS_CTL otherwise. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

/* Disable IPS for @crtc (mailbox on Broadwell, IPS_CTL otherwise), then wait
 * a vblank before the caller may disable the plane. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (!HAS_GMCH_DISPLAY(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* 256 entries, packed as 8:8:8 R:G:B per dword. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

/* Switch the legacy overlay off when @enable is false; enabling is left to
 * userspace, which has to reposition the overlay anyway. */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* Bring up primary plane, sprites, cursor, overlay and IPS for @crtc, then
 * refresh FBC and signal a frontbuffer flip. */
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	drm_vblank_on(dev, pipe);

	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);
	intel_crtc_dpms_overlay(intel_crtc, true);

	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip from a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

/* Reverse of intel_crtc_enable_planes: wait out flips, then tear down FBC,
 * IPS, overlay, cursor, sprites and the primary plane. */
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_crtc_wait_for_pending_flips(crtc);

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(dev_priv, plane, pipe);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));

	drm_vblank_off(dev, pipe);
}

/* Full Ironlake CRTC enable sequence: clocks, timings, planes, FDI/PCH and
 * encoders, in the order mandated by the mode-set documentation. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum plane plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc->config.has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	ironlake_set_pipeconf(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling.
		 */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	intel_crtc_enable_planes(crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;

	/* We want to get the other_active_crtc only if there's only 1 other
	 * active crtc. */
	for_each_intel_crtc(dev, crtc_it) {
		if (!crtc_it->active || crtc_it == crtc)
			continue;

		/* More than one other active CRTC: workaround not needed. */
		if (other_active_crtc)
			return;

		other_active_crtc = crtc_it;
	}
	if (!other_active_crtc)
		return;

	intel_wait_for_vblank(dev, other_active_crtc->pipe);
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
}

/* Full Haswell CRTC enable sequence (DDI-based), mirroring
 * ironlake_crtc_enable but using the DDI/LPT paths. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum plane plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		dev_priv->display.fdi_link_train(crtc);
	}

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		lpt_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	intel_crtc_enable_planes(crtc);
}

/* Disable the PCH panel fitter, but only if it is actually in use. */
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right.
*/ 4158 if (crtc->config.pch_pfit.enabled) { 4159 I915_WRITE(PF_CTL(pipe), 0); 4160 I915_WRITE(PF_WIN_POS(pipe), 0); 4161 I915_WRITE(PF_WIN_SZ(pipe), 0); 4162 } 4163 } 4164 4165 static void ironlake_crtc_disable(struct drm_crtc *crtc) 4166 { 4167 struct drm_device *dev = crtc->dev; 4168 struct drm_i915_private *dev_priv = dev->dev_private; 4169 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4170 struct intel_encoder *encoder; 4171 int pipe = intel_crtc->pipe; 4172 u32 reg, temp; 4173 4174 if (!intel_crtc->active) 4175 return; 4176 4177 intel_crtc_disable_planes(crtc); 4178 4179 for_each_encoder_on_crtc(dev, crtc, encoder) 4180 encoder->disable(encoder); 4181 4182 if (intel_crtc->config.has_pch_encoder) 4183 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4184 4185 intel_disable_pipe(dev_priv, pipe); 4186 4187 ironlake_pfit_disable(intel_crtc); 4188 4189 for_each_encoder_on_crtc(dev, crtc, encoder) 4190 if (encoder->post_disable) 4191 encoder->post_disable(encoder); 4192 4193 if (intel_crtc->config.has_pch_encoder) { 4194 ironlake_fdi_disable(crtc); 4195 4196 ironlake_disable_pch_transcoder(dev_priv, pipe); 4197 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4198 4199 if (HAS_PCH_CPT(dev)) { 4200 /* disable TRANS_DP_CTL */ 4201 reg = TRANS_DP_CTL(pipe); 4202 temp = I915_READ(reg); 4203 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 4204 TRANS_DP_PORT_SEL_MASK); 4205 temp |= TRANS_DP_PORT_SEL_NONE; 4206 I915_WRITE(reg, temp); 4207 4208 /* disable DPLL_SEL */ 4209 temp = I915_READ(PCH_DPLL_SEL); 4210 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 4211 I915_WRITE(PCH_DPLL_SEL, temp); 4212 } 4213 4214 /* disable PCH DPLL */ 4215 intel_disable_shared_dpll(intel_crtc); 4216 4217 ironlake_fdi_pll_disable(intel_crtc); 4218 } 4219 4220 intel_crtc->active = false; 4221 intel_update_watermarks(crtc); 4222 4223 mutex_lock(&dev->struct_mutex); 4224 intel_update_fbc(dev); 4225 mutex_unlock(&dev->struct_mutex); 4226 } 4227 4228 static void 
haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	/* Tell the encoders (and, via opregion, the firmware) to shut down
	 * before touching pipe/transcoder state. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	/* Suppress PCH FIFO underrun reports while tearing down: the
	 * sequence below is expected to trigger transient underruns.
	 * NOTE(review): TRANSCODER_A is hardcoded here — presumably the LPT
	 * PCH has only one transcoder; confirm against the platform docs. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(dev_priv, pipe);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	/* PCH side (FDI) teardown, then re-arm underrun reporting. */
	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Drop the shared DPLL reference if this CRTC was using one. */
	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

/* ->off() hook for ILK-class CRTCs: release the shared DPLL. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}


/*
 * Program the gmch (pre-ILK style) panel fitter from the precomputed
 * pipe config.  No-op when the config does not request a fitter.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain that keeps it powered. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/*
 * Return the power domain an encoder's output port lives in, based on
 * the encoder type (digital ports go through port_to_power_domain()).
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - treated like the other digital outputs */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Compute the bitmask of power domains this CRTC (pipe, transcoder,
 * panel fitter and attached encoder ports) needs to be powered. */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder
	*intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	/* Panel fitter domain is needed when enabled, or when force_thru
	 * keeps the pipe routed through it. */
	if (intel_crtc->config.pch_pfit.enabled ||
	    intel_crtc->config.pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

/*
 * Grab/drop the POWER_DOMAIN_INIT reference used during driver init,
 * tracked by power_domains.init_power_on so get/put stay balanced.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/*
 * After a modeset, retake the power-domain references each enabled CRTC
 * now needs and release the ones it held before, then drop the init
 * reference once real references are in place.
 */
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	/* NOTE(review): the fuse field is used directly as an index into
	 * vco_freq[4]; presumably CCK_FUSE_HPLL_FREQ_MASK limits it to
	 * 0..3 — confirm against the register definition. */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	return vco_freq[hpll_freq] * 1000;
}

/* Re-read the current cdclk, cache it, and reprogram the gmbus clock
 * divider to match. */
static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
			 dev_priv->vlv_cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
	I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* Cached frequency must match the hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	/* Pick the Punit voltage/frequency request for the target cdclk. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new operating point from the Punit and wait for ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* 400MHz needs an explicit CCK divider; the other frequencies are
	 * handled by the Punit request above. */
	if (cdclk == 400000) {
		u32 divider, vco;

		vco = valleyview_get_vco(dev_priv);
		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	/* Re-read and cache the resulting frequency. */
	vlv_update_cdclk(dev);
}

/* Choose the lowest supported cdclk bin that can carry max_pixclk. */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int vco = valleyview_get_vco(dev_priv);
	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (max_pixclk > freq_320*9/10)
		return 400000;
	else if (max_pixclk > 266667*9/10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc;
	int max_pixclk = 0;

	/* Scan the *new* (staged) state, not the current one. */
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->new_enabled)
			max_pixclk = max(max_pixclk,
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

/* If the staged configuration requires a cdclk change, mark every active
 * pipe for a full disable/enable cycle around the change. */
static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);

	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
	    dev_priv->vlv_cdclk_freq)
		return;

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_intel_crtc(dev, intel_crtc)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}

/* Global-resource hook: reprogram cdclk if the staged config needs it,
 * then rebalance CRTC power-domain references. */
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->vlv_cdclk_freq)
		valleyview_set_cdclk(dev, req_cdclk);
	modeset_update_crtc_power_domains(dev);
}

/*
 * Full enable sequence for a Valleyview/Cherryview CRTC: plane/pipe
 * setup, PLL bring-up, encoder hooks, then planes.  The ordering of the
 * register writes and encoder callbacks below is mandated by hardware.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	bool is_dsi;
	u32 dspcntr;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);

	/* DSI has its own PLL and CHV uses a different prepare path. */
	if (!is_dsi && !IS_CHERRYVIEW(dev))
		vlv_prepare_pll(intel_crtc);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc);
		else
			vlv_enable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually.
	 */
	i9xx_check_fifo_underruns(dev);
}

/* Write the precomputed FP0/FP1 divider values for this pipe's DPLL. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}

/*
 * Full enable sequence for a gen2-4 (gmch) CRTC.  Mirrors
 * valleyview_crtc_enable() but with the classic i9xx PLL and explicit
 * plane-to-pipe routing in DSPCNTR.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 dspcntr;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	/* Route the plane to its pipe (plane/pipe are not fixed on gmch). */
	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
					       crtc->x, crtc->y);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	/* Underruns don't raise interrupts, so check manually.
	 */
	i9xx_check_fifo_underruns(dev);
}

/* Turn off the gmch panel fitter, if this pipe's config was using it. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/*
 * Full disable sequence for a gen2-4 (gmch) CRTC: planes, encoders,
 * pipe, fitter, then the PLL.  The self-refresh and vblank waits below
 * are ordering-critical — see the inline comments.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	intel_disable_pipe(dev_priv, pipe);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI keeps its own PLL; everything else picks the right teardown. */
	if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(dev_priv, pipe);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* ->off() hook for gmch CRTCs: nothing to release. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}

/*
 * Mirror the pipe's enabled size into the legacy SAREA for UMS/DRI1
 * clients.  Only pipes A and B have SAREA slots.
 */
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	/* NOTE(review): dev->dev_private is used directly as the master
	 * private here, with the upstream master lookup compiled out below —
	 * looks like a porting shim; confirm the aliasing is intended. */
	struct drm_i915_master_private *master_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
#endif
	if (!master_priv->sarea_priv)
		return;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ?
		    crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			/* Power domains must be referenced before the
			 * platform enable hook touches any hardware. */
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
		}
	} else {
		if (intel_crtc->active) {
			dev_priv->display.crtc_disable(crtc);

			/* Release in the reverse order of the enable path. */
			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}
}

/**
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	/* The CRTC stays on as long as any attached encoder still has an
	 * active connector. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);

	intel_crtc_update_sarea(crtc, enable);
}

/*
 * Fully shut down a CRTC for a modeset: hardware disable, SAREA/legacy
 * bookkeeping, framebuffer unpin, and connector/encoder state cleanup.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, NULL,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		crtc->primary->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

/* Default encoder destroy hook: DRM cleanup plus freeing the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state.
   It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		/* NOTE(review): this takes &encoder->base before the NULL
		 * check on encoder below — address-of only, no dereference,
		 * but worth confirming the intended ordering. */
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");

		if (encoder) {
			WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			WARN(!encoder_enabled, "encoder not enabled\n");
			if (WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			WARN(!crtc->enabled, "crtc not enabled\n");
			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}

/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/*
 * Validate the requested FDI lane count for this pipe against the
 * platform's limits and the lane sharing constraints between pipes B
 * and C on Ivybridge.  Returns true when the configuration is usable.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes while pipe C is off. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C can only run when pipe B leaves it lanes. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}

#define RETRY 1
/*
 * Compute FDI lane count and M/N values for a PCH-encoded pipe,
 * reducing pipe bpp (down to 18bpp) and retrying when the lane
 * constraints cannot be met at the current bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	/* If the lane check failed, drop 2 bits per component (floor is
	 * 6bpc = 18bpp) and retry the whole computation. */
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* RETRY tells the caller the config changed and must be recomputed. */
	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}

/* Decide whether IPS can be used: module option, CRTC support and
 * pipe bpp no deeper than 8bpc. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
				   hsw_crtc_supports_ips(crtc) &&
				   pipe_config->pipe_bpp <= 24;
}

/*
 * Platform-independent pipe config fixup: pixel-doubling on old gens,
 * even-width constraints, hsync workaround, bpp clamping, IPS and FDI.
 * Returns 0 on success, RETRY when the caller must recompute, or a
 * negative errno when the mode cannot be used.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Still too fast even with double wide: reject the mode. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 *   - DVO ganged mode
	 *   - LVDS dual channel mode
	 *   - Double wide pipe
	 */
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/*
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
	 * old clock survives for now.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
		pipe_config->shared_dpll = crtc->config.shared_dpll;

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Derive the current VLV cdclk (kHz) from the HPLL VCO and the CCK
 * display clock divider. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int vco = valleyview_get_vco(dev_priv);
	u32 val;
	int divider;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	/* Status bits must mirror the requested divider, otherwise a cdclk
	 * change is still in flight. */
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}

/* Fixed display core clock: i945-class hardware, 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

/* Fixed display core clock: i915-class hardware, 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

/* Fixed display core clock fallback for the remaining gmch parts. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Read the Pineview display core clock from the GCFGC PCI config word. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - report the conservative 133 MHz value */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}

/* Read the i915GM display core clock from the GCFGC PCI config word. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

/* Fixed display core clock: i865, 266 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

/* i855 display core clock.
 * NOTE(review): hpllcc is never read from hardware here (stays 0), so
 * this always lands in the GC_CLOCK_133_200/GC_CLOCK_100_200 bucket —
 * presumably a deliberate simplification; confirm against the HPLLCC
 * register documentation. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

/* Fixed display core clock: i830, 133 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}

/* Halve num/den together until both fit in the M/N register field,
 * preserving the ratio. */
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Compute an m/n register pair approximating the ratio m:n, with n
 * rounded up to a power of two (capped at DATA_LINK_N_MAX). */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/*
 * Fill in the gmch (data) and link M/N values for a display link with
 * the given bpp, lane count, pixel clock and link clock (kHz).
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	/* Data M/N: payload bits vs. total link capacity (8b/10b, so x8). */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}

/* Should the panel use spread-spectrum clocking?  Module parameter
 * overrides VBT, which is in turn gated by the SSC-disable quirk. */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if
(i915.panel_use_ssc >= 0) 5388 return i915.panel_use_ssc != 0; 5389 return dev_priv->vbt.lvds_use_ssc 5390 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5391 } 5392 5393 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 5394 { 5395 struct drm_device *dev = crtc->dev; 5396 struct drm_i915_private *dev_priv = dev->dev_private; 5397 int refclk; 5398 5399 if (IS_VALLEYVIEW(dev)) { 5400 refclk = 100000; 5401 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5402 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 5403 refclk = dev_priv->vbt.lvds_ssc_freq; 5404 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 5405 } else if (!IS_GEN2(dev)) { 5406 refclk = 96000; 5407 } else { 5408 refclk = 48000; 5409 } 5410 5411 return refclk; 5412 } 5413 5414 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 5415 { 5416 return (1 << dpll->n) << 16 | dpll->m2; 5417 } 5418 5419 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 5420 { 5421 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 5422 } 5423 5424 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 5425 intel_clock_t *reduced_clock) 5426 { 5427 struct drm_device *dev = crtc->base.dev; 5428 u32 fp, fp2 = 0; 5429 5430 if (IS_PINEVIEW(dev)) { 5431 fp = pnv_dpll_compute_fp(&crtc->config.dpll); 5432 if (reduced_clock) 5433 fp2 = pnv_dpll_compute_fp(reduced_clock); 5434 } else { 5435 fp = i9xx_dpll_compute_fp(&crtc->config.dpll); 5436 if (reduced_clock) 5437 fp2 = i9xx_dpll_compute_fp(reduced_clock); 5438 } 5439 5440 crtc->config.dpll_hw_state.fp0 = fp; 5441 5442 crtc->lowfreq_avail = false; 5443 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5444 reduced_clock && i915.powersave) { 5445 crtc->config.dpll_hw_state.fp1 = fp2; 5446 crtc->lowfreq_avail = true; 5447 } else { 5448 crtc->config.dpll_hw_state.fp1 = fp; 5449 } 5450 } 5451 5452 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 5453 pipe) 5454 { 5455 u32 reg_val; 5456 
5457 /* 5458 * PLLB opamp always calibrates to max value of 0x3f, force enable it 5459 * and set it to a reasonable value instead. 5460 */ 5461 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 5462 reg_val &= 0xffffff00; 5463 reg_val |= 0x00000030; 5464 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 5465 5466 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 5467 reg_val &= 0x8cffffff; 5468 reg_val = 0x8c000000; 5469 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 5470 5471 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 5472 reg_val &= 0xffffff00; 5473 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 5474 5475 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 5476 reg_val &= 0x00ffffff; 5477 reg_val |= 0xb0000000; 5478 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 5479 } 5480 5481 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 5482 struct intel_link_m_n *m_n) 5483 { 5484 struct drm_device *dev = crtc->base.dev; 5485 struct drm_i915_private *dev_priv = dev->dev_private; 5486 int pipe = crtc->pipe; 5487 5488 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 5489 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 5490 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 5491 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 5492 } 5493 5494 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 5495 struct intel_link_m_n *m_n) 5496 { 5497 struct drm_device *dev = crtc->base.dev; 5498 struct drm_i915_private *dev_priv = dev->dev_private; 5499 int pipe = crtc->pipe; 5500 enum transcoder transcoder = crtc->config.cpu_transcoder; 5501 5502 if (INTEL_INFO(dev)->gen >= 5) { 5503 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 5504 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 5505 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 5506 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 5507 } else { 5508 I915_WRITE(PIPE_DATA_M_G4X(pipe), 
			   TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/* Route the DP M/N values to the PCH or CPU transcoder as appropriate. */
static void intel_dp_set_m_n(struct intel_crtc *crtc)
{
	if (crtc->config.has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
}

/* Compute the DPLL/DPLL_MD state for a VLV pipe (software state only;
 * the hardware is programmed later by vlv_prepare_pll and friends). */
static void vlv_update_pll(struct intel_crtc *crtc)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	dpll_md = (crtc->config.pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	crtc->config.dpll_hw_state.dpll_md = dpll_md;
}

/*
 * Program the VLV PLL dividers and analog tuning values through DPIO.
 * The magic constants and write ordering follow the eDP/HDMI DPIO
 * driver/vbios notes referenced below — do not reorder.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->dpio_lock);

	bestn = crtc->config.dpll.n;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers first, then enable calibration on top */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (crtc->config.port_clock == 162000 ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Compute and program the CHV (Cherryview) PLL state: DPLL/DPLL_MD
 * software state, then the divider and loop-filter values via DPIO.
 */
static void chv_update_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, intcoeff;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	int refclk;

	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	if (pipe != PIPE_A)
		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	crtc->config.dpll_hw_state.dpll_md =
		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;

	/* m2 carries a 22-bit fractional part on CHV; split it out */
	bestn = crtc->config.dpll.n;
	bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2 >> 22;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->dpio_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/*
 Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
		       DPIO_CHV_FRAC_DIV_EN |
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));

	/* Loop filter */
	refclk = i9xx_get_refclk(&crtc->base, 0);
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
	/* Integral coefficient depends on the reference clock in use */
	if (refclk == 100000)
		intcoeff = 11;
	else if (refclk == 38400)
		intcoeff = 10;
	else
		intcoeff = 9;
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for a
 * gen3+ (i9xx-class) pipe from the pre-computed dividers.  Only writes
 * software state in crtc->config.dpll_hw_state; the register writes
 * happen later during PLL enable.
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc->config.pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc->config.sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
	}
}

/*
 * Gen2 (i8xx) variant of the above: compute the DPLL register value
 * from the pre-computed dividers.  Gen2 uses a different P1/P2 encoding
 * and has no DPLL_MD register.
 */
static void i8xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |=
 PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;
}

/*
 * Program the pipe/transcoder timing registers (H/V total, blank, sync,
 * vsyncshift and PIPESRC) from the CRTC's adjusted mode.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch.
	 */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Each register packs (start - 1) in the low half and
	 * (end - 1) in the high half. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
		   (intel_crtc->config.pipe_src_h - 1));
}

/*
 * Read the pipe/transcoder timing registers back into pipe_config —
 * the inverse of intel_set_pipe_timings, used for hardware state
 * readout and cross-checking.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline adjustment applied when programming
	 * interlaced timings (see intel_set_pipe_timings). */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->adjusted_mode.crtc_vtotal += 1;
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w
= ((tmp >> 16) & 0xffff) + 1; 5934 5935 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h; 5936 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; 5937 } 5938 5939 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 5940 struct intel_crtc_config *pipe_config) 5941 { 5942 mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; 5943 mode->htotal = pipe_config->adjusted_mode.crtc_htotal; 5944 mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; 5945 mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; 5946 5947 mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; 5948 mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; 5949 mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; 5950 mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; 5951 5952 mode->flags = pipe_config->adjusted_mode.flags; 5953 5954 mode->clock = pipe_config->adjusted_mode.crtc_clock; 5955 mode->flags |= pipe_config->adjusted_mode.flags; 5956 } 5957 5958 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 5959 { 5960 struct drm_device *dev = intel_crtc->base.dev; 5961 struct drm_i915_private *dev_priv = dev->dev_private; 5962 uint32_t pipeconf; 5963 5964 pipeconf = 0; 5965 5966 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 5967 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) 5968 pipeconf |= PIPECONF_ENABLE; 5969 5970 if (intel_crtc->config.double_wide) 5971 pipeconf |= PIPECONF_DOUBLE_WIDE; 5972 5973 /* only g4x and later have fancy bpc/dither controls */ 5974 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 5975 /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ 5976 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30) 5977 pipeconf |= PIPECONF_DITHER_EN | 5978 PIPECONF_DITHER_TYPE_SP; 5979 5980 switch (intel_crtc->config.pipe_bpp) { 5981 case 18: 5982 pipeconf |= PIPECONF_6BPC; 5983 break; 5984 case 24: 5985 pipeconf |= PIPECONF_8BPC; 5986 break; 5987 case 30: 5988 pipeconf |= PIPECONF_10BPC; 5989 break; 5990 default: 5991 /* Case prevented by intel_choose_pipe_bpp_dither. */ 5992 BUG(); 5993 } 5994 } 5995 5996 if (HAS_PIPE_CXSR(dev)) { 5997 if (intel_crtc->lowfreq_avail) { 5998 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5999 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 6000 } else { 6001 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 6002 } 6003 } 6004 6005 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 6006 if (INTEL_INFO(dev)->gen < 4 || 6007 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6008 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 6009 else 6010 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 6011 } else 6012 pipeconf |= PIPECONF_PROGRESSIVE; 6013 6014 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) 6015 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 6016 6017 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 6018 POSTING_READ(PIPECONF(intel_crtc->pipe)); 6019 } 6020 6021 static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 6022 int x, int y, 6023 struct drm_framebuffer *fb) 6024 { 6025 struct drm_device *dev = crtc->dev; 6026 struct drm_i915_private *dev_priv = dev->dev_private; 6027 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6028 int refclk, num_connectors = 0; 6029 intel_clock_t clock, reduced_clock; 6030 bool ok, has_reduced_clock = false; 6031 bool is_lvds = false, is_dsi = false; 6032 struct intel_encoder *encoder; 6033 const intel_limit_t *limit; 6034 6035 for_each_encoder_on_crtc(dev, crtc, encoder) { 6036 switch (encoder->type) { 6037 case INTEL_OUTPUT_LVDS: 6038 is_lvds = true; 6039 break; 6040 case INTEL_OUTPUT_DSI: 6041 
is_dsi = true; 6042 break; 6043 } 6044 6045 num_connectors++; 6046 } 6047 6048 if (is_dsi) 6049 return 0; 6050 6051 if (!intel_crtc->config.clock_set) { 6052 refclk = i9xx_get_refclk(crtc, num_connectors); 6053 6054 /* 6055 * Returns a set of divisors for the desired target clock with 6056 * the given refclk, or FALSE. The returned values represent 6057 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 6058 * 2) / p1 / p2. 6059 */ 6060 limit = intel_limit(crtc, refclk); 6061 ok = dev_priv->display.find_dpll(limit, crtc, 6062 intel_crtc->config.port_clock, 6063 refclk, NULL, &clock); 6064 if (!ok) { 6065 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 6066 return -EINVAL; 6067 } 6068 6069 if (is_lvds && dev_priv->lvds_downclock_avail) { 6070 /* 6071 * Ensure we match the reduced clock's P to the target 6072 * clock. If the clocks don't match, we can't switch 6073 * the display clock by using the FP0/FP1. In such case 6074 * we will disable the LVDS downclock feature. 6075 */ 6076 has_reduced_clock = 6077 dev_priv->display.find_dpll(limit, crtc, 6078 dev_priv->lvds_downclock, 6079 refclk, &clock, 6080 &reduced_clock); 6081 } 6082 /* Compat-code for transition, will disappear. */ 6083 intel_crtc->config.dpll.n = clock.n; 6084 intel_crtc->config.dpll.m1 = clock.m1; 6085 intel_crtc->config.dpll.m2 = clock.m2; 6086 intel_crtc->config.dpll.p1 = clock.p1; 6087 intel_crtc->config.dpll.p2 = clock.p2; 6088 } 6089 6090 if (IS_GEN2(dev)) { 6091 i8xx_update_pll(intel_crtc, 6092 has_reduced_clock ? &reduced_clock : NULL, 6093 num_connectors); 6094 } else if (IS_CHERRYVIEW(dev)) { 6095 chv_update_pll(intel_crtc); 6096 } else if (IS_VALLEYVIEW(dev)) { 6097 vlv_update_pll(intel_crtc); 6098 } else { 6099 i9xx_update_pll(intel_crtc, 6100 has_reduced_clock ? 
 &reduced_clock : NULL,
				num_connectors);
	}

	return 0;
}

/*
 * Read back the panel fitter state for a gen2-gen4 pipe, if the fitter
 * is enabled and attached to this pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* No panel fitter on i830 or non-mobile gen <= 3 parts */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* pre-gen4 the fitter is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

/*
 * Read the VLV DPIO divider register back and reconstruct the port
 * clock for state readout.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack the divider fields written by vlv_prepare_pll */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read back the primary plane configuration (format, base, stride,
 * size) and allocate a framebuffer structure describing it — used to
 * take over the firmware/BIOS framebuffer at boot.
 */
static void
i9xx_get_plane_config(struct intel_crtc *crtc,
		      struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	/* NOTE(review): allocates an intel_framebuffer but stores it via
	 * the embedded drm_framebuffer pointer; only base members are
	 * touched below. */
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
					aligned_height);

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
 crtc->base.primary->fb->pitches[0],
		      plane_config->size);

}

/*
 * Read the CHV DPIO divider registers back and reconstruct the port
 * clock for state readout — CHV counterpart of vlv_crtc_clock_get.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
	int refclk = 100000;

	mutex_lock(&dev_priv->dpio_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Reassemble m2 from its integer (DW0) and fractional (DW2) parts;
	 * inverse of the packing in chv_update_pll. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	chv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read the full hardware state of a gen2-gen4/VLV/CHV pipe into
 * pipe_config.  Returns false if the pipe's power domain is off or the
 * pipe is disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}

/* Configure the PCH display reference clock tree (continues past this
 * chunk of the file). */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The stepwise walk above must have converged on the computed state. */
	BUG_ON(val != final);
}

/*
 * Assert and then de-assert the FDI mPHY reset through SOUTH_CHICKEN2,
 * waiting atomically (up to 100us) for the status bit at each step.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21)
	    | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	/*
	 * NOTE(review): each 0x20xx register write below is mirrored at the
	 * corresponding 0x21xx offset (the second mPHY lane).  The offsets
	 * and values are magic numbers from the WaMPhyProgramming:hsw
	 * workaround table — do not try to derive them.
	 */
	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 *  - Sequence to enable CLKOUT_DP
 *  - Sequence to enable CLKOUT_DP without spread
 *  - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations rather than failing. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable override bit lives in GEN0 on LP PCHs. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Path-alternate must be set before disabling the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * LPT reference clock init: enable CLKOUT_DP (with spread and FDI I/O)
 * when an analog/VGA encoder is present, otherwise disable it.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		}
	}

	if (has_vga)
		lpt_enable_clkout_dp(dev, true, true);
	else
		lpt_disable_clkout_dp(dev);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Return the reference clock (in kHz) to use for this CRTC: the VBT's
 * LVDS SSC frequency when a lone LVDS output uses SSC, otherwise the
 * fixed 120 MHz PCH reference.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int num_connectors = 0;
	bool is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/*
 * Program PIPECONF for an Ironlake-style pipe from the staged CRTC
 * config: bpc, dithering, interlace mode and limited color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config.pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
6778 */ 6779 6780 if (intel_crtc->config.limited_color_range) 6781 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ 6782 6783 /* 6784 * GY/GU and RY/RU should be the other way around according 6785 * to BSpec, but reality doesn't agree. Just set them up in 6786 * a way that results in the correct picture. 6787 */ 6788 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); 6789 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); 6790 6791 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); 6792 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); 6793 6794 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); 6795 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); 6796 6797 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); 6798 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); 6799 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); 6800 6801 if (INTEL_INFO(dev)->gen > 6) { 6802 uint16_t postoff = 0; 6803 6804 if (intel_crtc->config.limited_color_range) 6805 postoff = (16 * (1 << 12) / 255) & 0x1fff; 6806 6807 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 6808 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 6809 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); 6810 6811 I915_WRITE(PIPE_CSC_MODE(pipe), 0); 6812 } else { 6813 uint32_t mode = CSC_MODE_YUV_TO_RGB; 6814 6815 if (intel_crtc->config.limited_color_range) 6816 mode |= CSC_BLACK_SCREEN_OFFSET; 6817 6818 I915_WRITE(PIPE_CSC_MODE(pipe), mode); 6819 } 6820 } 6821 6822 static void haswell_set_pipeconf(struct drm_crtc *crtc) 6823 { 6824 struct drm_device *dev = crtc->dev; 6825 struct drm_i915_private *dev_priv = dev->dev_private; 6826 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6827 enum i915_pipe pipe = intel_crtc->pipe; 6828 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 6829 uint32_t val; 6830 6831 val = 0; 6832 6833 if (IS_HASWELL(dev) && intel_crtc->config.dither) 6834 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 6835 6836 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 6837 val |= 
PIPECONF_INTERLACED_ILK; 6838 else 6839 val |= PIPECONF_PROGRESSIVE; 6840 6841 I915_WRITE(PIPECONF(cpu_transcoder), val); 6842 POSTING_READ(PIPECONF(cpu_transcoder)); 6843 6844 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 6845 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 6846 6847 if (IS_BROADWELL(dev)) { 6848 val = 0; 6849 6850 switch (intel_crtc->config.pipe_bpp) { 6851 case 18: 6852 val |= PIPEMISC_DITHER_6_BPC; 6853 break; 6854 case 24: 6855 val |= PIPEMISC_DITHER_8_BPC; 6856 break; 6857 case 30: 6858 val |= PIPEMISC_DITHER_10_BPC; 6859 break; 6860 case 36: 6861 val |= PIPEMISC_DITHER_12_BPC; 6862 break; 6863 default: 6864 /* Case prevented by pipe_config_set_bpp. */ 6865 BUG(); 6866 } 6867 6868 if (intel_crtc->config.dither) 6869 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 6870 6871 I915_WRITE(PIPEMISC(pipe), val); 6872 } 6873 } 6874 6875 static bool ironlake_compute_clocks(struct drm_crtc *crtc, 6876 intel_clock_t *clock, 6877 bool *has_reduced_clock, 6878 intel_clock_t *reduced_clock) 6879 { 6880 struct drm_device *dev = crtc->dev; 6881 struct drm_i915_private *dev_priv = dev->dev_private; 6882 struct intel_encoder *intel_encoder; 6883 int refclk; 6884 const intel_limit_t *limit; 6885 bool ret, is_lvds = false; 6886 6887 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 6888 switch (intel_encoder->type) { 6889 case INTEL_OUTPUT_LVDS: 6890 is_lvds = true; 6891 break; 6892 } 6893 } 6894 6895 refclk = ironlake_get_refclk(crtc); 6896 6897 /* 6898 * Returns a set of divisors for the desired target clock with the given 6899 * refclk, or FALSE. The returned values represent the clock equation: 6900 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
6901 */ 6902 limit = intel_limit(crtc, refclk); 6903 ret = dev_priv->display.find_dpll(limit, crtc, 6904 to_intel_crtc(crtc)->config.port_clock, 6905 refclk, NULL, clock); 6906 if (!ret) 6907 return false; 6908 6909 if (is_lvds && dev_priv->lvds_downclock_avail) { 6910 /* 6911 * Ensure we match the reduced clock's P to the target clock. 6912 * If the clocks don't match, we can't switch the display clock 6913 * by using the FP0/FP1. In such case we will disable the LVDS 6914 * downclock feature. 6915 */ 6916 *has_reduced_clock = 6917 dev_priv->display.find_dpll(limit, crtc, 6918 dev_priv->lvds_downclock, 6919 refclk, clock, 6920 reduced_clock); 6921 } 6922 6923 return true; 6924 } 6925 6926 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 6927 { 6928 /* 6929 * Account for spread spectrum to avoid 6930 * oversubscribing the link. Max center spread 6931 * is 2.5%; use 5% for safety's sake. 6932 */ 6933 u32 bps = target_clock * bpp * 21 / 20; 6934 return DIV_ROUND_UP(bps, link_bw * 8); 6935 } 6936 6937 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 6938 { 6939 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 6940 } 6941 6942 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, 6943 u32 *fp, 6944 intel_clock_t *reduced_clock, u32 *fp2) 6945 { 6946 struct drm_crtc *crtc = &intel_crtc->base; 6947 struct drm_device *dev = crtc->dev; 6948 struct drm_i915_private *dev_priv = dev->dev_private; 6949 struct intel_encoder *intel_encoder; 6950 uint32_t dpll; 6951 int factor, num_connectors = 0; 6952 bool is_lvds = false, is_sdvo = false; 6953 6954 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 6955 switch (intel_encoder->type) { 6956 case INTEL_OUTPUT_LVDS: 6957 is_lvds = true; 6958 break; 6959 case INTEL_OUTPUT_SDVO: 6960 case INTEL_OUTPUT_HDMI: 6961 is_sdvo = true; 6962 break; 6963 } 6964 6965 num_connectors++; 6966 } 6967 6968 /* Enable autotuning of the PLL clock (if permissible) */ 6969 factor = 21; 6970 
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->config.sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (intel_crtc->config.pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->config.dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Clock-computation part of the Ironlake modeset: pick PLL divisors,
 * build the DPLL/FP values and reserve a shared DPLL when a PCH
 * encoder is present (releasing any previously held one otherwise).
 * Returns 0 on success or -EINVAL when no usable PLL settings exist.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  int x, int y,
				  struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(crtc, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(intel_crtc,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		intel_crtc->config.dpll_hw_state.dpll = dpll;
		intel_crtc->config.dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
		else
			intel_crtc->config.dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(intel_crtc);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(intel_crtc->pipe));
			return -EINVAL;
		}
	} else
		intel_put_shared_dpll(intel_crtc);

	if (is_lvds && has_reduced_clock && i915.powersave)
		intel_crtc->lowfreq_avail = true;
	else
		intel_crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read the PCH transcoder's link M/N and TU values for this pipe into
 * *m_n.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read the CPU transcoder's (gen5+) or the G4X pipe's link M/N and TU
 * values into *m_n.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	} else {
		m_n->link_m =
			I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * Read back the DP M/N values from whichever transcoder (PCH or CPU)
 * currently drives this CRTC.
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_config *pipe_config)
{
	if (crtc->config.has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n);
}

/* Read back the FDI M/N configuration from the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n);
}

/*
 * Read back the panel fitter state (enable, window position and size)
 * for this pipe into pipe_config->pch_pfit.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
		 */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Reconstruct the primary plane's framebuffer parameters (format,
 * dimensions, pitch, base, size) from the hardware registers — used to
 * take over a BIOS-programmed framebuffer.  Allocates the fb struct;
 * on allocation failure it just logs and returns with fb left NULL.
 */
static void ironlake_get_plane_config(struct intel_crtc *crtc,
				      struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(plane));
	} else {
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
					aligned_height);

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
		      crtc->base.primary->fb->pitches[0],
		      plane_config->size);
}

/*
 * Read out the full hardware state of an Ironlake-style pipe into
 * pipe_config: bpc, color range, FDI/PCH encoder state, shared DPLL
 * assignment, pixel multiplier, timings and panel fitter.  Returns
 * false when the pipe's power domain is off or the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* On IBX the DPLL is fixed to the pipe. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
7308 7309 WARN_ON(!pll->get_hw_state(dev_priv, pll, 7310 &pipe_config->dpll_hw_state)); 7311 7312 tmp = pipe_config->dpll_hw_state.dpll; 7313 pipe_config->pixel_multiplier = 7314 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 7315 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 7316 7317 ironlake_pch_clock_get(crtc, pipe_config); 7318 } else { 7319 pipe_config->pixel_multiplier = 1; 7320 } 7321 7322 intel_get_pipe_timings(crtc, pipe_config); 7323 7324 ironlake_get_pfit_config(crtc, pipe_config); 7325 7326 return true; 7327 } 7328 7329 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 7330 { 7331 struct drm_device *dev = dev_priv->dev; 7332 struct intel_crtc *crtc; 7333 7334 for_each_intel_crtc(dev, crtc) 7335 WARN(crtc->active, "CRTC for pipe %c enabled\n", 7336 pipe_name(crtc->pipe)); 7337 7338 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 7339 WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 7340 WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 7341 WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 7342 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 7343 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 7344 "CPU PWM1 enabled\n"); 7345 if (IS_HASWELL(dev)) 7346 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 7347 "CPU PWM2 enabled\n"); 7348 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 7349 "PCH PWM1 enabled\n"); 7350 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 7351 "Utility pin enabled\n"); 7352 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 7353 7354 /* 7355 * In theory we can still leave IRQs enabled, as long as only the HPD 7356 * interrupts remain enabled. We used to check for that, but since it's 7357 * gen-specific and since we only disable LCPLL after we fully disable 7358 * the interrupts, the check below should be enough. 
7359 */ 7360 WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 7361 } 7362 7363 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 7364 { 7365 struct drm_device *dev = dev_priv->dev; 7366 7367 if (IS_HASWELL(dev)) 7368 return I915_READ(D_COMP_HSW); 7369 else 7370 return I915_READ(D_COMP_BDW); 7371 } 7372 7373 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 7374 { 7375 struct drm_device *dev = dev_priv->dev; 7376 7377 if (IS_HASWELL(dev)) { 7378 mutex_lock(&dev_priv->rps.hw_lock); 7379 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 7380 val)) 7381 DRM_ERROR("Failed to write to D_COMP\n"); 7382 mutex_unlock(&dev_priv->rps.hw_lock); 7383 } else { 7384 I915_WRITE(D_COMP_BDW, val); 7385 POSTING_READ(D_COMP_BDW); 7386 } 7387 } 7388 7389 /* 7390 * This function implements pieces of two sequences from BSpec: 7391 * - Sequence for display software to disable LCPLL 7392 * - Sequence for display software to allow package C8+ 7393 * The steps implemented here are just the steps that actually touch the LCPLL 7394 * register. Callers should take care of disabling all the display engine 7395 * functions, doing the mode unset, fixing interrupts, etc. 
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock onto FCLK before stopping the PLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP compensation and wait for RCOMP to settle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 *
	 * The other problem is that hsw_restore_lcpll() is called as part of
	 * the runtime PM resume sequence, so we can't just call
	 * gen6_gt_force_wake_get() because that function calls
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
	 * while we are on the resume sequence. So to solve this problem we have
	 * to call special forcewake code that doesn't touch runtime PM and
	 * doesn't enable the forcewake delayed work.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation before re-locking the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK onto the re-locked LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	/* See the big comment above. */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* On LP PCH, allow the deepest PCH clock partition level for PC8+. */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

static void snb_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

/*
 * Haswell per-mode clock setup reduces to picking a DDI PLL; x/y/fb are
 * unused here but part of the common ->crtc_mode_set signature.
 */
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;

	intel_crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read back which DDI port and clock source drive this pipe, verify the
 * shared DPLL state, and detect whether the (single) PCH transcoder is in
 * use, filling the corresponding pipe_config fields.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
/*
 * Read the full hardware state of the pipe into pipe_config, bailing out
 * (return false) if the pipe, its transcoder, or the owning power domain is
 * off. Only touches registers whose power domains are verified enabled.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder feeds this pipe, use it instead. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through: treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev_priv, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	/* IPS exists on Haswell only. */
	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}

/* Pixel clock (kHz) to AUD_CONFIG register value lookup table. */
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
		if (mode->clock == hdmi_audio_clock[i].clock)
			break;
	}

	/* No exact match: fall back to entry 1 (25.2 MHz, bspec default). */
	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
		i = 1;
	}

	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
		      hdmi_audio_clock[i].clock,
		      hdmi_audio_clock[i].config);

	return hdmi_audio_clock[i].config;
}

/*
 * Return true iff the ELD already programmed in hardware matches the
 * connector's ELD (or both are absent), so the caller can skip a rewrite.
 * Resets the ELD address pointer via reg_elda before comparing the buffer.
 * Note: 'i' is reused first as a register scratch, then as the loop index.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff hardware has none valid either. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD length in dwords. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

/* Program the connector's ELD on G4x-class hardware. */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate ELD and reset the write address before streaming data. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
/*
 * Program the connector's ELD on Haswell/Broadwell: enable the audio codec
 * output for the pipe, mark the ELD valid, select HDMI vs DP N-value mode,
 * then stream the ELD dwords if the hardware copy is stale.
 */
static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc,
			      struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int pipe = to_intel_crtc(crtc)->pipe;
	int tmp;

	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	POSTING_READ(aud_cntrl_st2);

	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
	/* clear N_programing_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate ELD while rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);

}

/*
 * Program the connector's ELD on Ironlake-era PCH platforms (IBX/CPT) and
 * Valleyview. Selects the per-PCH register set, determines the driving port
 * (or falls back to writing all ports), then streams the ELD if stale.
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	/* On VLV the port comes from the digital port, not the DIP select. */
	if (IS_VALLEYVIEW(connector->dev)) {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate ELD while rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
/*
 * Entry point for ELD programming: pick the connector for this encoder/mode,
 * patch in the A/V sync delay, then dispatch to the per-platform write_eld
 * hook (if the platform provides one).
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 connector->encoder->base.id,
			 connector->encoder->name);

	/* ELD byte 6 holds the A/V sync delay in units of 2 ms. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc, mode);
}

/*
 * Update the cursor on 845G/865G class hardware. base == 0 disables the
 * cursor; otherwise it is the GTT/phys address of the cursor image.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl;

	if (base != intel_crtc->cursor_base) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		if (intel_crtc->cursor_cntl) {
			I915_WRITE(_CURACNTR, 0);
			POSTING_READ(_CURACNTR);
			intel_crtc->cursor_cntl = 0;
		}

		I915_WRITE(_CURABASE, base);
		POSTING_READ(_CURABASE);
	}

	/* XXX width must be 64, stride 256 => 0x00 << 28 */
	cntl = 0;
	if (base)
		cntl = (CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB);
	/* cursor_cntl caches the last written value to avoid redundant MMIO. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}

/*
 * Update the cursor on i9xx-class hardware (pre-IVB). Picks the ARGB cursor
 * mode for the current cursor width; base == 0 disables the cursor.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				WARN_ON(1);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */
	}
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}

/*
 * Update the cursor on IVB and newer. Like i9xx but without the pipe-select
 * bits, and with pipe CSC enabled on HSW/BDW.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				WARN_ON(1);
				return;
		}
	}
	/* NOTE(review): CSC bit is set even on the disable path here —
	 * matches the original code; confirm against bspec if changing. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Force the cursor off when it lies entirely outside the pipe. */
	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	/* CURPOS uses a sign-magnitude encoding for negative coordinates. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do if the cursor stays disabled. */
	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		ivb_update_cursor(crtc, base);
	else if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
	intel_crtc->cursor_base = base;
}
/*
 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
 *
 * Note that the object's reference will be consumed if the update fails.  If
 * the update succeeds, the reference of the old object (if any) will be
 * consumed.
 */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
				     struct drm_i915_gem_object *obj,
				     uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned old_width;
	uint32_t addr;
	int ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Check for which cursor types we support */
	if (!((width == 64 && height == 64) ||
	      (width == 128 && height == 128 && !IS_GEN2(dev)) ||
	      (width == 256 && height == 256 && !IS_GEN2(dev)))) {
		DRM_DEBUG("Cursor dimension not supported\n");
		return -EINVAL;
	}

	/* 4 bytes per ARGB pixel. */
	if (obj->base.size < width * height * 4) {
		DRM_DEBUG_KMS("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
		unsigned alignment;

		if (obj->tiling_mode) {
			DRM_DEBUG_KMS("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		/*
		 * Global gtt pte registers are special registers which actually
		 * forward writes to a chunk of system memory. Which means that
		 * there is no risk that the register values disappear as soon
		 * as we call intel_runtime_pm_put(), so it is correct to wrap
		 * only the pin/unpin/fence and not more.
		 */
		intel_runtime_pm_get(dev_priv);

		/* Note that the w/a also requires 2 PTE of padding following
		 * the bo. We currently fill all unused PTE with the shadow
		 * page and so we should always have valid PTE following the
		 * cursor preventing the VT-d warning.
		 */
		alignment = 0;
		if (need_vtd_wa(dev))
			alignment = 64*1024;

		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
		if (ret) {
			DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
			intel_runtime_pm_put(dev_priv);
			goto fail_locked;
		}

		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			DRM_DEBUG_KMS("failed to release fence for cursor");
			intel_runtime_pm_put(dev_priv);
			goto fail_unpin;
		}

		addr = i915_gem_obj_ggtt_offset(obj);

		intel_runtime_pm_put(dev_priv);
	} else {
		/* Physically-addressed cursor (old hardware). */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			goto fail_locked;
		}
		addr = obj->phys_handle->busaddr;
	}

	if (IS_GEN2(dev))
		I915_WRITE(CURSIZE, (height << 12) | width);

 finish:
	/* Release the previous cursor bo (ownership transfers to us here). */
	if (intel_crtc->cursor_bo) {
		if (!INTEL_INFO(dev)->cursor_needs_physical)
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
	}

	i915_gem_track_fb(intel_crtc->cursor_bo, obj,
			  INTEL_FRONTBUFFER_CURSOR(pipe));
	mutex_unlock(&dev->struct_mutex);

	old_width = intel_crtc->cursor_width;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
	intel_crtc->cursor_width = width;
	intel_crtc->cursor_height = height;

	if (intel_crtc->active) {
		/* Cursor size affects watermark computation. */
		if (old_width != width)
			intel_update_watermarks(crtc);
		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
	}

	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));

	return 0;
fail_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
	mutex_unlock(&dev->struct_mutex);
fail:
	/* Per the contract above, consume the caller's reference on failure. */
	drm_gem_object_unreference_unlocked(&obj->base);
	return ret;
}

/* Load the legacy 8-bit gamma LUT for the [start, start+size) range. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Hardware LUT entries are 8 bit; drop the low byte of each 16-bit value. */
	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Wrap a GEM object in an intel_framebuffer. Consumes the object reference
 * on failure; caller must hold struct_mutex (see intel_framebuffer_create).
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference_unlocked(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}

/* Locked wrapper around __intel_framebuffer_create(). */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
intel_framebuffer_pitch_for_width(int width, int bpp) 8374 { 8375 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 8376 return ALIGN(pitch, 64); 8377 } 8378 8379 static u32 8380 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 8381 { 8382 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 8383 return PAGE_ALIGN(pitch * mode->vdisplay); 8384 } 8385 8386 static struct drm_framebuffer * 8387 intel_framebuffer_create_for_mode(struct drm_device *dev, 8388 struct drm_display_mode *mode, 8389 int depth, int bpp) 8390 { 8391 struct drm_i915_gem_object *obj; 8392 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 8393 8394 obj = i915_gem_alloc_object(dev, 8395 intel_framebuffer_size_for_mode(mode, bpp)); 8396 if (obj == NULL) 8397 return ERR_PTR(-ENOMEM); 8398 8399 mode_cmd.width = mode->hdisplay; 8400 mode_cmd.height = mode->vdisplay; 8401 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 8402 bpp); 8403 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 8404 8405 return intel_framebuffer_create(dev, &mode_cmd, obj); 8406 } 8407 8408 static struct drm_framebuffer * 8409 mode_fits_in_fbdev(struct drm_device *dev, 8410 struct drm_display_mode *mode) 8411 { 8412 #ifdef CONFIG_DRM_I915_FBDEV 8413 struct drm_i915_private *dev_priv = dev->dev_private; 8414 struct drm_i915_gem_object *obj; 8415 struct drm_framebuffer *fb; 8416 8417 if (!dev_priv->fbdev) 8418 return NULL; 8419 8420 if (!dev_priv->fbdev->fb) 8421 return NULL; 8422 8423 obj = dev_priv->fbdev->fb->obj; 8424 BUG_ON(!obj); 8425 8426 fb = &dev_priv->fbdev->fb->base; 8427 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 8428 fb->bits_per_pixel)) 8429 return NULL; 8430 8431 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 8432 return NULL; 8433 8434 return fb; 8435 #else 8436 return NULL; 8437 #endif 8438 } 8439 8440 bool intel_get_load_detect_pipe(struct drm_connector *connector, 8441 struct drm_display_mode *mode, 8442 struct 
intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	/* Locks are dropped and the whole sequence restarted on -EDEADLK
	 * (see fail_unlock below). */
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail_unlock;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail_unlock;

		/* load_detect_temp == false tells the release path there is
		 * nothing to tear down except possibly a DPMS restore. */
		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->enabled)
			continue;
		/* This can occur when applying the pipe A quirk on resume. */
		if (to_intel_crtc(possible_crtc)->new_enabled)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail_unlock;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail_unlock;
	/* Stage the new connector->encoder->crtc routing for intel_set_mode(). */
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = &intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* NOTE(review): on allocation failure this stores an ERR_PTR
		 * in release_fb; the fail path below never dereferences it,
		 * but it looks fragile — confirm against the release path. */
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	/* Roll the staged state back to what the hardware actually has. */
	intel_crtc->new_enabled = crtc->enabled;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = &intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
fail_unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/*
 * Undo intel_get_load_detect_pipe(): tear down the temporary mode (and
 * temporary framebuffer, if one was created) or restore the previous DPMS
 * state when an already-running pipe was borrowed.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;
		/* NULL mode disables the pipe. */
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);
}

/*
 * Reference clock (in kHz) feeding the DPLL, derived from the programmed
 * DPLL reference-input select and platform generation.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor set (FP0/FP1) the DPLL is using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M/N divisors; Pineview uses different field layouts. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot in the DPLL; ffs() recovers the value. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* Gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}

/*
 * Compute the pixel clock from the DP/FDI link M/N values and link
 * frequency.  Returns 0 if link_n is zero (link not programmed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	/* Caller frees the returned mode; NULL on allocation failure. */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* Timing registers store value-minus-one; low word = active/start,
	 * high word = total/end. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}

/*
 * Switch an LVDS panel back to its full (non-downclocked) pixel clock by
 * clearing the FPA1 rate select in the pipe's DPLL.  GMCH-only.
 */
static void intel_increase_pllclock(struct drm_device *dev,
				    enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the rate-select actually stuck. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

/*
 * Counterpart of intel_increase_pllclock(): drop an idle LVDS panel to the
 * downclocked rate (FPA1 divisors) to save power.  GMCH-only.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}

/* GPU became busy: take a runtime-PM reference and refresh the perf state. */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	dev_priv->mm.busy = true;
}

/* GPU went idle: downclock active panels, let RPS idle, drop the PM ref. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (!i915.powersave)
		goto out;

	for_each_crtc(dev, crtc) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

out:
	intel_runtime_pm_put(dev_priv);
}


/**
 * intel_mark_fb_busy - mark given planes as busy
 * @dev: DRM device
 * @frontbuffer_bits: bits for the affected planes
 * @ring: optional ring for asynchronous commands
 *
 * This function gets called every time the screen contents change. It can be
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
 */
static void intel_mark_fb_busy(struct drm_device *dev,
			       unsigned frontbuffer_bits,
			       struct intel_engine_cs *ring)
{
	enum i915_pipe pipe;

	if (!i915.powersave)
		return;

	for_each_pipe(pipe) {
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
			continue;

		intel_increase_pllclock(dev, pipe);
		/* FBC needs a nuke after ring rendering to the frontbuffer. */
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
 * @ring: set for asynchronous rendering
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			     struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	if (ring) {
		/* Async rendering: mark the planes busy and cancel any
		 * pending delayed flip flush for them. */
		mutex_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits
			|= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits
			&= ~obj->frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);

	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some oustanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flush(struct drm_device *dev,
			     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Delay flushing when rings are still busy.*/
	mutex_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);

	intel_edp_psr_flush(dev, frontbuffer_bits);
}

/**
 * intel_fb_obj_flush - flush frontbuffer object
 * @obj: GEM object to flush
 * @retire: set when retiring asynchronous rendering
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again. If @retire is true
 * then any delayed flushes will be unblocked.
 */
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
			bool retire)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned frontbuffer_bits;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	frontbuffer_bits = obj->frontbuffer_bits;

	if (retire) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		/* Filter out new bits since rendering started. */
		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;

		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}

/**
 * intel_frontbuffer_flip_prepare - prepare asnychronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. The actual
 * frontbuffer flushing will be delayed until completion is signalled with
 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
 * flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
				    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.flip_bits
		|= frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);
}

/**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
 * on the next vblank. It will execute the fush if it hasn't been cancalled yet.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Mask any cancelled flips.
	 */
	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}

/* CRTC destructor: cancel any pending unpin work before freeing. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* lockmgr is the DragonFly BSD spinlock API used for event_lock
	 * throughout this port. */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred-work half of page-flip completion: unpins the old framebuffer,
 * drops the object references and signals frontbuffer-flip completion.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

/*
 * Interrupt-side page-flip completion: sends the vblank event and hands the
 * unpin work off to the driver workqueue.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_crtc_vblank_put(crtc);

	lockmgr(&dev->event_lock, LK_RELEASE);

	wake_up_all(&dev_priv->pending_flip_queue);

	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}

/* Flip-done irq entry point, keyed by pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Flip-done irq entry point, keyed by plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	/* Wrap-safe 32-bit sequence comparison. */
	return !((a - b) & 0x80000000);
}

/*
 * Decide whether the flip the irq is reporting has actually reached the
 * hardware, to filter out spurious flip-done interrupts.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}

/* Flip-pending irq: advance the unpin work to the pending state. */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ...
	 */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}

/* Gen2: emit MI_DISPLAY_FLIP preceded by a wait for the previous flip. */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/* Gen3: like gen2 but uses the i915 flip opcode and a trailing NOOP. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/* Gen4/5: flip by reprogramming the base address; tiling in the low bits. */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/* Gen6: tiling moves into the pitch dword. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * Gen7/8 (IVB+): plane selected by dedicated bits; the RCS path also needs
 * the DERRMR unmask workaround emitted before the flip (see below).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message.
	 * Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		/* Save DERRMR to a scratch slot so it can be restored later;
		 * gen8 SRM takes a 64-bit address (extra dword + pad NOOP). */
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/* Decide between MMIO flips and CS (ring command) flips. */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	/* Module parameter overrides the heuristic; default: flip via MMIO
	 * when rendering happened on a different ring. */
	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else
		return ring != obj->ring;
}

/* Execute an MMIO flip: latch the new surface address via DSPSURF. */
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	intel_mark_page_flip_active(intel_crtc);

	/* Update the tiling bit to match the new object before flipping. */
	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}
	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane),
		   intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/*
 * Returns 1 if the flip must wait for outstanding rendering (irq held on
 * the ring), 0 if it can proceed immediately, negative errno on failure.
 */
static int intel_postpone_flip(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *ring;
	int ret;

	/* DragonFly port: lockdep assertion compiled out. */
#if 0
	lockdep_assert_held(&obj->base.dev->struct_mutex);
#endif

	if (!obj->last_write_seqno)
		return 0;

	ring = obj->ring;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_write_seqno))
		return 0;

	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
	if (ret)
		return ret;

	if (WARN_ON(!ring->irq_get(ring)))
		return 0;

	return 1;
}

/*
 * Ring interrupt handler hook: complete any MMIO flips whose rendering
 * seqno has now passed on @ring.
 */
void intel_notify_mmio_flip(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_crtc *intel_crtc;
	u32 seqno;

	seqno = ring->get_seqno(ring, false);

	spin_lock(&dev_priv->mmio_flip_lock);
	for_each_intel_crtc(ring->dev, intel_crtc) {
		struct intel_mmio_flip *mmio_flip;

		mmio_flip = &intel_crtc->mmio_flip;
		/* seqno == 0 means no MMIO flip is pending on this crtc. */
		if (mmio_flip->seqno == 0)
			continue;

		if (ring->id != mmio_flip->ring_id)
			continue;

		if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
			intel_do_mmio_flip(intel_crtc);
			mmio_flip->seqno = 0;
			/* Balances the irq_get() from intel_postpone_flip(). */
			ring->irq_put(ring);
		}
	}
	spin_unlock(&dev_priv->mmio_flip_lock);
}

/*
 * Queue an MMIO flip: either execute it immediately if rendering is done,
 * or record the seqno/ring so intel_notify_mmio_flip() completes it later.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	if (WARN_ON(intel_crtc->mmio_flip.seqno))
		return -EBUSY;

	ret = intel_postpone_flip(obj);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		intel_do_mmio_flip(intel_crtc);
		return 0;
	}

	spin_lock(&dev_priv->mmio_flip_lock);
	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
	intel_crtc->mmio_flip.ring_id = obj->ring->id;
	spin_unlock(&dev_priv->mmio_flip_lock);

	/*
	 * Double check to catch cases where irq fired before
	 * mmio flip data was ready
	 */
	intel_notify_mmio_flip(obj->ring);
	return 0;
}

/* Fallback for platforms with no CS flip support. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *ring,
				    uint32_t flags)
{
	return -ENODEV;
}

/* Entry point for the DRM page-flip ioctl on this crtc. */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer
*old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* GPU is terminally hung: fall back to the set_base path below. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = intel_fb_obj(old_fb);
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per crtc. */
		lockmgr(&dev->event_lock, LK_RELEASE);
		kfree(work);
		drm_crtc_vblank_put(crtc);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Throttle: let queued unpin work drain before stacking up more. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;

	/* Pick the ring to emit the flip on (NULL forces the MMIO path). */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = obj->ring;
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset =
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;

	if (use_mmio_flip(ring, obj))
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
	else
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
						   page_flip_flags);
	if (ret)
		goto cleanup_unpin;

	i915_gem_track_fb(work->old_fb_obj, obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_disable_fbc(dev);
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* Unwind in reverse order of the setup above. */
cleanup_unpin:
	intel_unpin_fb_obj(obj);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->primary->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		/* GPU hung: do a full set_base and still deliver the event. */
out_hang:
		intel_crtc_wait_for_pending_flips(crtc);
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
		if (ret == 0 && event)
			drm_send_vblank_event(dev, pipe, event);
	}
	return ret;
}

static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};

/**
 * intel_modeset_update_staged_output_state
 *
 * Updates the staged output configuration state, e.g. after we've read out the
 * current hw state.
 */
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = crtc->base.enabled;

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}
}

/**
 * intel_modeset_commit_output_state
 *
 * This function copies the staged display pipe configuration to the real one.
 */
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->base.encoder = &connector->new_encoder->base;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		encoder->base.crtc = &encoder->new_crtc->base;
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.enabled = crtc->new_enabled;
	}
}

/*
 * Clamp pipe_config->pipe_bpp to what the sink can accept, based on the
 * EDID-reported bits per channel of @connector (if any).
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_config *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

/*
 * Derive the starting pipe bpp from the framebuffer pixel format, then
 * clamp it against every connector staged onto @crtc. Returns the plane
 * bpp (before sink clamping) so the caller can decide on dithering, or
 * -EINVAL for formats invalid on this gen.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}

/* Dump the hardware crtc timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/* Dump the full contents of @pipe_config to the KMS debug log. */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ?
"enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}

/* Can encoders @a and @b share a pipe (clone)? */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Check that @encoder can be cloned with every other encoder staged onto
 * @crtc. Returns false on the first incompatible pairing.
 */
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *source_encoder;

	list_for_each_entry(source_encoder,
			    &dev->mode_config.encoder_list, base.head) {
		if (source_encoder->new_crtc != crtc)
			continue;

		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Validate that all encoders staged onto @crtc are mutually cloneable.
 */
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder,
			    &dev->mode_config.encoder_list, base.head) {
		if (encoder->new_crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(crtc, encoder))
			return false;
	}

	return true;
}

/*
 * Compute a full pipe configuration for @mode on @crtc: baseline bpp from
 * @fb, sanitized sync flags, pipe source size, then let every staged
 * encoder and the crtc adjust the config (retrying once if the crtc asks
 * for it). Returns a kzalloc'd config on success — caller frees — or an
 * ERR_PTR on failure.
 */
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_crtc_config *pipe_config;
	int plane_bpp, ret = -EINVAL;
	bool retry = true;

	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return ERR_PTR(-ENOMEM);

	drm_mode_copy(&pipe_config->adjusted_mode, mode);
	drm_mode_copy(&pipe_config->requested_mode, mode);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {

		if (&encoder->new_crtc->base != crtc)
			continue;

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode.
 */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry is allowed, otherwise flag a config loop. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dither when the final pipe bpp differs from the plane bpp. */
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	kfree(pipe_config);
	return ERR_PTR(ret);
}

/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most.
	 */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for pipes that will be enabled/disabled ... */
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
			continue;

		if (!intel_crtc->new_enabled)
			*disable_pipes |= 1 << intel_crtc->pipe;
		else
			*prepare_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on live display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (intel_crtc->new_enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/*
	 * For simplicity do a full modeset on any pipe where the output routing
	 * changed. We could be more clever, but that would require us to be
	 * more careful with calling the relevant encoder->mode_set functions.
	 */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);

	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obeys this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
	*modeset_pipes &= 1 << intel_crtc->pipe;
	*prepare_pipes &= 1 << intel_crtc->pipe;

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
}

/* Is any encoder currently routed to @crtc? */
static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			return true;

	return false;
}

/*
 * Commit the staged output state and update derived sw tracking
 * (connectors_active, dpms property) for all pipes in @prepare_pipes.
 */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

	intel_modeset_commit_output_state(dev);

	/* Double check state.
 */
	for_each_intel_crtc(dev, intel_crtc) {
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
		WARN_ON(intel_crtc->new_config &&
			intel_crtc->new_config != &intel_crtc->config);
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			/* Freshly modeset outputs come up in DPMS_ON. */
			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
						      dpms_property,
						      DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}

/*
 * Compare two clocks with a tolerance: identical clocks always match,
 * zero only matches zero, otherwise accept when the difference stays
 * below 5% of the summed clocks.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

/* Iterate all intel crtcs whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))

/*
 * Compare @current_config against @pipe_config field by field, logging
 * the first mismatch via DRM_ERROR and returning false; true when the
 * two configurations agree. Used by the modeset state checker.
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_config *current_config,
			  struct intel_crtc_config *pipe_config)
{
/* Exact compare, hex diagnostics. */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Exact compare, decimal diagnostics. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare only the bits selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* Clock compare with the fuzzy tolerance above. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* True when either config carries @quirk. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(has_dp_encoder);
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
	PIPE_CONF_CHECK_I(dp_m_n.tu);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(pipe_src_w);
	PIPE_CONF_CHECK_I(pipe_src_h);

	/*
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
	 * screen. Since we don't yet re-compute the pipe config when moving
	 * just the lvds port away to another pipe the sw tracking won't match.
	 *
	 * Proper atomic modesets with recomputed global state will fix this.
	 * Until then just don't check gmch state for inherited modes.
 */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
		PIPE_CONF_CHECK_I(gmch_pfit.control);
		/* pfit ratios are autocomputed by the hw on gen4+ */
		if (INTEL_INFO(dev)->gen < 4)
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	}

	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}

/*
 * State checker: verify every connector's hw state and that its staged
 * encoder matches the committed one.
 */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

/*
 * State checker: cross-check each encoder's sw tracking (crtc link,
 * connectors_active) against the connector list and the hw state read
 * back through ->get_hw_state().
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Derive enabled/active from the connectors using us. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

/*
 * State checker: compare each crtc's sw tracking against the hw pipe
 * config read back via ->get_pipe_config()/->get_config().
 */
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe A quirk */
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
			active = crtc->active;

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n",
crtc->active, active); 10620 10621 if (active && 10622 !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) { 10623 WARN(1, "pipe state doesn't match!\n"); 10624 intel_dump_pipe_config(crtc, &pipe_config, 10625 "[hw state]"); 10626 intel_dump_pipe_config(crtc, &crtc->config, 10627 "[sw state]"); 10628 } 10629 } 10630 } 10631 10632 static void 10633 check_shared_dpll_state(struct drm_device *dev) 10634 { 10635 struct drm_i915_private *dev_priv = dev->dev_private; 10636 struct intel_crtc *crtc; 10637 struct intel_dpll_hw_state dpll_hw_state; 10638 int i; 10639 10640 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 10641 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 10642 int enabled_crtcs = 0, active_crtcs = 0; 10643 bool active; 10644 10645 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 10646 10647 DRM_DEBUG_KMS("%s\n", pll->name); 10648 10649 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); 10650 10651 WARN(pll->active > pll->refcount, 10652 "more active pll users than references: %i vs %i\n", 10653 pll->active, pll->refcount); 10654 WARN(pll->active && !pll->on, 10655 "pll in active use but not on in sw tracking\n"); 10656 WARN(pll->on && !pll->active, 10657 "pll in on but not on in use in sw tracking\n"); 10658 WARN(pll->on != active, 10659 "pll on state mismatch (expected %i, found %i)\n", 10660 pll->on, active); 10661 10662 for_each_intel_crtc(dev, crtc) { 10663 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) 10664 enabled_crtcs++; 10665 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 10666 active_crtcs++; 10667 } 10668 WARN(pll->active != active_crtcs, 10669 "pll active crtcs mismatch (expected %i, found %i)\n", 10670 pll->active, active_crtcs); 10671 WARN(pll->refcount != enabled_crtcs, 10672 "pll enabled crtcs mismatch (expected %i, found %i)\n", 10673 pll->refcount, enabled_crtcs); 10674 10675 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state, 10676 sizeof(dpll_hw_state)), 10677 "pll hw 
state mismatch\n"); 10678 } 10679 } 10680 10681 void 10682 intel_modeset_check_state(struct drm_device *dev) 10683 { 10684 check_connector_state(dev); 10685 check_encoder_state(dev); 10686 check_crtc_state(dev); 10687 check_shared_dpll_state(dev); 10688 } 10689 10690 void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, 10691 int dotclock) 10692 { 10693 /* 10694 * FDI already provided one idea for the dotclock. 10695 * Yell if the encoder disagrees. 10696 */ 10697 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock), 10698 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 10699 pipe_config->adjusted_mode.crtc_clock, dotclock); 10700 } 10701 10702 static void update_scanline_offset(struct intel_crtc *crtc) 10703 { 10704 struct drm_device *dev = crtc->base.dev; 10705 10706 /* 10707 * The scanline counter increments at the leading edge of hsync. 10708 * 10709 * On most platforms it starts counting from vtotal-1 on the 10710 * first active line. That means the scanline counter value is 10711 * always one less than what we would expect. Ie. just after 10712 * start of vblank, which also occurs at start of hsync (on the 10713 * last active line), the scanline counter will read vblank_start-1. 10714 * 10715 * On gen2 the scanline counter starts counting from 1 instead 10716 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 10717 * to keep the value positive), instead of adding one. 10718 * 10719 * On HSW+ the behaviour of the scanline counter depends on the output 10720 * type. For DP ports it behaves like most other platforms, but on HDMI 10721 * there's an extra 1 line difference. So we need to add two instead of 10722 * one to the value. 
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
		int vtotal;

		vtotal = mode->crtc_vtotal;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/*
 * Core modeset implementation: figure out which pipes must be disabled,
 * prepared or fully modeset, shut them down, program the new pipe config
 * and framebuffer on @crtc, then bring everything back up.
 *
 * Returns 0 on success or a negative errno; on failure crtc->mode is
 * restored from a saved copy.  Callers normally go through
 * intel_set_mode(), which adds the state cross-check.
 */
static int __intel_set_mode(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    int x, int y, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc_config *pipe_config = NULL;
	struct intel_crtc *intel_crtc;
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
	int ret = 0;

	/* NOTE(review): DragonFly-style kmalloc; with M_WAITOK this should
	 * not return NULL, but the check is kept for safety. */
	saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK);
	if (!saved_mode)
		return -ENOMEM;

	intel_modeset_affected_pipes(crtc, &modeset_pipes,
				     &prepare_pipes, &disable_pipes);

	*saved_mode = crtc->mode;

	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with mutliple crtcs
	 * changing their mode at the same time. */
	if (modeset_pipes) {
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
		if (IS_ERR(pipe_config)) {
			ret = PTR_ERR(pipe_config);
			pipe_config = NULL;

			goto out;
		}
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       "[modeset]");
		to_intel_crtc(crtc)->new_config = pipe_config;
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		to_intel_crtc(crtc)->config = *pipe_config;
		to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct drm_framebuffer *old_fb = crtc->primary->fb;
		struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);

		/* Swap the pinned fb: pin the new one first so failure
		 * leaves the old fb intact. */
		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(dev,
						 obj,
						 NULL);
		if (ret != 0) {
			DRM_ERROR("pin & fence failed\n");
			mutex_unlock(&dev->struct_mutex);
			goto done;
		}
		if (old_fb)
			intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
		mutex_unlock(&dev->struct_mutex);

		crtc->primary->fb = fb;
		crtc->x = x;
		crtc->y = y;

		ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
						      x, y, fb);
		if (ret)
			goto done;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		update_scanline_offset(intel_crtc);

		dev_priv->display.crtc_enable(&intel_crtc->base);
	}

	/* FIXME: add subpixel order */
done:
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

out:
	kfree(pipe_config);
	kfree(saved_mode);
	return ret;
}

/* Wrapper around __intel_set_mode() that runs the sw/hw state
 * cross-checks after a successful modeset. */
static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *fb)
{
	int ret;

	ret = __intel_set_mode(crtc, mode, x, y, fb);

	if (ret == 0)
		intel_modeset_check_state(crtc->dev);

	return ret;
}

/* Re-apply the crtc's currently tracked mode, pan offsets and fb. */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}

#undef for_each_intel_crtc_masked

/* Free a saved set_config state (NULL-safe; kfree(NULL) is a no-op). */
static void intel_set_config_free(struct intel_set_config *config)
{
	if (!config)
		return;

	kfree(config->save_connector_encoders);
	kfree(config->save_encoder_crtcs);
	kfree(config->save_crtc_enabled);
	kfree(config);
}

/*
 * Snapshot the current crtc-enabled / encoder->crtc / connector->encoder
 * links into @config so they can be restored if the set_config call fails.
 * Returns 0 or -ENOMEM; partially allocated arrays are released by
 * intel_set_config_free() in the caller's error path.
 */
static int intel_set_config_save_state(struct drm_device *dev,
				       struct intel_set_config *config)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int count;

	config->save_crtc_enabled =
		kcalloc(dev->mode_config.num_crtc,
			sizeof(bool), GFP_KERNEL);
	if (!config->save_crtc_enabled)
		return -ENOMEM;

	config->save_encoder_crtcs =
		kcalloc(dev->mode_config.num_encoder,
			sizeof(struct drm_crtc *), GFP_KERNEL);
	if (!config->save_encoder_crtcs)
		return -ENOMEM;

	config->save_connector_encoders =
		kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_encoder *), GFP_KERNEL);
	if (!config->save_connector_encoders)
		return -ENOMEM;

	/* Copy data. Note that driver private data is not affected.
	 * Should anything bad happen only the expected state is
	 * restored, not the drivers personal bookkeeping.
	 */
	count = 0;
	for_each_crtc(dev, crtc) {
		config->save_crtc_enabled[count++] = crtc->enabled;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		config->save_encoder_crtcs[count++] = encoder->crtc;
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		config->save_connector_encoders[count++] = connector->encoder;
	}

	return 0;
}

/*
 * Restore the staged (new_*) pointers from the snapshot taken by
 * intel_set_config_save_state().  Note this writes the staged state, not
 * the live links; a following modeset applies it.
 */
static void intel_set_config_restore_state(struct drm_device *dev,
					   struct intel_set_config *config)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int count;

	count = 0;
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = config->save_crtc_enabled[count++];

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->new_crtc =
			to_intel_crtc(config->save_encoder_crtcs[count++]);
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		connector->new_encoder =
			to_intel_encoder(config->save_connector_encoders[count++]);
	}
}

/*
 * Return true if any connector in @set currently lives on set->crtc but is
 * being set to a DPMS state other than ON, i.e. the request effectively
 * turns part of this crtc off.
 */
static bool
is_crtc_connector_off(struct drm_mode_set *set)
{
	int i;

	if (set->num_connectors == 0)
		return false;

	if (WARN_ON(set->connectors == NULL))
		return false;

	for (i = 0; i < set->num_connectors; i++)
		if (set->connectors[i]->encoder &&
		    set->connectors[i]->encoder->crtc == set->crtc &&
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
			return true;

	return false;
}

/*
 * Classify a set_config request: does it need a full modeset
 * (config->mode_changed) or only a framebuffer/base update
 * (config->fb_changed)?  Only sets flags; never clears them.
 */
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{

	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
		config->mode_changed = true;
	} else if (set->crtc->primary->fb != set->fb) {
		/*
		 * If we have no fb, we can only flip as long as the crtc is
		 * active, otherwise we need a full mode set. The crtc may
		 * be active if we've only disabled the primary plane, or
		 * in fastboot situations.
		 */
		if (set->crtc->primary->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			if (intel_crtc->active) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
				config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->primary->fb->pixel_format) {
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
		}
	}

	/* Panning only moves the base address: an fb update suffices. */
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
		      set->crtc->base.id, config->mode_changed, config->fb_changed);
}

/*
 * Stage the requested output configuration: update connector->new_encoder,
 * encoder->new_crtc and crtc->new_enabled from @set, flagging
 * config->mode_changed whenever a link changes.  Returns 0 or -EINVAL for
 * impossible routings.
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int
	    ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = connector->encoder;
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				      connector->base.base.id,
				      connector->base.name);
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		/* Connectors named in the request move to set->crtc. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			      connector->base.base.id,
			      connector->base.name,
			      new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;	/* encoder cloning not supported here */

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */

	/* Derive each crtc's staged enabled state from its encoders. */
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = false;

		list_for_each_entry(encoder,
				    &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}

/*
 * Stage @crtc and everything routed to it as disabled.  Used when a
 * restore attempt has no framebuffer to work with (see the HACK comment
 * in intel_crtc_set_config()).
 */
static void disable_crtc_nofb(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
		      pipe_name(crtc->pipe));

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		if (connector->new_encoder &&
		    connector->new_encoder->new_crtc == crtc)
			connector->new_encoder = NULL;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (encoder->new_crtc == crtc)
			encoder->new_crtc = NULL;
	}

	crtc->new_enabled = false;
	crtc->new_config = NULL;
}

/*
 * drm_crtc_funcs.set_config entry point: save current state, classify the
 * request (full modeset vs. fb-only), stage the new output routing, apply
 * it, and roll everything back on failure.
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper.
	 */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
			      set->crtc->base.id, set->fb->base.id,
			      (int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	/* Snapshot current routing so a failed modeset can be rolled back. */
	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->primary->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);

		intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);

		/*
		 * We need to make sure the primary plane is re-enabled if it
		 * has previously been turned off.
		 */
		if (!intel_crtc->primary_enabled && ret == 0) {
			WARN_ON(!intel_crtc->active);
			intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
						      intel_crtc->pipe);
		}

		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot. It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915.fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/*
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
		 * force the pipe off to avoid oopsing in the modeset code
		 * due to fb==NULL. This should only happen during boot since
		 * we don't yet reconstruct the FB from the hardware state.
		 */
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				   save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}

/* CRTC vfuncs exported to the DRM core. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};

/*
 * Read back a PCH (IBX/CPT) shared DPLL's registers into @hw_state.
 * Returns true iff the PLL's VCO is enabled; returns false without
 * touching registers if the PLLS power domain is off.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	return val & DPLL_VCO_ENABLE;
}

/* Program the divisor (FP0/FP1) registers from the cached hw_state. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}

/* Enable a PCH shared DPLL from its cached hw_state. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Disable a PCH shared DPLL; asserts no PCH transcoder still uses it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder isn't still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Human-readable names, indexed by pll->id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

/* Register the two IBX/CPT PCH shared DPLLs and their vfuncs. */
static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

/* Pick the shared DPLL implementation for this platform (DDI, PCH, or
 * none) and register the plls. */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

/*
 * drm_plane_funcs.disable_plane for the primary plane: turn the hw plane
 * off (if it was on), unpin the fb and clear plane->fb.
 */
static int
intel_primary_plane_disable(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_crtc *intel_crtc;

	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	intel_crtc = to_intel_crtc(plane->crtc);

	/*
	 * Even though we checked plane->fb above, it's still possible that
	 * the primary plane has been implicitly disabled because the crtc
	 * coordinates given weren't visible, or because we detected
	 * that it was 100% covered by a sprite plane. Or, the CRTC may be
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
	 * In either case, we need to unpin the FB and let the fb pointer get
	 * updated, but otherwise we don't need to touch the hardware.
	 */
	if (!intel_crtc->primary_enabled)
		goto disable_unpin;

	intel_crtc_wait_for_pending_flips(plane->crtc);
	intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
				       intel_plane->pipe);
disable_unpin:
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
	mutex_unlock(&dev->struct_mutex);
	plane->fb = NULL;

	return 0;
}

/*
 * drm_plane_funcs.update_plane for the primary plane: validate the
 * requested placement (no scaling allowed), then pin/track the new fb and
 * program or implicitly disable the hw plane depending on crtc state and
 * visibility.
 */
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			     unsigned int crtc_w, unsigned int crtc_h,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	struct
drm_rect dest = { 11486 /* integer pixels */ 11487 .x1 = crtc_x, 11488 .y1 = crtc_y, 11489 .x2 = crtc_x + crtc_w, 11490 .y2 = crtc_y + crtc_h, 11491 }; 11492 struct drm_rect src = { 11493 /* 16.16 fixed point */ 11494 .x1 = src_x, 11495 .y1 = src_y, 11496 .x2 = src_x + src_w, 11497 .y2 = src_y + src_h, 11498 }; 11499 const struct drm_rect clip = { 11500 /* integer pixels */ 11501 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, 11502 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, 11503 }; 11504 bool visible; 11505 int ret; 11506 11507 ret = drm_plane_helper_check_update(plane, crtc, fb, 11508 &src, &dest, &clip, 11509 DRM_PLANE_HELPER_NO_SCALING, 11510 DRM_PLANE_HELPER_NO_SCALING, 11511 false, true, &visible); 11512 11513 if (ret) 11514 return ret; 11515 11516 /* 11517 * If the CRTC isn't enabled, we're just pinning the framebuffer, 11518 * updating the fb pointer, and returning without touching the 11519 * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to 11520 * turn on the display with all planes setup as desired. 11521 */ 11522 if (!crtc->enabled) { 11523 mutex_lock(&dev->struct_mutex); 11524 11525 /* 11526 * If we already called setplane while the crtc was disabled, 11527 * we may have an fb pinned; unpin it. 11528 */ 11529 if (plane->fb) 11530 intel_unpin_fb_obj(old_obj); 11531 11532 i915_gem_track_fb(old_obj, obj, 11533 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11534 11535 /* Pin and return without programming hardware */ 11536 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11537 mutex_unlock(&dev->struct_mutex); 11538 11539 return ret; 11540 } 11541 11542 intel_crtc_wait_for_pending_flips(crtc); 11543 11544 /* 11545 * If clipping results in a non-visible primary plane, we'll disable 11546 * the primary plane. Note that this is a bit different than what 11547 * happens if userspace explicitly disables the plane by passing fb=0 11548 * because plane->fb still gets set and pinned. 
11549 */ 11550 if (!visible) { 11551 mutex_lock(&dev->struct_mutex); 11552 11553 /* 11554 * Try to pin the new fb first so that we can bail out if we 11555 * fail. 11556 */ 11557 if (plane->fb != fb) { 11558 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11559 if (ret) { 11560 mutex_unlock(&dev->struct_mutex); 11561 return ret; 11562 } 11563 } 11564 11565 i915_gem_track_fb(old_obj, obj, 11566 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11567 11568 if (intel_crtc->primary_enabled) 11569 intel_disable_primary_hw_plane(dev_priv, 11570 intel_plane->plane, 11571 intel_plane->pipe); 11572 11573 11574 if (plane->fb != fb) 11575 if (plane->fb) 11576 intel_unpin_fb_obj(old_obj); 11577 11578 mutex_unlock(&dev->struct_mutex); 11579 11580 return 0; 11581 } 11582 11583 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); 11584 if (ret) 11585 return ret; 11586 11587 if (!intel_crtc->primary_enabled) 11588 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, 11589 intel_crtc->pipe); 11590 11591 return 0; 11592 } 11593 11594 /* Common destruction function for both primary and cursor planes */ 11595 static void intel_plane_destroy(struct drm_plane *plane) 11596 { 11597 struct intel_plane *intel_plane = to_intel_plane(plane); 11598 drm_plane_cleanup(plane); 11599 kfree(intel_plane); 11600 } 11601 11602 static const struct drm_plane_funcs intel_primary_plane_funcs = { 11603 .update_plane = intel_primary_plane_setplane, 11604 .disable_plane = intel_primary_plane_disable, 11605 .destroy = intel_plane_destroy, 11606 }; 11607 11608 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, 11609 int pipe) 11610 { 11611 struct intel_plane *primary; 11612 const uint32_t *intel_primary_formats; 11613 int num_formats; 11614 11615 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 11616 if (primary == NULL) 11617 return NULL; 11618 11619 primary->can_scale = false; 11620 primary->max_downscale = 1; 11621 primary->pipe = pipe; 11622 primary->plane = pipe; 11623 if 
(HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_primary_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);
	return &primary->base;
}

/* Turn the cursor off via the legacy cursor-set path. */
static int
intel_cursor_plane_disable(struct drm_plane *plane)
{
	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
}

/*
 * ->update_plane() implementation for the universal cursor plane.
 * Delegates to the legacy cursor code: a framebuffer change triggers a
 * full cursor-set, otherwise only position/visibility is updated.
 */
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			  unsigned int crtc_w, unsigned int crtc_h,
			  uint32_t src_x, uint32_t src_y,
			  uint32_t src_w, uint32_t src_h)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct drm_rect dest = {
		/* integer pixels */
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		/* integer pixels; empty clip while the pipe is off */
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &visible);
	if (ret)
		return ret;

	crtc->cursor_x = crtc_x;
	crtc->cursor_y = crtc_y;
	if (fb != crtc->cursor->fb) {
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
	} else {
		intel_crtc_update_cursor(crtc, visible);
		return 0;
	}
}
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_cursor_plane_update,
	.disable_plane = intel_cursor_plane_disable,
	.destroy = intel_plane_destroy,
};

/*
 * Allocate and register the universal cursor plane for @pipe.
 * Returns the new plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_cursor_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);
	return &cursor->base;
}

/* Allocate an intel_crtc plus its primary and cursor planes for @pipe. */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret =
drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	/* Start with an identity (linear) gamma ramp. */
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 forces the first cursor update to program the registers. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;

	init_waitqueue_head(&intel_crtc->vbl_wait);

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(intel_crtc);
}

/*
 * Return the pipe currently driving @connector, or INVALID_PIPE if no
 * encoder is attached.  Caller must hold connection_mutex.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder)
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/* ioctl handler: translate a CRTC object id into the hardware pipe index. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build the possible_clones bitmask for @encoder: one bit per encoder on
 * the device (in encoder_list order) that can share a CRTC with it.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(source_encoder,
			    &dev->mode_config.encoder_list, base.head) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* eDP on port A: mobile parts only, and may be fused off on gen5. */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Human-readable name for an INTEL_OUTPUT_* type; "Invalid" if unknown. */
const char *intel_output_name(int output)
{
	static const char *names[] = {
		[INTEL_OUTPUT_UNUSED] = "Unused",
		[INTEL_OUTPUT_ANALOG] = "Analog",
		[INTEL_OUTPUT_DVO] = "DVO",
		[INTEL_OUTPUT_SDVO] = "SDVO",
		[INTEL_OUTPUT_LVDS] = "LVDS",
		[INTEL_OUTPUT_TVOUT] = "TV",
		[INTEL_OUTPUT_HDMI] = "HDMI",
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
		[INTEL_OUTPUT_EDP] = "eDP",
		[INTEL_OUTPUT_DSI] = "DSI",
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
	};

	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
		return
"Invalid";

	return names[output];
}

/* Whether this platform exposes a usable VGA/CRT connector. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * Probe the platform's strap/detect registers and register an encoder for
 * every output present, then fix up possible_crtcs/possible_clones on all
 * encoders.  Probe order matters (e.g. SDVO before HDMI/DP fallbacks).
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is eDP-handled elsewhere when dpd_is_edp */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
		}

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
				if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
					intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
			}
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
#if 0
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);
#endif
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_edp_psr_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/* Drop the framebuffer's GEM reference and free the wrapper. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/* Create a GEM handle for the framebuffer's backing object. */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};

static int intel_framebuffer_init(struct drm_device *dev,
				  struct
intel_framebuffer *intel_fb, 12072 struct drm_mode_fb_cmd2 *mode_cmd, 12073 struct drm_i915_gem_object *obj) 12074 { 12075 int aligned_height; 12076 int pitch_limit; 12077 int ret; 12078 12079 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 12080 12081 if (obj->tiling_mode == I915_TILING_Y) { 12082 DRM_DEBUG("hardware does not support tiling Y\n"); 12083 return -EINVAL; 12084 } 12085 12086 if (mode_cmd->pitches[0] & 63) { 12087 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", 12088 mode_cmd->pitches[0]); 12089 return -EINVAL; 12090 } 12091 12092 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { 12093 pitch_limit = 32*1024; 12094 } else if (INTEL_INFO(dev)->gen >= 4) { 12095 if (obj->tiling_mode) 12096 pitch_limit = 16*1024; 12097 else 12098 pitch_limit = 32*1024; 12099 } else if (INTEL_INFO(dev)->gen >= 3) { 12100 if (obj->tiling_mode) 12101 pitch_limit = 8*1024; 12102 else 12103 pitch_limit = 16*1024; 12104 } else 12105 /* XXX DSPC is limited to 4k tiled */ 12106 pitch_limit = 8*1024; 12107 12108 if (mode_cmd->pitches[0] > pitch_limit) { 12109 DRM_DEBUG("%s pitch (%d) must be at less than %d\n", 12110 obj->tiling_mode ? "tiled" : "linear", 12111 mode_cmd->pitches[0], pitch_limit); 12112 return -EINVAL; 12113 } 12114 12115 if (obj->tiling_mode != I915_TILING_NONE && 12116 mode_cmd->pitches[0] != obj->stride) { 12117 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 12118 mode_cmd->pitches[0], obj->stride); 12119 return -EINVAL; 12120 } 12121 12122 /* Reject formats not supported by any plane early. 
*/ 12123 switch (mode_cmd->pixel_format) { 12124 case DRM_FORMAT_C8: 12125 case DRM_FORMAT_RGB565: 12126 case DRM_FORMAT_XRGB8888: 12127 case DRM_FORMAT_ARGB8888: 12128 break; 12129 case DRM_FORMAT_XRGB1555: 12130 case DRM_FORMAT_ARGB1555: 12131 if (INTEL_INFO(dev)->gen > 3) { 12132 DRM_DEBUG("unsupported pixel format: %s\n", 12133 drm_get_format_name(mode_cmd->pixel_format)); 12134 return -EINVAL; 12135 } 12136 break; 12137 case DRM_FORMAT_XBGR8888: 12138 case DRM_FORMAT_ABGR8888: 12139 case DRM_FORMAT_XRGB2101010: 12140 case DRM_FORMAT_ARGB2101010: 12141 case DRM_FORMAT_XBGR2101010: 12142 case DRM_FORMAT_ABGR2101010: 12143 if (INTEL_INFO(dev)->gen < 4) { 12144 DRM_DEBUG("unsupported pixel format: %s\n", 12145 drm_get_format_name(mode_cmd->pixel_format)); 12146 return -EINVAL; 12147 } 12148 break; 12149 case DRM_FORMAT_YUYV: 12150 case DRM_FORMAT_UYVY: 12151 case DRM_FORMAT_YVYU: 12152 case DRM_FORMAT_VYUY: 12153 if (INTEL_INFO(dev)->gen < 5) { 12154 DRM_DEBUG("unsupported pixel format: %s\n", 12155 drm_get_format_name(mode_cmd->pixel_format)); 12156 return -EINVAL; 12157 } 12158 break; 12159 default: 12160 DRM_DEBUG("unsupported pixel format: %s\n", 12161 drm_get_format_name(mode_cmd->pixel_format)); 12162 return -EINVAL; 12163 } 12164 12165 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 12166 if (mode_cmd->offsets[0] != 0) 12167 return -EINVAL; 12168 12169 aligned_height = intel_align_height(dev, mode_cmd->height, 12170 obj->tiling_mode); 12171 /* FIXME drm helper for size checks (especially planar formats)? 
*/ 12172 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 12173 return -EINVAL; 12174 12175 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 12176 intel_fb->obj = obj; 12177 intel_fb->obj->framebuffer_references++; 12178 12179 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 12180 if (ret) { 12181 DRM_ERROR("framebuffer init failed %d\n", ret); 12182 return ret; 12183 } 12184 12185 return 0; 12186 } 12187 12188 static struct drm_framebuffer * 12189 intel_user_framebuffer_create(struct drm_device *dev, 12190 struct drm_file *filp, 12191 struct drm_mode_fb_cmd2 *mode_cmd) 12192 { 12193 struct drm_i915_gem_object *obj; 12194 12195 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 12196 mode_cmd->handles[0])); 12197 if (&obj->base == NULL) 12198 return ERR_PTR(-ENOENT); 12199 12200 return intel_framebuffer_create(dev, mode_cmd, obj); 12201 } 12202 12203 #ifndef CONFIG_DRM_I915_FBDEV 12204 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 12205 { 12206 } 12207 #endif 12208 12209 static const struct drm_mode_config_funcs intel_mode_funcs = { 12210 .fb_create = intel_user_framebuffer_create, 12211 .output_poll_changed = intel_fbdev_output_poll_changed, 12212 }; 12213 12214 /* Set up chip specific display functions */ 12215 static void intel_init_display(struct drm_device *dev) 12216 { 12217 struct drm_i915_private *dev_priv = dev->dev_private; 12218 12219 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) 12220 dev_priv->display.find_dpll = g4x_find_best_dpll; 12221 else if (IS_CHERRYVIEW(dev)) 12222 dev_priv->display.find_dpll = chv_find_best_dpll; 12223 else if (IS_VALLEYVIEW(dev)) 12224 dev_priv->display.find_dpll = vlv_find_best_dpll; 12225 else if (IS_PINEVIEW(dev)) 12226 dev_priv->display.find_dpll = pnv_find_best_dpll; 12227 else 12228 dev_priv->display.find_dpll = i9xx_find_best_dpll; 12229 12230 if (HAS_DDI(dev)) { 12231 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 12232 
dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training and audio ELD writing are PCH-split era hooks. */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
			dev_priv->display.modeset_global_resources =
				snb_modeset_global_resources;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
			dev_priv->display.modeset_global_resources =
				ivb_modeset_global_resources;
		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = haswell_write_eld;
			dev_priv->display.modeset_global_resources =
				haswell_modeset_global_resources;
		}
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_global_resources =
			valleyview_modeset_global_resources;
		dev_priv->display.write_eld = ironlake_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}

	intel_panel_init_backlight_funcs(dev);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g.
Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* PCI-id matched quirk entry; PCI_ANY_ID wildcards the subsystem ids. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

/* DMI callback: log the match; returning 1 stops further DMI scanning. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};

/* Apply every PCI-id and DMI matched quirk for this device. */
static void intel_init_quirks(struct drm_device *dev)
{
	struct device *d = dev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if
(dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	/* Set SR01 bit 5 (screen off) before disabling the VGA plane. */
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/* Re-initialize hardware state shared by driver load and resume. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_update_cdclk(dev);

	intel_init_clock_gating(dev);

	intel_reset_dpio(dev);

	intel_enable_gt_powersave(dev);
}

void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}

/*
 * One-time modeset initialization at driver load: mode config limits,
 * per-pipe CRTCs/planes/sprites, output probing, and hw state takeover
 * from the BIOS.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less parts: nothing more to do. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_init_dpio(dev);
	intel_reset_dpio(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, false);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		if (dev_priv->display.get_plane_config) {
			dev_priv->display.get_plane_config(crtc,
							   &crtc->plane_config);
			/*
			 * If the fb is shared between multiple heads, we'll
			 * just get the first one.
			 */
			intel_find_plane_obj(crtc, &crtc->plane_config);
		}
	}
}

static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp);
}

/*
 * Check that this CRTC's plane isn't being scanned out by the *other*
 * pipe (can happen after BIOS handoff on pipe/plane-swapped setups).
 * Returns false when a conflicting mapping is detected.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the
BIOS */ 12695 reg = PIPECONF(crtc->config.cpu_transcoder); 12696 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 12697 12698 /* restore vblank interrupts to correct state */ 12699 if (crtc->active) 12700 drm_vblank_on(dev, crtc->pipe); 12701 else 12702 drm_vblank_off(dev, crtc->pipe); 12703 12704 /* We need to sanitize the plane -> pipe mapping first because this will 12705 * disable the crtc (and hence change the state) if it is wrong. Note 12706 * that gen4+ has a fixed plane -> pipe mapping. */ 12707 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 12708 struct intel_connector *connector; 12709 bool plane; 12710 12711 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 12712 crtc->base.base.id); 12713 12714 /* Pipe has the wrong plane attached and the plane is active. 12715 * Temporarily change the plane mapping and disable everything 12716 * ... */ 12717 plane = crtc->plane; 12718 crtc->plane = !plane; 12719 crtc->primary_enabled = true; 12720 dev_priv->display.crtc_disable(&crtc->base); 12721 crtc->plane = plane; 12722 12723 /* ... and break all links. 
*/ 12724 list_for_each_entry(connector, &dev->mode_config.connector_list, 12725 base.head) { 12726 if (connector->encoder->base.crtc != &crtc->base) 12727 continue; 12728 12729 connector->base.dpms = DRM_MODE_DPMS_OFF; 12730 connector->base.encoder = NULL; 12731 } 12732 /* multiple connectors may have the same encoder: 12733 * handle them and break crtc link separately */ 12734 list_for_each_entry(connector, &dev->mode_config.connector_list, 12735 base.head) 12736 if (connector->encoder->base.crtc == &crtc->base) { 12737 connector->encoder->base.crtc = NULL; 12738 connector->encoder->connectors_active = false; 12739 } 12740 12741 WARN_ON(crtc->active); 12742 crtc->base.enabled = false; 12743 } 12744 12745 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 12746 crtc->pipe == PIPE_A && !crtc->active) { 12747 /* BIOS forgot to enable pipe A, this mostly happens after 12748 * resume. Force-enable the pipe to fix this, the update_dpms 12749 * call below we restore the pipe to the right state, but leave 12750 * the required bits on. */ 12751 intel_enable_pipe_a(dev); 12752 } 12753 12754 /* Adjust the state of the output pipe according to whether we 12755 * have active connectors/encoders. */ 12756 intel_crtc_update_dpms(&crtc->base); 12757 12758 if (crtc->active != crtc->base.enabled) { 12759 struct intel_encoder *encoder; 12760 12761 /* This can happen either due to bugs in the get_hw_state 12762 * functions or because the pipe is force-enabled due to the 12763 * pipe A quirk. */ 12764 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 12765 crtc->base.base.id, 12766 crtc->base.enabled ? "enabled" : "disabled", 12767 crtc->active ? "enabled" : "disabled"); 12768 12769 crtc->base.enabled = crtc->active; 12770 12771 /* Because we only establish the connector -> encoder -> 12772 * crtc links if something is active, this means the 12773 * crtc is now deactivated. Break the links. 
connector 12774 * -> encoder links are only establish when things are 12775 * actually up, hence no need to break them. */ 12776 WARN_ON(crtc->active); 12777 12778 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 12779 WARN_ON(encoder->connectors_active); 12780 encoder->base.crtc = NULL; 12781 } 12782 } 12783 12784 if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) { 12785 /* 12786 * We start out with underrun reporting disabled to avoid races. 12787 * For correct bookkeeping mark this on active crtcs. 12788 * 12789 * Also on gmch platforms we dont have any hardware bits to 12790 * disable the underrun reporting. Which means we need to start 12791 * out with underrun reporting disabled also on inactive pipes, 12792 * since otherwise we'll complain about the garbage we read when 12793 * e.g. coming up after runtime pm. 12794 * 12795 * No protection against concurrent access is required - at 12796 * worst a fifo underrun happens which also sets this to false. 12797 */ 12798 crtc->cpu_fifo_underrun_disabled = true; 12799 crtc->pch_fifo_underrun_disabled = true; 12800 12801 update_scanline_offset(crtc); 12802 } 12803 } 12804 12805 static void intel_sanitize_encoder(struct intel_encoder *encoder) 12806 { 12807 struct intel_connector *connector; 12808 struct drm_device *dev = encoder->base.dev; 12809 12810 /* We need to check both for a crtc link (meaning that the 12811 * encoder is active and trying to read from a pipe) and the 12812 * pipe itself being active. */ 12813 bool has_active_crtc = encoder->base.crtc && 12814 to_intel_crtc(encoder->base.crtc)->active; 12815 12816 if (encoder->connectors_active && !has_active_crtc) { 12817 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 12818 encoder->base.base.id, 12819 encoder->base.name); 12820 12821 /* Connector is active, but has no active pipe. This is 12822 * fallout from our resume register restoring. Disable 12823 * the encoder manually again. 
*/ 12824 if (encoder->base.crtc) { 12825 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 12826 encoder->base.base.id, 12827 encoder->base.name); 12828 encoder->disable(encoder); 12829 if (encoder->post_disable) 12830 encoder->post_disable(encoder); 12831 } 12832 encoder->base.crtc = NULL; 12833 encoder->connectors_active = false; 12834 12835 /* Inconsistent output/port/pipe state happens presumably due to 12836 * a bug in one of the get_hw_state functions. Or someplace else 12837 * in our code, like the register restore mess on resume. Clamp 12838 * things to off as a safer default. */ 12839 list_for_each_entry(connector, 12840 &dev->mode_config.connector_list, 12841 base.head) { 12842 if (connector->encoder != encoder) 12843 continue; 12844 12845 connector->base.dpms = DRM_MODE_DPMS_OFF; 12846 connector->base.encoder = NULL; 12847 } 12848 } 12849 /* Enabled encoders without active connectors will be fixed in 12850 * the crtc fixup. */ 12851 } 12852 12853 void i915_redisable_vga_power_on(struct drm_device *dev) 12854 { 12855 struct drm_i915_private *dev_priv = dev->dev_private; 12856 u32 vga_reg = i915_vgacntrl_reg(dev); 12857 12858 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 12859 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 12860 i915_disable_vga(dev); 12861 } 12862 } 12863 12864 void i915_redisable_vga(struct drm_device *dev) 12865 { 12866 struct drm_i915_private *dev_priv = dev->dev_private; 12867 12868 /* This function can be called both from intel_modeset_setup_hw_state or 12869 * at a very early point in our resume sequence, where the power well 12870 * structures are not yet restored. Since this function is at a very 12871 * paranoid "someone might have enabled VGA while we were not looking" 12872 * level, just check if the power well is enabled instead of trying to 12873 * follow the "don't touch the power well if we don't need it" policy 12874 * the rest of the driver uses. 
*/ 12875 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) 12876 return; 12877 12878 i915_redisable_vga_power_on(dev); 12879 } 12880 12881 static bool primary_get_hw_state(struct intel_crtc *crtc) 12882 { 12883 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 12884 12885 if (!crtc->active) 12886 return false; 12887 12888 return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE; 12889 } 12890 12891 static void intel_modeset_readout_hw_state(struct drm_device *dev) 12892 { 12893 struct drm_i915_private *dev_priv = dev->dev_private; 12894 enum i915_pipe pipe; 12895 struct intel_crtc *crtc; 12896 struct intel_encoder *encoder; 12897 struct intel_connector *connector; 12898 int i; 12899 12900 for_each_intel_crtc(dev, crtc) { 12901 memset(&crtc->config, 0, sizeof(crtc->config)); 12902 12903 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; 12904 12905 crtc->active = dev_priv->display.get_pipe_config(crtc, 12906 &crtc->config); 12907 12908 crtc->base.enabled = crtc->active; 12909 crtc->primary_enabled = primary_get_hw_state(crtc); 12910 12911 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 12912 crtc->base.base.id, 12913 crtc->active ? 
"enabled" : "disabled"); 12914 } 12915 12916 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 12917 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 12918 12919 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); 12920 pll->active = 0; 12921 for_each_intel_crtc(dev, crtc) { 12922 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 12923 pll->active++; 12924 } 12925 pll->refcount = pll->active; 12926 12927 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", 12928 pll->name, pll->refcount, pll->on); 12929 12930 if (pll->refcount) 12931 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 12932 } 12933 12934 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 12935 base.head) { 12936 pipe = 0; 12937 12938 if (encoder->get_hw_state(encoder, &pipe)) { 12939 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 12940 encoder->base.crtc = &crtc->base; 12941 encoder->get_config(encoder, &crtc->config); 12942 } else { 12943 encoder->base.crtc = NULL; 12944 } 12945 12946 encoder->connectors_active = false; 12947 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 12948 encoder->base.base.id, 12949 encoder->base.name, 12950 encoder->base.crtc ? "enabled" : "disabled", 12951 pipe_name(pipe)); 12952 } 12953 12954 list_for_each_entry(connector, &dev->mode_config.connector_list, 12955 base.head) { 12956 if (connector->get_hw_state(connector)) { 12957 connector->base.dpms = DRM_MODE_DPMS_ON; 12958 connector->encoder->connectors_active = true; 12959 connector->base.encoder = &connector->encoder->base; 12960 } else { 12961 connector->base.dpms = DRM_MODE_DPMS_OFF; 12962 connector->base.encoder = NULL; 12963 } 12964 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 12965 connector->base.base.id, 12966 connector->base.name, 12967 connector->base.encoder ? 
"enabled" : "disabled"); 12968 } 12969 } 12970 12971 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm 12972 * and i915 state tracking structures. */ 12973 void intel_modeset_setup_hw_state(struct drm_device *dev, 12974 bool force_restore) 12975 { 12976 struct drm_i915_private *dev_priv = dev->dev_private; 12977 enum i915_pipe pipe; 12978 struct intel_crtc *crtc; 12979 struct intel_encoder *encoder; 12980 int i; 12981 12982 intel_modeset_readout_hw_state(dev); 12983 12984 /* 12985 * Now that we have the config, copy it to each CRTC struct 12986 * Note that this could go away if we move to using crtc_config 12987 * checking everywhere. 12988 */ 12989 for_each_intel_crtc(dev, crtc) { 12990 if (crtc->active && i915.fastboot) { 12991 intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config); 12992 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 12993 crtc->base.base.id); 12994 drm_mode_debug_printmodeline(&crtc->base.mode); 12995 } 12996 } 12997 12998 /* HW state is read out, now we need to sanitize this mess. */ 12999 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 13000 base.head) { 13001 intel_sanitize_encoder(encoder); 13002 } 13003 13004 for_each_pipe(pipe) { 13005 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 13006 intel_sanitize_crtc(crtc); 13007 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); 13008 } 13009 13010 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 13011 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 13012 13013 if (!pll->on || pll->active) 13014 continue; 13015 13016 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 13017 13018 pll->disable(dev_priv, pll); 13019 pll->on = false; 13020 } 13021 13022 if (HAS_PCH_SPLIT(dev)) 13023 ilk_wm_get_hw_state(dev); 13024 13025 if (force_restore) { 13026 i915_redisable_vga(dev); 13027 13028 /* 13029 * We need to use raw interfaces for restoring state to avoid 13030 * checking (bogus) intermediate states. 
13031 */ 13032 for_each_pipe(pipe) { 13033 struct drm_crtc *crtc = 13034 dev_priv->pipe_to_crtc_mapping[pipe]; 13035 13036 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 13037 crtc->primary->fb); 13038 } 13039 } else { 13040 intel_modeset_update_staged_output_state(dev); 13041 } 13042 13043 intel_modeset_check_state(dev); 13044 } 13045 13046 void intel_modeset_gem_init(struct drm_device *dev) 13047 { 13048 struct drm_crtc *c; 13049 struct drm_i915_gem_object *obj; 13050 13051 mutex_lock(&dev->struct_mutex); 13052 intel_init_gt_powersave(dev); 13053 mutex_unlock(&dev->struct_mutex); 13054 13055 intel_modeset_init_hw(dev); 13056 13057 intel_setup_overlay(dev); 13058 13059 /* 13060 * Make sure any fbs we allocated at startup are properly 13061 * pinned & fenced. When we do the allocation it's too early 13062 * for this. 13063 */ 13064 mutex_lock(&dev->struct_mutex); 13065 for_each_crtc(dev, c) { 13066 obj = intel_fb_obj(c->primary->fb); 13067 if (obj == NULL) 13068 continue; 13069 13070 if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) { 13071 DRM_ERROR("failed to pin boot fb on pipe %d\n", 13072 to_intel_crtc(c)->pipe); 13073 drm_framebuffer_unreference(c->primary->fb); 13074 c->primary->fb = NULL; 13075 } 13076 } 13077 mutex_unlock(&dev->struct_mutex); 13078 } 13079 13080 void intel_connector_unregister(struct intel_connector *intel_connector) 13081 { 13082 struct drm_connector *connector = &intel_connector->base; 13083 13084 intel_panel_destroy_backlight(connector); 13085 drm_connector_unregister(connector); 13086 } 13087 13088 void intel_modeset_cleanup(struct drm_device *dev) 13089 { 13090 struct drm_i915_private *dev_priv = dev->dev_private; 13091 struct drm_connector *connector; 13092 13093 /* 13094 * Interrupts and polling as the first thing to avoid creating havoc. 13095 * Too much stuff here (turning of rps, connectors, ...) would 13096 * experience fancy races otherwise. 
13097 */ 13098 drm_irq_uninstall(dev); 13099 intel_hpd_cancel_work(dev_priv); 13100 dev_priv->pm._irqs_disabled = true; 13101 13102 /* 13103 * Due to the hpd irq storm handling the hotplug work can re-arm the 13104 * poll handlers. Hence disable polling after hpd handling is shut down. 13105 */ 13106 drm_kms_helper_poll_fini(dev); 13107 13108 mutex_lock(&dev->struct_mutex); 13109 13110 intel_unregister_dsm_handler(); 13111 13112 intel_disable_fbc(dev); 13113 13114 intel_disable_gt_powersave(dev); 13115 13116 ironlake_teardown_rc6(dev); 13117 13118 mutex_unlock(&dev->struct_mutex); 13119 13120 /* flush any delayed tasks or pending work */ 13121 flush_scheduled_work(); 13122 13123 /* destroy the backlight and sysfs files before encoders/connectors */ 13124 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 13125 struct intel_connector *intel_connector; 13126 13127 intel_connector = to_intel_connector(connector); 13128 intel_connector->unregister(intel_connector); 13129 } 13130 13131 drm_mode_config_cleanup(dev); 13132 13133 intel_cleanup_overlay(dev); 13134 13135 mutex_lock(&dev->struct_mutex); 13136 intel_cleanup_gt_powersave(dev); 13137 mutex_unlock(&dev->struct_mutex); 13138 } 13139 13140 /* 13141 * Return which encoder is currently attached for connector. 13142 */ 13143 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 13144 { 13145 return &intel_attached_encoder(connector)->base; 13146 } 13147 13148 void intel_connector_attach_encoder(struct intel_connector *connector, 13149 struct intel_encoder *encoder) 13150 { 13151 connector->encoder = encoder; 13152 drm_mode_connector_attach_encoder(&connector->base, 13153 &encoder->base); 13154 } 13155 13156 /* 13157 * set vga decode state - true == enable VGA decode 13158 */ 13159 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 13160 { 13161 struct drm_i915_private *dev_priv = dev->dev_private; 13162 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 13163 u16 gmch_ctrl; 13164 13165 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 13166 DRM_ERROR("failed to read control word\n"); 13167 return -EIO; 13168 } 13169 13170 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 13171 return 0; 13172 13173 if (state) 13174 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 13175 else 13176 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 13177 13178 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 13179 DRM_ERROR("failed to write control word\n"); 13180 return -EIO; 13181 } 13182 13183 return 0; 13184 } 13185 13186 #if 0 13187 struct intel_display_error_state { 13188 13189 u32 power_well_driver; 13190 13191 int num_transcoders; 13192 13193 struct intel_cursor_error_state { 13194 u32 control; 13195 u32 position; 13196 u32 base; 13197 u32 size; 13198 } cursor[I915_MAX_PIPES]; 13199 13200 struct intel_pipe_error_state { 13201 bool power_domain_on; 13202 u32 source; 13203 u32 stat; 13204 } pipe[I915_MAX_PIPES]; 13205 13206 struct intel_plane_error_state { 13207 u32 control; 13208 u32 stride; 13209 u32 size; 13210 u32 pos; 13211 u32 addr; 13212 u32 surface; 13213 u32 tile_offset; 13214 } plane[I915_MAX_PIPES]; 13215 13216 struct intel_transcoder_error_state { 13217 bool power_domain_on; 13218 enum transcoder cpu_transcoder; 13219 13220 u32 conf; 13221 13222 u32 htotal; 13223 u32 hblank; 13224 u32 hsync; 13225 u32 vtotal; 13226 u32 vblank; 13227 u32 vsync; 13228 } transcoder[4]; 13229 }; 13230 13231 struct intel_display_error_state * 13232 intel_display_capture_error_state(struct drm_device *dev) 13233 { 13234 struct drm_i915_private *dev_priv = dev->dev_private; 13235 struct intel_display_error_state *error; 13236 int transcoders[] = { 13237 TRANSCODER_A, 13238 TRANSCODER_B, 13239 TRANSCODER_C, 13240 TRANSCODER_EDP, 13241 }; 13242 int i; 13243 13244 if (INTEL_INFO(dev)->num_pipes == 0) 13245 return NULL; 13246 13247 error = kzalloc(sizeof(*error), GFP_ATOMIC); 13248 if (error == 
NULL) 13249 return NULL; 13250 13251 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13252 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 13253 13254 for_each_pipe(i) { 13255 error->pipe[i].power_domain_on = 13256 intel_display_power_enabled_unlocked(dev_priv, 13257 POWER_DOMAIN_PIPE(i)); 13258 if (!error->pipe[i].power_domain_on) 13259 continue; 13260 13261 error->cursor[i].control = I915_READ(CURCNTR(i)); 13262 error->cursor[i].position = I915_READ(CURPOS(i)); 13263 error->cursor[i].base = I915_READ(CURBASE(i)); 13264 13265 error->plane[i].control = I915_READ(DSPCNTR(i)); 13266 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 13267 if (INTEL_INFO(dev)->gen <= 3) { 13268 error->plane[i].size = I915_READ(DSPSIZE(i)); 13269 error->plane[i].pos = I915_READ(DSPPOS(i)); 13270 } 13271 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 13272 error->plane[i].addr = I915_READ(DSPADDR(i)); 13273 if (INTEL_INFO(dev)->gen >= 4) { 13274 error->plane[i].surface = I915_READ(DSPSURF(i)); 13275 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 13276 } 13277 13278 error->pipe[i].source = I915_READ(PIPESRC(i)); 13279 13280 if (HAS_GMCH_DISPLAY(dev)) 13281 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 13282 } 13283 13284 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 13285 if (HAS_DDI(dev_priv->dev)) 13286 error->num_transcoders++; /* Account for eDP. 
*/ 13287 13288 for (i = 0; i < error->num_transcoders; i++) { 13289 enum transcoder cpu_transcoder = transcoders[i]; 13290 13291 error->transcoder[i].power_domain_on = 13292 intel_display_power_enabled_unlocked(dev_priv, 13293 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 13294 if (!error->transcoder[i].power_domain_on) 13295 continue; 13296 13297 error->transcoder[i].cpu_transcoder = cpu_transcoder; 13298 13299 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 13300 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 13301 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 13302 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 13303 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 13304 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 13305 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 13306 } 13307 13308 return error; 13309 } 13310 13311 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 13312 13313 void 13314 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 13315 struct drm_device *dev, 13316 struct intel_display_error_state *error) 13317 { 13318 int i; 13319 13320 if (!error) 13321 return; 13322 13323 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 13324 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13325 err_printf(m, "PWR_WELL_CTL2: %08x\n", 13326 error->power_well_driver); 13327 for_each_pipe(i) { 13328 err_printf(m, "Pipe [%d]:\n", i); 13329 err_printf(m, " Power: %s\n", 13330 error->pipe[i].power_domain_on ? 
"on" : "off"); 13331 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 13332 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 13333 13334 err_printf(m, "Plane [%d]:\n", i); 13335 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 13336 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 13337 if (INTEL_INFO(dev)->gen <= 3) { 13338 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 13339 err_printf(m, " POS: %08x\n", error->plane[i].pos); 13340 } 13341 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 13342 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 13343 if (INTEL_INFO(dev)->gen >= 4) { 13344 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 13345 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 13346 } 13347 13348 err_printf(m, "Cursor [%d]:\n", i); 13349 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 13350 err_printf(m, " POS: %08x\n", error->cursor[i].position); 13351 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 13352 } 13353 13354 for (i = 0; i < error->num_transcoders; i++) { 13355 err_printf(m, "CPU transcoder: %c\n", 13356 transcoder_name(error->transcoder[i].cpu_transcoder)); 13357 err_printf(m, " Power: %s\n", 13358 error->transcoder[i].power_domain_on ? "on" : "off"); 13359 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 13360 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 13361 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 13362 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 13363 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 13364 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 13365 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 13366 } 13367 } 13368 #endif 13369