1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_dp_helper.h> 38 #include <drm/drm_crtc_helper.h> 39 #include <drm/drm_plane_helper.h> 40 #include <drm/drm_rect.h> 41 42 /* Primary plane formats supported by all gen */ 43 #define COMMON_PRIMARY_FORMATS \ 44 DRM_FORMAT_C8, \ 45 DRM_FORMAT_RGB565, \ 46 DRM_FORMAT_XRGB8888, \ 47 DRM_FORMAT_ARGB8888 48 49 /* Primary plane formats for gen <= 3 */ 50 static const uint32_t intel_primary_formats_gen2[] = { 51 COMMON_PRIMARY_FORMATS, 52 DRM_FORMAT_XRGB1555, 53 DRM_FORMAT_ARGB1555, 54 }; 55 56 /* Primary plane formats for gen >= 4 */ 57 static const uint32_t intel_primary_formats_gen4[] = { 58 COMMON_PRIMARY_FORMATS, \ 59 DRM_FORMAT_XBGR8888, 60 DRM_FORMAT_ABGR8888, 61 DRM_FORMAT_XRGB2101010, 62 DRM_FORMAT_ARGB2101010, 63 DRM_FORMAT_XBGR2101010, 64 DRM_FORMAT_ABGR2101010, 65 }; 66 67 /* Cursor formats */ 68 static const uint32_t intel_cursor_formats[] = { 69 DRM_FORMAT_ARGB8888, 70 }; 71 72 static void intel_increase_pllclock(struct drm_device *dev, 73 enum i915_pipe pipe); 74 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 75 76 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 77 struct intel_crtc_config *pipe_config); 78 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 79 struct intel_crtc_config *pipe_config); 80 81 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 82 int x, int y, struct drm_framebuffer *old_fb); 83 static int intel_framebuffer_init(struct drm_device *dev, 84 struct intel_framebuffer *ifb, 85 struct drm_mode_fb_cmd2 *mode_cmd, 86 struct drm_i915_gem_object *obj); 87 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 88 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
static void chv_prepare_pll(struct intel_crtc *crtc);

/* Inclusive [min, max] bounds for one PLL divider or derived clock. */
typedef struct {
	int min, max;
} intel_range_t;

/*
 * p2 post divider selection: the dpll search code picks p2_slow for
 * target dot clocks below dot_limit and p2_fast otherwise (see
 * i9xx_find_best_dpll() and friends).
 */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Per-platform legal ranges for every PLL divider and derived clock. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/* Read the PCH reference (raw) clock frequency; PCH split platforms only. */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		/* Gen5 exposes the BIOS-programmed FDI PLL feedback divider. */
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit 0: p2 is the same regardless of dot clock */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4860000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 22.2 fixed point on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

/* Derive m, p, vco and dot clock from the VLV divider fields. */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/* Pick the ILK/SNB limit table based on output type, LVDS link width and refclk. */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Pick the G4X limit table based on the output type driven by the pipe. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Top-level limit-table dispatch across all supported platforms. */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Effective m divider for i9xx: register values are programmed as (value - 2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

/* Derive m/p/vco/dot for i9xx-style dividers; the effective N divider is n + 2. */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Derive m/p/vco/dot for CHV; m2 carries 22 fractional bits (see limits table). */
static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* PNV has no m1 and VLV has no m1 ordering constraint */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV limit tables carry no combined m/p ranges */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/*
 * Exhaustive divider search for i8xx/i9xx; minimizes |dot - target|.
 * Returns true if any valid divider combination was found.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Divider search for Pineview: same structure as i9xx but uses
 * pineview_clock() and has no m1 > m2 constraint (m1 is reserved as 0).
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Divider search for G4X: accepts any result within ~0.585% of target,
 * preferring smaller n and larger m1/m2 per hardware requirements.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Divider search for VLV: works in fast-clock units (5x dot clock),
 * minimizing the ppm error while preferring larger p when within 100 ppm.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* solve m2 so vco/p lands on target */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}

/*
 * Divider search for CHV: n and m1 are fixed by hardware doc; solves m2
 * (22.2 fixed point) for each p1/p2 pair, preferring bigger p.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	uint64_t m2;
	/* NOTE(review): declared int but used as a bool; vlv_find_best_dpll uses bool */
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* guard the narrowing store into clock.m2 below */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			/* based on hardware requirement, prefer bigger p
			 */
			if (clock.p > best_clock->p) {
				*best_clock = clock;
				found = true;
			}
		}
	}

	return found;
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->primary->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

/* Map a pipe to the CPU transcoder recorded in its crtc config. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}

/* Wait for the frame counter to advance (one vblank) on G4X+ hardware. */
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		WARN(1, "vblank wait on pipe %c timed out\n",
		     pipe_name(pipe));
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		g4x_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
			      pipe_name(pipe));
}

/* Sample the display scanline register twice; equal readings mean it stopped. */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			/* unknown port: report connected rather than lose output */
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

/* Return the shared DPLL bound to @crtc, or NULL if none is assigned. */
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config.shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll,
		 "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void
assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* Assert the panel power sequencer registers for @pipe are writable. */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	/* regs are unlocked when the panel is off or explicitly unlocked */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

/* Assert the hardware cursor enable state for @pipe. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	/* 845G/865G have a single, pipe-independent cursor control reg */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

/* Assert the enable state of @pipe, honouring the force-on quirks. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* powered-down transcoder: don't touch the register, treat as off */
	if (!intel_display_power_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

/* Assert the enable state of primary plane @plane. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

/* Assert that no primary plane is scanning out from @pipe. */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

/* Assert that all sprite planes on @pipe are disabled. */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		/* VLV has multiple sprites per pipe */
		for_each_sprite(pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

/*
 * Assert that vblank interrupts are disabled on @crtc: a successful
 * drm_crtc_vblank_get() (returns 0) means they weren't, so warn and
 * drop the reference again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

/* Assert the PCH reference clock (DREF) is enabled. IBX/CPT only. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

/* Assert that the PCH transcoder for @pipe is disabled. */
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum i915_pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

/* Is the PCH DP port (control value @val) enabled and routed to @pipe? */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* CPT selects the transcoder via TRANS_DP_CTL */
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

/* Is the PCH HDMI/SDVO port (control value @val) routed to @pipe? */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

/* Is the LVDS port (control value @val) enabled and routed to @pipe? */
static bool lvds_pipe_enabled(struct
drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

/* Is the VGA DAC (control value @val) enabled and routed to @pipe? */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

/* Assert that PCH DP port @reg is not driving @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* even a disabled IBX port must not stay selected on transcoder B */
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

/* Assert that PCH HDMI port @reg is not driving @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* even a disabled IBX port must not stay selected on transcoder B */
	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

/* Assert that no PCH port (DP, VGA, LVDS, HDMI) is driving @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

/* Route the DPIO PHYs to their IOSF sideband ports (VLV/CHV only). */
static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

/* Enable the DPLL for @crtc's pipe on VLV and wait for it to lock. */
static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1546 1547 I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md); 1548 POSTING_READ(DPLL_MD(crtc->pipe)); 1549 1550 /* We do this three times for luck */ 1551 I915_WRITE(reg, dpll); 1552 POSTING_READ(reg); 1553 udelay(150); /* wait for warmup */ 1554 I915_WRITE(reg, dpll); 1555 POSTING_READ(reg); 1556 udelay(150); /* wait for warmup */ 1557 I915_WRITE(reg, dpll); 1558 POSTING_READ(reg); 1559 udelay(150); /* wait for warmup */ 1560 } 1561 1562 static void chv_enable_pll(struct intel_crtc *crtc) 1563 { 1564 struct drm_device *dev = crtc->base.dev; 1565 struct drm_i915_private *dev_priv = dev->dev_private; 1566 int pipe = crtc->pipe; 1567 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1568 u32 tmp; 1569 1570 assert_pipe_disabled(dev_priv, crtc->pipe); 1571 1572 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); 1573 1574 mutex_lock(&dev_priv->dpio_lock); 1575 1576 /* Enable back the 10bit clock to display controller */ 1577 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1578 tmp |= DPIO_DCLKP_EN; 1579 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1580 1581 /* 1582 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
1583 */ 1584 udelay(1); 1585 1586 /* Enable PLL */ 1587 I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll); 1588 1589 /* Check PLL is locked */ 1590 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1591 DRM_ERROR("PLL %d failed to lock\n", pipe); 1592 1593 /* not sure when this should be written */ 1594 I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md); 1595 POSTING_READ(DPLL_MD(pipe)); 1596 1597 mutex_unlock(&dev_priv->dpio_lock); 1598 } 1599 1600 static int intel_num_dvo_pipes(struct drm_device *dev) 1601 { 1602 struct intel_crtc *crtc; 1603 int count = 0; 1604 1605 for_each_intel_crtc(dev, crtc) 1606 count += crtc->active && 1607 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO); 1608 1609 return count; 1610 } 1611 1612 static void i9xx_enable_pll(struct intel_crtc *crtc) 1613 { 1614 struct drm_device *dev = crtc->base.dev; 1615 struct drm_i915_private *dev_priv = dev->dev_private; 1616 int reg = DPLL(crtc->pipe); 1617 u32 dpll = crtc->config.dpll_hw_state.dpll; 1618 1619 assert_pipe_disabled(dev_priv, crtc->pipe); 1620 1621 /* No really, not for ILK+ */ 1622 BUG_ON(INTEL_INFO(dev)->gen >= 5); 1623 1624 /* PLL is protected by panel, make sure we can write it */ 1625 if (IS_MOBILE(dev) && !IS_I830(dev)) 1626 assert_panel_unlocked(dev_priv, crtc->pipe); 1627 1628 /* Enable DVO 2x clock on both PLLs if necessary */ 1629 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { 1630 /* 1631 * It appears to be important that we don't enable this 1632 * for the current pipe before otherwise configuring the 1633 * PLL. No idea how this should be handled if multiple 1634 * DVO outputs are enabled simultaneosly. 1635 */ 1636 dpll |= DPLL_DVO_2X_MODE; 1637 I915_WRITE(DPLL(!crtc->pipe), 1638 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1639 } 1640 1641 /* Wait for the clocks to stabilize. 
	 */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe's PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
	    intel_num_dvo_pipes(dev) == 1) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), 0);
	POSTING_READ(DPLL(pipe));
}

/* Disable the DPLL for @pipe on VLV, keeping CRI/refclk alive on pipe B. */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 val = 0;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}

/* Disable the DPLL for @pipe on CHV and gate its 10bit/dist clocks. */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->dpio_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Wait for the PHY to report the given digital port ready (VLV/CHV). */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
1770 break; 1771 case PORT_C: 1772 port_mask = DPLL_PORTC_READY_MASK; 1773 dpll_reg = DPLL(0); 1774 break; 1775 case PORT_D: 1776 port_mask = DPLL_PORTD_READY_MASK; 1777 dpll_reg = DPIO_PHY_STATUS; 1778 break; 1779 default: 1780 BUG(); 1781 } 1782 1783 if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000)) 1784 WARN(1, "timed out waiting for port %c ready: 0x%08x\n", 1785 port_name(dport->port), I915_READ(dpll_reg)); 1786 } 1787 1788 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1789 { 1790 struct drm_device *dev = crtc->base.dev; 1791 struct drm_i915_private *dev_priv = dev->dev_private; 1792 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1793 1794 if (WARN_ON(pll == NULL)) 1795 return; 1796 1797 WARN_ON(!pll->refcount); 1798 if (pll->active == 0) { 1799 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1800 WARN_ON(pll->on); 1801 assert_shared_dpll_disabled(dev_priv, pll); 1802 1803 pll->mode_set(dev_priv, pll); 1804 } 1805 } 1806 1807 /** 1808 * intel_enable_shared_dpll - enable PCH PLL 1809 * @dev_priv: i915 private structure 1810 * @pipe: pipe PLL to enable 1811 * 1812 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1813 * drives the transcoder clock. 1814 */ 1815 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1816 { 1817 struct drm_device *dev = crtc->base.dev; 1818 struct drm_i915_private *dev_priv = dev->dev_private; 1819 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1820 1821 if (WARN_ON(pll == NULL)) 1822 return; 1823 1824 if (WARN_ON(pll->refcount == 0)) 1825 return; 1826 1827 DRM_DEBUG_KMS("enable %s (active %d, on? 
%d) for crtc %d\n", 1828 pll->name, pll->active, pll->on, 1829 crtc->base.base.id); 1830 1831 if (pll->active++) { 1832 WARN_ON(!pll->on); 1833 assert_shared_dpll_enabled(dev_priv, pll); 1834 return; 1835 } 1836 WARN_ON(pll->on); 1837 1838 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1839 1840 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1841 pll->enable(dev_priv, pll); 1842 pll->on = true; 1843 } 1844 1845 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1846 { 1847 struct drm_device *dev = crtc->base.dev; 1848 struct drm_i915_private *dev_priv = dev->dev_private; 1849 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1850 1851 /* PCH only available on ILK+ */ 1852 BUG_ON(INTEL_INFO(dev)->gen < 5); 1853 if (WARN_ON(pll == NULL)) 1854 return; 1855 1856 if (WARN_ON(pll->refcount == 0)) 1857 return; 1858 1859 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1860 pll->name, pll->active, pll->on, 1861 crtc->base.base.id); 1862 1863 if (WARN_ON(pll->active == 0)) { 1864 assert_shared_dpll_disabled(dev_priv, pll); 1865 return; 1866 } 1867 1868 assert_shared_dpll_enabled(dev_priv, pll); 1869 WARN_ON(!pll->on); 1870 if (--pll->active) 1871 return; 1872 1873 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1874 pll->disable(dev_priv, pll); 1875 pll->on = false; 1876 1877 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1878 } 1879 1880 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1881 enum i915_pipe pipe) 1882 { 1883 struct drm_device *dev = dev_priv->dev; 1884 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1885 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1886 uint32_t reg, val, pipeconf_val; 1887 1888 /* PCH only available on ILK+ */ 1889 BUG_ON(!HAS_PCH_SPLIT(dev)); 1890 1891 /* Make sure PCH DPLL is enabled */ 1892 assert_shared_dpll_enabled(dev_priv, 1893 intel_crtc_to_shared_dpll(intel_crtc)); 1894 1895 /* FDI must be feeding us bits for PCH ports */ 1896 
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPECONF_BPC_MASK;
		val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			/* IBX + SDVO uses the legacy interlace mode */
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

/* Enable the LPT PCH transcoder (hardware transcoder A). */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

/* Disable the PCH transcoder for @pipe (IBX/CPT). */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

/* Disable the LPT PCH transcoder. */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit.
	 */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum i915_pipe pch_transcoder;
	int reg;
	u32 val;

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config.has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* may only already be on because of the force-on quirks */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
2094 */ 2095 assert_planes_disabled(dev_priv, pipe); 2096 assert_cursor_disabled(dev_priv, pipe); 2097 assert_sprites_disabled(dev_priv, pipe); 2098 2099 reg = PIPECONF(cpu_transcoder); 2100 val = I915_READ(reg); 2101 if ((val & PIPECONF_ENABLE) == 0) 2102 return; 2103 2104 /* 2105 * Double wide has implications for planes 2106 * so best keep it disabled when not needed. 2107 */ 2108 if (crtc->config.double_wide) 2109 val &= ~PIPECONF_DOUBLE_WIDE; 2110 2111 /* Don't disable pipe or pipe PLLs if needed */ 2112 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2113 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2114 val &= ~PIPECONF_ENABLE; 2115 2116 I915_WRITE(reg, val); 2117 if ((val & PIPECONF_ENABLE) == 0) 2118 intel_wait_for_pipe_off(crtc); 2119 } 2120 2121 /* 2122 * Plane regs are double buffered, going from enabled->disabled needs a 2123 * trigger in order to latch. The display address reg provides this. 2124 */ 2125 void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 2126 enum plane plane) 2127 { 2128 struct drm_device *dev = dev_priv->dev; 2129 u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); 2130 2131 I915_WRITE(reg, I915_READ(reg)); 2132 POSTING_READ(reg); 2133 } 2134 2135 /** 2136 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe 2137 * @plane: plane to be enabled 2138 * @crtc: crtc for the plane 2139 * 2140 * Enable @plane on @crtc, making sure that the pipe is running first. 
 */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	/* Already enabled per the software-tracked state: nothing to do. */
	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @plane: plane to be disabled
 * @crtc: crtc for the plane
 *
 * Disable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
					   struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	/*
	 * With primary_enabled now false, the update_primary_plane hook
	 * writes a disabled plane (see i9xx/ironlake_update_primary_plane).
	 */
	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);
}

/*
 * True when the VT-d scanout workaround applies: gen6+ with the graphics
 * IOMMU mapping active.  Always false without CONFIG_INTEL_IOMMU.
 */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/*
 * Round @height up to the tile height: 16 rows on gen2, 8 rows on later
 * gens, no rounding (1) for linear buffers.
 */
static int intel_align_height(struct drm_device *dev, int height, bool tiled)
{
	int tile_height;

	tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
	return ALIGN(height, tile_height);
}

/*
 * Pin @obj into the GGTT for scanout with a generation/tiling dependent
 * alignment and install a fence register.  Caller holds struct_mutex.
 * Returns 0 on success or a negative errno.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_engine_cs *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Pick the GGTT alignment the display engine requires. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* Y tiling is not supported for scanout here. */
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo.  We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.
	 * For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence pin and the
 * display-plane pin.  Caller holds struct_mutex.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* X-major tiles here are 512 bytes wide and 8 rows tall
		 * (4096 bytes each); fold whole tiles out of x/y. */
		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		/* Linear: return the 4K-page base, fold the remainder
		 * into x (y becomes 0). */
		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}

/*
 * Translate a DSPCNTR pixel-format field value to its DRM fourcc.
 * Unknown values fall through to XRGB8888 (note the default: placement).
 */
int intel_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

static bool
intel_alloc_plane_obj(struct intel_crtc *crtc,
		      struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	/* Wrap the firmware-programmed stolen-memory scanout region in a
	 * GEM object so we can keep displaying it. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	if (plane_config->tiled) {
		obj->tiling_mode = I915_TILING_X;
		obj->stride = crtc->base.primary->fb->pitches[0];
	}

	/* Mirror the placeholder fb's parameters into the real one. */
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
	mode_cmd.width = crtc->base.primary->fb->width;
	mode_cmd.height = crtc->base.primary->fb->height;
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/*
 * Take over the firmware framebuffer for @intel_crtc, either by wrapping
 * it via intel_alloc_plane_obj() or by sharing another active crtc's fb
 * that scans out from the same address.
 */
static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
				 struct intel_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config))
		return;

	/* Allocation failed: drop the placeholder fb. */
	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		/* Same scanout base => both pipes are showing this fb. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(c->primary->fb);
			intel_crtc->base.primary->fb = c->primary->fb;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}
}

/*
 * Program the gen2-gen4 primary plane registers for @fb at pan position
 * (@x, @y), or write a disabled plane when primary_enabled is false.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		/* Plane logically off: clear control, then poke the
		 * address register to latch the disable. */
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config.pipe_src_h - 1) << 16) |
			   (intel_crtc->config.pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	}

	/* Map the fb fourcc onto the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ splits the offset into a page-aligned base plus
		 * the x/y remainder computed here. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config.pipe_src_w - 1);
		y += (intel_crtc->config.pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config.pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: page-aligned surface base plus tile/linear
		 * fine offsets. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/*
 * Program the ILK+ primary plane registers for @fb at pan position
 * (@x, @y), or write a disabled plane when primary_enabled is false.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		/* Plane logically off: clear control and surface base. */
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Map the fb fourcc onto the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* NOTE(review): on HSW/BDW the x/y/linear adjustment is
		 * skipped -- presumably the hw applies the 180-degree
		 * offset itself there; confirm against the PRM. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config.pipe_src_w - 1);
			y += (intel_crtc->config.pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config.pipe_src_w - 1) * pixel_size;
		}
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device
*dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* FBC is tied to the old scanout address: disable it before the
	 * base pointers change. */
	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

/*
 * Called after a GPU reset: complete all pending page flips and re-program
 * every active primary plane with its current fb.
 */
void intel_display_handle_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Flips in the rings have been nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 *
	 * Also update the base address of all primary
	 * planes to the the last fb to make sure we're
	 * showing the correct fb after a reset.
	 *
	 * Need to make two loops over the crtcs so that we
	 * don't try to grab a crtc mutex before the
	 * pending_flip_queue really got woken up.
	 */

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

/*
 * Wait for any GPU rendering to the old fb to retire before it is
 * unpinned.  Returns the wait result (only expected to fail on a hung
 * GPU, per the comment below).
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}

/*
 * True when a page flip is still outstanding on @crtc.  A flip is
 * considered gone once a GPU reset is in progress or has happened since
 * the flip was queued.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	/* DragonFly: lockmgr stands in for the Linux event_lock spinlock. */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}

/*
 * Legacy set_base: pin the new fb, program the plane, then unpin the old
 * fb after the flip has taken effect.  Returns 0 or a negative errno.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
	int ret;

	if (intel_crtc_has_pending_flip(crtc)) {
		DRM_ERROR("pipe is still busy with an old pageflip\n");
		return -EBUSY;
	}

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* NOTE(review): planes are 0-indexed, so '>' lets
	 * plane == num_pipes slip through; '>=' looks intended --
	 * confirm against upstream before changing. */
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
			  plane_name(intel_crtc->plane),
			  INTEL_INFO(dev)->num_pipes);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);
	if (ret != 0) {
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */
	if (i915.fastboot) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config.adjusted_mode;

		I915_WRITE(PIPESRC(intel_crtc->pipe),
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
			   (adjusted_mode->crtc_vdisplay - 1));
		if (!intel_crtc->config.pch_pfit.enabled &&
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
			/* Panel fitter no longer needed: clear it out. */
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
		}
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
	}

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	if (intel_crtc->active)
		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	crtc->primary->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Let the new fb get latched at vblank before the old
		 * one is unpinned out from under the scanout. */
		if (intel_crtc->active && old_fb != fb)
			intel_wait_for_vblank(dev, intel_crtc->pipe);
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Switch the FDI link from a training pattern to the normal ("NONE")
 * pixel-data state after training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/*
 * A crtc counts as using the PCH only if it is enabled, active and
 * drives a PCH encoder.
 */
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
	return crtc->base.enabled && crtc->active &&
	       crtc->config.has_pch_encoder;
}

static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}

/* The FDI link training functions for ILK/Ibexpeak.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to acknowledge/clear it. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			/* Write the bit back to acknowledge/clear it. */
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing / pre-emphasis settings tried in order during SNB/IVB
 * FDI link training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/pre-emphasis table until symbol lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* Write-1-to-clear the sticky lock bit. */
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/preemphasis entry is tried twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Re-read once: the lock bit may set between reads. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and the (always-on for Ironlake) CPU FDI TX
 * PLL for this pipe. Must be done before the CPU pipe is enabled. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	/* Mirror the pipe's BPC into the FDI RX bpc field (bits 18:16). */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Tear down the FDI PLLs enabled by ironlake_fdi_pll_enable(), in the
 * reverse order: clock source back to Rawclk, then TX PLL, then RX PLL. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

/* Disable the CPU FDI transmitter and PCH FDI receiver for this pipe and
 * leave both sides parked in training pattern 1. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/* Returns true if any CRTC has framebuffer unpin work outstanding.
 * If the flip itself is still pending, waits one vblank first so the
 * unpin work has a chance to be queued. */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/* Finish a page flip: send the vblank event (if any), drop the vblank
 * reference and queue the unpin work. Caller holds dev->event_lock
 * (see intel_crtc_wait_for_pending_flips below). */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/* Block until any pending page flip on this CRTC has completed; after a
 * 60s timeout the flip is assumed stuck and force-completed. */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/* DragonFly lockmgr stands in for Linux spin_lock_irq on
		 * dev->event_lock here. */
		lockmgr(&dev->event_lock, LK_EXCLUSIVE);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		lockmgr(&dev->event_lock, LK_RELEASE);
	}

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Copy the CPU transcoder timings into the PCH transcoder registers. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/* Set FDI_BC_BIFURCATION_SELECT so FDI B/C lanes are split between pipes
 * B and C. Requires both FDI receivers to be disabled. Idempotent. */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/* Decide, per pipe, whether FDI B/C lane bifurcation must be enabled. */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		/* Pipe B needing more than 2 lanes is incompatible with
		 * bifurcation (pipe C would have no lanes left). */
		if (intel_crtc->config.fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
		else
			cpt_enable_fdi_bc_bifurcation(dev);

		break;
	case PIPE_C:
		cpt_enable_fdi_bc_bifurcation(dev);

		break;
	default:
		BUG();
	}
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* Extract the pipe's BPC from PIPECONF (bits 7:5). */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

/* LPT variant of the PCH enable sequence: program iCLKIP, copy the CPU
 * transcoder timings, then enable the (single) LPT PCH transcoder. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing.
*/ 3743 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 3744 3745 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 3746 } 3747 3748 void intel_put_shared_dpll(struct intel_crtc *crtc) 3749 { 3750 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3751 3752 if (pll == NULL) 3753 return; 3754 3755 if (pll->refcount == 0) { 3756 WARN(1, "bad %s refcount\n", pll->name); 3757 return; 3758 } 3759 3760 if (--pll->refcount == 0) { 3761 WARN_ON(pll->on); 3762 WARN_ON(pll->active); 3763 } 3764 3765 crtc->config.shared_dpll = DPLL_ID_PRIVATE; 3766 } 3767 3768 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) 3769 { 3770 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3771 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3772 enum intel_dpll_id i; 3773 3774 if (pll) { 3775 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n", 3776 crtc->base.base.id, pll->name); 3777 intel_put_shared_dpll(crtc); 3778 } 3779 3780 if (HAS_PCH_IBX(dev_priv->dev)) { 3781 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3782 i = (enum intel_dpll_id) crtc->pipe; 3783 pll = &dev_priv->shared_dplls[i]; 3784 3785 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3786 crtc->base.base.id, pll->name); 3787 3788 WARN_ON(pll->refcount); 3789 3790 goto found; 3791 } 3792 3793 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3794 pll = &dev_priv->shared_dplls[i]; 3795 3796 /* Only want to check enabled timings first */ 3797 if (pll->refcount == 0) 3798 continue; 3799 3800 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, 3801 sizeof(pll->hw_state)) == 0) { 3802 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", 3803 crtc->base.base.id, 3804 pll->name, pll->refcount, pll->active); 3805 3806 goto found; 3807 } 3808 } 3809 3810 /* Ok no matching timings, maybe there's a free one? 
*/ 3811 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3812 pll = &dev_priv->shared_dplls[i]; 3813 if (pll->refcount == 0) { 3814 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 3815 crtc->base.base.id, pll->name); 3816 goto found; 3817 } 3818 } 3819 3820 return NULL; 3821 3822 found: 3823 if (pll->refcount == 0) 3824 pll->hw_state = crtc->config.dpll_hw_state; 3825 3826 crtc->config.shared_dpll = i; 3827 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 3828 pipe_name(crtc->pipe)); 3829 3830 pll->refcount++; 3831 3832 return pll; 3833 } 3834 3835 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 3836 { 3837 struct drm_i915_private *dev_priv = dev->dev_private; 3838 int dslreg = PIPEDSL(pipe); 3839 u32 temp; 3840 3841 temp = I915_READ(dslreg); 3842 udelay(500); 3843 if (wait_for(I915_READ(dslreg) != temp, 5)) { 3844 if (wait_for(I915_READ(dslreg) != temp, 5)) 3845 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 3846 } 3847 } 3848 3849 static void ironlake_pfit_enable(struct intel_crtc *crtc) 3850 { 3851 struct drm_device *dev = crtc->base.dev; 3852 struct drm_i915_private *dev_priv = dev->dev_private; 3853 int pipe = crtc->pipe; 3854 3855 if (crtc->config.pch_pfit.enabled) { 3856 /* Force use of hard-coded filter coefficients 3857 * as some pre-programmed values are broken, 3858 * e.g. x201. 
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}

/* Restore all legacy sprite planes attached to this CRTC's pipe. */
static void intel_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_restore(&intel_plane->base);
	}
}

/* Disable all legacy sprite planes attached to this CRTC's pipe. */
static void intel_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_disable(&intel_plane->base);
	}
}

/* Enable IPS (Intermediate Pixel Storage) for this CRTC, if configured.
 * On Broadwell this goes through the pcode mailbox; on Haswell IPS_CTL
 * is written directly and polled until it reads back enabled. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

/* Disable IPS for this CRTC (mailbox on Broadwell, IPS_CTL on Haswell)
 * and wait a vblank so the plane can safely be disabled afterwards. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (!HAS_GMCH_DISPLAY(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* 256 palette entries, packed R[23:16] G[15:8] B[7:0]. */
	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

/* On disable, switch off the hardware overlay (if any); on enable this
 * is a no-op — userspace re-enables the overlay itself. */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/* Enable everything drawn on this CRTC: primary plane, sprites, cursor,
 * overlay and IPS, then refresh FBC state. */
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	assert_vblank_disabled(crtc);

	drm_vblank_on(dev, pipe);

	intel_enable_primary_hw_plane(crtc->primary, crtc);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);
	intel_crtc_dpms_overlay(intel_crtc, true);

	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip from a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

/* Disable everything drawn on this CRTC, in the reverse order of
 * intel_crtc_enable_planes(), after draining pending page flips. */
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_crtc_wait_for_pending_flips(crtc);

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_hw_plane(crtc->primary, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));

	drm_vblank_off(dev, pipe);

	assert_vblank_disabled(crtc);
}

/* Full mode-set enable sequence for an Ironlake-style (PCH split) CRTC:
 * clocks, timings, pipeconf, FDI/PCH, LUT, pipe, encoders, planes. The
 * ordering of these steps is mandated by the hardware. */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc->config.has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	intel_crtc_enable_planes(crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;

	/* We want to get the other_active_crtc only if there's only 1 other
	 * active crtc. */
	for_each_intel_crtc(dev, crtc_it) {
		if (!crtc_it->active || crtc_it == crtc)
			continue;

		/* More than one other active CRTC: workaround not needed. */
		if (other_active_crtc)
			return;

		other_active_crtc = crtc_it;
	}
	if (!other_active_crtc)
		return;

	intel_wait_for_vblank(dev, other_active_crtc->pipe);
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
}

/* Full mode-set enable sequence for a Haswell/Broadwell (DDI) CRTC.
 * Mirrors ironlake_crtc_enable() but uses the DDI/LPT paths. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
			   intel_crtc->config.pixel_multiplier - 1);
	}

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		dev_priv->display.fdi_link_train(crtc);
	}

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
intel_crtc_load_lut(crtc); 4243 4244 intel_ddi_set_pipe_settings(crtc); 4245 intel_ddi_enable_transcoder_func(crtc); 4246 4247 intel_update_watermarks(crtc); 4248 intel_enable_pipe(intel_crtc); 4249 4250 if (intel_crtc->config.has_pch_encoder) 4251 lpt_pch_enable(crtc); 4252 4253 for_each_encoder_on_crtc(dev, crtc, encoder) { 4254 encoder->enable(encoder); 4255 intel_opregion_notify_encoder(encoder, true); 4256 } 4257 4258 /* If we change the relative order between pipe/planes enabling, we need 4259 * to change the workaround. */ 4260 haswell_mode_set_planes_workaround(intel_crtc); 4261 intel_crtc_enable_planes(crtc); 4262 } 4263 4264 static void ironlake_pfit_disable(struct intel_crtc *crtc) 4265 { 4266 struct drm_device *dev = crtc->base.dev; 4267 struct drm_i915_private *dev_priv = dev->dev_private; 4268 int pipe = crtc->pipe; 4269 4270 /* To avoid upsetting the power well on haswell only disable the pfit if 4271 * it's in use. The hw state code will make sure we get this right. */ 4272 if (crtc->config.pch_pfit.enabled) { 4273 I915_WRITE(PF_CTL(pipe), 0); 4274 I915_WRITE(PF_WIN_POS(pipe), 0); 4275 I915_WRITE(PF_WIN_SZ(pipe), 0); 4276 } 4277 } 4278 4279 static void ironlake_crtc_disable(struct drm_crtc *crtc) 4280 { 4281 struct drm_device *dev = crtc->dev; 4282 struct drm_i915_private *dev_priv = dev->dev_private; 4283 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4284 struct intel_encoder *encoder; 4285 int pipe = intel_crtc->pipe; 4286 u32 reg, temp; 4287 4288 if (!intel_crtc->active) 4289 return; 4290 4291 intel_crtc_disable_planes(crtc); 4292 4293 for_each_encoder_on_crtc(dev, crtc, encoder) 4294 encoder->disable(encoder); 4295 4296 if (intel_crtc->config.has_pch_encoder) 4297 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4298 4299 intel_disable_pipe(intel_crtc); 4300 4301 ironlake_pfit_disable(intel_crtc); 4302 4303 for_each_encoder_on_crtc(dev, crtc, encoder) 4304 if (encoder->post_disable) 4305 encoder->post_disable(encoder); 4306 4307 
	if (intel_crtc->config.has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* HSW/BDW (DDI) crtc disable sequence; mirrors haswell_crtc_enable() in
 * reverse order. */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	if (intel_crtc->config.has_pch_encoder)
		/* LPT PCH is always on transcoder A. */
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(intel_crtc);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

/* ->off() hook for PCH platforms: drop the shared DPLL reference. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}


/* Program the GMCH panel fitter from the precomputed pipe config. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging.
*/
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain that feeds it. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Return the power domain needed by the given encoder's port. */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - treated like the digital port types below */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Build the bitmask of power domains this crtc needs: pipe, transcoder,
 * panel fitter (if used) and every attached encoder's port. */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config.pch_pfit.enabled ||
	    intel_crtc->config.pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

/* Grab/drop the POWER_DOMAIN_INIT reference used during driver init. */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}

/* Re-balance per-crtc power domain references after a modeset. */
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	/* NOTE(review): assumes CCK_FUSE_HPLL_FREQ_MASK restricts the index
	 * to 0..3 so vco_freq[] cannot be overrun — confirm mask width. */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	return vco_freq[hpll_freq] * 1000;
}
/* Re-read the current cdclk and reprogram the gmbus clock divider to match. */
static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
			 dev_priv->vlv_cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	/* Punit voltage request: 2 = highest (320/400MHz), 1 = 266MHz, 0 = rest. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400000) {
		u32 divider, vco;

		vco = valleyview_get_vco(dev_priv);
		divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	vlv_update_cdclk(dev);
}

/* CHV variant: cdclk is changed purely via a punit request. */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	switch (cdclk) {
	case 400000:
		cmd = 3;
		break;
	case 333333:
	case 320000:
		cmd = 2;
		break;
	case 266667:
		cmd = 1;
		break;
	case 200000:
		cmd = 0;
		break;
	default:
		WARN_ON(1);
		return;
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	vlv_update_cdclk(dev);
}

/* Pick the smallest supported cdclk that still covers max_pixclk. */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int vco = valleyview_get_vco(dev_priv);
	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;

	/* FIXME: Punit isn't quite ready yet */
	if (IS_CHERRYVIEW(dev_priv->dev))
		return 400000;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (max_pixclk > freq_320*9/10)
		return 400000;
	else if (max_pixclk > 266667*9/10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->new_enabled)
			max_pixclk = max(max_pixclk,
					 intel_crtc->new_config->adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

/* If the new config needs a different cdclk, mark all active pipes so they
 * get cycled across the cdclk change. */
static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);

	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
	    dev_priv->vlv_cdclk_freq)
		return;

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_intel_crtc(dev, intel_crtc)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}

/* Global-resources hook: reprogram cdclk if needed and rebalance power
 * domains. Called with all affected pipes already disabled. */
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
		if (IS_CHERRYVIEW(dev))
			cherryview_set_cdclk(dev, req_cdclk);
		else
			valleyview_set_cdclk(dev, req_cdclk);
	}

	modeset_update_crtc_power_domains(dev);
}

/* VLV/CHV crtc enable sequence. DSI outputs drive their own PLL, hence the
 * is_dsi special-casing around the PLL programming. */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_prepare_pll(intel_crtc);
		else
			vlv_prepare_pll(intel_crtc);
	}

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc);
		else
			vlv_enable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually.
*/
	i9xx_check_fifo_underruns(dev);
}

/* Program the FP0/FP1 PLL divisor registers from the precomputed state. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
}

/* Gen2-gen4 (GMCH) crtc enable sequence. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);

	/* Underruns don't raise interrupts, so check manually.
	 */
	i9xx_check_fifo_underruns(dev);
}

/* Turn off the GMCH panel fitter if it was programmed. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/* Gen2-gen4 (GMCH) crtc disable sequence. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
*/
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* ->off() hook for GMCH platforms: nothing to release. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}

/* Mirror the crtc's enabled mode size into the legacy SAREA (UMS-era
 * shared-memory state for DRI1 clients). */
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	/* NOTE(review): DragonFly-port hack — master_priv is assigned
	 * dev->dev_private instead of the per-master driver_priv that the
	 * disabled #if 0 block below would fetch; the declared type does
	 * not match what dev_private holds. Verify sarea_priv layout. */
	struct drm_i915_master_private *master_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
#endif
	if (!master_priv->sarea_priv)
		return;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			/* Power domains must be up before the enable hook. */
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
		}
	} else {
		if (intel_crtc->active) {
			dev_priv->display.crtc_disable(crtc);

			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}
}

/**
 * Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	/* The crtc stays on while any attached encoder has active connectors. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);

	intel_crtc_update_sarea(crtc, enable);
}

/* Fully tear a crtc down (hw + sw state) and release its framebuffer. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		i915_gem_track_fb(old_obj, NULL,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
		mutex_unlock(&dev->struct_mutex);
		crtc->primary->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

/* Default encoder destroy hook: clean up the drm encoder and free it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		/* NOTE(review): encoder is dereferenced here (&encoder->base)
		 * before the if (encoder) NULL check below — verify whether
		 * encoder can actually be NULL when hw reports active. */
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");

		if (encoder) {
			WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			WARN(!encoder_enabled, "encoder not enabled\n");
			if (WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			WARN(!crtc->enabled, "crtc not enabled\n");
			WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}

/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states.
*/
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/* Validate the requested FDI lane count against the per-platform lane
 * sharing constraints; returns false when the config cannot work. */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes when pipe C is idle. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C shares FDI link B's lanes with pipe B. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}

#define RETRY 1
/* Compute FDI lane count and m/n values, retrying at reduced bpp when the
 * lane configuration does not fit. Returns 0, RETRY or -EINVAL. */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		/* Drop 2 bits per component and try again, down to 6bpc. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}

/* Decide whether IPS can be used for this pipe config. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
				   hsw_crtc_supports_ips(crtc) &&
				   pipe_config->pipe_bpp <= 24;
}

/* Platform-independent pipe config fixups: clock limits, double wide,
 * width parity, bpp clamping, IPS and FDI computation. */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Still over the (possibly doubled) limit: mode won't work. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/*
	 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
	 * old clock survives for now.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
		pipe_config->shared_dpll = crtc->config.shared_dpll;

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Read the current VLV/CHV display core clock (cdclk) in kHz. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int vco = valleyview_get_vco(dev_priv);
	u32 val;
	int divider;

	/* FIXME: Punit isn't quite ready yet */
	if (IS_CHERRYVIEW(dev))
		return 400000;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}

/* Fixed core display clocks for the older GMCH platforms below. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Pineview: core display clock is selected by GCFGC PCI config bits. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fallthrough - treat unknown values as 133MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}

static int
i915gm_get_display_clock_speed(struct drm_device *dev) 5428 { 5429 u16 gcfgc = 0; 5430 5431 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 5432 5433 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 5434 return 133000; 5435 else { 5436 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 5437 case GC_DISPLAY_CLOCK_333_MHZ: 5438 return 333000; 5439 default: 5440 case GC_DISPLAY_CLOCK_190_200_MHZ: 5441 return 190000; 5442 } 5443 } 5444 } 5445 5446 static int i865_get_display_clock_speed(struct drm_device *dev) 5447 { 5448 return 266000; 5449 } 5450 5451 static int i855_get_display_clock_speed(struct drm_device *dev) 5452 { 5453 u16 hpllcc = 0; 5454 /* Assume that the hardware is in the high speed state. This 5455 * should be the default. 5456 */ 5457 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 5458 case GC_CLOCK_133_200: 5459 case GC_CLOCK_100_200: 5460 return 200000; 5461 case GC_CLOCK_166_250: 5462 return 250000; 5463 case GC_CLOCK_100_133: 5464 return 133000; 5465 } 5466 5467 /* Shouldn't happen */ 5468 return 0; 5469 } 5470 5471 static int i830_get_display_clock_speed(struct drm_device *dev) 5472 { 5473 return 133000; 5474 } 5475 5476 static void 5477 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 5478 { 5479 while (*num > DATA_LINK_M_N_MASK || 5480 *den > DATA_LINK_M_N_MASK) { 5481 *num >>= 1; 5482 *den >>= 1; 5483 } 5484 } 5485 5486 static void compute_m_n(unsigned int m, unsigned int n, 5487 uint32_t *ret_m, uint32_t *ret_n) 5488 { 5489 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 5490 *ret_m = div_u64((uint64_t) m * *ret_n, n); 5491 intel_reduce_m_n_ratio(ret_m, ret_n); 5492 } 5493 5494 void 5495 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 5496 int pixel_clock, int link_clock, 5497 struct intel_link_m_n *m_n) 5498 { 5499 m_n->tu = 64; 5500 5501 compute_m_n(bits_per_pixel * pixel_clock, 5502 link_clock * nlanes * 8, 5503 &m_n->gmch_m, &m_n->gmch_n); 5504 5505 compute_m_n(pixel_clock, link_clock, 5506 &m_n->link_m, &m_n->link_n); 5507 } 5508 
5509 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 5510 { 5511 if (i915.panel_use_ssc >= 0) 5512 return i915.panel_use_ssc != 0; 5513 return dev_priv->vbt.lvds_use_ssc 5514 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5515 } 5516 5517 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 5518 { 5519 struct drm_device *dev = crtc->dev; 5520 struct drm_i915_private *dev_priv = dev->dev_private; 5521 int refclk; 5522 5523 if (IS_VALLEYVIEW(dev)) { 5524 refclk = 100000; 5525 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5526 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 5527 refclk = dev_priv->vbt.lvds_ssc_freq; 5528 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 5529 } else if (!IS_GEN2(dev)) { 5530 refclk = 96000; 5531 } else { 5532 refclk = 48000; 5533 } 5534 5535 return refclk; 5536 } 5537 5538 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 5539 { 5540 return (1 << dpll->n) << 16 | dpll->m2; 5541 } 5542 5543 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 5544 { 5545 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 5546 } 5547 5548 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 5549 intel_clock_t *reduced_clock) 5550 { 5551 struct drm_device *dev = crtc->base.dev; 5552 u32 fp, fp2 = 0; 5553 5554 if (IS_PINEVIEW(dev)) { 5555 fp = pnv_dpll_compute_fp(&crtc->config.dpll); 5556 if (reduced_clock) 5557 fp2 = pnv_dpll_compute_fp(reduced_clock); 5558 } else { 5559 fp = i9xx_dpll_compute_fp(&crtc->config.dpll); 5560 if (reduced_clock) 5561 fp2 = i9xx_dpll_compute_fp(reduced_clock); 5562 } 5563 5564 crtc->config.dpll_hw_state.fp0 = fp; 5565 5566 crtc->lowfreq_avail = false; 5567 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5568 reduced_clock && i915.powersave) { 5569 crtc->config.dpll_hw_state.fp1 = fp2; 5570 crtc->lowfreq_avail = true; 5571 } else { 5572 crtc->config.dpll_hw_state.fp1 = fp; 5573 } 5574 } 5575 5576 static void 
vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 5577 pipe) 5578 { 5579 u32 reg_val; 5580 5581 /* 5582 * PLLB opamp always calibrates to max value of 0x3f, force enable it 5583 * and set it to a reasonable value instead. 5584 */ 5585 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 5586 reg_val &= 0xffffff00; 5587 reg_val |= 0x00000030; 5588 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 5589 5590 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 5591 reg_val &= 0x8cffffff; 5592 reg_val = 0x8c000000; 5593 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 5594 5595 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 5596 reg_val &= 0xffffff00; 5597 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 5598 5599 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 5600 reg_val &= 0x00ffffff; 5601 reg_val |= 0xb0000000; 5602 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 5603 } 5604 5605 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 5606 struct intel_link_m_n *m_n) 5607 { 5608 struct drm_device *dev = crtc->base.dev; 5609 struct drm_i915_private *dev_priv = dev->dev_private; 5610 int pipe = crtc->pipe; 5611 5612 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 5613 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 5614 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 5615 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 5616 } 5617 5618 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 5619 struct intel_link_m_n *m_n, 5620 struct intel_link_m_n *m2_n2) 5621 { 5622 struct drm_device *dev = crtc->base.dev; 5623 struct drm_i915_private *dev_priv = dev->dev_private; 5624 int pipe = crtc->pipe; 5625 enum transcoder transcoder = crtc->config.cpu_transcoder; 5626 5627 if (INTEL_INFO(dev)->gen >= 5) { 5628 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 5629 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 5630 
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 5631 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 5632 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 5633 * for gen < 8) and if DRRS is supported (to make sure the 5634 * registers are not unnecessarily accessed). 5635 */ 5636 if (m2_n2 && INTEL_INFO(dev)->gen < 8 && 5637 crtc->config.has_drrs) { 5638 I915_WRITE(PIPE_DATA_M2(transcoder), 5639 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 5640 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 5641 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 5642 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 5643 } 5644 } else { 5645 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 5646 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 5647 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 5648 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 5649 } 5650 } 5651 5652 void intel_dp_set_m_n(struct intel_crtc *crtc) 5653 { 5654 if (crtc->config.has_pch_encoder) 5655 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); 5656 else 5657 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n, 5658 &crtc->config.dp_m2_n2); 5659 } 5660 5661 static void vlv_update_pll(struct intel_crtc *crtc) 5662 { 5663 u32 dpll, dpll_md; 5664 5665 /* 5666 * Enable DPIO clock input. We should never disable the reference 5667 * clock for pipe B, since VGA hotplug / manual detection depends 5668 * on it. 
5669 */ 5670 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 5671 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 5672 /* We should never disable this, set it here for state tracking */ 5673 if (crtc->pipe == PIPE_B) 5674 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 5675 dpll |= DPLL_VCO_ENABLE; 5676 crtc->config.dpll_hw_state.dpll = dpll; 5677 5678 dpll_md = (crtc->config.pixel_multiplier - 1) 5679 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5680 crtc->config.dpll_hw_state.dpll_md = dpll_md; 5681 } 5682 5683 static void vlv_prepare_pll(struct intel_crtc *crtc) 5684 { 5685 struct drm_device *dev = crtc->base.dev; 5686 struct drm_i915_private *dev_priv = dev->dev_private; 5687 int pipe = crtc->pipe; 5688 u32 mdiv; 5689 u32 bestn, bestm1, bestm2, bestp1, bestp2; 5690 u32 coreclk, reg_val; 5691 5692 mutex_lock(&dev_priv->dpio_lock); 5693 5694 bestn = crtc->config.dpll.n; 5695 bestm1 = crtc->config.dpll.m1; 5696 bestm2 = crtc->config.dpll.m2; 5697 bestp1 = crtc->config.dpll.p1; 5698 bestp2 = crtc->config.dpll.p2; 5699 5700 /* See eDP HDMI DPIO driver vbios notes doc */ 5701 5702 /* PLL B needs special handling */ 5703 if (pipe == PIPE_B) 5704 vlv_pllb_recal_opamp(dev_priv, pipe); 5705 5706 /* Set up Tx target for periodic Rcomp update */ 5707 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 5708 5709 /* Disable target IRef on PLL */ 5710 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 5711 reg_val &= 0x00ffffff; 5712 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 5713 5714 /* Disable fast lock */ 5715 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 5716 5717 /* Set idtafcrecal before PLL is enabled */ 5718 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 5719 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 5720 mdiv |= ((bestn << DPIO_N_SHIFT)); 5721 mdiv |= (1 << DPIO_K_SHIFT); 5722 5723 /* 5724 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 5725 * but we don't support 
that). 5726 * Note: don't use the DAC post divider as it seems unstable. 5727 */ 5728 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 5729 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 5730 5731 mdiv |= DPIO_ENABLE_CALIBRATION; 5732 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 5733 5734 /* Set HBR and RBR LPF coefficients */ 5735 if (crtc->config.port_clock == 162000 || 5736 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 5737 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 5738 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5739 0x009f0003); 5740 else 5741 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5742 0x00d0000f); 5743 5744 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 5745 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 5746 /* Use SSC source */ 5747 if (pipe == PIPE_A) 5748 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5749 0x0df40000); 5750 else 5751 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5752 0x0df70000); 5753 } else { /* HDMI or VGA */ 5754 /* Use bend source */ 5755 if (pipe == PIPE_A) 5756 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5757 0x0df70000); 5758 else 5759 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 5760 0x0df40000); 5761 } 5762 5763 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 5764 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 5765 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 5766 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 5767 coreclk |= 0x01000000; 5768 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 5769 5770 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 5771 mutex_unlock(&dev_priv->dpio_lock); 5772 } 5773 5774 static void chv_update_pll(struct intel_crtc *crtc) 5775 { 5776 crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | 5777 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | 5778 DPLL_VCO_ENABLE; 5779 if (crtc->pipe != PIPE_A) 5780 
crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 5781 5782 crtc->config.dpll_hw_state.dpll_md = 5783 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5784 } 5785 5786 static void chv_prepare_pll(struct intel_crtc *crtc) 5787 { 5788 struct drm_device *dev = crtc->base.dev; 5789 struct drm_i915_private *dev_priv = dev->dev_private; 5790 int pipe = crtc->pipe; 5791 int dpll_reg = DPLL(crtc->pipe); 5792 enum dpio_channel port = vlv_pipe_to_channel(pipe); 5793 u32 loopfilter, intcoeff; 5794 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 5795 int refclk; 5796 5797 bestn = crtc->config.dpll.n; 5798 bestm2_frac = crtc->config.dpll.m2 & 0x3fffff; 5799 bestm1 = crtc->config.dpll.m1; 5800 bestm2 = crtc->config.dpll.m2 >> 22; 5801 bestp1 = crtc->config.dpll.p1; 5802 bestp2 = crtc->config.dpll.p2; 5803 5804 /* 5805 * Enable Refclk and SSC 5806 */ 5807 I915_WRITE(dpll_reg, 5808 crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 5809 5810 mutex_lock(&dev_priv->dpio_lock); 5811 5812 /* p1 and p2 divider */ 5813 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 5814 5 << DPIO_CHV_S1_DIV_SHIFT | 5815 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 5816 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 5817 1 << DPIO_CHV_K_DIV_SHIFT); 5818 5819 /* Feedback post-divider - m2 */ 5820 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 5821 5822 /* Feedback refclk divider - n and m1 */ 5823 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 5824 DPIO_CHV_M1_DIV_BY_2 | 5825 1 << DPIO_CHV_N_DIV_SHIFT); 5826 5827 /* M2 fraction division */ 5828 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 5829 5830 /* M2 fraction division enable */ 5831 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), 5832 DPIO_CHV_FRAC_DIV_EN | 5833 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); 5834 5835 /* Loop filter */ 5836 refclk = i9xx_get_refclk(&crtc->base, 0); 5837 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | 5838 2 << DPIO_CHV_GAIN_CTRL_SHIFT; 5839 if (refclk == 100000) 5840 
intcoeff = 11; 5841 else if (refclk == 38400) 5842 intcoeff = 10; 5843 else 5844 intcoeff = 9; 5845 loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT; 5846 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 5847 5848 /* AFC Recal */ 5849 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 5850 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 5851 DPIO_AFC_RECAL); 5852 5853 mutex_unlock(&dev_priv->dpio_lock); 5854 } 5855 5856 static void i9xx_update_pll(struct intel_crtc *crtc, 5857 intel_clock_t *reduced_clock, 5858 int num_connectors) 5859 { 5860 struct drm_device *dev = crtc->base.dev; 5861 struct drm_i915_private *dev_priv = dev->dev_private; 5862 u32 dpll; 5863 bool is_sdvo; 5864 struct dpll *clock = &crtc->config.dpll; 5865 5866 i9xx_update_pll_dividers(crtc, reduced_clock); 5867 5868 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || 5869 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); 5870 5871 dpll = DPLL_VGA_MODE_DIS; 5872 5873 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) 5874 dpll |= DPLLB_MODE_LVDS; 5875 else 5876 dpll |= DPLLB_MODE_DAC_SERIAL; 5877 5878 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 5879 dpll |= (crtc->config.pixel_multiplier - 1) 5880 << SDVO_MULTIPLIER_SHIFT_HIRES; 5881 } 5882 5883 if (is_sdvo) 5884 dpll |= DPLL_SDVO_HIGH_SPEED; 5885 5886 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 5887 dpll |= DPLL_SDVO_HIGH_SPEED; 5888 5889 /* compute bitmask from p1 value */ 5890 if (IS_PINEVIEW(dev)) 5891 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 5892 else { 5893 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5894 if (IS_G4X(dev) && reduced_clock) 5895 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5896 } 5897 switch (clock->p2) { 5898 case 5: 5899 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 5900 break; 5901 case 7: 5902 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 5903 break; 5904 case 10: 5905 dpll |= 
DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 5906 break; 5907 case 14: 5908 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 5909 break; 5910 } 5911 if (INTEL_INFO(dev)->gen >= 4) 5912 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 5913 5914 if (crtc->config.sdvo_tv_clock) 5915 dpll |= PLL_REF_INPUT_TVCLKINBC; 5916 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5917 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 5918 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 5919 else 5920 dpll |= PLL_REF_INPUT_DREFCLK; 5921 5922 dpll |= DPLL_VCO_ENABLE; 5923 crtc->config.dpll_hw_state.dpll = dpll; 5924 5925 if (INTEL_INFO(dev)->gen >= 4) { 5926 u32 dpll_md = (crtc->config.pixel_multiplier - 1) 5927 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5928 crtc->config.dpll_hw_state.dpll_md = dpll_md; 5929 } 5930 } 5931 5932 static void i8xx_update_pll(struct intel_crtc *crtc, 5933 intel_clock_t *reduced_clock, 5934 int num_connectors) 5935 { 5936 struct drm_device *dev = crtc->base.dev; 5937 struct drm_i915_private *dev_priv = dev->dev_private; 5938 u32 dpll; 5939 struct dpll *clock = &crtc->config.dpll; 5940 5941 i9xx_update_pll_dividers(crtc, reduced_clock); 5942 5943 dpll = DPLL_VGA_MODE_DIS; 5944 5945 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { 5946 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5947 } else { 5948 if (clock->p1 == 2) 5949 dpll |= PLL_P1_DIVIDE_BY_TWO; 5950 else 5951 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5952 if (clock->p2 == 4) 5953 dpll |= PLL_P2_DIVIDE_BY_4; 5954 } 5955 5956 if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) 5957 dpll |= DPLL_DVO_2X_MODE; 5958 5959 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5960 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 5961 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 5962 else 5963 dpll |= PLL_REF_INPUT_DREFCLK; 5964 5965 dpll |= DPLL_VCO_ENABLE; 5966 crtc->config.dpll_hw_state.dpll = dpll; 5967 } 5968 5969 static void intel_set_pipe_timings(struct 
intel_crtc *intel_crtc) 5970 { 5971 struct drm_device *dev = intel_crtc->base.dev; 5972 struct drm_i915_private *dev_priv = dev->dev_private; 5973 enum i915_pipe pipe = intel_crtc->pipe; 5974 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 5975 struct drm_display_mode *adjusted_mode = 5976 &intel_crtc->config.adjusted_mode; 5977 uint32_t crtc_vtotal, crtc_vblank_end; 5978 int vsyncshift = 0; 5979 5980 /* We need to be careful not to changed the adjusted mode, for otherwise 5981 * the hw state checker will get angry at the mismatch. */ 5982 crtc_vtotal = adjusted_mode->crtc_vtotal; 5983 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 5984 5985 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5986 /* the chip adds 2 halflines automatically */ 5987 crtc_vtotal -= 1; 5988 crtc_vblank_end -= 1; 5989 5990 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 5991 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 5992 else 5993 vsyncshift = adjusted_mode->crtc_hsync_start - 5994 adjusted_mode->crtc_htotal / 2; 5995 if (vsyncshift < 0) 5996 vsyncshift += adjusted_mode->crtc_htotal; 5997 } 5998 5999 if (INTEL_INFO(dev)->gen > 3) 6000 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 6001 6002 I915_WRITE(HTOTAL(cpu_transcoder), 6003 (adjusted_mode->crtc_hdisplay - 1) | 6004 ((adjusted_mode->crtc_htotal - 1) << 16)); 6005 I915_WRITE(HBLANK(cpu_transcoder), 6006 (adjusted_mode->crtc_hblank_start - 1) | 6007 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 6008 I915_WRITE(HSYNC(cpu_transcoder), 6009 (adjusted_mode->crtc_hsync_start - 1) | 6010 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 6011 6012 I915_WRITE(VTOTAL(cpu_transcoder), 6013 (adjusted_mode->crtc_vdisplay - 1) | 6014 ((crtc_vtotal - 1) << 16)); 6015 I915_WRITE(VBLANK(cpu_transcoder), 6016 (adjusted_mode->crtc_vblank_start - 1) | 6017 ((crtc_vblank_end - 1) << 16)); 6018 I915_WRITE(VSYNC(cpu_transcoder), 6019 (adjusted_mode->crtc_vsync_start - 1) | 6020 
((adjusted_mode->crtc_vsync_end - 1) << 16)); 6021 6022 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 6023 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 6024 * documented on the DDI_FUNC_CTL register description, EDP Input Select 6025 * bits. */ 6026 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 6027 (pipe == PIPE_B || pipe == PIPE_C)) 6028 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 6029 6030 /* pipesrc controls the size that is scaled from, which should 6031 * always be the user's requested size. 6032 */ 6033 I915_WRITE(PIPESRC(pipe), 6034 ((intel_crtc->config.pipe_src_w - 1) << 16) | 6035 (intel_crtc->config.pipe_src_h - 1)); 6036 } 6037 6038 static void intel_get_pipe_timings(struct intel_crtc *crtc, 6039 struct intel_crtc_config *pipe_config) 6040 { 6041 struct drm_device *dev = crtc->base.dev; 6042 struct drm_i915_private *dev_priv = dev->dev_private; 6043 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6044 uint32_t tmp; 6045 6046 tmp = I915_READ(HTOTAL(cpu_transcoder)); 6047 pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 6048 pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 6049 tmp = I915_READ(HBLANK(cpu_transcoder)); 6050 pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 6051 pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 6052 tmp = I915_READ(HSYNC(cpu_transcoder)); 6053 pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 6054 pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 6055 6056 tmp = I915_READ(VTOTAL(cpu_transcoder)); 6057 pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 6058 pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 6059 tmp = I915_READ(VBLANK(cpu_transcoder)); 6060 pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 6061 pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 
1; 6062 tmp = I915_READ(VSYNC(cpu_transcoder)); 6063 pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 6064 pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 6065 6066 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 6067 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 6068 pipe_config->adjusted_mode.crtc_vtotal += 1; 6069 pipe_config->adjusted_mode.crtc_vblank_end += 1; 6070 } 6071 6072 tmp = I915_READ(PIPESRC(crtc->pipe)); 6073 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 6074 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 6075 6076 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h; 6077 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; 6078 } 6079 6080 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 6081 struct intel_crtc_config *pipe_config) 6082 { 6083 mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; 6084 mode->htotal = pipe_config->adjusted_mode.crtc_htotal; 6085 mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; 6086 mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; 6087 6088 mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; 6089 mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal; 6090 mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; 6091 mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; 6092 6093 mode->flags = pipe_config->adjusted_mode.flags; 6094 6095 mode->clock = pipe_config->adjusted_mode.crtc_clock; 6096 mode->flags |= pipe_config->adjusted_mode.flags; 6097 } 6098 6099 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 6100 { 6101 struct drm_device *dev = intel_crtc->base.dev; 6102 struct drm_i915_private *dev_priv = dev->dev_private; 6103 uint32_t pipeconf; 6104 6105 pipeconf = 0; 6106 6107 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 6108 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 6109 
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 6110 6111 if (intel_crtc->config.double_wide) 6112 pipeconf |= PIPECONF_DOUBLE_WIDE; 6113 6114 /* only g4x and later have fancy bpc/dither controls */ 6115 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 6116 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 6117 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30) 6118 pipeconf |= PIPECONF_DITHER_EN | 6119 PIPECONF_DITHER_TYPE_SP; 6120 6121 switch (intel_crtc->config.pipe_bpp) { 6122 case 18: 6123 pipeconf |= PIPECONF_6BPC; 6124 break; 6125 case 24: 6126 pipeconf |= PIPECONF_8BPC; 6127 break; 6128 case 30: 6129 pipeconf |= PIPECONF_10BPC; 6130 break; 6131 default: 6132 /* Case prevented by intel_choose_pipe_bpp_dither. */ 6133 BUG(); 6134 } 6135 } 6136 6137 if (HAS_PIPE_CXSR(dev)) { 6138 if (intel_crtc->lowfreq_avail) { 6139 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 6140 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 6141 } else { 6142 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 6143 } 6144 } 6145 6146 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 6147 if (INTEL_INFO(dev)->gen < 4 || 6148 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6149 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 6150 else 6151 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 6152 } else 6153 pipeconf |= PIPECONF_PROGRESSIVE; 6154 6155 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) 6156 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 6157 6158 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 6159 POSTING_READ(PIPECONF(intel_crtc->pipe)); 6160 } 6161 6162 static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 6163 int x, int y, 6164 struct drm_framebuffer *fb) 6165 { 6166 struct drm_device *dev = crtc->dev; 6167 struct drm_i915_private *dev_priv = dev->dev_private; 6168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6169 int refclk, num_connectors = 0; 6170 intel_clock_t clock, reduced_clock; 6171 
bool ok, has_reduced_clock = false; 6172 bool is_lvds = false, is_dsi = false; 6173 struct intel_encoder *encoder; 6174 const intel_limit_t *limit; 6175 6176 for_each_encoder_on_crtc(dev, crtc, encoder) { 6177 switch (encoder->type) { 6178 case INTEL_OUTPUT_LVDS: 6179 is_lvds = true; 6180 break; 6181 case INTEL_OUTPUT_DSI: 6182 is_dsi = true; 6183 break; 6184 } 6185 6186 num_connectors++; 6187 } 6188 6189 if (is_dsi) 6190 return 0; 6191 6192 if (!intel_crtc->config.clock_set) { 6193 refclk = i9xx_get_refclk(crtc, num_connectors); 6194 6195 /* 6196 * Returns a set of divisors for the desired target clock with 6197 * the given refclk, or FALSE. The returned values represent 6198 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 6199 * 2) / p1 / p2. 6200 */ 6201 limit = intel_limit(crtc, refclk); 6202 ok = dev_priv->display.find_dpll(limit, crtc, 6203 intel_crtc->config.port_clock, 6204 refclk, NULL, &clock); 6205 if (!ok) { 6206 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 6207 return -EINVAL; 6208 } 6209 6210 if (is_lvds && dev_priv->lvds_downclock_avail) { 6211 /* 6212 * Ensure we match the reduced clock's P to the target 6213 * clock. If the clocks don't match, we can't switch 6214 * the display clock by using the FP0/FP1. In such case 6215 * we will disable the LVDS downclock feature. 6216 */ 6217 has_reduced_clock = 6218 dev_priv->display.find_dpll(limit, crtc, 6219 dev_priv->lvds_downclock, 6220 refclk, &clock, 6221 &reduced_clock); 6222 } 6223 /* Compat-code for transition, will disappear. */ 6224 intel_crtc->config.dpll.n = clock.n; 6225 intel_crtc->config.dpll.m1 = clock.m1; 6226 intel_crtc->config.dpll.m2 = clock.m2; 6227 intel_crtc->config.dpll.p1 = clock.p1; 6228 intel_crtc->config.dpll.p2 = clock.p2; 6229 } 6230 6231 if (IS_GEN2(dev)) { 6232 i8xx_update_pll(intel_crtc, 6233 has_reduced_clock ? 
&reduced_clock : NULL, 6234 num_connectors); 6235 } else if (IS_CHERRYVIEW(dev)) { 6236 chv_update_pll(intel_crtc); 6237 } else if (IS_VALLEYVIEW(dev)) { 6238 vlv_update_pll(intel_crtc); 6239 } else { 6240 i9xx_update_pll(intel_crtc, 6241 has_reduced_clock ? &reduced_clock : NULL, 6242 num_connectors); 6243 } 6244 6245 return 0; 6246 } 6247 6248 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 6249 struct intel_crtc_config *pipe_config) 6250 { 6251 struct drm_device *dev = crtc->base.dev; 6252 struct drm_i915_private *dev_priv = dev->dev_private; 6253 uint32_t tmp; 6254 6255 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 6256 return; 6257 6258 tmp = I915_READ(PFIT_CONTROL); 6259 if (!(tmp & PFIT_ENABLE)) 6260 return; 6261 6262 /* Check whether the pfit is attached to our pipe. */ 6263 if (INTEL_INFO(dev)->gen < 4) { 6264 if (crtc->pipe != PIPE_B) 6265 return; 6266 } else { 6267 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 6268 return; 6269 } 6270 6271 pipe_config->gmch_pfit.control = tmp; 6272 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 6273 if (INTEL_INFO(dev)->gen < 5) 6274 pipe_config->gmch_pfit.lvds_border_bits = 6275 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 6276 } 6277 6278 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 6279 struct intel_crtc_config *pipe_config) 6280 { 6281 struct drm_device *dev = crtc->base.dev; 6282 struct drm_i915_private *dev_priv = dev->dev_private; 6283 int pipe = pipe_config->cpu_transcoder; 6284 intel_clock_t clock; 6285 u32 mdiv; 6286 int refclk = 100000; 6287 6288 /* In case of MIPI DPLL will not even be used */ 6289 if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) 6290 return; 6291 6292 mutex_lock(&dev_priv->dpio_lock); 6293 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 6294 mutex_unlock(&dev_priv->dpio_lock); 6295 6296 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 6297 clock.m2 = mdiv & DPIO_M2DIV_MASK; 6298 clock.n = (mdiv >> DPIO_N_SHIFT) 
& 0xf; 6299 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 6300 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 6301 6302 vlv_clock(refclk, &clock); 6303 6304 /* clock.dot is the fast clock */ 6305 pipe_config->port_clock = clock.dot / 5; 6306 } 6307 6308 static void i9xx_get_plane_config(struct intel_crtc *crtc, 6309 struct intel_plane_config *plane_config) 6310 { 6311 struct drm_device *dev = crtc->base.dev; 6312 struct drm_i915_private *dev_priv = dev->dev_private; 6313 u32 val, base, offset; 6314 int pipe = crtc->pipe, plane = crtc->plane; 6315 int fourcc, pixel_format; 6316 int aligned_height; 6317 6318 crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL); 6319 if (!crtc->base.primary->fb) { 6320 DRM_DEBUG_KMS("failed to alloc fb\n"); 6321 return; 6322 } 6323 6324 val = I915_READ(DSPCNTR(plane)); 6325 6326 if (INTEL_INFO(dev)->gen >= 4) 6327 if (val & DISPPLANE_TILED) 6328 plane_config->tiled = true; 6329 6330 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 6331 fourcc = intel_format_to_fourcc(pixel_format); 6332 crtc->base.primary->fb->pixel_format = fourcc; 6333 crtc->base.primary->fb->bits_per_pixel = 6334 drm_format_plane_cpp(fourcc, 0) * 8; 6335 6336 if (INTEL_INFO(dev)->gen >= 4) { 6337 if (plane_config->tiled) 6338 offset = I915_READ(DSPTILEOFF(plane)); 6339 else 6340 offset = I915_READ(DSPLINOFF(plane)); 6341 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 6342 } else { 6343 base = I915_READ(DSPADDR(plane)); 6344 } 6345 plane_config->base = base; 6346 6347 val = I915_READ(PIPESRC(pipe)); 6348 crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1; 6349 crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; 6350 6351 val = I915_READ(DSPSTRIDE(pipe)); 6352 crtc->base.primary->fb->pitches[0] = val & 0xffffffc0; 6353 6354 aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, 6355 plane_config->tiled); 6356 6357 plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * 6358 aligned_height); 6359 6360 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 6361 pipe, plane, crtc->base.primary->fb->width, 6362 crtc->base.primary->fb->height, 6363 crtc->base.primary->fb->bits_per_pixel, base, 6364 crtc->base.primary->fb->pitches[0], 6365 plane_config->size); 6366 6367 } 6368 6369 static void chv_crtc_clock_get(struct intel_crtc *crtc, 6370 struct intel_crtc_config *pipe_config) 6371 { 6372 struct drm_device *dev = crtc->base.dev; 6373 struct drm_i915_private *dev_priv = dev->dev_private; 6374 int pipe = pipe_config->cpu_transcoder; 6375 enum dpio_channel port = vlv_pipe_to_channel(pipe); 6376 intel_clock_t clock; 6377 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2; 6378 int refclk = 100000; 6379 6380 mutex_lock(&dev_priv->dpio_lock); 6381 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 6382 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 6383 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 6384 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 6385 mutex_unlock(&dev_priv->dpio_lock); 6386 6387 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 6388 clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff); 6389 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 6390 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 6391 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 6392 6393 chv_clock(refclk, &clock); 6394 6395 /* clock.dot is the fast clock */ 6396 pipe_config->port_clock = clock.dot / 5; 6397 } 6398 6399 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 6400 struct intel_crtc_config *pipe_config) 6401 { 6402 struct drm_device *dev = crtc->base.dev; 6403 struct drm_i915_private *dev_priv = dev->dev_private; 6404 uint32_t tmp; 6405 6406 if (!intel_display_power_enabled(dev_priv, 6407 POWER_DOMAIN_PIPE(crtc->pipe))) 6408 return false; 6409 6410 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 6411 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 6412 6413 tmp = I915_READ(PIPECONF(crtc->pipe)); 6414 if (!(tmp & PIPECONF_ENABLE)) 6415 return false; 6416 6417 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 6418 switch (tmp & PIPECONF_BPC_MASK) { 6419 case PIPECONF_6BPC: 6420 pipe_config->pipe_bpp = 18; 6421 break; 6422 case PIPECONF_8BPC: 6423 pipe_config->pipe_bpp = 24; 6424 break; 6425 case PIPECONF_10BPC: 6426 pipe_config->pipe_bpp = 30; 6427 break; 6428 default: 6429 break; 6430 } 6431 } 6432 6433 if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT)) 6434 pipe_config->limited_color_range = true; 6435 6436 if (INTEL_INFO(dev)->gen < 4) 6437 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 6438 6439 intel_get_pipe_timings(crtc, pipe_config); 6440 6441 i9xx_get_pfit_config(crtc, pipe_config); 6442 6443 if (INTEL_INFO(dev)->gen >= 4) { 6444 tmp = I915_READ(DPLL_MD(crtc->pipe)); 6445 pipe_config->pixel_multiplier = 6446 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 6447 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 6448 pipe_config->dpll_hw_state.dpll_md = tmp; 6449 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 6450 tmp = I915_READ(DPLL(crtc->pipe)); 6451 
pipe_config->pixel_multiplier = 6452 ((tmp & SDVO_MULTIPLIER_MASK) 6453 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 6454 } else { 6455 /* Note that on i915G/GM the pixel multiplier is in the sdvo 6456 * port and will be fixed up in the encoder->get_config 6457 * function. */ 6458 pipe_config->pixel_multiplier = 1; 6459 } 6460 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 6461 if (!IS_VALLEYVIEW(dev)) { 6462 /* 6463 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 6464 * on 830. Filter it out here so that we don't 6465 * report errors due to that. 6466 */ 6467 if (IS_I830(dev)) 6468 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 6469 6470 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 6471 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 6472 } else { 6473 /* Mask out read-only status bits. */ 6474 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 6475 DPLL_PORTC_READY_MASK | 6476 DPLL_PORTB_READY_MASK); 6477 } 6478 6479 if (IS_CHERRYVIEW(dev)) 6480 chv_crtc_clock_get(crtc, pipe_config); 6481 else if (IS_VALLEYVIEW(dev)) 6482 vlv_crtc_clock_get(crtc, pipe_config); 6483 else 6484 i9xx_crtc_clock_get(crtc, pipe_config); 6485 6486 return true; 6487 } 6488 6489 static void ironlake_init_pch_refclk(struct drm_device *dev) 6490 { 6491 struct drm_i915_private *dev_priv = dev->dev_private; 6492 struct intel_encoder *encoder; 6493 u32 val, final; 6494 bool has_lvds = false; 6495 bool has_cpu_edp = false; 6496 bool has_panel = false; 6497 bool has_ck505 = false; 6498 bool can_ssc = false; 6499 6500 /* We need to take the global config into account */ 6501 for_each_intel_encoder(dev, encoder) { 6502 switch (encoder->type) { 6503 case INTEL_OUTPUT_LVDS: 6504 has_panel = true; 6505 has_lvds = true; 6506 break; 6507 case INTEL_OUTPUT_EDP: 6508 has_panel = true; 6509 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 6510 has_cpu_edp = true; 6511 break; 6512 } 6513 } 6514 6515 if (HAS_PCH_IBX(dev)) { 6516 has_ck505 = 
dev_priv->vbt.display_clock_mode; 6517 can_ssc = has_ck505; 6518 } else { 6519 has_ck505 = false; 6520 can_ssc = true; 6521 } 6522 6523 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 6524 has_panel, has_lvds, has_ck505); 6525 6526 /* Ironlake: try to setup display ref clock before DPLL 6527 * enabling. This is only under driver's control after 6528 * PCH B stepping, previous chipset stepping should be 6529 * ignoring this setting. 6530 */ 6531 val = I915_READ(PCH_DREF_CONTROL); 6532 6533 /* As we must carefully and slowly disable/enable each source in turn, 6534 * compute the final state we want first and check if we need to 6535 * make any changes at all. 6536 */ 6537 final = val; 6538 final &= ~DREF_NONSPREAD_SOURCE_MASK; 6539 if (has_ck505) 6540 final |= DREF_NONSPREAD_CK505_ENABLE; 6541 else 6542 final |= DREF_NONSPREAD_SOURCE_ENABLE; 6543 6544 final &= ~DREF_SSC_SOURCE_MASK; 6545 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 6546 final &= ~DREF_SSC1_ENABLE; 6547 6548 if (has_panel) { 6549 final |= DREF_SSC_SOURCE_ENABLE; 6550 6551 if (intel_panel_use_ssc(dev_priv) && can_ssc) 6552 final |= DREF_SSC1_ENABLE; 6553 6554 if (has_cpu_edp) { 6555 if (intel_panel_use_ssc(dev_priv) && can_ssc) 6556 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 6557 else 6558 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 6559 } else 6560 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 6561 } else { 6562 final |= DREF_SSC_SOURCE_DISABLE; 6563 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 6564 } 6565 6566 if (final == val) 6567 return; 6568 6569 /* Always enable nonspread source */ 6570 val &= ~DREF_NONSPREAD_SOURCE_MASK; 6571 6572 if (has_ck505) 6573 val |= DREF_NONSPREAD_CK505_ENABLE; 6574 else 6575 val |= DREF_NONSPREAD_SOURCE_ENABLE; 6576 6577 if (has_panel) { 6578 val &= ~DREF_SSC_SOURCE_MASK; 6579 val |= DREF_SSC_SOURCE_ENABLE; 6580 6581 /* SSC must be turned on before enabling the CPU output */ 6582 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 6583 DRM_DEBUG_KMS("Using SSC on panel\n"); 6584 
val |= DREF_SSC1_ENABLE; 6585 } else 6586 val &= ~DREF_SSC1_ENABLE; 6587 6588 /* Get SSC going before enabling the outputs */ 6589 I915_WRITE(PCH_DREF_CONTROL, val); 6590 POSTING_READ(PCH_DREF_CONTROL); 6591 udelay(200); 6592 6593 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 6594 6595 /* Enable CPU source on CPU attached eDP */ 6596 if (has_cpu_edp) { 6597 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 6598 DRM_DEBUG_KMS("Using SSC on eDP\n"); 6599 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 6600 } else 6601 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 6602 } else 6603 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 6604 6605 I915_WRITE(PCH_DREF_CONTROL, val); 6606 POSTING_READ(PCH_DREF_CONTROL); 6607 udelay(200); 6608 } else { 6609 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 6610 6611 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 6612 6613 /* Turn off CPU output */ 6614 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 6615 6616 I915_WRITE(PCH_DREF_CONTROL, val); 6617 POSTING_READ(PCH_DREF_CONTROL); 6618 udelay(200); 6619 6620 /* Turn off the SSC source */ 6621 val &= ~DREF_SSC_SOURCE_MASK; 6622 val |= DREF_SSC_SOURCE_DISABLE; 6623 6624 /* Turn off SSC1 */ 6625 val &= ~DREF_SSC1_ENABLE; 6626 6627 I915_WRITE(PCH_DREF_CONTROL, val); 6628 POSTING_READ(PCH_DREF_CONTROL); 6629 udelay(200); 6630 } 6631 6632 BUG_ON(val != final); 6633 } 6634 6635 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 6636 { 6637 uint32_t tmp; 6638 6639 tmp = I915_READ(SOUTH_CHICKEN2); 6640 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 6641 I915_WRITE(SOUTH_CHICKEN2, tmp); 6642 6643 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 6644 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 6645 DRM_ERROR("FDI mPHY reset assert timeout\n"); 6646 6647 tmp = I915_READ(SOUTH_CHICKEN2); 6648 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 6649 I915_WRITE(SOUTH_CHICKEN2, tmp); 6650 6651 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 6652 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 6653 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 6654 } 6655 6656 /* 
WaMPhyProgramming:hsw */ 6657 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 6658 { 6659 uint32_t tmp; 6660 6661 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 6662 tmp &= ~(0xFF << 24); 6663 tmp |= (0x12 << 24); 6664 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 6665 6666 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 6667 tmp |= (1 << 11); 6668 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 6669 6670 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 6671 tmp |= (1 << 11); 6672 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 6673 6674 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 6675 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 6676 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 6677 6678 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 6679 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 6680 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 6681 6682 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 6683 tmp &= ~(7 << 13); 6684 tmp |= (5 << 13); 6685 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 6686 6687 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 6688 tmp &= ~(7 << 13); 6689 tmp |= (5 << 13); 6690 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 6691 6692 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 6693 tmp &= ~0xFF; 6694 tmp |= 0x1C; 6695 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 6696 6697 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 6698 tmp &= ~0xFF; 6699 tmp |= 0x1C; 6700 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 6701 6702 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 6703 tmp &= ~(0xFF << 16); 6704 tmp |= (0x1C << 16); 6705 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 6706 6707 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 6708 tmp &= ~(0xFF << 16); 6709 tmp |= (0x1C << 16); 6710 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 6711 6712 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 6713 tmp |= (1 << 27); 6714 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 6715 
6716 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 6717 tmp |= (1 << 27); 6718 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 6719 6720 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 6721 tmp &= ~(0xF << 28); 6722 tmp |= (4 << 28); 6723 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 6724 6725 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 6726 tmp &= ~(0xF << 28); 6727 tmp |= (4 << 28); 6728 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 6729 } 6730 6731 /* Implements 3 different sequences from BSpec chapter "Display iCLK 6732 * Programming" based on the parameters passed: 6733 * - Sequence to enable CLKOUT_DP 6734 * - Sequence to enable CLKOUT_DP without spread 6735 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 6736 */ 6737 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, 6738 bool with_fdi) 6739 { 6740 struct drm_i915_private *dev_priv = dev->dev_private; 6741 uint32_t reg, tmp; 6742 6743 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 6744 with_spread = true; 6745 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && 6746 with_fdi, "LP PCH doesn't have FDI\n")) 6747 with_fdi = false; 6748 6749 mutex_lock(&dev_priv->dpio_lock); 6750 6751 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 6752 tmp &= ~SBI_SSCCTL_DISABLE; 6753 tmp |= SBI_SSCCTL_PATHALT; 6754 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 6755 6756 udelay(24); 6757 6758 if (with_spread) { 6759 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 6760 tmp &= ~SBI_SSCCTL_PATHALT; 6761 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 6762 6763 if (with_fdi) { 6764 lpt_reset_fdi_mphy(dev_priv); 6765 lpt_program_fdi_mphy(dev_priv); 6766 } 6767 } 6768 6769 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 
6770 SBI_GEN0 : SBI_DBUFF0; 6771 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 6772 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 6773 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 6774 6775 mutex_unlock(&dev_priv->dpio_lock); 6776 } 6777 6778 /* Sequence to disable CLKOUT_DP */ 6779 static void lpt_disable_clkout_dp(struct drm_device *dev) 6780 { 6781 struct drm_i915_private *dev_priv = dev->dev_private; 6782 uint32_t reg, tmp; 6783 6784 mutex_lock(&dev_priv->dpio_lock); 6785 6786 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 6787 SBI_GEN0 : SBI_DBUFF0; 6788 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 6789 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 6790 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 6791 6792 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 6793 if (!(tmp & SBI_SSCCTL_DISABLE)) { 6794 if (!(tmp & SBI_SSCCTL_PATHALT)) { 6795 tmp |= SBI_SSCCTL_PATHALT; 6796 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 6797 udelay(32); 6798 } 6799 tmp |= SBI_SSCCTL_DISABLE; 6800 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 6801 } 6802 6803 mutex_unlock(&dev_priv->dpio_lock); 6804 } 6805 6806 static void lpt_init_pch_refclk(struct drm_device *dev) 6807 { 6808 struct intel_encoder *encoder; 6809 bool has_vga = false; 6810 6811 for_each_intel_encoder(dev, encoder) { 6812 switch (encoder->type) { 6813 case INTEL_OUTPUT_ANALOG: 6814 has_vga = true; 6815 break; 6816 } 6817 } 6818 6819 if (has_vga) 6820 lpt_enable_clkout_dp(dev, true, true); 6821 else 6822 lpt_disable_clkout_dp(dev); 6823 } 6824 6825 /* 6826 * Initialize reference clocks when the driver loads 6827 */ 6828 void intel_init_pch_refclk(struct drm_device *dev) 6829 { 6830 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 6831 ironlake_init_pch_refclk(dev); 6832 else if (HAS_PCH_LPT(dev)) 6833 lpt_init_pch_refclk(dev); 6834 } 6835 6836 static int ironlake_get_refclk(struct drm_crtc *crtc) 6837 { 6838 struct drm_device *dev = crtc->dev; 6839 struct drm_i915_private *dev_priv = 
dev->dev_private; 6840 struct intel_encoder *encoder; 6841 int num_connectors = 0; 6842 bool is_lvds = false; 6843 6844 for_each_encoder_on_crtc(dev, crtc, encoder) { 6845 switch (encoder->type) { 6846 case INTEL_OUTPUT_LVDS: 6847 is_lvds = true; 6848 break; 6849 } 6850 num_connectors++; 6851 } 6852 6853 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 6854 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 6855 dev_priv->vbt.lvds_ssc_freq); 6856 return dev_priv->vbt.lvds_ssc_freq; 6857 } 6858 6859 return 120000; 6860 } 6861 6862 static void ironlake_set_pipeconf(struct drm_crtc *crtc) 6863 { 6864 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 6865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6866 int pipe = intel_crtc->pipe; 6867 uint32_t val; 6868 6869 val = 0; 6870 6871 switch (intel_crtc->config.pipe_bpp) { 6872 case 18: 6873 val |= PIPECONF_6BPC; 6874 break; 6875 case 24: 6876 val |= PIPECONF_8BPC; 6877 break; 6878 case 30: 6879 val |= PIPECONF_10BPC; 6880 break; 6881 case 36: 6882 val |= PIPECONF_12BPC; 6883 break; 6884 default: 6885 /* Case prevented by intel_choose_pipe_bpp_dither. */ 6886 BUG(); 6887 } 6888 6889 if (intel_crtc->config.dither) 6890 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 6891 6892 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 6893 val |= PIPECONF_INTERLACED_ILK; 6894 else 6895 val |= PIPECONF_PROGRESSIVE; 6896 6897 if (intel_crtc->config.limited_color_range) 6898 val |= PIPECONF_COLOR_RANGE_SELECT; 6899 6900 I915_WRITE(PIPECONF(pipe), val); 6901 POSTING_READ(PIPECONF(pipe)); 6902 } 6903 6904 /* 6905 * Set up the pipe CSC unit. 6906 * 6907 * Currently only full range RGB to limited range RGB conversion 6908 * is supported, but eventually this should handle various 6909 * RGB<->YCbCr scenarios as well. 
6910 */ 6911 static void intel_set_pipe_csc(struct drm_crtc *crtc) 6912 { 6913 struct drm_device *dev = crtc->dev; 6914 struct drm_i915_private *dev_priv = dev->dev_private; 6915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6916 int pipe = intel_crtc->pipe; 6917 uint16_t coeff = 0x7800; /* 1.0 */ 6918 6919 /* 6920 * TODO: Check what kind of values actually come out of the pipe 6921 * with these coeff/postoff values and adjust to get the best 6922 * accuracy. Perhaps we even need to take the bpc value into 6923 * consideration. 6924 */ 6925 6926 if (intel_crtc->config.limited_color_range) 6927 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ 6928 6929 /* 6930 * GY/GU and RY/RU should be the other way around according 6931 * to BSpec, but reality doesn't agree. Just set them up in 6932 * a way that results in the correct picture. 6933 */ 6934 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); 6935 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); 6936 6937 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); 6938 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); 6939 6940 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); 6941 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); 6942 6943 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); 6944 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); 6945 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); 6946 6947 if (INTEL_INFO(dev)->gen > 6) { 6948 uint16_t postoff = 0; 6949 6950 if (intel_crtc->config.limited_color_range) 6951 postoff = (16 * (1 << 12) / 255) & 0x1fff; 6952 6953 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 6954 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 6955 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); 6956 6957 I915_WRITE(PIPE_CSC_MODE(pipe), 0); 6958 } else { 6959 uint32_t mode = CSC_MODE_YUV_TO_RGB; 6960 6961 if (intel_crtc->config.limited_color_range) 6962 mode |= CSC_BLACK_SCREEN_OFFSET; 6963 6964 I915_WRITE(PIPE_CSC_MODE(pipe), mode); 6965 } 6966 } 6967 6968 static void haswell_set_pipeconf(struct drm_crtc *crtc) 6969 { 6970 
struct drm_device *dev = crtc->dev; 6971 struct drm_i915_private *dev_priv = dev->dev_private; 6972 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6973 enum i915_pipe pipe = intel_crtc->pipe; 6974 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 6975 uint32_t val; 6976 6977 val = 0; 6978 6979 if (IS_HASWELL(dev) && intel_crtc->config.dither) 6980 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 6981 6982 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 6983 val |= PIPECONF_INTERLACED_ILK; 6984 else 6985 val |= PIPECONF_PROGRESSIVE; 6986 6987 I915_WRITE(PIPECONF(cpu_transcoder), val); 6988 POSTING_READ(PIPECONF(cpu_transcoder)); 6989 6990 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 6991 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 6992 6993 if (IS_BROADWELL(dev)) { 6994 val = 0; 6995 6996 switch (intel_crtc->config.pipe_bpp) { 6997 case 18: 6998 val |= PIPEMISC_DITHER_6_BPC; 6999 break; 7000 case 24: 7001 val |= PIPEMISC_DITHER_8_BPC; 7002 break; 7003 case 30: 7004 val |= PIPEMISC_DITHER_10_BPC; 7005 break; 7006 case 36: 7007 val |= PIPEMISC_DITHER_12_BPC; 7008 break; 7009 default: 7010 /* Case prevented by pipe_config_set_bpp. 
*/ 7011 BUG(); 7012 } 7013 7014 if (intel_crtc->config.dither) 7015 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 7016 7017 I915_WRITE(PIPEMISC(pipe), val); 7018 } 7019 } 7020 7021 static bool ironlake_compute_clocks(struct drm_crtc *crtc, 7022 intel_clock_t *clock, 7023 bool *has_reduced_clock, 7024 intel_clock_t *reduced_clock) 7025 { 7026 struct drm_device *dev = crtc->dev; 7027 struct drm_i915_private *dev_priv = dev->dev_private; 7028 struct intel_encoder *intel_encoder; 7029 int refclk; 7030 const intel_limit_t *limit; 7031 bool ret, is_lvds = false; 7032 7033 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7034 switch (intel_encoder->type) { 7035 case INTEL_OUTPUT_LVDS: 7036 is_lvds = true; 7037 break; 7038 } 7039 } 7040 7041 refclk = ironlake_get_refclk(crtc); 7042 7043 /* 7044 * Returns a set of divisors for the desired target clock with the given 7045 * refclk, or FALSE. The returned values represent the clock equation: 7046 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 7047 */ 7048 limit = intel_limit(crtc, refclk); 7049 ret = dev_priv->display.find_dpll(limit, crtc, 7050 to_intel_crtc(crtc)->config.port_clock, 7051 refclk, NULL, clock); 7052 if (!ret) 7053 return false; 7054 7055 if (is_lvds && dev_priv->lvds_downclock_avail) { 7056 /* 7057 * Ensure we match the reduced clock's P to the target clock. 7058 * If the clocks don't match, we can't switch the display clock 7059 * by using the FP0/FP1. In such case we will disable the LVDS 7060 * downclock feature. 7061 */ 7062 *has_reduced_clock = 7063 dev_priv->display.find_dpll(limit, crtc, 7064 dev_priv->lvds_downclock, 7065 refclk, clock, 7066 reduced_clock); 7067 } 7068 7069 return true; 7070 } 7071 7072 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 7073 { 7074 /* 7075 * Account for spread spectrum to avoid 7076 * oversubscribing the link. Max center spread 7077 * is 2.5%; use 5% for safety's sake. 
7078 */ 7079 u32 bps = target_clock * bpp * 21 / 20; 7080 return DIV_ROUND_UP(bps, link_bw * 8); 7081 } 7082 7083 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 7084 { 7085 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 7086 } 7087 7088 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, 7089 u32 *fp, 7090 intel_clock_t *reduced_clock, u32 *fp2) 7091 { 7092 struct drm_crtc *crtc = &intel_crtc->base; 7093 struct drm_device *dev = crtc->dev; 7094 struct drm_i915_private *dev_priv = dev->dev_private; 7095 struct intel_encoder *intel_encoder; 7096 uint32_t dpll; 7097 int factor, num_connectors = 0; 7098 bool is_lvds = false, is_sdvo = false; 7099 7100 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7101 switch (intel_encoder->type) { 7102 case INTEL_OUTPUT_LVDS: 7103 is_lvds = true; 7104 break; 7105 case INTEL_OUTPUT_SDVO: 7106 case INTEL_OUTPUT_HDMI: 7107 is_sdvo = true; 7108 break; 7109 } 7110 7111 num_connectors++; 7112 } 7113 7114 /* Enable autotuning of the PLL clock (if permissible) */ 7115 factor = 21; 7116 if (is_lvds) { 7117 if ((intel_panel_use_ssc(dev_priv) && 7118 dev_priv->vbt.lvds_ssc_freq == 100000) || 7119 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 7120 factor = 25; 7121 } else if (intel_crtc->config.sdvo_tv_clock) 7122 factor = 20; 7123 7124 if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor)) 7125 *fp |= FP_CB_TUNE; 7126 7127 if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) 7128 *fp2 |= FP_CB_TUNE; 7129 7130 dpll = 0; 7131 7132 if (is_lvds) 7133 dpll |= DPLLB_MODE_LVDS; 7134 else 7135 dpll |= DPLLB_MODE_DAC_SERIAL; 7136 7137 dpll |= (intel_crtc->config.pixel_multiplier - 1) 7138 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 7139 7140 if (is_sdvo) 7141 dpll |= DPLL_SDVO_HIGH_SPEED; 7142 if (intel_crtc->config.has_dp_encoder) 7143 dpll |= DPLL_SDVO_HIGH_SPEED; 7144 7145 /* compute bitmask from p1 value */ 7146 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << 
DPLL_FPA01_P1_POST_DIV_SHIFT; 7147 /* also FPA1 */ 7148 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 7149 7150 switch (intel_crtc->config.dpll.p2) { 7151 case 5: 7152 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 7153 break; 7154 case 7: 7155 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 7156 break; 7157 case 10: 7158 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 7159 break; 7160 case 14: 7161 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 7162 break; 7163 } 7164 7165 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) 7166 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7167 else 7168 dpll |= PLL_REF_INPUT_DREFCLK; 7169 7170 return dpll | DPLL_VCO_ENABLE; 7171 } 7172 7173 static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 7174 int x, int y, 7175 struct drm_framebuffer *fb) 7176 { 7177 struct drm_device *dev = crtc->dev; 7178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7179 int num_connectors = 0; 7180 intel_clock_t clock, reduced_clock; 7181 u32 dpll = 0, fp = 0, fp2 = 0; 7182 bool ok, has_reduced_clock = false; 7183 bool is_lvds = false; 7184 struct intel_encoder *encoder; 7185 struct intel_shared_dpll *pll; 7186 7187 for_each_encoder_on_crtc(dev, crtc, encoder) { 7188 switch (encoder->type) { 7189 case INTEL_OUTPUT_LVDS: 7190 is_lvds = true; 7191 break; 7192 } 7193 7194 num_connectors++; 7195 } 7196 7197 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 7198 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 7199 7200 ok = ironlake_compute_clocks(crtc, &clock, 7201 &has_reduced_clock, &reduced_clock); 7202 if (!ok && !intel_crtc->config.clock_set) { 7203 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7204 return -EINVAL; 7205 } 7206 /* Compat-code for transition, will disappear. 
*/ 7207 if (!intel_crtc->config.clock_set) { 7208 intel_crtc->config.dpll.n = clock.n; 7209 intel_crtc->config.dpll.m1 = clock.m1; 7210 intel_crtc->config.dpll.m2 = clock.m2; 7211 intel_crtc->config.dpll.p1 = clock.p1; 7212 intel_crtc->config.dpll.p2 = clock.p2; 7213 } 7214 7215 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 7216 if (intel_crtc->config.has_pch_encoder) { 7217 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 7218 if (has_reduced_clock) 7219 fp2 = i9xx_dpll_compute_fp(&reduced_clock); 7220 7221 dpll = ironlake_compute_dpll(intel_crtc, 7222 &fp, &reduced_clock, 7223 has_reduced_clock ? &fp2 : NULL); 7224 7225 intel_crtc->config.dpll_hw_state.dpll = dpll; 7226 intel_crtc->config.dpll_hw_state.fp0 = fp; 7227 if (has_reduced_clock) 7228 intel_crtc->config.dpll_hw_state.fp1 = fp2; 7229 else 7230 intel_crtc->config.dpll_hw_state.fp1 = fp; 7231 7232 pll = intel_get_shared_dpll(intel_crtc); 7233 if (pll == NULL) { 7234 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 7235 pipe_name(intel_crtc->pipe)); 7236 return -EINVAL; 7237 } 7238 } else 7239 intel_put_shared_dpll(intel_crtc); 7240 7241 if (is_lvds && has_reduced_clock && i915.powersave) 7242 intel_crtc->lowfreq_avail = true; 7243 else 7244 intel_crtc->lowfreq_avail = false; 7245 7246 return 0; 7247 } 7248 7249 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 7250 struct intel_link_m_n *m_n) 7251 { 7252 struct drm_device *dev = crtc->base.dev; 7253 struct drm_i915_private *dev_priv = dev->dev_private; 7254 enum i915_pipe pipe = crtc->pipe; 7255 7256 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 7257 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 7258 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 7259 & ~TU_SIZE_MASK; 7260 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 7261 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 7262 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 7263 } 7264 7265 static void intel_cpu_transcoder_get_m_n(struct intel_crtc 
						 *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config.has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5: per-pipe (g4x-style) M/N registers. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * Fill in the DP M/N values for @crtc: from the PCH transcoder when a PCH
 * encoder drives it, otherwise from the CPU transcoder (including the
 * second M2/N2 set used for DRRS).
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_config *pipe_config)
{
	if (crtc->config.has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI M/N configuration for hw state cross-checking. */
static void
ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
			    struct intel_crtc_config *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Read back the PCH panel fitter state (enable bit, window position and
 * size) into @pipe_config.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Reconstruct the primary plane framebuffer configuration (pixel format,
 * size, stride, base address) from the hardware state left behind by the
 * BIOS/firmware, so the boot framebuffer can be taken over.
 */
static void ironlake_get_plane_config(struct intel_crtc *crtc,
				      struct intel_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;

	/* intel_framebuffer embeds drm_framebuffer as its first member. */
	crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
	if (!crtc->base.primary->fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	val = I915_READ(DSPCNTR(plane));

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiled = true;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = intel_format_to_fourcc(pixel_format);
	crtc->base.primary->fb->pixel_format = fourcc;
	crtc->base.primary->fb->bits_per_pixel =
		drm_format_plane_cpp(fourcc,
				     0) * 8;

	base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	/* NOTE(review): offset is read back but never stored into
	 * plane_config — looks intentional here (base suffices); confirm. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(plane));
	} else {
		if (plane_config->tiled)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
	}
	plane_config->base = base;

	/* PIPESRC encodes (width - 1) << 16 | (height - 1). */
	val = I915_READ(PIPESRC(pipe));
	crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);

	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
					aligned_height);

	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe, plane, crtc->base.primary->fb->width,
		      crtc->base.primary->fb->height,
		      crtc->base.primary->fb->bits_per_pixel, base,
		      crtc->base.primary->fb->pitches[0],
		      plane_config->size);
}

/*
 * Read back the full pipe configuration (bpp, color range, PCH/FDI state,
 * shared DPLL, timings, panel fitter) for an Ironlake-style pipe.
 * Returns false if the pipe's power domain is off or the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe -> PCH PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}

/*
 * Sanity check that everything that must be off before LCPLL can be
 * disabled really is off: all pipes, the power well, the auxiliary PLLs,
 * panel power, backlight PWMs, the utility pin, GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE,
	     "WRPLL2 enabled\n");
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* Read D_COMP; it lives at different addresses on Haswell and Broadwell. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/*
 * Write D_COMP.  On Haswell this must go through the pcode mailbox (under
 * the rps hw_lock); Broadwell can write the MMIO register directly.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock onto FCLK before killing LCPLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the compensation block and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up: locked, enabled, on LCPLL, power-down denied. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 *
	 * The other problem is that hsw_restore_lcpll() is called as part of
	 * the runtime PM resume sequence, so we can't just call
	 * gen6_gt_force_wake_get() because that function calls
	 * intel_runtime_pm_get(), and we can't change the runtime PM refcount
	 * while we are on the resume sequence. So to solve this problem we have
	 * to call special forcewake code that doesn't touch runtime PM and
	 * doesn't enable the forcewake delayed work.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable the compensation block before re-locking the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* If the CD clock was parked on FCLK, move it back to LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	/* See the big comment above. */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

/* Global modeset hook: just refresh per-CRTC power domains on SNB. */
static void snb_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

/* Global modeset hook: just refresh per-CRTC power domains on HSW/BDW. */
static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_crtc_power_domains(dev);
}

/*
 * Haswell mode set: all clocking is handled by DDI PLL selection.
 * Returns -EINVAL if no suitable PLL can be selected.
 */
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;

	intel_crtc->lowfreq_avail = false;

	return 0;
}

/* Map the port's PORT_CLK_SEL readback to a shared DPLL id (WRPLL1/2). */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_config *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}
}

/*
 * Read back which DDI port (and hence PLL) drives this pipe and, for port
 * E, whether the single PCH transcoder is in use, filling in the FDI
 * configuration as needed.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read back the full pipe configuration for a Haswell/Broadwell pipe:
 * eDP transcoder routing, DDI port state, timings, panel fitter, IPS and
 * pixel multiplier.  Returns false if the required power domains are off
 * or the pipe/transcoder is disabled.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_enabled(dev_priv,
					 POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* Check whether the eDP transcoder is routed to this pipe. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fallthrough: treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev_priv, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}

/* Pixel clock (kHz) -> AUD_CONFIG_PIXEL_CLOCK_HDMI_* lookup table. */
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250,
	  AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
		if (mode->clock == hdmi_audio_clock[i].clock)
			break;
	}

	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
		/* index 1 is the 25.2 MHz bspec default entry */
		i = 1;
	}

	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
		      hdmi_audio_clock[i].clock,
		      hdmi_audio_clock[i].config);

	return hdmi_audio_clock[i].config;
}

/*
 * Check whether the ELD already programmed into the hardware matches the
 * connector's ELD, so a rewrite can be skipped.  Returns true when the
 * hardware is up to date (or both sides are empty).
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff the valid bit is clear. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	/* Reset the ELD read address before comparing the buffer. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD length in dwords. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

/* Program the connector's ELD on G4X-class (pre-PCH) audio hardware. */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the ELD and reset the write address. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the freshly written ELD valid again. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}

/* Program the ELD and audio configuration on Haswell/Broadwell. */
static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc,
			      struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int pipe = to_intel_crtc(crtc)->pipe;
	int tmp;

	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	POSTING_READ(aud_cntrl_st2);

	assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
	/* clear N_programing_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;	/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);

}

/* Program the ELD on IBX/CPT/VLV-era audio hardware. */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	/* Pick the register block for the PCH/platform variant. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	/* Determine which port the audio is directed to. */
	if (IS_VALLEYVIEW(connector->dev)) {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

/*
 * Write the ELD (EDID-Like Data) of the connector driven by @encoder to
 * the audio hardware via the platform-specific write_eld hook, after
 * patching in the A/V sync delay.
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 connector->encoder->base.id,
			 connector->encoder->name);

	/* ELD byte 6 is the A/V sync delay in units of 2 ms. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc, mode);
}

/* Update the hardware cursor on 845G/865G, which use their own scheme. */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (base) {
		unsigned int width = intel_crtc->cursor_width;
		unsigned int height = intel_crtc->cursor_height;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		/* Only a few power-of-two strides are supported. */
		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(_CURACNTR, 0);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base)
		I915_WRITE(_CURABASE, base);

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}

/* Update the hardware cursor on gen3+ style per-pipe cursor registers. */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only square power-of-two cursor sizes are supported. */
		switch (intel_crtc->cursor_width) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			WARN_ON(1);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */
	}
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang...
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Force the cursor off (base = 0) when it is fully off screen. */
	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		/* Negative coordinates use sign-magnitude encoding. */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do when the cursor stays disabled. */
	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
	intel_crtc->cursor_base = base;
}

/* Validate that the given cursor width/height is supported on @dev. */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height == width only when the cursor is square. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/*
 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
 *
 * Note that the object's reference will be consumed if the update fails. If
 * the update succeeds, the reference of the old object (if any) will be
 * consumed.
 */
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
				     struct drm_i915_gem_object *obj,
				     uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned old_width, stride;
	uint32_t addr;
	int ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev, width, height)) {
		DRM_DEBUG("Cursor dimension not supported\n");
		return -EINVAL;
	}

	stride = roundup_pow_of_two(width) * 4;
	if (obj->base.size < stride * height) {
		DRM_DEBUG_KMS("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical) {
		unsigned alignment;

		if (obj->tiling_mode) {
			DRM_DEBUG_KMS("cursor cannot be tiled\n");
			ret = -EINVAL;
			goto fail_locked;
		}

		/* Note that the w/a also requires 2 PTE of padding following
		 * the bo.
We currently fill all unused PTE with the shadow 8422 * page and so we should always have valid PTE following the 8423 * cursor preventing the VT-d warning. 8424 */ 8425 alignment = 0; 8426 if (need_vtd_wa(dev)) 8427 alignment = 64*1024; 8428 8429 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); 8430 if (ret) { 8431 DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); 8432 goto fail_locked; 8433 } 8434 8435 ret = i915_gem_object_put_fence(obj); 8436 if (ret) { 8437 DRM_DEBUG_KMS("failed to release fence for cursor"); 8438 goto fail_unpin; 8439 } 8440 8441 addr = i915_gem_obj_ggtt_offset(obj); 8442 } else { 8443 int align = IS_I830(dev) ? 16 * 1024 : 256; 8444 ret = i915_gem_object_attach_phys(obj, align); 8445 if (ret) { 8446 DRM_DEBUG_KMS("failed to attach phys object\n"); 8447 goto fail_locked; 8448 } 8449 addr = obj->phys_handle->busaddr; 8450 } 8451 8452 finish: 8453 if (intel_crtc->cursor_bo) { 8454 if (!INTEL_INFO(dev)->cursor_needs_physical) 8455 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 8456 } 8457 8458 i915_gem_track_fb(intel_crtc->cursor_bo, obj, 8459 INTEL_FRONTBUFFER_CURSOR(pipe)); 8460 mutex_unlock(&dev->struct_mutex); 8461 8462 old_width = intel_crtc->cursor_width; 8463 8464 intel_crtc->cursor_addr = addr; 8465 intel_crtc->cursor_bo = obj; 8466 intel_crtc->cursor_width = width; 8467 intel_crtc->cursor_height = height; 8468 8469 if (intel_crtc->active) { 8470 if (old_width != width) 8471 intel_update_watermarks(crtc); 8472 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 8473 } 8474 8475 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); 8476 8477 return 0; 8478 fail_unpin: 8479 i915_gem_object_unpin_from_display_plane(obj); 8480 fail_locked: 8481 mutex_unlock(&dev->struct_mutex); 8482 fail: 8483 drm_gem_object_unreference_unlocked(&obj->base); 8484 return ret; 8485 } 8486 8487 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 8488 u16 *blue, uint32_t 
				 start, uint32_t size)
{
	/* Clamp the updated range to the 256-entry LUT */
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Keep only the top 8 bits of each 16-bit channel value */
	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialise an intel_framebuffer wrapping @obj.
 * Consumes the reference on @obj on failure; struct_mutex must
 * already be held (see intel_framebuffer_create for the locked variant).
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference_unlocked(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}

/* Locked wrapper around __intel_framebuffer_create. */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

/* Bytes per scanline for @width pixels at @bpp, aligned up to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to back @mode at @bpp. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/* Allocate a GEM object and framebuffer sized for @mode. */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

/*
 * Return the fbdev framebuffer if it is large enough for @mode,
 * otherwise NULL (also NULL when fbdev support is compiled out).
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}

bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc
	*possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail_unlock;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail_unlock;

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->enabled)
			continue;
		/* This can occur when applying the pipe A quirk on resume. */
		if (to_intel_crtc(possible_crtc)->new_enabled)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail_unlock;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail_unlock;
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = &intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	/* Roll back the staged new_enabled/new_config state */
	intel_crtc->new_enabled = crtc->enabled;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = &intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
fail_unlock:
	/* Deadlock during lock acquisition: back off and retry the sequence */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/*
 * Undo the state set up by intel_get_load_detect_pipe(): tear down a
 * temporary pipe/framebuffer, or restore the original DPMS mode.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);
}

/* Reference clock (in kHz) feeding the DPLL, derived from its config. */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the divisor set (FP0/FP1) selected by the DPLL */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors; Pineview uses a different layout */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* gen2: i830 has no LVDS register to read */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* Hardware timing registers store each value minus one */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}

/* Switch an LVDS pipe back to its full (non-downclocked) dotclock. */
static void intel_increase_pllclock(struct drm_device *dev,
				    enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the select bit actually cleared */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

/* Drop an idle LVDS pipe to its downclocked dotclock to save power. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Read back to verify the select bit actually stuck */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}

/* Note GPU activity: grab a runtime PM reference and update gfx stats. */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	dev_priv->mm.busy = true;
}

/* Counterpart of intel_mark_busy: downclock pipes and drop the PM ref. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (!i915.powersave)
		goto out;

	for_each_crtc(dev, crtc) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

out:
	intel_runtime_pm_put(dev_priv);
}


/**
 * intel_mark_fb_busy - mark given planes as busy
 * @dev: DRM device
 * @frontbuffer_bits: bits for the affected planes
 * @ring: optional ring for asynchronous commands
 *
 * This function gets called every time the screen contents change. It can be
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
 */
static void intel_mark_fb_busy(struct drm_device *dev,
			       unsigned frontbuffer_bits,
			       struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;

	if (!i915.powersave)
		return;

	for_each_pipe(dev_priv, pipe) {
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
			continue;

		intel_increase_pllclock(dev, pipe);
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
 * @ring: set for asynchronous rendering
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			     struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	if (ring) {
		/* Async rendering: mark planes busy and cancel pending flips */
		mutex_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits
			|= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits
			&= ~obj->frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);

	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flush(struct drm_device *dev,
			     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Delay flushing when rings are still busy.*/
	mutex_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);

	intel_edp_psr_flush(dev, frontbuffer_bits);

	/*
	 * FIXME: Unconditional fbc flushing here is a rather gross hack and
	 * needs to be reworked into a proper frontbuffer tracking scheme like
	 * psr employs.
	 */
	if (IS_BROADWELL(dev))
		gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
}

/**
 * intel_fb_obj_flush - flush frontbuffer object
 * @obj: GEM object to flush
 * @retire: set when retiring asynchronous rendering
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again. If @retire is true
 * then any delayed flushes will be unblocked.
 */
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
			bool retire)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned frontbuffer_bits;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	frontbuffer_bits = obj->frontbuffer_bits;

	if (retire) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		/* Filter out new bits since rendering started. */
		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;

		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}

/**
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. The actual
 * frontbuffer flushing will be delayed until completion is signalled with
 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
 * flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
				    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.flip_bits
		|= frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);
}

/**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Mask any cancelled flips. */
	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}

/* Tear down a CRTC: cancel any pending unpin work, then free it. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach the pending unpin work under the event lock */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/* Deferred work after a page flip: unpin and release the old objects. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

/* Complete a page flip on @crtc if one is pending and has flipped. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	page_flip_completed(intel_crtc);

	lockmgr(&dev->event_lock, LK_RELEASE);
}

/* Finish the page flip on the CRTC mapped to @pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Finish the page flip on the CRTC mapped to @plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'?
*/ 9353 static bool g4x_flip_count_after_eq(u32 a, u32 b) 9354 { 9355 return !((a - b) & 0x80000000); 9356 } 9357 9358 static bool page_flip_finished(struct intel_crtc *crtc) 9359 { 9360 struct drm_device *dev = crtc->base.dev; 9361 struct drm_i915_private *dev_priv = dev->dev_private; 9362 9363 if (i915_reset_in_progress(&dev_priv->gpu_error) || 9364 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 9365 return true; 9366 9367 /* 9368 * The relevant registers doen't exist on pre-ctg. 9369 * As the flip done interrupt doesn't trigger for mmio 9370 * flips on gmch platforms, a flip count check isn't 9371 * really needed there. But since ctg has the registers, 9372 * include it in the check anyway. 9373 */ 9374 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev)) 9375 return true; 9376 9377 /* 9378 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 9379 * used the same base address. In that case the mmio flip might 9380 * have completed, but the CS hasn't even executed the flip yet. 9381 * 9382 * A flip count check isn't enough as the CS might have updated 9383 * the base address just after start of vblank, but before we 9384 * managed to process the interrupt. This means we'd complete the 9385 * CS flip too soon. 9386 * 9387 * Combining both checks should get us a good enough result. It may 9388 * still happen that the CS flip has been executed, but has not 9389 * yet actually completed. But in case the base address is the same 9390 * anyway, we don't really care. 
9391 */ 9392 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 9393 crtc->unpin_work->gtt_offset && 9394 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)), 9395 crtc->unpin_work->flip_count); 9396 } 9397 9398 void intel_prepare_page_flip(struct drm_device *dev, int plane) 9399 { 9400 struct drm_i915_private *dev_priv = dev->dev_private; 9401 struct intel_crtc *intel_crtc = 9402 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 9403 9404 /* NB: An MMIO update of the plane base pointer will also 9405 * generate a page-flip completion irq, i.e. every modeset 9406 * is also accompanied by a spurious intel_prepare_page_flip(). 9407 */ 9408 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 9409 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 9410 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 9411 lockmgr(&dev->event_lock, LK_RELEASE); 9412 } 9413 9414 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 9415 { 9416 /* Ensure that the work item is consistent when activating it ... */ 9417 smp_wmb(); 9418 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); 9419 /* and that it is marked active as soon as the irq could fire. */ 9420 smp_wmb(); 9421 } 9422 9423 static int intel_gen2_queue_flip(struct drm_device *dev, 9424 struct drm_crtc *crtc, 9425 struct drm_framebuffer *fb, 9426 struct drm_i915_gem_object *obj, 9427 struct intel_engine_cs *ring, 9428 uint32_t flags) 9429 { 9430 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9431 u32 flip_mask; 9432 int ret; 9433 9434 ret = intel_ring_begin(ring, 6); 9435 if (ret) 9436 return ret; 9437 9438 /* Can't queue multiple flips, so wait for the previous 9439 * one to finish before executing the next. 
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Wait for any previous flip on this plane to finish first. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message.
 Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/* Decide whether a flip should be done with an MMIO surface-base write
 * instead of emitting MI_DISPLAY_FLIP on a ring. */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
	else
		return ring != obj->ring;
}

/* Latch the new framebuffer base address via MMIO; this is the actual
 * flip for the MMIO path. */
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	intel_mark_page_flip_active(intel_crtc);

	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}
	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane),
		   intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/* Returns 1 if the flip must wait for rendering (and the ring irq was
 * armed), 0 if it can be performed immediately, or a negative errno. */
static int intel_postpone_flip(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *ring;
	int ret;

#if 0
	lockdep_assert_held(&obj->base.dev->struct_mutex);
#endif

	if (!obj->last_write_seqno)
		return 0;

	ring = obj->ring;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_write_seqno))
		return 0;

	ret = i915_gem_check_olr(ring, obj->last_write_seqno);
	if (ret)
		return ret;

	if (WARN_ON(!ring->irq_get(ring)))
		return 0;

	return 1;
}

/* Called from ring interrupt context: execute any MMIO flips whose
 * rendering seqno has now passed. */
void intel_notify_mmio_flip(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_crtc *intel_crtc;
	u32 seqno;

	seqno = ring->get_seqno(ring,
				false);

	spin_lock(&dev_priv->mmio_flip_lock);
	for_each_intel_crtc(ring->dev, intel_crtc) {
		struct intel_mmio_flip *mmio_flip;

		mmio_flip = &intel_crtc->mmio_flip;
		/* seqno == 0 means no MMIO flip is pending on this crtc. */
		if (mmio_flip->seqno == 0)
			continue;

		if (ring->id != mmio_flip->ring_id)
			continue;

		if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
			intel_do_mmio_flip(intel_crtc);
			mmio_flip->seqno = 0;
			ring->irq_put(ring);
		}
	}
	spin_unlock(&dev_priv->mmio_flip_lock);
}

static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	if (WARN_ON(intel_crtc->mmio_flip.seqno))
		return -EBUSY;

	ret = intel_postpone_flip(obj);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/* Rendering already finished: flip right away. */
		intel_do_mmio_flip(intel_crtc);
		return 0;
	}

	spin_lock(&dev_priv->mmio_flip_lock);
	intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
	intel_crtc->mmio_flip.ring_id = obj->ring->id;
	spin_unlock(&dev_priv->mmio_flip_lock);

	/*
	 * Double check to catch cases where irq fired before
	 * mmio flip data was ready
	 */
	intel_notify_mmio_flip(obj->ring);
	return 0;
}

static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *ring,
				    uint32_t flags)
{
	return -ENODEV;
}

/* Detect a stuck page flip: returns true when the pending flip appears
 * to have been latched by the hardware but its completion interrupt was
 * never seen. Caller must hold dev->event_lock. */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_ring &&
		    !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
				       work->flip_queued_seqno))
			return false;

		work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
	}

	if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			  intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
	}
	lockmgr(&dev->event_lock, LK_RELEASE);
}

static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = intel_fb_obj(old_fb);
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			lockmgr(&dev->event_lock, LK_RELEASE);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work.
	 */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;

	work->pending_flip_obj = obj;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;

	/* Pick the engine used to queue the flip. */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = obj->ring;
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset =
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;

	if (use_mmio_flip(ring, obj)) {
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		work->flip_queued_seqno = obj->last_write_seqno;
		work->flip_queued_ring = obj->ring;
	} else {
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		work->flip_queued_seqno = intel_ring_get_seqno(ring);
		work->flip_queued_ring = ring;
	}

	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
	work->enable_stall_check = true;

	i915_gem_track_fb(work->old_fb_obj, obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_disable_fbc(dev);
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_unpin:
	intel_unpin_fb_obj(obj);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->primary->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
out_hang:
		/* GPU is wedged: fall back to a full base update so the
		 * user still gets the new framebuffer on screen. */
		intel_crtc_wait_for_pending_flips(crtc);
		ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
		if (ret == 0 && event) {
			lockmgr(&dev->event_lock, LK_EXCLUSIVE);
			drm_send_vblank_event(dev, pipe, event);
			lockmgr(&dev->event_lock, LK_RELEASE);
		}
	}
	return ret;
}

static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};

/**
 * intel_modeset_update_staged_output_state
 *
 * Updates the staged output configuration state, e.g. after we've read out the
 * current hw state.
 */
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	for_each_intel_encoder(dev, encoder) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = crtc->base.enabled;

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}
}

/**
 * intel_modeset_commit_output_state
 *
 * This function copies the staged display pipe configuration to the real one.
 */
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->base.encoder = &connector->new_encoder->base;
	}

	for_each_intel_encoder(dev, encoder) {
		encoder->base.crtc = &encoder->new_crtc->base;
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.enabled = crtc->new_enabled;
	}
}

/* Clamp pipe_config->pipe_bpp to what the given sink reports it can
 * accept via its EDID. */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_config *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

/* Derive a starting pipe_bpp from the framebuffer format, then clamp it
 * against every connected sink. Returns the source plane bpp (so dithering
 * can later be chosen on mismatch) or a negative errno. */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}

static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/* Dump a pipe configuration to the kernel debug log. */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/* Check that @encoder can be cloned with every other encoder staged on
 * @crtc. */
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *source_encoder;

	for_each_intel_encoder(dev, source_encoder) {
		if (source_encoder->new_crtc != crtc)
			continue;

		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/* Validate the full staged encoder-cloning configuration for @crtc. */
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(crtc, encoder))
			return false;
	}

	return true;
}

/* Compute a full pipe configuration for @mode on @crtc; returns an
 * allocated intel_crtc_config (caller frees) or an ERR_PTR. */
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_crtc_config *pipe_config;
	int plane_bpp, ret = -EINVAL;
	bool retry = true;

	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return ERR_PTR(-ENOMEM);

	drm_mode_copy(&pipe_config->adjusted_mode,
		      mode);
	drm_mode_copy(&pipe_config->requested_mode, mode);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_intel_encoder(dev, encoder) {

		if (&encoder->new_crtc->base != crtc)
			continue;

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one bandwidth-constrained retry is allowed; a second
		 * RETRY would mean the computation cannot converge. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	kfree(pipe_config);
	return ERR_PTR(ret);
}

/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
*/ 10423 static void 10424 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, 10425 unsigned *prepare_pipes, unsigned *disable_pipes) 10426 { 10427 struct intel_crtc *intel_crtc; 10428 struct drm_device *dev = crtc->dev; 10429 struct intel_encoder *encoder; 10430 struct intel_connector *connector; 10431 struct drm_crtc *tmp_crtc; 10432 10433 *disable_pipes = *modeset_pipes = *prepare_pipes = 0; 10434 10435 /* Check which crtcs have changed outputs connected to them, these need 10436 * to be part of the prepare_pipes mask. We don't (yet) support global 10437 * modeset across multiple crtcs, so modeset_pipes will only have one 10438 * bit set at most. */ 10439 list_for_each_entry(connector, &dev->mode_config.connector_list, 10440 base.head) { 10441 if (connector->base.encoder == &connector->new_encoder->base) 10442 continue; 10443 10444 if (connector->base.encoder) { 10445 tmp_crtc = connector->base.encoder->crtc; 10446 10447 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; 10448 } 10449 10450 if (connector->new_encoder) 10451 *prepare_pipes |= 10452 1 << connector->new_encoder->new_crtc->pipe; 10453 } 10454 10455 for_each_intel_encoder(dev, encoder) { 10456 if (encoder->base.crtc == &encoder->new_crtc->base) 10457 continue; 10458 10459 if (encoder->base.crtc) { 10460 tmp_crtc = encoder->base.crtc; 10461 10462 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; 10463 } 10464 10465 if (encoder->new_crtc) 10466 *prepare_pipes |= 1 << encoder->new_crtc->pipe; 10467 } 10468 10469 /* Check for pipes that will be enabled/disabled ... */ 10470 for_each_intel_crtc(dev, intel_crtc) { 10471 if (intel_crtc->base.enabled == intel_crtc->new_enabled) 10472 continue; 10473 10474 if (!intel_crtc->new_enabled) 10475 *disable_pipes |= 1 << intel_crtc->pipe; 10476 else 10477 *prepare_pipes |= 1 << intel_crtc->pipe; 10478 } 10479 10480 10481 /* set_mode is also used to update properties on life display pipes. 
*/ 10482 intel_crtc = to_intel_crtc(crtc); 10483 if (intel_crtc->new_enabled) 10484 *prepare_pipes |= 1 << intel_crtc->pipe; 10485 10486 /* 10487 * For simplicity do a full modeset on any pipe where the output routing 10488 * changed. We could be more clever, but that would require us to be 10489 * more careful with calling the relevant encoder->mode_set functions. 10490 */ 10491 if (*prepare_pipes) 10492 *modeset_pipes = *prepare_pipes; 10493 10494 /* ... and mask these out. */ 10495 *modeset_pipes &= ~(*disable_pipes); 10496 *prepare_pipes &= ~(*disable_pipes); 10497 10498 /* 10499 * HACK: We don't (yet) fully support global modesets. intel_set_config 10500 * obies this rule, but the modeset restore mode of 10501 * intel_modeset_setup_hw_state does not. 10502 */ 10503 *modeset_pipes &= 1 << intel_crtc->pipe; 10504 *prepare_pipes &= 1 << intel_crtc->pipe; 10505 10506 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", 10507 *modeset_pipes, *prepare_pipes, *disable_pipes); 10508 } 10509 10510 static bool intel_crtc_in_use(struct drm_crtc *crtc) 10511 { 10512 struct drm_encoder *encoder; 10513 struct drm_device *dev = crtc->dev; 10514 10515 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 10516 if (encoder->crtc == crtc) 10517 return true; 10518 10519 return false; 10520 } 10521 10522 static void 10523 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) 10524 { 10525 struct intel_encoder *intel_encoder; 10526 struct intel_crtc *intel_crtc; 10527 struct drm_connector *connector; 10528 10529 for_each_intel_encoder(dev, intel_encoder) { 10530 if (!intel_encoder->base.crtc) 10531 continue; 10532 10533 intel_crtc = to_intel_crtc(intel_encoder->base.crtc); 10534 10535 if (prepare_pipes & (1 << intel_crtc->pipe)) 10536 intel_encoder->connectors_active = false; 10537 } 10538 10539 intel_modeset_commit_output_state(dev); 10540 10541 /* Double check state. 
 */
	for_each_intel_crtc(dev, intel_crtc) {
		WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
		WARN_ON(intel_crtc->new_config &&
			intel_crtc->new_config != &intel_crtc->config);
		WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
	}

	/* Force DPMS on (both sw field and the exposed property) for every
	 * connector on a pipe that was just (re)prepared. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
						      dpms_property,
						      DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}

/* Returns true if @clock1 and @clock2 agree to within roughly 5%
 * (or are exactly equal). Zero only matches zero. */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

/* Iterate over all intel crtcs whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))

/* Field-by-field comparison of two pipe configs; logs the first mismatch
 * and returns false, or returns true when everything relevant matches. */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_config *current_config,
			  struct intel_crtc_config *pipe_config)
{
/* Compare a field, reporting mismatches in hex. */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare a field, reporting mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
	if ((current_config->name != pipe_config->name) && \
		(current_config->alt_name != pipe_config->name)) { \
			DRM_ERROR("mismatch in " #name " " \
				  "(expected %i or %i, found %i)\n", \
				  current_config->name, \
				  current_config->alt_name, \
				  pipe_config->name); \
			return false; \
	}

/* Compare only the bits selected by @mask within a flags field. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* Compare clocks with ~5% tolerance via intel_fuzzy_clock_check(). */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* True when either config carries the given quirk flag. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

10665 PIPE_CONF_CHECK_I(has_dp_encoder); 10666 10667 if (INTEL_INFO(dev)->gen < 8) { 10668 PIPE_CONF_CHECK_I(dp_m_n.gmch_m); 10669 PIPE_CONF_CHECK_I(dp_m_n.gmch_n); 10670 PIPE_CONF_CHECK_I(dp_m_n.link_m); 10671 PIPE_CONF_CHECK_I(dp_m_n.link_n); 10672 PIPE_CONF_CHECK_I(dp_m_n.tu); 10673 10674 if (current_config->has_drrs) { 10675 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m); 10676 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n); 10677 PIPE_CONF_CHECK_I(dp_m2_n2.link_m); 10678 PIPE_CONF_CHECK_I(dp_m2_n2.link_n); 10679 PIPE_CONF_CHECK_I(dp_m2_n2.tu); 10680 } 10681 } else { 10682 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m); 10683 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n); 10684 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m); 10685 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n); 10686 PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu); 10687 } 10688 10689 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 10690 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 10691 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); 10692 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end); 10693 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start); 10694 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end); 10695 10696 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay); 10697 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal); 10698 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start); 10699 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end); 10700 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); 10701 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); 10702 10703 PIPE_CONF_CHECK_I(pixel_multiplier); 10704 PIPE_CONF_CHECK_I(has_hdmi_sink); 10705 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 10706 IS_VALLEYVIEW(dev)) 10707 PIPE_CONF_CHECK_I(limited_color_range); 10708 10709 PIPE_CONF_CHECK_I(has_audio); 10710 10711 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 10712 DRM_MODE_FLAG_INTERLACE); 10713 10714 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 10715 
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 10716 DRM_MODE_FLAG_PHSYNC); 10717 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 10718 DRM_MODE_FLAG_NHSYNC); 10719 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 10720 DRM_MODE_FLAG_PVSYNC); 10721 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, 10722 DRM_MODE_FLAG_NVSYNC); 10723 } 10724 10725 PIPE_CONF_CHECK_I(pipe_src_w); 10726 PIPE_CONF_CHECK_I(pipe_src_h); 10727 10728 /* 10729 * FIXME: BIOS likes to set up a cloned config with lvds+external 10730 * screen. Since we don't yet re-compute the pipe config when moving 10731 * just the lvds port away to another pipe the sw tracking won't match. 10732 * 10733 * Proper atomic modesets with recomputed global state will fix this. 10734 * Until then just don't check gmch state for inherited modes. 10735 */ 10736 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { 10737 PIPE_CONF_CHECK_I(gmch_pfit.control); 10738 /* pfit ratios are autocomputed by the hw on gen4+ */ 10739 if (INTEL_INFO(dev)->gen < 4) 10740 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 10741 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 10742 } 10743 10744 PIPE_CONF_CHECK_I(pch_pfit.enabled); 10745 if (current_config->pch_pfit.enabled) { 10746 PIPE_CONF_CHECK_I(pch_pfit.pos); 10747 PIPE_CONF_CHECK_I(pch_pfit.size); 10748 } 10749 10750 /* BDW+ don't expose a synchronous way to read the state */ 10751 if (IS_HASWELL(dev)) 10752 PIPE_CONF_CHECK_I(ips_enabled); 10753 10754 PIPE_CONF_CHECK_I(double_wide); 10755 10756 PIPE_CONF_CHECK_X(ddi_pll_sel); 10757 10758 PIPE_CONF_CHECK_I(shared_dpll); 10759 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 10760 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 10761 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 10762 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 10763 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 10764 10765 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 10766 PIPE_CONF_CHECK_I(pipe_bpp); 10767 10768 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 10769 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 10770 10771 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}

/* Cross-check each connector's hw state and verify that the staged encoder
 * matches the committed one. */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

/* Verify that each encoder's sw tracking (crtc link, connectors_active)
 * agrees with both the connector list and the hw state readout. */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Derive expected enabled/active from attached connectors. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		/* Now compare against the hw readout. */
		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

/* Verify each crtc's enabled/active sw tracking against its encoders and
 * against a fresh hw pipe-config readout. */
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		for_each_intel_encoder(dev, encoder) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->active;

		/* Let active encoders fill in their part of the hw config. */
		for_each_intel_encoder(dev, encoder) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
			WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, &crtc->config,
					       "[sw state]");
		}
	}
}

/* Verify refcount/active tracking and cached hw state for every shared DPLL. */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		WARN(pll->active > pll->refcount,
		     "more active pll users than references: %i vs %i\n",
		     pll->active, pll->refcount);
		WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users from the crtc side and compare. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		WARN(pll->refcount != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     pll->refcount, enabled_crtcs);

		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

/* Full modeset state cross-check: connectors, encoders, crtcs and DPLLs. */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}

/* Warn if an encoder's idea of the dotclock disagrees (fuzzily) with the
 * value already derived from the FDI configuration. */
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
}

/* Compute the per-platform correction applied to the hw scanline counter
 * (see the long comment below for the gen-specific behaviour). */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
11002 * 11003 * On gen2 the scanline counter starts counting from 1 instead 11004 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 11005 * to keep the value positive), instead of adding one. 11006 * 11007 * On HSW+ the behaviour of the scanline counter depends on the output 11008 * type. For DP ports it behaves like most other platforms, but on HDMI 11009 * there's an extra 1 line difference. So we need to add two instead of 11010 * one to the value. 11011 */ 11012 if (IS_GEN2(dev)) { 11013 const struct drm_display_mode *mode = &crtc->config.adjusted_mode; 11014 int vtotal; 11015 11016 vtotal = mode->crtc_vtotal; 11017 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 11018 vtotal /= 2; 11019 11020 crtc->scanline_offset = vtotal - 1; 11021 } else if (HAS_DDI(dev) && 11022 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { 11023 crtc->scanline_offset = 2; 11024 } else 11025 crtc->scanline_offset = 1; 11026 } 11027 11028 static int __intel_set_mode(struct drm_crtc *crtc, 11029 struct drm_display_mode *mode, 11030 int x, int y, struct drm_framebuffer *fb) 11031 { 11032 struct drm_device *dev = crtc->dev; 11033 struct drm_i915_private *dev_priv = dev->dev_private; 11034 struct drm_display_mode *saved_mode; 11035 struct intel_crtc_config *pipe_config = NULL; 11036 struct intel_crtc *intel_crtc; 11037 unsigned disable_pipes, prepare_pipes, modeset_pipes; 11038 int ret = 0; 11039 11040 saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK); 11041 if (!saved_mode) 11042 return -ENOMEM; 11043 11044 intel_modeset_affected_pipes(crtc, &modeset_pipes, 11045 &prepare_pipes, &disable_pipes); 11046 11047 *saved_mode = crtc->mode; 11048 11049 /* Hack: Because we don't (yet) support global modeset on multiple 11050 * crtcs, we don't keep track of the new mode for more than one crtc. 
11051 * Hence simply check whether any bit is set in modeset_pipes in all the 11052 * pieces of code that are not yet converted to deal with mutliple crtcs 11053 * changing their mode at the same time. */ 11054 if (modeset_pipes) { 11055 pipe_config = intel_modeset_pipe_config(crtc, fb, mode); 11056 if (IS_ERR(pipe_config)) { 11057 ret = PTR_ERR(pipe_config); 11058 pipe_config = NULL; 11059 11060 goto out; 11061 } 11062 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 11063 "[modeset]"); 11064 to_intel_crtc(crtc)->new_config = pipe_config; 11065 } 11066 11067 /* 11068 * See if the config requires any additional preparation, e.g. 11069 * to adjust global state with pipes off. We need to do this 11070 * here so we can get the modeset_pipe updated config for the new 11071 * mode set on this crtc. For other crtcs we need to use the 11072 * adjusted_mode bits in the crtc directly. 11073 */ 11074 if (IS_VALLEYVIEW(dev)) { 11075 valleyview_modeset_global_pipes(dev, &prepare_pipes); 11076 11077 /* may have added more to prepare_pipes than we should */ 11078 prepare_pipes &= ~disable_pipes; 11079 } 11080 11081 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 11082 intel_crtc_disable(&intel_crtc->base); 11083 11084 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11085 if (intel_crtc->base.enabled) 11086 dev_priv->display.crtc_disable(&intel_crtc->base); 11087 } 11088 11089 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 11090 * to set it here already despite that we pass it down the callchain. 11091 */ 11092 if (modeset_pipes) { 11093 crtc->mode = *mode; 11094 /* mode_set/enable/disable functions rely on a correct pipe 11095 * config. */ 11096 to_intel_crtc(crtc)->config = *pipe_config; 11097 to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config; 11098 11099 /* 11100 * Calculate and store various constants which 11101 * are later needed by vblank and swap-completion 11102 * timestamping. 
They are derived from true hwmode. 11103 */ 11104 drm_calc_timestamping_constants(crtc, 11105 &pipe_config->adjusted_mode); 11106 } 11107 11108 /* Only after disabling all output pipelines that will be changed can we 11109 * update the the output configuration. */ 11110 intel_modeset_update_state(dev, prepare_pipes); 11111 11112 if (dev_priv->display.modeset_global_resources) 11113 dev_priv->display.modeset_global_resources(dev); 11114 11115 /* Set up the DPLL and any encoders state that needs to adjust or depend 11116 * on the DPLL. 11117 */ 11118 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 11119 struct drm_framebuffer *old_fb = crtc->primary->fb; 11120 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); 11121 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11122 11123 mutex_lock(&dev->struct_mutex); 11124 ret = intel_pin_and_fence_fb_obj(dev, 11125 obj, 11126 NULL); 11127 if (ret != 0) { 11128 DRM_ERROR("pin & fence failed\n"); 11129 mutex_unlock(&dev->struct_mutex); 11130 goto done; 11131 } 11132 if (old_fb) 11133 intel_unpin_fb_obj(old_obj); 11134 i915_gem_track_fb(old_obj, obj, 11135 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11136 mutex_unlock(&dev->struct_mutex); 11137 11138 crtc->primary->fb = fb; 11139 crtc->x = x; 11140 crtc->y = y; 11141 11142 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base, 11143 x, y, fb); 11144 if (ret) 11145 goto done; 11146 } 11147 11148 /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ 11149 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11150 update_scanline_offset(intel_crtc); 11151 11152 dev_priv->display.crtc_enable(&intel_crtc->base); 11153 } 11154 11155 /* FIXME: add subpixel order */ 11156 done: 11157 if (ret && crtc->enabled) 11158 crtc->mode = *saved_mode; 11159 11160 out: 11161 kfree(pipe_config); 11162 kfree(saved_mode); 11163 return ret; 11164 } 11165 11166 static int intel_set_mode(struct drm_crtc *crtc, 11167 struct drm_display_mode *mode, 11168 int x, int y, struct drm_framebuffer *fb) 11169 { 11170 int ret; 11171 11172 ret = __intel_set_mode(crtc, mode, x, y, fb); 11173 11174 if (ret == 0) 11175 intel_modeset_check_state(crtc->dev); 11176 11177 return ret; 11178 } 11179 11180 void intel_crtc_restore_mode(struct drm_crtc *crtc) 11181 { 11182 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); 11183 } 11184 11185 #undef for_each_intel_crtc_masked 11186 11187 static void intel_set_config_free(struct intel_set_config *config) 11188 { 11189 if (!config) 11190 return; 11191 11192 kfree(config->save_connector_encoders); 11193 kfree(config->save_encoder_crtcs); 11194 kfree(config->save_crtc_enabled); 11195 kfree(config); 11196 } 11197 11198 static int intel_set_config_save_state(struct drm_device *dev, 11199 struct intel_set_config *config) 11200 { 11201 struct drm_crtc *crtc; 11202 struct drm_encoder *encoder; 11203 struct drm_connector *connector; 11204 int count; 11205 11206 config->save_crtc_enabled = 11207 kcalloc(dev->mode_config.num_crtc, 11208 sizeof(bool), GFP_KERNEL); 11209 if (!config->save_crtc_enabled) 11210 return -ENOMEM; 11211 11212 config->save_encoder_crtcs = 11213 kcalloc(dev->mode_config.num_encoder, 11214 sizeof(struct drm_crtc *), GFP_KERNEL); 11215 if (!config->save_encoder_crtcs) 11216 return -ENOMEM; 11217 11218 config->save_connector_encoders = 11219 kcalloc(dev->mode_config.num_connector, 11220 sizeof(struct drm_encoder *), GFP_KERNEL); 11221 if 
(!config->save_connector_encoders) 11222 return -ENOMEM; 11223 11224 /* Copy data. Note that driver private data is not affected. 11225 * Should anything bad happen only the expected state is 11226 * restored, not the drivers personal bookkeeping. 11227 */ 11228 count = 0; 11229 for_each_crtc(dev, crtc) { 11230 config->save_crtc_enabled[count++] = crtc->enabled; 11231 } 11232 11233 count = 0; 11234 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 11235 config->save_encoder_crtcs[count++] = encoder->crtc; 11236 } 11237 11238 count = 0; 11239 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 11240 config->save_connector_encoders[count++] = connector->encoder; 11241 } 11242 11243 return 0; 11244 } 11245 11246 static void intel_set_config_restore_state(struct drm_device *dev, 11247 struct intel_set_config *config) 11248 { 11249 struct intel_crtc *crtc; 11250 struct intel_encoder *encoder; 11251 struct intel_connector *connector; 11252 int count; 11253 11254 count = 0; 11255 for_each_intel_crtc(dev, crtc) { 11256 crtc->new_enabled = config->save_crtc_enabled[count++]; 11257 11258 if (crtc->new_enabled) 11259 crtc->new_config = &crtc->config; 11260 else 11261 crtc->new_config = NULL; 11262 } 11263 11264 count = 0; 11265 for_each_intel_encoder(dev, encoder) { 11266 encoder->new_crtc = 11267 to_intel_crtc(config->save_encoder_crtcs[count++]); 11268 } 11269 11270 count = 0; 11271 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { 11272 connector->new_encoder = 11273 to_intel_encoder(config->save_connector_encoders[count++]); 11274 } 11275 } 11276 11277 static bool 11278 is_crtc_connector_off(struct drm_mode_set *set) 11279 { 11280 int i; 11281 11282 if (set->num_connectors == 0) 11283 return false; 11284 11285 if (WARN_ON(set->connectors == NULL)) 11286 return false; 11287 11288 for (i = 0; i < set->num_connectors; i++) 11289 if (set->connectors[i]->encoder && 11290 set->connectors[i]->encoder->crtc == 
set->crtc && 11291 set->connectors[i]->dpms != DRM_MODE_DPMS_ON) 11292 return true; 11293 11294 return false; 11295 } 11296 11297 static void 11298 intel_set_config_compute_mode_changes(struct drm_mode_set *set, 11299 struct intel_set_config *config) 11300 { 11301 11302 /* We should be able to check here if the fb has the same properties 11303 * and then just flip_or_move it */ 11304 if (is_crtc_connector_off(set)) { 11305 config->mode_changed = true; 11306 } else if (set->crtc->primary->fb != set->fb) { 11307 /* 11308 * If we have no fb, we can only flip as long as the crtc is 11309 * active, otherwise we need a full mode set. The crtc may 11310 * be active if we've only disabled the primary plane, or 11311 * in fastboot situations. 11312 */ 11313 if (set->crtc->primary->fb == NULL) { 11314 struct intel_crtc *intel_crtc = 11315 to_intel_crtc(set->crtc); 11316 11317 if (intel_crtc->active) { 11318 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 11319 config->fb_changed = true; 11320 } else { 11321 DRM_DEBUG_KMS("inactive crtc, full mode set\n"); 11322 config->mode_changed = true; 11323 } 11324 } else if (set->fb == NULL) { 11325 config->mode_changed = true; 11326 } else if (set->fb->pixel_format != 11327 set->crtc->primary->fb->pixel_format) { 11328 config->mode_changed = true; 11329 } else { 11330 config->fb_changed = true; 11331 } 11332 } 11333 11334 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) 11335 config->fb_changed = true; 11336 11337 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 11338 DRM_DEBUG_KMS("modes are different, full mode set\n"); 11339 drm_mode_debug_printmodeline(&set->crtc->mode); 11340 drm_mode_debug_printmodeline(set->mode); 11341 config->mode_changed = true; 11342 } 11343 11344 DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n", 11345 set->crtc->base.id, config->mode_changed, config->fb_changed); 11346 } 11347 11348 static int 11349 intel_modeset_stage_output_state(struct 
				 drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = connector->encoder;
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				connector->base.name);
		}


		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		struct drm_crtc *new_crtc;

		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		/* Connectors named in the set follow the set's crtc. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			connector->base.name,
			new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	for_each_intel_encoder(dev, encoder) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;	/* encoder cloning across connectors is not allowed */

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */

	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = false;

		for_each_intel_encoder(dev, encoder) {
			if (encoder->new_crtc == crtc) {
				crtc->new_enabled = true;
				break;
			}
		}

		if (crtc->new_enabled != crtc->base.enabled) {
			DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
				      crtc->new_enabled ? "en" : "dis");
			config->mode_changed = true;
		}

		if (crtc->new_enabled)
			crtc->new_config = &crtc->config;
		else
			crtc->new_config = NULL;
	}

	return 0;
}

/*
 * Detach all staged encoder/connector links from @crtc and stage it
 * disabled.  Used on the error path when restoring would otherwise leave
 * an enabled pipe with fb == NULL.
 */
static void disable_crtc_nofb(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
		      pipe_name(crtc->pipe));

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		if (connector->new_encoder &&
		    connector->new_encoder->new_crtc == crtc)
			connector->new_encoder = NULL;
	}

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc == crtc)
			encoder->new_crtc = NULL;
	}

	crtc->new_enabled = false;
	crtc->new_config = NULL;
}

/*
 * drm_crtc_funcs.set_config implementation: snapshot the current routing,
 * stage the requested state, then perform either a full modeset or just a
 * framebuffer base update.  On failure the saved state is restored.
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	/* Remember the current config so it can be re-applied on failure. */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->primary->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
		struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);

		intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);

		/*
		 * We need to make sure the primary plane is re-enabled if it
		 * has previously been turned off.
		 */
		if (!intel_crtc->primary_enabled && ret == 0) {
			WARN_ON(!intel_crtc->active);
			intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
		}

		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot.  It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915.fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/*
		 * HACK: if the pipe was on, but we didn't have a framebuffer,
		 * force the pipe off to avoid oopsing in the modeset code
		 * due to fb==NULL. This should only happen during boot since
		 * we don't yet reconstruct the FB from the hardware state.
		 */
		if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
			disable_crtc_nofb(to_intel_crtc(save_set.crtc));

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				   save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};

/*
 * Read back the PCH DPLL registers into @hw_state; returns whether the
 * PLL is currently enabled.  Requires the PLLS power domain to be up.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
	return val & DPLL_VCO_ENABLE;
}

/* Program the cached FP0/FP1 divider values; done before enabling. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}

/* Turn the PCH DPLL on, following the documented double-write sequence. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Turn the PCH DPLL off after checking no transcoder still uses it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

/* Register the two IBX/CPT PCH DPLLs with the shared-DPLL framework. */
static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

/* Pick the platform-specific shared DPLL implementation. */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

/*
 * drm_plane_funcs.disable_plane for the primary plane: turn off the
 * hardware plane (if it was on) and release the pinned framebuffer.
 */
static int
intel_primary_plane_disable(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;

	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	intel_crtc = to_intel_crtc(plane->crtc);

	/*
	 * Even though we checked plane->fb above, it's still possible that
	 * the primary plane has been implicitly disabled because the crtc
	 * coordinates given weren't visible, or because we detected
	 * that it was 100% covered by a sprite plane.
	 Or, the CRTC may be
	 * off and we've set a fb, but haven't actually turned on the CRTC yet.
	 * In either case, we need to unpin the FB and let the fb pointer get
	 * updated, but otherwise we don't need to touch the hardware.
	 */
	if (!intel_crtc->primary_enabled)
		goto disable_unpin;

	intel_crtc_wait_for_pending_flips(plane->crtc);
	intel_disable_primary_hw_plane(plane, plane->crtc);

disable_unpin:
	mutex_lock(&dev->struct_mutex);
	i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
			  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
	intel_unpin_fb_obj(intel_fb_obj(plane->fb));
	mutex_unlock(&dev->struct_mutex);
	plane->fb = NULL;

	return 0;
}

/*
 * drm_plane_funcs.update_plane for the primary plane.  Clips the request
 * against the pipe source size, handles the crtc-off and fully-clipped
 * cases by just (un)pinning, and otherwise programs the new base.
 */
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
			     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			     unsigned int crtc_w, unsigned int crtc_h,
			     uint32_t src_x, uint32_t src_y,
			     uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	struct drm_rect dest = {
		/* integer pixels */
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		/* integer pixels; empty clip when the crtc is inactive */
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	/* Keep the caller's unclipped coordinates for bookkeeping below. */
	const struct {
		int crtc_x, crtc_y;
		unsigned int crtc_w, crtc_h;
		uint32_t src_x, src_y, src_w, src_h;
	} orig = {
		.crtc_x = crtc_x,
		.crtc_y = crtc_y,
		.crtc_w = crtc_w,
		.crtc_h = crtc_h,
		.src_x = src_x,
		.src_y = src_y,
		.src_w = src_w,
		.src_h = src_h,
	};
	struct intel_plane *intel_plane = to_intel_plane(plane);
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true, &visible);

	if (ret)
		return ret;

	/*
	 * If the CRTC isn't enabled, we're just pinning the framebuffer,
	 * updating the fb pointer, and returning without touching the
	 * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
	 * turn on the display with all planes setup as desired.
	 */
	if (!crtc->enabled) {
		mutex_lock(&dev->struct_mutex);

		/*
		 * If we already called setplane while the crtc was disabled,
		 * we may have an fb pinned; unpin it.
		 */
		if (plane->fb)
			intel_unpin_fb_obj(old_obj);

		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));

		/* Pin and return without programming hardware */
		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
		mutex_unlock(&dev->struct_mutex);

		return ret;
	}

	intel_crtc_wait_for_pending_flips(crtc);

	/*
	 * If clipping results in a non-visible primary plane, we'll disable
	 * the primary plane.  Note that this is a bit different than what
	 * happens if userspace explicitly disables the plane by passing fb=0
	 * because plane->fb still gets set and pinned.
	 */
	if (!visible) {
		mutex_lock(&dev->struct_mutex);

		/*
		 * Try to pin the new fb first so that we can bail out if we
		 * fail.
		 */
		if (plane->fb != fb) {
			ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
			if (ret) {
				mutex_unlock(&dev->struct_mutex);
				return ret;
			}
		}

		i915_gem_track_fb(old_obj, obj,
				  INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));

		if (intel_crtc->primary_enabled)
			intel_disable_primary_hw_plane(plane, crtc);


		if (plane->fb != fb)
			if (plane->fb)
				intel_unpin_fb_obj(old_obj);

		mutex_unlock(&dev->struct_mutex);

	} else {
		if (intel_crtc && intel_crtc->active &&
		    intel_crtc->primary_enabled) {
			/*
			 * FBC does not work on some platforms for rotated
			 * planes, so disable it when rotation is not 0 and
			 * update it when rotation is set back to 0.
			 *
			 * FIXME: This is redundant with the fbc update done in
			 * the primary plane enable function except that that
			 * one is done too late. We eventually need to unify
			 * this.
			 */
			if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
			    dev_priv->fbc.plane == intel_crtc->plane &&
			    intel_plane->rotation != BIT(DRM_ROTATE_0)) {
				intel_disable_fbc(dev);
			}
		}
		/* src.x1/y1 are 16.16 fixed point after clipping. */
		ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
		if (ret)
			return ret;

		if (!intel_crtc->primary_enabled)
			intel_enable_primary_hw_plane(plane, crtc);
	}

	intel_plane->crtc_x = orig.crtc_x;
	intel_plane->crtc_y = orig.crtc_y;
	intel_plane->crtc_w = orig.crtc_w;
	intel_plane->crtc_h = orig.crtc_h;
	intel_plane->src_x = orig.src_x;
	intel_plane->src_y = orig.src_y;
	intel_plane->src_w = orig.src_w;
	intel_plane->src_h = orig.src_h;
	intel_plane->obj = obj;

	return 0;
}

/* Common destruction function for both primary and cursor planes */
static void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

static const struct drm_plane_funcs intel_primary_plane_funcs = {
	.update_plane = intel_primary_plane_setplane,
	.disable_plane = intel_primary_plane_disable,
	.destroy = intel_plane_destroy,
	.set_property = intel_plane_set_property
};

/*
 * Allocate and initialize the universal primary plane for @pipe,
 * selecting the gen-appropriate pixel format list.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	primary->can_scale = false;
	primary->max_downscale = 1;
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->rotation = BIT(DRM_ROTATE_0);
	/* Pipe/plane swap for FBC; mirrors the logic in intel_crtc_init(). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_primary_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	/* 0/180 degree rotation is only exposed on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&primary->base.base,
				dev->mode_config.rotation_property,
				primary->rotation);
	}

	return &primary->base;
}

/* drm_plane_funcs.disable_plane for the cursor: clear the cursor bo. */
static int
intel_cursor_plane_disable(struct drm_plane *plane)
{
	if (!plane->fb)
		return 0;

	BUG_ON(!plane->crtc);

	return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
}

/*
 * drm_plane_funcs.update_plane for the cursor plane: validates the
 * (unscaled) update, then either switches the cursor bo or just moves
 * the existing one.
 */
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			  unsigned int crtc_w, unsigned int crtc_h,
			  uint32_t src_x, uint32_t src_y,
			  uint32_t src_w, uint32_t src_h)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct drm_rect dest = {
		/* integer pixels */
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		/* integer pixels; empty clip when the crtc is inactive */
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	bool visible;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &visible);
	if (ret)
		return ret;

	crtc->cursor_x = crtc_x;
	crtc->cursor_y = crtc_y;
	if (fb != crtc->cursor->fb) {
		/* New cursor bo: full set, including (un)pinning. */
		return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
	} else {
		/* Same bo: just reprogram position/visibility. */
		intel_crtc_update_cursor(crtc, visible);

		intel_frontbuffer_flip(crtc->dev,
				       INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));

		return 0;
	}
}

static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_cursor_plane_update,
	.disable_plane = intel_cursor_plane_disable,
	.destroy = intel_plane_destroy,
};

/* Allocate and initialize the universal cursor plane for @pipe. */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_cursor_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);
	return &cursor->base;
}

/*
 * Create and register the crtc for @pipe along with its primary and
 * cursor planes.  On any failure the partially constructed objects are
 * cleaned up and the crtc is simply not registered.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	primary =
		  intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Identity gamma ramp by default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 == "unknown", forces the first cursor update to program hw. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(intel_crtc);
}

/*
 * Return the pipe the connector's encoder is currently routed to, or
 * INVALID_PIPE when the connector has no encoder.  Caller must hold the
 * connection_mutex.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder)
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/* DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: map a crtc id to its pipe. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build a bitmask of encoder indices that @encoder can be cloned with;
 * bit position follows the device encoder list order.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* Whether eDP on port A is present (mobile parts, not fused off). */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Human-readable name for an INTEL_OUTPUT_* value, for debug output. */
const char *intel_output_name(int output)
{
	static const char *names[] = {
		[INTEL_OUTPUT_UNUSED] = "Unused",
		[INTEL_OUTPUT_ANALOG] = "Analog",
		[INTEL_OUTPUT_DVO] = "DVO",
		[INTEL_OUTPUT_SDVO] = "SDVO",
		[INTEL_OUTPUT_LVDS] = "LVDS",
		[INTEL_OUTPUT_TVOUT] = "TV",
		[INTEL_OUTPUT_HDMI] = "HDMI",
		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
		[INTEL_OUTPUT_EDP] = "eDP",
		[INTEL_OUTPUT_DSI] = "DSI",
		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
	};

	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
		return "Invalid";

	return names[output];
}

/* Whether this platform has a usable analog CRT (VGA) connector. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * Probe and register all display outputs for this platform, then fill in
 * each encoder's possible_crtcs/possible_clones masks.  The big if/else
 * ladder selects the detection scheme per hardware generation/PCH.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
#if 0
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);
#endif
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_edp_psr_init(dev);

	/* Fill in possible crtc/clone masks now that all encoders exist. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/* drm_framebuffer_funcs.destroy: drop our GEM reference and free. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/*
 * drm_framebuffer_funcs.create_handle: hand userspace a GEM handle for
 * the framebuffer's backing object (used e.g. by screenshot tools).
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};

/*
 * Validate a userspace framebuffer request against hardware limits
 * (tiling mode, pitch alignment, per-generation pitch maximum, stride
 * vs. tiling consistency, pixel format, object size) and, if valid,
 * initialize @intel_fb around @obj and register it with the drm core.
 *
 * Returns 0 on success or -EINVAL / drm_framebuffer_init()'s error on
 * failure.  Caller must hold dev->struct_mutex and owns the reference
 * on @obj; on success the object's framebuffer_references count is
 * bumped (dropped again in intel_user_framebuffer_destroy()).
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	int aligned_height;
	int pitch_limit;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Display engine scans out X-tiled or linear only. */
	if (obj->tiling_mode == I915_TILING_Y) {
		DRM_DEBUG("hardware does not support tiling Y\n");
		return -EINVAL;
	}

	if (mode_cmd->pitches[0] & 63) {
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
			  mode_cmd->pitches[0]);
		return -EINVAL;
	}

	/* Maximum stride depends on generation and (pre-ilk) tiling. */
	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
		pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode)
			pitch_limit = 16*1024;
		else
			pitch_limit = 32*1024;
	} else if (INTEL_INFO(dev)->gen >= 3) {
		if (obj->tiling_mode)
			pitch_limit = 8*1024;
		else
			pitch_limit = 16*1024;
	} else
		/* XXX DSPC is limited to 4k tiled */
		pitch_limit = 8*1024;

	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
			  obj->tiling_mode ? "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* A tiled scanout buffer must be scanned out with the stride it
	 * was fenced/tiled with. */
	if (obj->tiling_mode != I915_TILING_NONE &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* 1555 formats are gen2/3 only. */
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* BGR and 30bpp formats need gen4+. */
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		/* Packed YUV needs gen5+. */
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_align_height(dev, mode_cmd->height,
					    obj->tiling_mode);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * drm_mode_config_funcs.fb_create: resolve the userspace GEM handle and
 * build an intel framebuffer around it.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/* NOTE(review): this NULL check only works if 'base' is the
	 * first member of struct drm_i915_gem_object so that
	 * to_intel_bo(NULL) stays NULL; checking the lookup result
	 * before converting would be more robust — TODO confirm. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}

#ifndef CONFIG_DRM_I915_FBDEV
/* No-op stand-in when the fbdev emulation layer is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
};

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL divider search algorithm per platform. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* CRTC enable/disable/mode-set and plane hooks, most-specific
	 * platform check first (DDI implies PCH split). */
	if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_plane_config = ironlake_get_plane_config;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_plane_config = i9xx_get_plane_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training, audio ELD writeback and global modeset
	 * resource hooks, by generation. */
	if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	} else if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
		dev_priv->display.write_eld = ironlake_write_eld;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
		dev_priv->display.write_eld = ironlake_write_eld;
		dev_priv->display.modeset_global_resources =
			snb_modeset_global_resources;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
		dev_priv->display.write_eld = ironlake_write_eld;
		dev_priv->display.modeset_global_resources =
			ivb_modeset_global_resources;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		dev_priv->display.write_eld = haswell_write_eld;
		dev_priv->display.modeset_global_resources =
			haswell_modeset_global_resources;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_global_resources =
			valleyview_modeset_global_resources;
		dev_priv->display.write_eld = ironlake_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}

	intel_panel_init_backlight_funcs(dev);

	/* DragonFly lockmgr lock for eDP panel power sequencing. */
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* Same as above but for pipe B (830 needs both pipes/DPLLs kept up). */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* PCI-ID-keyed quirk table entry; PCI_ANY_ID wildcards are honoured. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

/* dmi_system_id callback: log the match; returning 1 stops the scan. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
};

/*
 * Run every quirk hook whose PCI IDs (via the DragonFly pci_get_* API)
 * or DMI table match this machine.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct device *d = dev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set SR01 bit 5 (screen off) via the legacy VGA I/O ports. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	/*
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
	 * from S3 without preserving (some of?) the other bits.
	 */
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/* (Re)initialize display hardware state: DDI, cdclk, clock gating, RPS. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_update_cdclk(dev);

	intel_init_clock_gating(dev);

	intel_enable_gt_powersave(dev);
}

void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}

/*
 * One-time modeset bring-up: register the mode config, apply quirks,
 * set per-generation fb/cursor size limits, create CRTCs and sprite
 * planes, disable the VGA plane, register outputs and read out the
 * BIOS-programmed hardware state.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do for display-less hardware. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	/* Framebuffer size limits by generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits by generation. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_init_dpio(dev);

	intel_shared_dpll_init(dev);

	/* save the BIOS value before clobbering it */
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, false);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		if (dev_priv->display.get_plane_config) {
			dev_priv->display.get_plane_config(crtc,
							   &crtc->plane_config);
			/*
			 * If the fb is shared between multiple heads, we'll
			 * just get the first one.
			 */
			intel_find_plane_obj(crtc, &crtc->plane_config);
		}
	}
}

static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp);
}

/*
 * Returns false when the OTHER display plane is enabled and selects this
 * crtc's pipe, i.e. the BIOS left the (pre-gen4) plane->pipe mapping
 * crossed.  Trivially true with a single pipe.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

/*
 * Bring a CRTC's software state in line with (possibly bogus) hardware
 * state left by the BIOS: fix frame start delays, vblank bookkeeping,
 * crossed plane->pipe mappings, the pipe A quirk, and enabled/active
 * mismatches.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config.cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	if (crtc->active) {
		update_scanline_offset(crtc);
		drm_vblank_on(dev, crtc->pipe);
	} else
		drm_vblank_off(dev, crtc->pipe);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->plane = !plane;
		crtc->primary_enabled = true;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		/* multiple connectors may have the same encoder:
		 *  handle them and break crtc link separately */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head)
			if (connector->encoder->base.crtc == &crtc->base) {
				connector->encoder->base.crtc = NULL;
				connector->encoder->connectors_active = false;
			}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

/*
 * Fix up an encoder whose hardware state is inconsistent with its
 * connector links (typically fallout from resume register restore):
 * disable encoders with active connectors but no active pipe, and clamp
 * their connectors to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;
		encoder->connectors_active = false;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder != encoder)
				continue;

			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}

/* Turn the VGA plane back off if something (e.g. BIOS) re-enabled it. */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}

/* Read whether the primary display plane is enabled for an active crtc. */
static bool primary_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->active)
		return false;

	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
}

/*
 * Read the current hardware state (pipes, shared DPLLs, encoders,
 * connectors) into the software tracking structures, without changing
 * any hardware.  Sanitization happens afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		memset(&crtc->config, 0, sizeof(crtc->config));

		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 &crtc->config);

		crtc->base.enabled = crtc->active;
		crtc->primary_enabled = primary_get_hw_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
		pll->active = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				pll->active++;
		}
		pll->refcount = pll->active;

		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
			      pll->name, pll->refcount, pll->on);

		if (pll->refcount)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, &crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}
}

/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures.
 */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active && i915.fastboot) {
			intel_mode_from_pipe_config(&crtc->base.mode, &crtc->config);
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
	}

	/* Turn off any PLL left on by the BIOS that no active CRTC uses. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(dev_priv, pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->primary->fb);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}

/*
 * Late modeset initialization, called once GEM is up: initialize GT
 * powersave, bring up the display hardware and overlay, and pin/fence any
 * framebuffers that were allocated during early startup (allocation happens
 * too early for pinning).  A framebuffer that fails to pin is dropped from
 * its plane.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	mutex_lock(&dev->struct_mutex);
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Default connector unregister hook: tear down the backlight before the
 * connector itself disappears from sysfs/userspace.
 */
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

/*
 * Driver unload: tear down the display side.  Ordering here is delicate —
 * see the comments on each step below.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of rps, connectors, ...) would
	 * experience fancy races otherwise.
	 */
	drm_irq_uninstall(dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm._irqs_disabled = true;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers.  Hence disable polling after hpd handling is shut
	 * down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

/*
 * Link @connector to @encoder, both in our own tracking and in the drm
 * core's connector/encoder mapping.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 *
 * Toggles the VGA-disable bit in the GMCH control word in the bridge
 * device's PCI config space (register location differs for gen >= 6).
 * Returns 0 on success (including when the state already matches), or
 * -EIO if the PCI config access fails.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Already in the requested state: nothing to write. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if 0
/*
 * Snapshot of display register state, captured at GPU-error time for
 * inclusion in the error state dump.  NOTE(review): this whole section is
 * compiled out (#if 0) in this port.
 */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/*
 * Capture current display register state into a freshly allocated
 * intel_display_error_state.  Called from error-capture context, hence the
 * GFP_ATOMIC allocation.  Pipes/transcoders whose power domain is off are
 * skipped rather than read (reading them could hang or return garbage).
 * Returns NULL when there are no pipes or allocation fails.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			intel_display_power_enabled_unlocked(dev_priv,
							     POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			intel_display_power_enabled_unlocked(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured intel_display_error_state into the
 * error-state buffer.  Mirrors the capture logic above: fields that were
 * not captured (power domain off, wrong gen) print their zero defaults.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif

/*
 * Called when a DRM file handle is closed: drop any pending page-flip
 * completion events owned by that file so a vblank completion cannot later
 * try to deliver an event to a dead file_priv.
 */
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_unpin_work *work;

		lockmgr(&dev->event_lock, LK_EXCLUSIVE);

		work = crtc->unpin_work;

		if (work && work->event &&
		    work->event->base.file_priv == file) {
			kfree(work->event);
			work->event = NULL;
		}

		lockmgr(&dev->event_lock, LK_RELEASE);
	}
}